seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71103649063 | import pygame
from pygame.rect import *
from pygame.locals import *

# Color palette (RGB).
BLACK = (0, 0, 0)
GRAY = (127, 127, 127)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)

SIZE = 600, 600
imagePath = "Capitulo_4/python.png"
running = True
moving = False
background = GRAY
width, height = SIZE
screenCenter = width // 2, height // 2

pygame.init()
screen = pygame.display.set_mode(SIZE)
img0 = pygame.image.load(imagePath)
# Convert the image to the display pixel format for faster rendering.
img0.convert()
scale = 1
angle = 0
rect0 = img0.get_rect()
pygame.draw.rect(img0, GREEN, rect0, 2)
img = img0
rect = img.get_rect()
rect.center = screenCenter

while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == KEYDOWN:
            if event.key == K_r:
                # R rotates +10 degrees; Shift+R rotates -10 degrees.
                if event.mod & KMOD_SHIFT:
                    angle -= 10
                else:
                    angle += 10
                img = pygame.transform.rotozoom(img0, angle, scale)
            elif event.key == K_s:
                # S zooms in by 10%; Shift+S zooms out.
                if event.mod & KMOD_SHIFT:
                    scale /= 1.1
                else:
                    scale *= 1.1
                img = pygame.transform.rotozoom(img0, angle, scale)
            elif event.key == K_h:
                # Horizontal flip of the currently transformed image.
                img = pygame.transform.flip(img, True, False)
            elif event.key == K_v:
                # Vertical flip.
                img = pygame.transform.flip(img, False, True)
            elif event.key == K_o:
                # O resets to the original, untransformed image.
                img = img0
                scale = 1
                angle = 0
            elif event.key == K_1 or event.key == K_KP1:
                img = pygame.transform.laplacian(img)
            elif event.key == K_2 or event.key == K_KP2:
                img = pygame.transform.scale2x(img)
            # Re-center the (possibly resized) image after any key action.
            rect = img.get_rect()
            rect.center = screenCenter
    screen.fill(GRAY)
    screen.blit(img, rect)
    if moving:
        pygame.draw.rect(screen, RED, rect, 4)
    else:
        pygame.draw.rect(screen, GREEN, rect, 4)
    pygame.display.update()
pygame.quit() | ArturBizon/estudo-pygame | Capitulo_4/ManipulacaoDeImagem.py | ManipulacaoDeImagem.py | py | 2,233 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.... |
6860598396 | # -*- coding: utf-8 -*-
# @Time : 2022/9/14
# @Author : youjiangyong
# @File : test_hotSpotRank.py
import allure
import pytest
from Common.Base import base
from Config.path_config import PathMessage
import os,jsonpath,datetime,random
def get_timestamp():
    """Return today's 09:00:00 (local time) as an integer Unix timestamp."""
    timestamp = datetime.datetime.now().replace(hour=9, minute=0, second=0, microsecond=0).timestamp()
    return int(timestamp)


# Debug output kept from the original module (runs at import time).
print(get_timestamp())
@allure.feature('官方热点榜')
@pytest.mark.flaky(reruns=5, reruns_delay=1)
class TestCase_Rank_hotSpotRank():
    """Checks that the official hot-spot rank API returns enough rows."""

    # One shared snapshot of today's 09:00 timestamp for all parametrized runs.
    timestamp = get_timestamp()

    @allure.story('验证官方热点榜遍历商品一级级分类返回的数据是否大于10条')
    @pytest.mark.parametrize('times', base.return_time_message_nowday())
    @pytest.mark.parametrize('timestamp', [timestamp])
    @allure.title("官方热点榜查看榜点:{timestamp}")
    def test_rank_promotionAweme_big_type(self, get_token, get_host, timestamp, times):
        # BUG FIX: the query string previously contained the mangled text
        # "×tamp=" (an HTML-entity corruption of "&times...") instead of
        # "&timestamp=", so the timestamp parameter was never sent correctly.
        para = f'day_type={times[0]}&date={times[1]}&page=1&size=50&type=1&timestamp={timestamp}'
        response = base().return_request(method="get", path=PathMessage.rank_hotSpotRank, data=para, tokens=get_token, hosts=get_host, )
        # print(response)
        assert len(response["response_body"]["data"]["list"]) > 10
        # aweme_id_list = jsonpath.jsonpath(response["response_body"], '$.data.list[*].aweme_id')
# print(aweme_id_list) | zhangmin123312/zhangmin | Testcase/aweme_material/test_rank_hotSpot.py | test_rank_hotSpot.py | py | 1,425 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "Common.Base.base",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "Config.... |
6995324450 | from lib.cuckoo.common.abstracts import Signature
# Standard Windows command-line utilities commonly abused for basic host
# reconnaissance and manipulation. Entries with a trailing space (paired with
# an explicit ".exe" variant) avoid substring matches inside longer names.
utilities = [
"at ",
"at.exe",
"attrib",
"chcp",
"del ",
"del.exe",
"dir ",
"dir.exe",
"driverquery",
"erase ",
"erase.exe",
"fsutil",
"getmac",
"ipconfig",
"nbtstat",
"net ",
"net.exe",
"netsh",
"netstat",
"nslookup",
"pathping",
"ping ",
"ping.exe",
"qwinsta",
"reg ",
"reg.exe",
"regsrv32",
"route",
"runas",
"rwinsta",
"sc ",
"sc.exe",
"schtasks",
"shutdown",
"sigverif",
"systeminfo",
"tasklist",
"taskkill",
"telnet",
"whoami",
"wmic",
"wusa",
]
# Higher-risk built-in tools: their presence on a command line is treated as
# more suspicious than the generic `utilities` list (severity 3 vs 2 below).
risk_utilities = [
"bitsadmin",
"cacls",
"csvde",
"dsquery",
"icacls",
"nltest",
"rexec",
"sdbinst",
"volumeid",
"vssadmin",
"wevtutil",
"whois",
"xcacls",
]
# Executable names from the Microsoft Sysinternals suite. Matched as
# substrings of lowercased command lines; trailing-space entries (with a
# ".exe" twin) avoid false matches inside longer words.
# FIX: removed a duplicate "bginfo" entry present in the original list.
sysinternals = [
    "accesschk",
    "accessenum",
    "adexplorer",
    "adinsight",
    "adrestore",
    "autologon",
    "autoruns",
    "bginfo",
    "bluescreen",
    "clockres",
    "contig",
    "coreinfo",
    "ctrl2cap",
    "debugview",
    "desktops",
    "disk2vhd",
    "diskext",
    "diskmon",
    "du ",
    "du.exe",
    "efsdump",
    "findlinks",
    "handle ",
    "handle.exe",
    "hex2dec",
    "junction",
    "ldmdump",
    "listdlls",
    "livekd",
    "loadorder",
    "logonsessions",
    "movefile",
    "notmyfault",
    "ntfsinfo",
    "pendmoves",
    "pipelist",
    "portmon",
    "procdump",
    "psexec",
    "psfile",
    "psgetsid",
    "psinfo",
    "pskill",
    "pslist",
    "psloggedon",
    "psloglist",
    "pspasswd",
    "psping",
    "psservice",
    "psshutdown",
    "pssuspend",
    "pstools",
    "rammap",
    "regdelnull",
    "ru ",
    "ru.exe",
    "regjump",
    "sdelete",
    "shareenum",
    "shellrunas",
    "sigcheck",
    "streams ",
    "streams.exe",
    "strings ",
    "strings.exe",
    "sync ",
    "sync.exe",
    "sysmon",
    "tcpview",
    "vmmap",
    "volumeid",
    "whois",
    "winobj",
    "zoomit",
]
class UsesWindowsUtilities(Signature):
    """Marks command lines that invoke common built-in Windows utilities."""

    name = "uses_windows_utilities"
    description = "Uses Windows utilities for basic Windows functionality"
    severity = 2
    categories = ["commands", "lateral"]
    authors = ["Cuckoo Technologies"]
    minimum = "2.0"
    ttp = ["T1053"]
    references = ["http://blog.jpcert.or.jp/2016/01/windows-commands-abused-by-attackers.html"]

    def on_complete(self):
        # Mark every observed command line containing a known utility name
        # (case-insensitive substring match).
        for cmdline in self.get_command_lines():
            for utility in utilities:
                if utility in cmdline.lower():
                    self.mark_ioc("cmdline", cmdline)
        return self.has_marks()
class SuspiciousCommandTools(Signature):
    """Marks command lines that invoke higher-risk built-in tools."""

    name = "suspicious_command_tools"
    description = "Uses suspicious command line tools or Windows utilities"
    severity = 3
    categories = ["commands", "lateral"]
    authors = ["Kevin Ross"]
    minimum = "2.0"

    def on_complete(self):
        for cmdline in self.get_command_lines():
            for utility in risk_utilities:
                if utility in cmdline.lower():
                    self.mark_ioc("cmdline", cmdline)
        return self.has_marks()
class SysInternalsToolsUsage(Signature):
    """Marks command lines that invoke Sysinternals suite executables."""

    name = "sysinternals_tools_usage"
    description = "Uses Sysinternals tools in order to add additional command line functionality"
    severity = 3
    categories = ["commands", "lateral"]
    authors = ["Kevin Ross"]
    minimum = "2.0"
    references = ["docs.microsoft.com/en-us/sysinternals/downloads/"]

    def on_complete(self):
        for cmdline in self.get_command_lines():
            for utility in sysinternals:
                if utility in cmdline.lower():
                    self.mark_ioc("cmdline", cmdline)
        return self.has_marks()
class AddsUser(Signature):
    """Marks `net ... user /add` command lines that create a local user."""

    name = "adds_user"
    description = "Uses windows command to add a user to the system"
    severity = 2
    categories = ["commands"]
    authors = ["Kevin"]
    minimum = "2.0"
    ttp = ["T1136"]

    def on_complete(self):
        for cmdline in self.get_command_lines():
            # Must start with "net" AND contain the user-creation arguments.
            if cmdline.lower().startswith("net") and "user /add" in cmdline.lower():
                self.mark_ioc("cmdline", cmdline)
        return self.has_marks()
class AddsUserAdmin(Signature):
    """Marks `net ... localgroup administrators` command lines."""

    name = "adds_user_admin"
    description = "Uses windows command to add a user to the administrator group"
    severity = 3
    categories = ["commands"]
    authors = ["Kevin"]
    minimum = "2.0"
    ttp = ["T1098"]

    def on_complete(self):
        for cmdline in self.get_command_lines():
            if cmdline.lower().startswith("net") and "localgroup administrators" in cmdline.lower():
                self.mark_ioc("cmdline", cmdline)
        return self.has_marks()
| cuckoosandbox/community | modules/signatures/windows/windows_utilities.py | windows_utilities.py | py | 4,845 | python | en | code | 312 | github-code | 36 | [
{
"api_name": "lib.cuckoo.common.abstracts.Signature",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "lib.cuckoo.common.abstracts.Signature",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "lib.cuckoo.common.abstracts.Signature",
"line_number": 176,
... |
40477310437 |
from django.urls import path, include
from estado.views import demonio_print
from estado.views import Estadoview,Estadoinsertar,Estadoeditar,Estadoeliminar,Partecuerpoview,Partecuerpoinsertar,Partecuerpoeditar,Partecuerpoeliminar,Detallecuerpoview,Detallecuerpoinsertar,Detallecuerpoeditar,Detallecuerpoeliminar,Demonioview,Demonioinsertar,Demonioeditar,Demonioeliminar,Batallaview,Batallainsertar,Batallaeditar,Batallaeliminar
# URL routes: one list/create/edit/delete quartet per model, plus two print
# views for Demonio. Route names encode the model initial (E, P, DC, B, D).
urlpatterns = [
# CRUD routes for Estado.
path('Estado', Estadoview.as_view(), name='Estados'),
path('Estado/new', Estadoinsertar.as_view(), name='AgregarE'),
path('Estado/edit/<int:pk>', Estadoeditar.as_view(), name='EditarE'),
path('Estado/delete/<int:pk>', Estadoeliminar.as_view(), name='EliminarE'),
# CRUD routes for Partecuerpo.
path('Partecuerpo', Partecuerpoview.as_view(), name='Partecuerpos'),
path('Partecuerpo/new', Partecuerpoinsertar.as_view(), name='AgregarP'),
path('Partecuerpo/edit/<int:pk>', Partecuerpoeditar.as_view(), name='EditarP'),
path('Partecuerpo/delete/<int:pk>', Partecuerpoeliminar.as_view(), name='EliminarP'),
# CRUD routes for Detallecuerpo.
path('Detallecuerpo', Detallecuerpoview.as_view(), name='Detallecuerpos'),
path('Detallecuerpo/new', Detallecuerpoinsertar.as_view(), name='AgregarDC'),
path('Detallecuerpo/edit/<int:pk>', Detallecuerpoeditar.as_view(), name='EditarDC'),
path('Detallecuerpo/delete/<int:pk>', Detallecuerpoeliminar.as_view(), name='EliminarDC'),
# Batalla list is the site root ('').
path('', Batallaview.as_view(), name='Batallas'),
path('Batalla/new', Batallainsertar.as_view(), name='AgregarB'),
path('Batalla/edit/<int:pk>', Batallaeditar.as_view(), name='EditarB'),
path('Batalla/delete/<int:pk>', Batallaeliminar.as_view(), name='EliminarB'),
# CRUD routes for Demonio, plus print views (all records / a single pk).
path('Demonio', Demonioview.as_view(), name='Demonios'),
path('Demonio/new', Demonioinsertar.as_view(), name='AgregarD'),
path('Demonio/edit/<int:pk>', Demonioeditar.as_view(), name='EditarD'),
path('Demonio/delete/<int:pk>', Demonioeliminar.as_view(), name='EliminarD'),
path('Demonio/print',demonio_print,name='demonio_print'),
path('Demonio/print/<int:pk>',demonio_print,name='demonio_print_one'),
]
| MrDavidAlv/FullStack_python_Django_postgreSQL | guerrero/estado/urls.py | urls.py | py | 2,109 | python | es | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "estado.views.Estadoview.as_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "estado.views.Estadoview",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": ... |
33610092826 | import pygame,sys
from pygame.locals import *
class MySprite(pygame.sprite.Sprite):
    """Sprite-sheet animation helper: loads a grid of frames from one image
    and cycles through them at a fixed rate in update()."""

    def __init__(self, target):
        pygame.sprite.Sprite.__init__(self)
        self.master_image = None   # full sprite sheet (set by load())
        self.frame = 0             # current frame index
        self.old_frame = -1        # last rendered frame (to skip redundant subsurface calls)
        self.frame_width = 1
        self.frame_heiget = 1      # NOTE: original attribute name (typo for "height") kept for compatibility
        self.first_frame = 0
        self.last_frame = 0
        self.columns = 1           # frames per row in the sheet
        self.last_time = 0         # timestamp of the last frame advance

    # X / Y / position expose rect coordinates as properties.
    def _getx(self): return self.rect.x
    def _setx(self, value): self.rect.x = value
    X = property(_getx, _setx)

    def _gety(self): return self.rect.y
    def _sety(self, value): self.rect.y = value
    Y = property(_gety, _sety)

    def _getpos(self): return self.rect.topleft
    def _setpos(self, pos): self.rect.topleft = pos
    position = property(_getpos, _setpos)

    def load(self, filename, width, height, columns):
        """Load the sprite sheet and derive the frame geometry."""
        # BUG FIX: the original ignored `filename` and always loaded
        # "pic/dragon.png"; now the requested file is loaded.
        self.master_image = pygame.image.load(filename).convert_alpha()
        self.frame_width = width
        self.frame_heiget = height
        self.rect = Rect(0, 0, width, height)
        self.columns = columns
        rect = self.master_image.get_rect()
        self.last_frame = (rect.width // width) * (rect.height // height - 1)

    def update(self, current_time, rate=30):
        """Advance to the next frame every `rate` ms and refresh self.image."""
        if current_time > self.last_time + rate:
            self.frame += 1
            if self.frame > self.last_frame:
                self.frame = self.first_frame
            self.last_time = current_time
        # Only re-extract the frame subsurface when the frame actually changed.
        if self.frame != self.old_frame:
            frame_x = (self.frame % self.columns) * self.frame_width
            frame_y = (self.frame // self.columns) * self.frame_heiget
            rect = Rect(frame_x, frame_y, self.frame_width, self.frame_heiget)
            self.image = self.master_image.subsurface(rect)
            self.old_frame = self.frame

    def __str__(self):
        return str(self.frame)+","+str(self.first_frame)+\
            ","+str(self.last_frame)+","+str(self.frame_width)+\
            ","+str(self.frame_heiget)+","+str(self.columns)+\
            ","+str(self.rect)
def print_text(font, x, y, text, color=(255, 255, 255)):
    """Render `text` with `font` and blit it onto the module-level screen at (x, y)."""
    imgText = font.render(text, True, color)
    screen.blit(imgText, (x, y))
pygame.init()
screen = pygame.display.set_mode((800, 600), 0, 32)
pygame.display.set_caption("Sprite Animation Demo")
font = pygame.font.Font(None, 18)
framerate = pygame.time.Clock()

# One animated dragon sprite: 260x150 frames, 3 columns per sheet row.
dragon = MySprite(screen)
dragon.load("pic/dragon.png", 260, 150, 3)
group = pygame.sprite.Group()
group.add(dragon)

while True:
    framerate.tick(30)
    ticks = pygame.time.get_ticks()
    for event in pygame.event.get():
        if event.type == QUIT:
            sys.exit()
    key = pygame.key.get_pressed()
    if key[pygame.K_ESCAPE]:
        sys.exit()
    screen.fill((0, 0, 100))
    group.update(ticks)
    group.draw(screen)
    print_text(font, 0, 0, "Sprite:" + str(dragon))
pygame.display.update() | Ywp185/Planewar_workhouse | MySprite.py | MySprite.py | py | 2,810 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.sprite",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pyga... |
24019479027 | # Author: Yuchen Liu HID213, Wenxuan Han HID209, Junjie Lu HID214
# Data: 2017.12.01
# Reference: http://blog.csdn.net/tinkle181129/article/details/55261251
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
from numpy import *
from sklearn import svm
from sklearn import tree
from sklearn.cross_validation import cross_val_score
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
# 1. Read data from csv
def read_data():
    """Load train.csv from the working directory.

    Returns a (data, label) pair: pixel columns and the first (label) column.
    """
    data_set = pd.read_csv("train.csv")
    data = data_set.values[0:, 1:]
    label = data_set.values[0:, 0]
    print("Data load completed.")
    return data, label
# Plot a 7x10 grid of the first 70 digit samples and save it to disk.
def show_pic(data):
    """Render the first 70 samples as 28x28 images into data_samples.png."""
    print(shape(data))
    plt.figure(figsize=(7, 7))
    for digit_num in range(0, 70):
        plt.subplot(7, 10, digit_num + 1)
        # Each row is a flattened 28x28 grayscale digit image.
        grid_data = data[digit_num].reshape(28, 28)
        plt.imshow(grid_data, interpolation="none", cmap="afmhot")
        plt.xticks([])
        plt.yticks([])
    plt.tight_layout()
    plt.savefig("data_samples.png")
# 2. Data Cleaning
# The data is from 0-255 for each cell.
# Normalize data by setting all values > 0 to 1.
def data_clean(data):
    """Binarize pixel data: positive cells become 1.0, the rest 0.0."""
    # Vectorized replacement of the original per-cell double loop; returns a
    # float array just like the original zeros((m, n)) implementation.
    new_data = (asarray(data) > 0).astype(float)
    print("Data clean completed.")
    return new_data
# 3. Feature Selection by PCA
def feature_selection(data):
    """Reduce `data` with PCA, choosing the component count automatically.

    The component count is the index of the first component whose explained
    variance falls below 5% of the first component's.
    """
    # First, fit a full PCA just to inspect the explained variance.
    pca = PCA()
    pca.fit(data)
    ev = pca.explained_variance_
    ev_ratio = []
    for i in range(len(ev)):
        ev_ratio.append(ev[i] / ev[0])
    # Pick the first index whose ratio drops under 0.05.
    # NOTE(review): if no ratio ever drops below 0.05, n stays 0 and
    # PCA(n_components=0) is fit — confirm inputs always decay enough.
    n = 0
    for i in range(len(ev_ratio)):
        if ev_ratio[i] < 0.05:
            n = i
            break
    # Then re-fit PCA with the chosen number of components.
    pca = PCA(n_components=n, whiten=True)
    print("Feature selection completed.")
    return pca.fit_transform(data)
# 4. Model Selection
def model_acc(data, label, model):
    """Print wall-clock time and 5-fold cross-validated accuracy of `model`."""
    start = datetime.now()
    acc = cross_val_score(model, data, label, cv=5, scoring="accuracy").mean()
    end = datetime.now()
    # .seconds (not total_seconds) kept from the original; fine for runs < 1 day.
    time_use = (end - start).seconds
    print("Time use: ", time_use)
    print("Accuracy by cross validation: ", acc)
def dt_classifier(data, label, data_type):
    """Fit a decision tree and report cross-validated accuracy."""
    # BUG FIX: the original used tree.DecisionTreeRegressor, whose continuous
    # predictions cannot be scored with scoring="accuracy" in model_acc;
    # this is a classification task, so use the classifier variant.
    dt_model = tree.DecisionTreeClassifier()
    dt_model.fit(data, label)
    print("Test " + data_type + " using DT: ")
    model_acc(data, label, dt_model)
def nb_classifier(data, label, data_type):
    """Fit Gaussian Naive Bayes and report cross-validated accuracy."""
    nb_model = GaussianNB()
    nb_model.fit(data, label)
    print("Test " + data_type + " using NB: ")
    model_acc(data, label, nb_model)


def lr_classifier(data, label, data_type):
    """Fit logistic regression and report cross-validated accuracy."""
    lr_model = LogisticRegression()
    lr_model.fit(data, label)
    print("Test " + data_type + " using LR: ")
    model_acc(data, label, lr_model)


def rf_classifier(data, label, flag):
    """Fit a 100-tree random forest and report cross-validated accuracy."""
    rf_model = RandomForestClassifier(n_estimators=100)
    rf_model.fit(data, label)
    print("Test " + flag + " using RF: ")
    model_acc(data, label, rf_model)


def svm_classifier(data, label, flag):
    """Fit an RBF-kernel SVM (C=10) and report cross-validated accuracy."""
    svm_model = svm.SVC(kernel="rbf", C=10)
    svm_model.fit(data, label)
    # svc_clf = NuSVC(nu=0.1, kernel='rbf', verbose=True)
    print("Test " + flag + " using SVM: ")
    model_acc(data, label, svm_model)
def main():
    """Load the digit data, clean/transform it per test_type, then benchmark
    every classifier. Runs the benchmark twice (range(1, 3))."""
    data, label = read_data()
    # show_pic(data)
    clean_data = data_clean(data)
    # 0: raw, 1: cleaned, 2: PCA on raw, 3: PCA on cleaned.
    test_type = 3
    for i in range(1, 3):
        print("In %d test" % i)
        if test_type == 0:
            input_data = data
            desc = "raw data"
        elif test_type == 1:
            input_data = clean_data
            desc = "clean data"
        elif test_type == 2:
            input_data = feature_selection(data)
            desc = "pca data"
        elif test_type == 3:
            input_data = feature_selection(clean_data)
            desc = "pca clean data"
        # Renamed the local from `str` to `desc` — the original shadowed the
        # builtin str() inside this function.
        dt_classifier(input_data, label, desc)
        nb_classifier(input_data, label, desc)
        lr_classifier(input_data, label, desc)
        rf_classifier(input_data, label, desc)
        svm_classifier(input_data, label, desc)


main()
| bigdata-i523/hid209 | project/code/523Project.py | 523Project.py | py | 4,677 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib... |
9144803761 | import pytest
from qhub.render import render_default_template
@pytest.mark.parametrize('config_filename', [
    'tests/assets/config_aws.yaml',
    'tests/assets/config_gcp.yaml',
    'tests/assets/config_do.yaml',
])
def test_render(config_filename, tmp_path):
    """Render the default template for each cloud-provider config into a
    fresh temporary directory (smoke test: must not raise)."""
    output_directory = tmp_path / 'test'
    output_directory.mkdir()
    render_default_template(str(output_directory), config_filename)
| terminal-labs/qhub-kubernetes | tests/test_render.py | test_render.py | py | 404 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "qhub.render.render_default_template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
}
] |
28557630081 | from pymongo import MongoClient, InsertOne
import logging
from collections import defaultdict
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from config import TTL_BATCH_SIZE, DB_NAME

client = MongoClient()
db = client[DB_NAME]
coll_path = db.cname_real_ip
new_coll = db.ttl_real
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

cnt = 0
requests = []
finished = set()
# Collect the ids already written in a previous run so they can be skipped.
for doc in new_coll.find({}, {}):
    finished.add(doc["_id"])
logging.info("Finished %d" % len(finished))

for doc in coll_path.find():
    if doc["_id"] in finished:
        continue
    # Map TTL value -> set of IPs observed with that TTL (A and CNAME records).
    ttl = defaultdict(set)
    new_doc = dict()
    for rec in doc["A"]:
        # ip = rec[1][0][2][0]
        for line in rec[1]:
            ttl[str(rec[2])] |= set(line[2])
    for rec in doc["CNAMES"]:
        for line in rec[1]:
            ttl[str(rec[2])] |= set(line[2])
    for ttl_time in ttl:
        # Find the IP whose domain set (containing this domain) is smallest —
        # that size is the anonymity-set estimate for this TTL.
        ip_set = ttl[ttl_time]
        min_set = 1000000
        min_ip = None
        for ip in ip_set:
            tmp_doc = db.ip_domain.find_one({"_id": ip})
            tmp_an_set = set(tmp_doc["domain_set"])
            if doc["_id"] in tmp_an_set:
                tmp_size = tmp_doc["sz"]
                if tmp_size < min_set:
                    min_set = tmp_size
                    min_ip = ip
        if min_set < 1000000:
            ttl[ttl_time] = [min_set, min_ip]
        else:
            ttl[ttl_time] = "NA"
    new_doc["_id"] = doc["_id"]
    new_doc["qname"] = doc["qname"]
    new_doc["ttl"] = ttl
    cnt += 1
    requests.append(InsertOne(new_doc))
    if cnt % TTL_BATCH_SIZE == 0:
        new_coll.bulk_write(requests)
        requests = []
        logging.info(cnt)

# FIX: guard the final flush — pymongo raises InvalidOperation when
# bulk_write is called with an empty operation list (e.g. when the document
# count is an exact multiple of TTL_BATCH_SIZE, or nothing was left to do).
if requests:
    new_coll.bulk_write(requests)
| liangz1/workflow | 5_ttl_analysis/2_infer_anonymity_set_size.py | 2_infer_anonymity_set_size.py | py | 1,799 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
29307491629 | import time, math, config
from machine import Pin, ADC
# Pump-motor enable pin (toggled by purge()).
mtr_enable = Pin('P20', mode=Pin.OUT)
# Pressure-sensor power enable pin (toggled on/off around readings in get()).
snsr_enable = Pin('P19', mode=Pin.OUT)
adc = ADC()
# Calibrated ADC reference voltage, taken from the project config.
adc.vref(config.level_vref)
# Sensor analog input on P13; 11 dB attenuation selects the widest input range.
sensor = adc.channel(pin='P13', attn=ADC.ATTN_11DB)
def purge(purge_for=1):
    """Purge air from the tube for the given number of seconds."""
    # Toggle the pump on, wait, then toggle it back off.
    mtr_enable.toggle()
    time.sleep(purge_for)
    mtr_enable.toggle()
def get():
    """Take a water-level reading (in mm) from the pressure sensor.

    Averages 100 samples to mitigate ADC noise; never returns less than 0.
    """
    # Turn the sensor on and wait for it to boot up.
    snsr_enable.toggle()
    time.sleep(1)
    # Minimum sensor voltage reading (millivolts). In theory 330.00, but the
    # configured offset may be tuned empirically.
    zero_offset = config.level_zero_offset or 330.00
    print('Offset = %s' % zero_offset)
    Vsupply = 3300.00       # Supply voltage to sensor (millivolts)
    pmax = 5.00             # Maximum sensor operating range (psi)
    psi_to_pa = 6894.76     # Conversion factor from psi to Pa
    g = 9.81                # Gravitational acceleration @ sea level (m/s^2)
    rho = 1000.00           # Density of water (kg/m^3)
    # Take a bunch of readings and average them — the ESP32 ADC is noisy.
    reading = []
    print('start reading...')
    for i in range(100):
        # Water pressure in Pa from the sensor voltage.
        water_pressure = pmax * psi_to_pa * (sensor.voltage() - zero_offset) / (0.8 * Vsupply)
        # Water depth (mm) from pressure.
        mm = round((1000 * water_pressure) / (rho * g))
        reading.append(mm)
        print('{}mm'.format(mm))
        # Wait a tick for the next sample.
        time.sleep(0.05)
    # Turn the sensor off.
    snsr_enable.toggle()
    print('end reading')
    rounded = math.ceil(sum(reading) / len(reading))
    # Clamp negative averages to zero.
    reading = rounded if rounded >= 0 else 0
    return reading
def put(last_level):
    """Persist the given level reading to the configured file."""
    # Context manager guarantees the file handle is closed even on error
    # (the original left the handle open if write() raised).
    with open(config.last_level_file, 'w') as file:
        file.write('value = %s' % last_level)
def last():
    """Return the previously stored level reading, or 0 if unavailable."""
    output = {}
    try:
        # NOTE(review): execfile is Python 2 / platform-specific — confirm the
        # target firmware provides it before porting to CPython 3.
        execfile(config.last_level_file, None, output)
        return output['value']
    except Exception:
        # Narrowed from a bare except so system-exiting exceptions propagate;
        # any read/parse failure still falls back to 0.
        return 0
| tobz-nz/firmware | lib/level.py | level.py | py | 2,374 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "machine.Pin",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "machine.Pin.OUT",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "machine.Pin",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "machine.Pin.OUT",
"line... |
17631143198 |
import pandas as pd
import numpy as np
# import seaborn as sb
import matplotlib.pyplot as plt
import statsmodels.formula.api as sm
from sklearn.model_selection import train_test_split  # train and test
from sklearn import metrics
from sklearn.metrics import classification_report

# Importing data
claimants1 = pd.read_csv("C:/Datasets_BA/Logistic regression/claimants.csv", sep=",")

# Removing CASENUM (a row identifier, not a predictor)
claimants1 = claimants1.drop('CASENUM', axis=1)
claimants1.head(11)
claimants1.isna().sum()

# Imputing the missing values
########## Median Imputation ############
claimants1.fillna(claimants1.median(), inplace=True)
claimants1.isna().sum()

### Splitting the data into train and test data (30% test)
train_data, test_data = train_test_split(claimants1, test_size=0.3)

# Model building
logit_model = sm.logit('ATTORNEY ~ CLMAGE + LOSS + CLMINSUR + CLMSEX + SEATBELT', data=train_data).fit()
# summary
logit_model.summary2()  # for AIC
logit_model.summary()

# Prediction on the training set
train_pred = logit_model.predict(train_data.iloc[:, 1:])

# Creating the predicted-class column, initially all zeros.
# FIX: sized from the split instead of the hard-coded 938.
train_data["train_pred"] = np.zeros(len(train_data))
# Taking threshold value 0.5: probabilities above it are classed as 1.
train_data.loc[train_pred > 0.5, "train_pred"] = 1

# classification report
classification = classification_report(train_data["train_pred"], train_data["ATTORNEY"])
classification

# confusion matrix
confusion_matrx = pd.crosstab(train_data.train_pred, train_data['ATTORNEY'])
confusion_matrx

# BUG FIX: accuracy was previously computed from hard-coded confusion-matrix
# counts ((308 + 358) / 938), which are only valid for one random split.
accuracy_train = (train_data.train_pred == train_data['ATTORNEY']).mean()
print(accuracy_train)

# ROC CURVE AND AUC
fpr, tpr, threshold = metrics.roc_curve(train_data["ATTORNEY"], train_pred)
# PLOT OF ROC
plt.plot(fpr, tpr);plt.xlabel("False positive rate");plt.ylabel("True positive rate")
roc_auc = metrics.auc(fpr, tpr)

# Prediction on the test data set
test_pred = logit_model.predict(test_data)

# Creating the predicted-class column for ATTORNEY, initially all zeros.
test_data["test_pred"] = np.zeros(len(test_data))
test_data.loc[test_pred > 0.5, "test_pred"] = 1

# confusion matrix
confusion_matrix = pd.crosstab(test_data.test_pred, test_data['ATTORNEY'])
confusion_matrix

# FIX: computed rather than hard-coded ((124 + 158) / 402).
accuracy_test = (test_data.test_pred == test_data['ATTORNEY']).mean()
accuracy_test

# Based on the ROC curve a cut-off around 0.60 could be selected and the
# accuracy re-checked.
| mdshaji/Logistic_Regression | LogisticRegression_Claimants.py | LogisticRegression_Claimants.py | py | 2,573 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "statsmodels.formula.api.logit",
"line_number": 33,
"usage_type": "call"
},
{... |
11090983949 | import setuptools
from setuptools import find_packages
# Distribution name passed to setuptools.setup().
_name = "data_streaming_pipeline"
# GitHub repository name (differs from the package name) — used in the URL.
_repo_name = "realtime_data_streaming_pipeline"
_license = 'Proprietary: Internal use only'
_description = "Realtime Data pipeline and Web application"
_github_username = "NourSamir"
setuptools.setup(
name=_name,
version='1.0.0',
description=_description,
license=_license,
url=f'https://github.com/{_github_username}/{_repo_name}.git',
author='Nour Samir',
author_email='Noursamir96@gmail.com',
python_requires='>=3.7',
classifiers=[
'Development Status :: Alpha',
'Environment :: cli',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Utilities',
],
packages=find_packages(exclude=['tests']),
install_requires=[
"Flask",
"Flask-Cors",
"python-dotenv",
"kafka-python",
"redis",
],
test_suite='unittest.TestCase',
include_package_data=True,
entry_points={
'console_scripts': [
"run_app_service = app_service.main:main",
"run_metrics_calculation_flow = metrics_calculation_flow.main:main",
"run_data_transformation_flow = data_transformation_flow.main:main",
"run_producer_app = producer_app.main:main",
],
},
) | NourSamir/realtime_data_streaming_pipeline | setup.py | setup.py | py | 1,440 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 30,
"usage_type": "call"
}
] |
4942345877 | import numpy as np
import pygame
from tile import Tile
import algorithme
from button import Button
from timer import Timer
class Board:
def __init__(
    self,
    name: str,                    # grid name
    window: pygame.Surface,       # display surface
    matrix: np.ndarray,           # matrix holding the sudoku grid
    progress: np.ndarray,         # matrix holding the player's entries
    notes: np.ndarray,            # matrix holding the player's notes
    noteMode,                     # whether note mode is enabled
    errors: int,                  # error counter
    solution: np.ndarray,         # matrix holding the grid's solution
    elapsed_time: int = 0,        # elapsed time since the game started
    play=True,                    # playable grid vs. solution-viewing grid
    font="Source Sans Pro"        # font family
):
    """Build the board state, display sub-surfaces and Tile grid."""
    self.active = True            # when True, this board is drawn on screen
    self.pause = False            # pause mode (stops the timer)
    self.play = play
    self.name = name
    self.init_board = np.array(matrix)            # starting grid
    self.size = self.init_board.shape[0]          # grid dimension
    self.bloc_size = int(self.size**0.5)          # size of a grid section
    self.cell_size = int(180 // self.bloc_size)   # pixel size of one cell
    self.board_size = int(self.size * self.cell_size)  # grid size in pixels
    self.current_tile = None                      # currently selected tile
    # Offset that centers the grid in a 1280x720 window.
    self.offset = (
        int((1280 - self.board_size) / 2),
        int((720 - self.board_size) / 2),
    )
    self.error_count = errors
    self.solution = np.array(solution)
    self.screen = window
    # Sub-surface covering just the grid area.
    self.window = window.subsurface(
        pygame.Rect(*self.offset, self.board_size, self.board_size)
    )
    # One Tile per cell; a tile starts "valid" when it already matches the solution.
    self.tiles = [
        [
            Tile(
                i * self.cell_size,
                j * self.cell_size,
                progress[i][j],
                self.init_board[i][j],
                notes[i][j] if len(notes) > 0 else [],
                self.solution[i][j] == progress[i][j],
                self.window,
                self.cell_size,
                self.bloc_size,
            )
            for j in range(self.size)
        ]
        for i in range(self.size)
    ]
    # Undo history of tile snapshots.
    self.history = []
    self.history.append(self.tiles)
    self.noteMode = noteMode
    # Sub-surface where the error counter is drawn.
    self.error_rect = window.subsurface(pygame.Rect(45, 150, 200, 30))
    self.back_menu = None     # back-to-menu button
    self.new_game = None      # new-game button
    self.menus = None         # tuple of the application's menus
    self.font = font
    self.create_buttons(elapsed_time)
    self.pause_lock = False
def is_running(self):
    """True while a playable, active, un-paused game is in progress."""
    return self.play and self.active and not self.pause
def draw_board(self):
    """Draw tiles, section separators, the error counter, every button, and
    handle the loss (3 errors) and win (grid complete) end states."""
    for i in range(self.size):
        for j in range(self.size):
            self.tiles[i][j].draw()
            self.tiles[i][j].display()
    # Thick black lines separating the sudoku sections.
    for i in range(1, int(self.bloc_size)):
        pygame.draw.line(
            self.window,
            (0, 0, 0),
            (i * self.cell_size * self.bloc_size, 0),
            (i * self.cell_size * self.bloc_size, self.board_size),
            3,
        )
        pygame.draw.line(
            self.window,
            (0, 0, 0),
            (0, i * self.cell_size * self.bloc_size),
            (self.board_size, i * self.cell_size * self.bloc_size),
            3,
        )
    # Error counter ("Erreurs : n/3"), centered in its sub-surface.
    font = pygame.font.SysFont("arial", 20)
    text = font.render(f"Erreurs : {self.error_count}/3", True, "black")
    self.error_rect.blit(
        text, text.get_rect(center=self.error_rect.get_rect().center)
    )
    pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_ARROW)
    self.note_button.process()
    self.back_button.process()
    self.back_home.process()
    self.timer_button.process()
    self.pause_button.process()
    self.big_play_button.process()
    if self.play:
        # Loss: three errors stop the timer and show the loss popup.
        if self.error_count == 3:
            self.timer_button.stop = True
            self.popup("Vous avez fait trop d'erreurs")
            self.back_menu.process()
            self.new_game.process()
        # Count correctly filled cells to detect a win.
        compteur = 0
        for i in range(self.size):
            for j in range(self.size):
                if self.tiles[i][j].value == self.solution[i][j]:
                    compteur += 1
        self.timer_button.stop = not self.is_running() or compteur == self.size * self.size
        if compteur == self.size * self.size:
            self.popup("Vous avez gagné")
            self.back_menu.process()
            self.new_game.process()
def get_tile(self, x, y) -> Tile:
    """Return the Tile at screen coordinates (x, y), or None if outside the grid."""
    # Convert screen coordinates to board-local coordinates.
    x -= self.offset[0]
    y -= self.offset[1]
    if 0 <= x < self.board_size and 0 <= y < self.board_size:
        return self.tiles[y // self.cell_size][x // self.cell_size]
    return None
def select_tile(self, tile: Tile): # Permet de sélectionner une case spécifique
pos = tile.get_pos() if tile is not None else (-1, -1)
for i in range(self.size):
for j in range(self.size):
self.tiles[i][j].selected = False
self.tiles[i][j].background_color = "white"
if tile is not None:
if (self.tiles[i][j].value != "0" or (i, j) == pos) and self.tiles[i][
j
].value == tile.value:
self.tiles[i][j].background_color = "green"
elif (
i == pos[0]
or j == pos[1]
or (i // self.bloc_size, j // self.bloc_size)
== (pos[0] // self.bloc_size, pos[1] // self.bloc_size)
):
self.tiles[i][j].background_color = "gray"
if tile is not None:
tile.selected = True
self.current_tile = tile
def move_tile(self, event): # Permet de sélectionner des cases avec les flèches directionnelles
if self.current_tile is None:
return
x, y = self.current_tile.get_pos()
if event.key == pygame.K_UP and x > 0:
x -= 1
elif event.key == pygame.K_DOWN and x < self.size - 1:
x += 1
elif event.key == pygame.K_LEFT and y > 0:
y -= 1
elif event.key == pygame.K_RIGHT and y < self.size - 1:
y += 1
self.select_tile(self.tiles[x][y])
def enter_char(self, char: str): # Permet de saisir un caractère dans la case sélectionnée
if self.is_running():
if char not in algorithme.get_allowed_characters(self.size):
return
for i in range(self.size):
for j in range(self.size):
if self.tiles[i][j].selected and self.init_board[i][j] == "0":
temp = [[t.clone() for t in self.tiles[i]] for i in range(len(self.tiles))]
self.tiles = temp
if self.noteMode:
if char != "0":
self.tiles[i][j].value = "0"
if char in self.tiles[i][j].notes:
self.tiles[i][j].notes.remove(char)
else:
self.tiles[i][j].notes.append(char)
elif not self.tiles[i][j].valid:
self.tiles[i][j].notes.clear()
self.tiles[i][j].value = char
if self.solution[i][j] == char:
self.tiles[i][j].valid = True
else:
self.tiles[i][j].valid = False
self.error_count += 1 if char != "0" else 0
self.select_tile(self.tiles[i][j])
else:
return
self.history.append(temp)
return
def save(self): # Permet de sauvegarder la partie en cours
with open(f"./grids/{self.name}.sudoku", "w") as f:
f.writelines(
[",".join([e for e in line]) + "\n" for line in self.init_board]
)
f.write(f"PROGRESS:{self.error_count}\n")
f.writelines(
[",".join([tile.value for tile in line]) + "\n" for line in self.history[-1]]
)
f.write(f"NOTES:{self.noteMode}\n")
f.writelines(
[
",".join(["|".join([note for note in tile.notes]) for tile in line])
+ "\n"
for line in self.tiles
]
)
f.write(f"TIMER:{self.timer_button.elapsed_time}\n")
def create_buttons(self, elapsed_time): # Permet de créer les boutons sur le plateau de jeu
def note_btn_f():
if self.play:
self.noteMode = not self.noteMode
self.note_button.image = pygame.image.load("./resources/note-button-" + ("on" if self.noteMode else "off") + ".png")
self.note_button: Button = Button(
1070,
90,
self.screen,
70,
70,
fontColor="#147DE6",
fontSize=20,
buttonText="Note",
onclickFunction=note_btn_f,
image="./resources/note-button-" + ("on" if self.noteMode else "off") + ".png",
fillColors={
"normal": "#ffffff",
"hover": "#ffffff",
"pressed": "#ffffff",
},
textOffset=(-10, 50)
)
def back_board_f():
if self.play and len(self.history) > 1:
self.history.pop(-1)
self.tiles = self.history[-1]
self.back_button: Button = Button(
1170,
90,
self.screen,
70,
70,
fontColor="#147DE6",
fontSize=20,
buttonText="Retour",
onclickFunction=back_board_f,
image="./resources/back.png",
fillColors={
"normal": "#ffffff",
"hover": "#ffffff",
"pressed": "#ffffff",
},
textOffset=(0, 51)
)
def back_menu_f():
self.save()
self.active = False
self.back_home: Button = Button(
1050,
30,
self.screen,
200,
30,
"#147DE6",
buttonText="Retour au menu principal",
fontSize=20,
onclickFunction=back_menu_f,
)
self.timer_button: Timer = Timer(
100,
50,
self.screen,
100,
50,
"black",
buttonText="",
fontSize=30,
start_time=pygame.time.get_ticks(),
elapsed_time=elapsed_time,
stop=not self.is_running()
)
def pause_f():
if self.play:
if self.pause:
self.pause_lock = True
self.pause = not self.pause
self.pause_button.image = pygame.image.load("./resources/" + ("play" if self.pause else "pause") + "-button.png")
self.big_play_button.visible = self.pause
self.select_tile(None)
self.pause_button: Button = Button(
130,
100,
self.screen,
32,
32,
"black",
buttonText=None,
image="./resources/pause-button.png",
onclickFunction=pause_f
)
self.big_play_button: Button = Button(
self.offset[0] + self.board_size // 2 - 64,
self.offset[1] + self.board_size // 2 - 64,
self.screen,
128,
128,
"black",
buttonText=None,
image="./resources/play.png",
fillColors={
"normal": None,
"hover": None,
"pressed": None
},
onclickFunction=pause_f,
visible=False
)
def popup(self, message): # Affiche des messages contextuels
self.pause = True
gray_surface = pygame.Surface(
(self.screen.get_width(), self.screen.get_height())
)
gray_surface.set_alpha(200)
gray_surface.fill((0, 0, 0))
self.screen.blit(gray_surface, (0, 0))
message_rect = pygame.Rect(
self.screen.get_width() // 2 - 200,
self.screen.get_height() // 2 - 100,
400,
200,
)
pygame.draw.rect(self.screen, (250, 250, 250), message_rect, border_radius=20)
font = pygame.font.SysFont("arial", 30)
font2 = pygame.font.SysFont("arial", 20)
text = font.render("Partie terminée", True, (0, 0, 0))
text2 = font2.render(message, True, (186, 186, 186))
text_rect = text.get_rect(center=message_rect.center)
text_rect = text_rect.move(0, -35)
self.screen.blit(text, text_rect)
text_rect = text.get_rect(center=message_rect.center)
text_rect = text_rect.move(-10, 5)
self.screen.blit(text2, text_rect)
def back_menu_f():
self.save()
self.active = False
self.back_menu: Button = (
self.back_menu
if self.back_menu is not None
else Button(
message_rect.left + 45,
message_rect.bottom - 80,
self.screen,
155,
40,
"white",
buttonText="Retour à l'accueil",
fontSize=20,
onclickFunction=back_menu_f,
fillColors={
"normal": "#0048f9",
"hover": "#666666",
"pressed": "#0093f9",
},
borderRadius=10,
)
)
def generate_menu_f():
self.save()
self.active = False
self.menus[0]._open(self.menus[2])
self.new_game: Button = (
self.new_game
if self.new_game is not None
else Button(
message_rect.right - 195,
message_rect.bottom - 80,
self.screen,
145,
40,
"black",
buttonText="Nouvelle partie",
fontSize=20,
onclickFunction=generate_menu_f,
fillColors={
"normal": "#ffffff",
"hover": "#dadada",
"pressed": "#ffffff",
},
borderRadius=10,
)
)
| Affell/depinfo_sudoku | board.py | board.py | py | 15,941 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.Surface",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarr... |
9210248768 | import discord
from redbot.core import commands, checks
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
from .core import Core
class Space(Core):
"""Show pics of space."""
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
async def spaceset(self, ctx: commands.Context):
"""Group commands for Space cog settings."""
@spaceset.command()
async def autoapod(self, ctx: commands.Context, channel: discord.TextChannel = None):
"""
Choose if you want to automatically receive \"Astronomy Picture of the Day\" every day.
Set to actual channel by default. You can also use `[p]spaceset autoapod <channel_name>` if you want to receive APOD in others channels.
Use the same command to disable it.
"""
channel = ctx.channel if not channel else channel
auto_apod = await self.config.channel(channel).auto_apod()
await self.config.channel(channel).auto_apod.set(not auto_apod)
msg = (
"I will now automatically send Astronomy Picture of the Day every day in this channel."
if not auto_apod
else "No longer sending Astronomy Picture of the Day every day in this channel."
)
await channel.send(msg)
if not auto_apod:
data = await self.get_data("https://api.martinebot.com/images/apod")
apod_text = await self.apod_text(data, channel)
await self.maybe_send_embed(channel, apod_text)
await self.config.channel(channel).last_apod_sent.set(data["date"])
await ctx.tick()
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def apod(self, ctx: commands.Context):
"""Astronomy Picture of the Day."""
async with ctx.typing():
apod_text = await self.apod_text(
await self.get_data("https://api.martinebot.com/images/apod"),
ctx,
)
return await self.maybe_send_embed(ctx, apod_text)
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def spacepic(self, ctx: commands.Context, *, query: str):
"""
Lookup pictures from space!
Note - Some pictures are from presentations and other educational talks
"""
async with ctx.typing():
query = self.escape_query("".join(query))
space_data = await self.get_space_pic_data(ctx, query)
if space_data is None:
await ctx.send(f"Looks like you got lost in space looking for `{query}`")
return
if space_data is False:
return
if query.lower() == "star wars":
await ctx.send(self.star_wars_gifs())
return
pages = []
total_pages = len(space_data) # Get total page count
for c, i in enumerate(space_data, 1): # Done this so I could get page count `c`
space_data_clean = i.replace(" ", "%20")
embed = discord.Embed(
title="Results from space",
description=f"Query was `{query}`",
color=await ctx.embed_color(),
)
embed.set_image(url=space_data_clean)
embed.set_footer(text=f"Page {c}/{total_pages}")
# Set a footer to let the user
# know what page they are in
pages.append(embed)
# Added this embed to embed list that the menu will use
return await menu(ctx, pages, DEFAULT_CONTROLS)
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def isslocation(self, ctx: commands.Context):
"""Show the Current location of the ISS."""
async with ctx.typing():
data = await self.get_data("http://api.open-notify.org/iss-now.json")
if not data:
await ctx.send("I can't get the data from the API. Try again later.")
return
embed = discord.Embed(
title="Current location of the ISS",
description="Latitude and longitude of the ISS",
color=await ctx.embed_color(),
)
embed.add_field(name="Latitude", value=data["iss_position"]["latitude"], inline=True)
embed.add_field(name="Longitude", value=data["iss_position"]["longitude"], inline=True)
embed.set_thumbnail(url="https://photos.kstj.us/GrumpyMeanThrasher.jpg")
return await ctx.send(embed=embed)
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def astronauts(self, ctx: commands.Context):
"""Show who is currently in space."""
async with ctx.typing():
data = await self.get_data("http://api.open-notify.org/astros.json")
if not data:
await ctx.send("I can't get the data from the API. Try again later.")
return
astrosnauts = []
for astros in data["people"]:
astrosnauts.append(astros["name"])
embed = discord.Embed(title="Who's in space?", color=await ctx.embed_color())
embed.add_field(name="Current Astronauts in space", value="\n".join(astrosnauts))
return await ctx.send(embed=embed)
| kennnyshiwa/kennnyshiwa-cogs | space/space.py | space.py | py | 5,376 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "core.Core",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "redbot.core.commands.Context",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.commands",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "redbo... |
40657050860 | import tensorflow as tf
import tensorlayer as tl
import numpy as np
import scipy
import time
import math
import argparse
import random
import sys
import os
from termcolor import colored, cprint
from model_dual import model_dual as model_net
import dataLoad_dual as dataLoad
from time import gmtime, strftime
import progressbar
from tensorflow.python import debug as tf_debug
parser = argparse.ArgumentParser(description="Run the NN for particle simulation")
parser.add_argument('datapath')
parser.add_argument('outpath')
parser.add_argument('-gpu', '--cuda-gpus')
parser.add_argument('-bs', '--batch-size', type = int, default = 64)
parser.add_argument('-ds', '--delta-step', type = int, default = 1, help = "How many steps ds will the network predict, step i -> step (i+ds), 0 for identity test")
parser.add_argument('-ls', '--latent-step', type = int, default = 1, help = "How many ds will the network simulate in latent space before predicting final result (decoding)")
parser.add_argument('-ss', '--start-step', type = int, default = 0, help = "Which step to start")
parser.add_argument('-conly', '--card-only', dest = 'card_only', action = 'store_const', default = False, const = True, help = "Only predicting card (use density map visualizer instead)")
parser.add_argument('-simit', '--sim-iter', type = int, default = 130, help = "How many steps (= latent steps) to simulate ( total steps = simit * ls * ds )")
parser.add_argument('-vm', '--velocity-multiplier', type = float, default = 1.0, help = "Multiplies the velocity by this factor")
parser.add_argument('-zdim', '--latent-dim', type = int, default = 256, help = "Length of the latent vector")
parser.add_argument('-res', '--res-size', type = int, default = 4, help = "Length of res layers (res block stacks)")
parser.add_argument('-maxpool', '--maxpool', dest = 'combine_method', action='store_const', default = tf.reduce_mean, const = tf.reduce_max, help = "use Max pooling instead of sum up for permutation invariance")
parser.add_argument('-size', '--size', type = int, default = 2560, help = "Total amount of particles we are going to deal with")
parser.add_argument('-vSize', '--voxel-size', type = int, default = 32, help = "Max amount of particles in a voxel")
parser.add_argument('-load', '--load', type = str, default = "None", help = "File to load to continue training")
parser.add_argument('-debug', '--debug', dest = "enable_debug", action = 'store_const', default = False, const = True, help = "Enable debugging")
args = parser.parse_args()
dataLoad.particleCount = args.size
dataLoad.maxParticlePerGrid = args.voxel_size
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_gpus
# Create the model
optimizer = tf.train.AdamOptimizer()
# model = model_net(16, args.latent_dim, args.batch_size, optimizer)
model = model_net(args.size, args.voxel_size, args.latent_dim, args.batch_size, optimizer)
model.resSize = args.res_size
model.combine_method = args.combine_method
# Headers
headers = dataLoad.read_file_header(args.datapath)
model.total_world_size = 96.0
model.initial_grid_size = model.total_world_size / 16
model.latent_simulate_steps = args.latent_step
# model.initial_grid_size = model.total_world_size / 4
# model.build_model()
# Build the model
if args.card_only:
card_prediction, _loss = model.card_only_prediction(args.sim_iter)
else:
model.build_prediction()
# Create session
sess = tf.Session()
if args.enable_debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
sess.run(tf.local_variables_initializer())
tl.layers.initialize_global_variables(sess)
# Save & Load
saver = tf.train.Saver()
if args.load != "None":
saver.restore(sess, args.load)
print("Model restored.")
# variables_names = [v.name for v in tl.layers.get_variables_with_name('', True)]
# values = sess.run(variables_names)
# cprint('Trainable vars', 'red')
# for k, v in zip(variables_names, values):
# cprint("Variable: " + str(k), 'yellow')
# cprint("Shape: " + str(v.shape), 'green')
# #print(v)
batch_idx = 0
current_step = 0
# Prediction
groundTruth_content = dataLoad.read_file(args.datapath, args.delta_step * args.velocity_multiplier)
batch_X = np.zeros((1, args.size, 7))
if args.card_only:
# 1st batch
batch_X[0] = groundTruth_content['data'][args.start_step, :, 0:7]
# batch_X[1] = groundTruth_content['data'][args.start_step, :, 0:7]
results, loss = sess.run([card_prediction, _loss], feed_dict = {model.ph_X: batch_X})
# results, loss, vloss, tloss = sess.run([card_prediction, _loss, _valloss, _trainloss], feed_dict = {model.ph_X: batch_X, model.ph_Y: batch_X})
print("Loss = %f" % (loss))
np.save(args.outpath, results)
print('Please run \'python convertNpyToRBin.py %s %s\'' % (args.outpath, args.outpath.split('.')[0] + '.rbin'))
else:
# Create result file
content = {}
content['gridCountX'] = 16
content['gridCountY'] = 16
content['gridCountZ'] = 16
content['gridCount'] = 4096
content['gridSize'] = 6
content['worldLength'] = 96
content['gravity'] = False
content['boundary'] = False
content['stepCount'] = args.sim_iter + 1
content['particleCount'] = np.zeros((content['stepCount'], content['gridCount']), dtype = np.int32)
content['data'] = np.zeros((content['stepCount'], content['gridCount'], args.voxel_size, 6))
# 1st batch
batch_X[0] = groundTruth_content['data'][args.start_step, :, 0:7]
# Write 1st batch to result
for p in range(batch_X.shape[1]):
gridX = batch_X[0, p, 0] // content['gridSize'] + content['gridCountX'] // 2
gridY = batch_X[0, p, 1] // content['gridSize'] + content['gridCountY'] // 2
gridZ = batch_X[0, p, 2] // content['gridSize'] + content['gridCountZ'] // 2
gridHash = int(gridX * content['gridCountY'] * content['gridCountZ'] + gridY * content['gridCountZ'] + gridZ)
# Translate particles from world space to voxel space
content['data'][current_step, gridHash, content['particleCount'][current_step, gridHash]] = np.asarray([\
batch_X[0, p, 0] - (gridX - content['gridCountX'] // 2) * content['gridSize'] - content['gridSize'] / 2,\
batch_X[0, p, 1] - (gridY - content['gridCountY'] // 2) * content['gridSize'] - content['gridSize'] / 2,\
batch_X[0, p, 2] - (gridZ - content['gridCountZ'] // 2) * content['gridSize'] - content['gridSize'] / 2,\
batch_X[0, p, 3],\
batch_X[0, p, 4],\
batch_X[0, p, 5]])
content['particleCount'][current_step, gridHash] += 1
current_step += 1
# Simulation loop
while current_step <= args.sim_iter:
print("Step %d" % current_step)
# Feed the data and run the net
card, data = sess.run([model.predict_cardinality, model.predict_outputs], feed_dict = {model.ph_X: batch_X})
# Write the data to result
# The velocity data here was multiplied by ds*vm, and it will be divided back when writing data back to files.
content['data'][current_step, :, :, :] = data[:, :, 0:6]
content['particleCount'][current_step, :] = card[:]
# Prepare next batch
batch_X = np.zeros((1, args.size, 7))
particle_idx = 0
for g in range(content['gridCount']):
gridPosX = g // (content['gridCountZ'] * content['gridCountX']) * content['gridSize'] - (content['gridCountX'] // 2 * content['gridSize'])
gridPosY = g % (content['gridCountZ'] * content['gridCountX']) // content['gridCountZ'] * content['gridSize'] - (content['gridCountY'] // 2 * content['gridSize'])
gridPosZ = g % content['gridCountZ'] * content['gridSize'] - (content['gridCountZ'] // 2 * content['gridSize'])
for p in range(content['particleCount'][current_step, g]):
# Translate particles from voxel space to world space
batch_X[0, particle_idx, 0] = content['data'][current_step, g, p, 0] + gridPosX + (content['gridSize'] / 2) # x
batch_X[0, particle_idx, 1] = content['data'][current_step, g, p, 1] + gridPosY + (content['gridSize'] / 2) # y
batch_X[0, particle_idx, 2] = content['data'][current_step, g, p, 2] + gridPosZ + (content['gridSize'] / 2) # z
batch_X[0, particle_idx, 3:6] = content['data'][current_step, g, p, 3:6]
batch_X[0, particle_idx, 6] = 1
particle_idx += 1
current_step += 1
# Save the results
dataLoad.save_file(content, args.outpath, args.delta_step * args.velocity_multiplier)
| betairylia/NNParticles | predict_dual.py | predict_dual.py | py | 8,640 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reduce_max",
"line_number": 37,
"usage_type": "attribute"
},
{
"ap... |
29467412193 | import urllib.parse
from flask import request
from sqlalchemy import select
from FashionCampus.database import session
from FashionCampus.common import get_image_url
from FashionCampus.model import Product, ProductImage
from FashionCampus.api.blueprints import home
@home.route('/home/banner', methods = ['GET'])
def home_get_banner():
db = session()
products = db.execute(select(
Product.id,
Product.name,
ProductImage.path.label('image_path')
).join(Product.images.and_(ProductImage.order == 1), isouter = True).where(Product.is_deleted == False).order_by(Product.created_at.desc()).limit(5)).fetchall()
return {
'data': [
{
'id': str(p.id),
'image': get_image_url(p.image_path),
'title': p.name
} for p in products
]
}, 200
| michaelrk02/FashionCampus | api/home/get_banner.py | get_banner.py | py | 866 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "FashionCampus.database.session",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.select",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "FashionCampus.model.Product.id",
"line_number": 18,
"usage_type": "attribute"
},
{
... |
25316015896 |
# coding: utf-8
# In[1]:
import numpy as np
import tensorflow as tf
dataPhase = np.load("trainLenetdataPhase.npy")
dataMag = np.load("trainLenetdataMag.npy")
dataY = np.load("trainLenetdataY.npy")
# In[2]:
from keras.models import Sequential
from keras.layers import Convolution2D
model = Sequential()
model.add(Convolution2D(filters=6,kernel_size=(5,5),strides=1,activation='relu',input_shape=(32,32,2)))
# In[3]:
from keras.layers import MaxPooling2D
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(Convolution2D(filters=16,kernel_size=(5,5),strides=1,activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
# In[4]:
from keras.layers import Flatten
model.add(Flatten())
# In[5]:
from keras.layers import Dense
model.add(Dense(120,input_shape=(400,),activation='relu'))
model.add(Dense(84,activation='relu'))
model.add(Dense(20,activation='softmax'))
# In[6]:
from keras.optimizers import Adam
optimizer = Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy'])
# In[7]:
print(dataMag.shape,dataPhase.shape)
dataIn = np.stack((dataMag,dataPhase),axis=3)
dataIn = np.reshape(dataIn,(dataIn.shape[0],dataIn.shape[1],dataIn.shape[2],dataIn.shape[3]))
print(dataIn.shape)
# In[8]:
model.summary()
# In[9]:
model.fit(x=dataIn,y=dataY,epochs=20,batch_size=32)
# In[10]:
model.save('Lenet_model_with_MagAndPhase.h5')
| MarvinChung/sound-classification-CNN | Lenet_with_MagAndPhase.py | Lenet_with_MagAndPhase.py | py | 1,445 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_... |
74059845544 | # Largely stolen from https://github.com/jmoiron/humanize (MIT)
from datetime import datetime, timedelta
def _ngettext(message, plural, num):
return message if num == 1 else plural
def _now():
return datetime.now()
def abs_timedelta(delta):
"""Returns an "absolute" value for a timedelta, always representing a
time distance."""
if delta.days < 0:
now = _now()
return now - (now + delta)
return delta
def date_and_delta(value):
"""Turn a value into a date and a timedelta which represents how long ago
it was. If that's not possible, return (None, value)."""
now = _now()
if isinstance(value, datetime):
date = value
delta = now - value
elif isinstance(value, timedelta):
date = now - value
delta = value
else:
try:
value = int(value)
delta = timedelta(seconds=value)
date = now - delta
except (ValueError, TypeError):
return (None, value)
return date, abs_timedelta(delta)
def naturaldelta(value, months=True):
"""Given a timedelta or a number of seconds, return a natural
representation of the amount of time elapsed. This is similar to
``naturaltime``, but does not add tense to the result. If ``months``
is True, then a number of months (based on 30.5 days) will be used
for fuzziness between years."""
date, delta = date_and_delta(value)
if date is None:
return value
use_months = months
seconds = abs(delta.seconds)
days = abs(delta.days)
years = days // 365
days = days % 365
months = int(days // 30.5)
if not years and days < 1:
if seconds < 60:
return "a moment"
elif 60 <= seconds < 120:
return "a minute"
elif 120 <= seconds < 3600:
minutes = seconds // 60
return _ngettext("%d minute", "%d minutes", minutes) % minutes
elif 3600 <= seconds < 3600 * 2:
return "an hour"
elif 3600 < seconds:
hours = seconds // 3600
return _ngettext("%d hour", "%d hours", hours) % hours
elif years == 0:
if days == 1:
return "a day"
if not use_months:
return _ngettext("%d day", "%d days", days) % days
else:
if not months:
return _ngettext("%d day", "%d days", days) % days
elif months == 1:
return "a month"
else:
return _ngettext("%d month", "%d months", months) % months
elif years == 1:
if not months and not days:
return "a year"
elif not months:
return _ngettext("1 year, %d day", "1 year, %d days", days) % days
elif use_months:
if months == 1:
return "1 year, 1 month"
else:
return (
_ngettext("1 year, %d month", "1 year, %d months", months)
% months
)
else:
return _ngettext("1 year, %d day", "1 year, %d days", days) % days
else:
return _ngettext("%d year", "%d years", years) % years
def naturaltime(value, future=False, months=True):
"""Given a datetime or a number of seconds, return a natural representation
of that time in a resolution that makes sense. This is more or less
compatible with Django's ``naturaltime`` filter. ``future`` is ignored for
datetimes, where the tense is always figured out based on the current time.
If an integer is passed, the return value will be past tense by default,
unless ``future`` is set to True."""
now = _now()
date, delta = date_and_delta(value)
if date is None:
return value
# determine tense by value only if datetime/timedelta were passed
if isinstance(value, (datetime, timedelta)):
future = date > now
ago = "%s from now" if future else "%s ago"
delta = naturaldelta(delta, months)
if delta == "a moment":
return "now"
return ago % delta
suffixes = ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
def naturalsize(value, format="%.1f"):
"""Format a number of byteslike a human readable filesize (eg. 10 kB) """
base = 1024
bytes = float(value)
if bytes == 1:
return "1 Byte"
elif bytes < base:
return "%d Bytes" % bytes
for i, s in enumerate(suffixes):
unit = base ** (i + 2)
if bytes < unit:
return (format + " %s") % ((base * bytes / unit), s)
return (format + " %s") % ((base * bytes / unit), s)
| facultyai/faculty-sync | faculty_sync/screens/humanize.py | humanize.py | py | 4,625 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "datetim... |
17177496153 | from indexes.single_indexes.hnsw_hnswlib import HnswHnswlib
from indexes.testers.TimeStats import BuildTimeStats, QueryTimeStats
from indexes.utils.dataset import BasicDataset
from indexes.utils.distance_function import l2distance
import Index
from sklearn.cluster import KMeans
from overrides import overrides
from typing import List, Tuple
from time import time
class KMeansIndex(Index.Index):
def __init__(self, name, max_elements, dimensions, metric='l2', distance_function=None, num_threads=-1,
num_partitions=4, num_partitions_to_search=2, ef_construction=1000, ef=1000, m=64):
super().__init__(name, max_elements, dimensions, metric, distance_function, num_threads)
self.num_partitions = num_partitions
self.num_partitions_to_search = num_partitions_to_search
self.ef_construction = ef_construction
self.ef = ef
self.m = m
self.single_partitions_size = 0
self.mapping = None
self.centers = None
def partition(self, dataset):
data = dataset.get_data()
k_means = KMeans(n_clusters=self.num_partitions, random_state=0)
k_means.fit(data)
labels = k_means.labels_
self.centers = k_means.cluster_centers_
self.mapping = dict([(i, []) for i in range(self.num_partitions)])
p_indexes = [[] for _ in range(self.num_partitions)]
for i in range(labels.shape[0]):
p_indexes[labels[i]].append(i)
self.mapping[labels[i]].append(i)
partitions = [data[p_indexes[idx]] for idx in range(self.num_partitions)]
return partitions
@overrides
def build(self, dataset) -> BuildTimeStats:
partition_start_time = time()
partitions = self.partition(dataset)
elapsed_partition_time = time() - partition_start_time
build_start_time = time()
datasets = [BasicDataset(f'partition{idx}', '') for idx in range(self.num_partitions)]
for idx, dataset in enumerate(datasets):
dataset.load_dataset_from_numpy(partitions[idx])
self.index = [HnswHnswlib('hnsw_hnswlib', datasets[idx].get_size(), datasets[idx].get_dimensions(), self.metric,
self.distance_function, self.num_threads, self.ef_construction, self.ef, self.m) for
idx in range(self.num_partitions)]
for idx, index in enumerate(self.index):
index.build(datasets[idx])
elapsed_build_time = time() - build_start_time
return BuildTimeStats(elapsed_partition_time, elapsed_build_time)
@overrides
def search(self, query, k=5) -> Tuple[List[List[Tuple[float, int]]], QueryTimeStats, float]:
avg_ind = self.num_partitions_to_search
query_start_time = time()
index_list = [
list(sorted([(l2distance(q, self.centers[i]), i) for i in range(len(self.index))], key=lambda x: x[0]))[
:self.num_partitions_to_search] for q in query]
results = [[self.index[idx].search([query[q_id]], k)[0] for _, idx in id_list] for q_id, id_list in
enumerate(index_list)]
elapsed_query_time = time() - query_start_time
merge_start_time = time()
# Fix indexes
results = [[[(t[0], self.mapping[index_list[q_id][ind_id][1]][t[1]]) for t in index[0]] for ind_id, index in enumerate(res)]
for q_id, res in enumerate(results)]
result = [list(sorted([t for index in res for t in index], key=lambda x: x[0]))[:k] for res in results]
elapsed_merge_time = time() - merge_start_time
return result, QueryTimeStats(elapsed_query_time, elapsed_merge_time, len(query)), avg_ind
| war-and-peace/dss | indexes/distributed_indexes/KMeansIndex.py | KMeansIndex.py | py | 3,703 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Index.Index",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.time",
"li... |
11625971052 | from typing import List
class Contact():
all_contacts: List["Contact"] = []
def __init__(self,nombre: str,email: str):
self.nombre = nombre
self.email = email
Contact.all_contacts.append(self)
def __repr__(self)->str:
return (f"ClassName: {self.__class__.__name__} Nombre:{self.nombre} Email:{self.email}")
class Supplier(Contact):
def ventas(self, venta:"Order")->None:
print(f'La venta fue pendida al contacto supplier {self.nombre}')
c_1 = Contact("Dusty", "dusty@example.com")
c_2 = Contact("Steve", "steve@itmaybeahack.com")
c = Contact("Some Body", "somebody@example.net")
s = Supplier("Sup Plier", "supplier@example.net")
print(Contact.all_contacts)
print(s.ventas('Holaperro')) | ujpinom/python-advanced | oop/Intro/inheritance.py | inheritance.py | py | 764 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
}
] |
18108370652 | #!/usr/bin/env python3
from pynput import keyboard as kb
import rospy
from std_msgs.msg import String
rospy.init_node("teclas")
pub = rospy.Publisher("/voice_ui", String, queue_size=10)
def callback(tecla):
s = String()
print("Se ha pulsado la tecla ")
if(str(tecla) == "'r'"):
print("R")
s.data = "sal"
elif(str(tecla) == "'g'"):
print("G")
s.data = "pimienta"
elif(str(tecla) == "'b'"):
print("B")
s.data = "azúcar"
pub.publish(s)
# Main
if __name__ == '__main__':
print("------- Start --------")
kb.Listener(callback).run()
| DanielFrauAlfaro/Proyecto_Servicios | controllers/scripts/teclas.py | teclas.py | py | 655 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rospy.init_node",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.String",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "std_msgs.msg.Str... |
32555585927 | from cvxpy import *
from cvxpy.tests.base_test import BaseTest
class TestSolvers(BaseTest):
""" Unit tests for solver specific behavior. """
def setUp(self):
self.a = Variable(name='a')
self.b = Variable(name='b')
self.c = Variable(name='c')
self.x = Variable(2, name='x')
self.y = Variable(3, name='y')
self.z = Variable(2, name='z')
self.A = Variable(2,2,name='A')
self.B = Variable(2,2,name='B')
self.C = Variable(3,2,name='C')
def test_solver_errors(self):
"""Tests that solver errors throw an exception.
"""
# For some reason CVXOPT can't handle this problem.
expr = 500*self.a + square(self.a)
prob = Problem(Minimize(expr))
with self.assertRaises(Exception) as cm:
prob.solve(solver=CVXOPT)
self.assertEqual(str(cm.exception),
"Solver 'CVXOPT' failed. Try another solver.")
def test_ecos_options(self):
"""Test that all the ECOS solver options work.
"""
# Test ecos
# feastol, abstol, reltol, feastol_inacc, abstol_inacc, and reltol_inacc for tolerance values
# max_iters for the maximum number of iterations,
EPS = 1e-4
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
for i in range(2):
prob.solve(solver=ECOS, feastol=EPS, abstol=EPS, reltol=EPS,
feastol_inacc=EPS, abstol_inacc=EPS, reltol_inacc=EPS,
max_iters=20, verbose=True, warm_start=True)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
def test_ecos_bb_options(self):
"""Test that all the ECOS BB solver options work.
"""
# 'mi_maxiter'
# maximum number of branch and bound iterations (default: 1000)
# 'mi_abs_eps'
# absolute tolerance between upper and lower bounds (default: 1e-6)
# 'mi_rel_eps'
EPS = 1e-4
prob = Problem(Minimize(norm(self.x, 1)), [self.x == Bool(2)])
for i in range(2):
prob.solve(solver=ECOS_BB, mi_max_iters=100, mi_abs_eps=1e-6,
mi_rel_eps=1e-5, verbose=True, warm_start=True)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
def test_scs_options(self):
"""Test that all the SCS solver options work.
"""
# Test SCS
# MAX_ITERS, EPS, ALPHA, UNDET_TOL, VERBOSE, and NORMALIZE.
# If opts is missing, then the algorithm uses default settings.
# USE_INDIRECT = True
EPS = 1e-4
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
for i in range(2):
prob.solve(solver=SCS, max_iters=50, eps=EPS, alpha=EPS,
verbose=True, normalize=True, use_indirect=False)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
def test_cvxopt_options(self):
"""Test that all the CVXOPT solver options work.
"""
# TODO race condition when changing these values.
# 'maxiters'
# maximum number of iterations (default: 100).
# 'abstol'
# absolute accuracy (default: 1e-7).
# 'reltol'
# relative accuracy (default: 1e-6).
# 'feastol'
# tolerance for feasibility conditions (default: 1e-7).
# 'refinement'
# number of iterative refinement steps when solving KKT equations (default: 0 if the problem has no second-order cone or matrix inequality constraints; 1 otherwise).
EPS = 1e-7
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
for i in range(2):
prob.solve(solver=CVXOPT, feastol=EPS, abstol=EPS, reltol=EPS,
max_iters=20, verbose=True, kktsolver="chol",
refinement=2, warm_start=True)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
def test_cvxopt_glpk(self):
"""Test a basic LP with GLPK.
"""
# Either the problem is solved or GLPK is not installed.
if GLPK in installed_solvers():
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
prob.solve(solver = GLPK)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
# Example from http://cvxopt.org/userguide/coneprog.html?highlight=solvers.lp#cvxopt.solvers.lp
objective = Minimize(-4 * self.x[0] - 5 * self.x[1])
constraints = [ 2 * self.x[0] + self.x[1] <= 3,
self.x[0] + 2 * self.x[1] <= 3,
self.x[0] >= 0,
self.x[1] >= 0]
prob = Problem(objective, constraints)
prob.solve(solver = GLPK)
self.assertItemsAlmostEqual(self.x.value, [1, 1])
else:
with self.assertRaises(Exception) as cm:
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
prob.solve(solver = GLPK)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % GLPK)
def test_installed_solvers(self):
"""Test the list of installed solvers.
"""
from cvxpy.problems.solvers.utilities import SOLVERS
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
for solver in SOLVERS.keys():
if solver in installed_solvers():
prob.solve(solver=solver)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
else:
with self.assertRaises(Exception) as cm:
prob.solve(solver = solver)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % solver)
| riadnassiffe/Simulator | src/tools/ecos/cvxpy/cvxpy/tests/test_solvers.py | test_solvers.py | py | 5,663 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cvxpy.tests.base_test.BaseTest",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "cvxpy.problems.solvers.utilities.SOLVERS.keys",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "cvxpy.problems.solvers.utilities.SOLVERS",
"line_number": 125,
... |
8617197984 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Fetching user description and subscriptions given a channel id.
Usage: python 11_scrape_youtube_subscriptions.py
Input data files: data/mbfc/to_crawl_users.csv
Output data files: data/mbfc/active_user_subscription.json.bz2
"""
import up # go to root folder
import os, json, bz2, time
from utils.helper import Timer
from utils.crawlers import get_subscriptions_from_channel
def main():
timer = Timer()
timer.start()
input_filepath = 'data/mbfc/to_crawl_users.csv'
output_filepath = 'data/mbfc/active_user_subscription.json.bz2'
visited_channel_set = set()
if os.path.exists(output_filepath):
with bz2.BZ2File(output_filepath, 'r') as fin:
for line in fin:
line = line.decode('utf-8')
channel_id = json.loads(line.rstrip())['channel_id']
visited_channel_set.add(channel_id)
print('visited {0} channels in the past, continue...'.format(len(visited_channel_set)))
num_user = len(visited_channel_set)
with bz2.open(output_filepath, 'at') as fout:
with open(input_filepath, 'r') as fin:
for line in fin:
user_id = line.rstrip().split(',')[0]
if user_id not in visited_channel_set:
num_request = 0
found = False
print('get description and subscriptions for user {0}'.format(user_id))
while num_request < 5:
try:
profile_json = get_subscriptions_from_channel(user_id, target='subscription')
found = True
except:
num_request += 1
if found:
fout.write('{0}\n'.format(json.dumps(profile_json)))
num_user += 1
print('{0} subscriptions are obtained for user {1}: {2}\n'.format(len(profile_json['subscriptions']), num_user, user_id))
time.sleep(1)
break
timer.stop()
if __name__ == '__main__':
main()
| avalanchesiqi/youtube-crosstalk | crawler/10_scrape_youtube_subscriptions.py | 10_scrape_youtube_subscriptions.py | py | 2,192 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "utils.helper.Timer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "bz2.BZ2File",
"lin... |
1534341836 | from __future__ import annotations
from typing import Optional
from urllib.parse import urlencode
from jupyterhub.tests.utils import async_requests, public_host
from jupyterhub.utils import url_path_join
from traitlets import Unicode, default
from jupyterhub_moss import MOSlurmSpawner
def request(
app,
method: str,
path: str,
data: Optional[dict] = None,
cookies: Optional[dict] = None,
**kwargs,
):
"""Send a GET or POST request on the hub
Similar to jupyterhub.tests.utils.get_page
"""
if data is None:
data = {}
base_url = url_path_join(public_host(app), app.hub.base_url)
url = url_path_join(base_url, path)
if method == "POST":
if cookies is not None and "_xsrf" in cookies:
data["_xsrf"] = cookies["_xsrf"]
return async_requests.post(url, data=data, cookies=cookies, **kwargs)
assert method == "GET"
if data: # Convert data to query string
url += f"?{urlencode(data)}"
return async_requests.get(url, cookies=cookies, **kwargs)
class MOSlurmSpawnerMock(MOSlurmSpawner):
"""MOSlurmSpawner with some overrides to mock some features.
Adapted from jupyterhub.tests.mocking.MockSpawner and
batchspawner.tests.test_spawner.BatchDummy
"""
exec_prefix = Unicode("")
batch_submit_cmd = Unicode("cat > /dev/null; sleep 1")
batch_query_cmd = Unicode("echo PENDING")
batch_cancel_cmd = Unicode("echo STOP")
req_homedir = Unicode(help="The home directory for the user")
@default("req_homedir")
def _default_req_homedir(self):
return f"/tmp/jupyterhub_moss_tests/{self.user.name}"
def user_env(self, env):
env["USER"] = self.user.name
env["HOME"] = self.req_homedir
env["SHELL"] = "/bin/bash"
return env
| silx-kit/jupyterhub_moss | test/utils.py | utils.py | py | 1,809 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "jupyterhub.utils.url_path_join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "jupyte... |
29117927396 | from flask import Flask,request
from sklearn.pipeline import Pipeline
import numpy as np
import joblib
import json
app = Flask(__name__)
# inisialize predected volume
predected_data = 0.0
# load pipeline
pipeline = joblib.load('transform_predict.joblib')
@app.route("/", methods=["POST", "GET"])
def home():
global predected_data
global pipeline
if request.method == "POST":
# get data
test_data = request.get_json()
# data preparation
test_data = test_data["volume"][-29:]
test_data = np.array(test_data).reshape(-1, 1)
# using pipeline
transformed_data = pipeline['scaler'].transform(test_data).T
transformed_data = np.reshape(transformed_data, (transformed_data.shape[0], 1, transformed_data.shape[1]))
predected_data = pipeline['model'].predict(transformed_data)
# invert predictions (output)
predected_data = pipeline['scaler'].inverse_transform(predected_data)[0][0]
pred_obj = {"prediction": float(predected_data)}
json_dump = json.dumps(pred_obj)
return json_dump
if __name__ == "__main__":
app.run(host="0.0.0.0", port="8080", threaded=False)
| MEZZINE-1998/ML-app-for-traffic-prediction-with-Azure-DevOps | app.py | app.py | py | 1,203 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
73313383785 | # %%
import numpy as np
import pandas as pd
import torch
import os
from matplotlib import pyplot as plt
colors2 = ['#FD6D5A', '#FEB40B', '#6DC354', '#994487', '#518CD8', '#443295']
line_styles = ['-', '--', '-', '--', '-', '--']
color = ['red', 'blue', '#FEB40B']
n = 5
length = 50
filenames = [
'results/fed_avg_topk_residual_Cifar10/s=0.01 test.csv',
'results/fed_avg_topk_residual_Cifar10/s=0.05 test.csv',
'results/fed_avg_topk_residual_Cifar10/s=0.1 test.csv',
'results/fed_avg_topk_residual_Cifar10/s=0.5 test.csv',
'results/fed_avg_topk_residual_Cifar10/s=1.0 test.csv',
]
pic_title = "LeNet-5 in CIFAR-10 TopK"
line_names = [
's=0.01',
's=0.05',
's=0.1',
's=0.5',
's=1.0',
]
df = pd.read_csv(filenames[0]).iloc[:length, 1]
for i in range(n-1):
df = pd.concat([df, pd.read_csv(filenames[1+i]).iloc[:length, 1]], axis=1, join='outer')
df.columns = line_names
# %% 平滑最大值
for i in range(n):
test_max = 0
for j in range(length):
if df.iloc[:, i][j] > test_max:
test_max = df.iloc[:, i][j]
else:
df.iloc[:, i][j] = test_max
# %%
df.plot(linewidth=0.5, color=colors2, style=line_styles)
plt.title(pic_title)
plt.xlabel("epoch")
plt.ylabel("test acc")
plt.show()
| zhengLabs/FedLSC | painting/compare_test2.py | compare_test2.py | py | 1,273 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title... |
25625685943 | from collections import defaultdict
class Classy:
def __init__(self):
self.head = defaultdict()
def createTrie(self, phases):
if not phases: return 0
for word in phases:
temp = self.head
for each in word.split():
if each not in temp:
temp[each] = {}
temp = temp[each]
temp['phase'] = True
def searchWord(self,phrase, stream):
if not phrase: return 0
self.createTrie(phrase)
ls_word = stream.split()
temp_ds = self.head
i = 0
rs = []
while i < len(ls_word):
if ls_word[i] in temp_ds:
rs.append(ls_word[i])
temp_ds = temp_ds[ls_word[i]]
if 'phase' in temp_ds:
temp_head = self.head
rs_temp = []
for temp_word in rs[1:]:
if temp_word in temp_head:
temp_head = temp_head[temp_word]
rs_temp.append (temp_word)
if 'phase' in temp_head and temp_head['phase']:
print (' '.join (rs_temp))
else:
rs_temp = []
temp_head = self.head
print(' '.join(rs))
rs = []
else:
rs =[]
temp_ds = self.head
i+=1
phrases = ['a cat', 'through the grass', 'i saw a cat running']
_stream = 'i was walking through the park and saw a cat running through the grass then i saw a cat running from the bushes'
obj = Classy()
print (obj.searchWord (phrases, _stream))
| Akashdeepsingh1/project | 2020/Stream&Phase2.py | Stream&Phase2.py | py | 1,765 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 5,
"usage_type": "call"
}
] |
13213708677 | from bson import ObjectId
from pydantic import BaseModel, Field
from typing import Optional
class PyObjectId(ObjectId):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if not ObjectId.is_valid(v):
raise ValueError("Invalid objectid")
return ObjectId(v)
@classmethod
def __modify_schema__(cls, field_schema):
field_schema.update(type="string")
class AddressModel(BaseModel):
id: PyObjectId = Field(default_factory=PyObjectId, alias="_id")
userId: str = Field(...)
addresses: Optional[tuple]
class Config:
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {ObjectId: str}
schema_extra = {
"example": {
"id": "abc123",
"userId": "dansId123",
"addresses": [
{
"firstLine": "1 dan road",
"lastLine": "1 second line",
"townCity": "Huddersfield",
"postcode": "HD1 111",
"country": "England"
}
]
}
}
class AddAddressModel(BaseModel):
firstLine: str = Field(...)
secondLine: str = Field(...)
TownCity: str = Field(...)
Postcode: str = Field(...)
Country: str = Field(...)
class Config:
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {ObjectId: str}
schema_extra = {
"example": {
"id": "abc123",
"userId": "dansId123",
"addresses": [
{
"firstLine": "1 dan road",
"lastLine": "1 second line",
"townCity": "Huddersfield",
"postcode": "HD1 111",
"country": "England"
}
]
}
}
| danbeaumont95/e-commerce-app-backend | app/address/model.py | model.py | py | 2,057 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bson.ObjectId",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "bson.ObjectId.is_valid",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bson.ObjectId",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "bson.ObjectId",
... |
44156571393 | import importlib
import model.trainer
import data.VCTK
import torch
import signal
if __name__ == "__main__":
dataset = data.VCTK.VCTKDataset(
text_file_paths=["resources/tomscott/txt/tomscott.txt"],
audio_file_paths=["resources/tomscott/wav48/tomscott.wav"]
)
print(f"Loaded {len(dataset)} entries")
trainer = model.trainer.Trainer(
device=torch.device("cuda"),
checkpoint=300802,
checkpoint_dir="checkpoints/tom",
load_from_checkpoint=True,
)
# allow graceful exit
def exit(*args):
trainer.save()
signal.signal(signal.SIGINT, exit)
signal.signal(signal.SIGTERM, exit)
try:
trainer.train(
epochs=1000,
dataset=dataset,
save_every_n=50,
batch_size=4,
run_name="runs/tom2"
)
except Exception as e:
x = trainer.checkpoint
trainer.checkpoint=str(trainer.checkpoint)+"failure"
trainer.save()
trainer.checkpoint = x+1
print(str(e))
trainer.checkpoint+=1
trainer.save() | CISC-867/Project | afktrain-targeted.py | afktrain-targeted.py | py | 1,091 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "data.VCTK.VCTK.VCTKDataset",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "data.VCTK.VCTK",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "data.VCTK",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "model.trainer.t... |
35842733652 | import os
import numpy as np
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'cafe_star_project.settings')
import django
django.setup()
from CafeStar.models import User, Drink, Order, ShopStatus
def populate():
drinks = [
{'DrinkID': 0,
'Name': 'Latte',
'Picture': 'static/image/Latte.png',
'Description': 'This is Latte',
'Nutrition': 'This is nutrition for Latte',
'Ingredients': 'This is ingredients for Latte',
'Price': 20,
'Point': 2,
'Rating': 5, },
]
orders = [
{'OrderID': 1,
'UserID': 0,
'DrinkID': 0,
'Status': False,
'Drink': 'Latte',
'Sweetness': 'Sweet',
'Milk': 'Milk',
'PickupTime': '12:00',
'Price': 20,
'Point': 2, }
]
users = [
{'UserID': 0,
'Manager': False,
'Fullname': 'John',
'Username': 'IT',
'Email': '12345678J@student.gla.ac.uk',
'Password': '123456',
'PhoneNumber': '12345654321',
'Point': 0, }
]
for drink in drinks:
add_drink(drink)
for order in orders:
add_order(order)
for user in users:
add_user(user)
def add_drink(drink):
d = Drink.objects.get_or_create(DrinkID=drink['DrinkID'])[0]
d.Name = drink['Name']
d.Picture = drink['Picture']
d.Description = drink['Description']
d.Nutrition = drink['Nutrition']
d.Ingredients = drink['Ingredients']
d.Price = drink['Price']
d.Point = drink['Point']
d.Rating = drink['Rating']
d.save()
print('drink saved')
return d
def add_order(order):
o = Order.objects.get_or_create(OrderID=order['OrderID'],
UserID=order['UserID'],
DrinkID=order['DrinkID'],
Status=order['Status'],
Drink=order['Drink'],
Sweetness=order['Sweetness'],
Milk=order['Milk'],
PickupTime=order['PickupTime'],
Price=order['Price'],
Point=order['Point'])[0]
o.save()
print('order saved')
return o
def add_user(user):
u = User.objects.get_or_create(UserID=user['UserID'],
Manager=user['Manager'],
Fullname=user['Fullname'],
Username=user['Username'],
Email=user['Email'],
Password=user['Password'],
PhoneNumber=user['PhoneNumber'],
Point=user['Point'])[0]
u.save()
print('user saved')
return u
# Start execution here!
if __name__ == '__main__':
print('Starting CafeStar population script...')
populate()
| zhengx-2000/CafeStar | populate_cafestar.py | populate_cafestar.py | py | 3,038 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ.setdefault",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.setup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "CafeStar.models.Drink... |
32910781669 | from pyspark import SparkContext, SparkConf
from pyspark.sql import Row, SQLContext
if __name__ == "__main__":
conf = SparkConf()
conf.setAppName("MinhaAPP")
sc = SparkContext(conf=conf)
linhas = sc.textFile('hdfs://elephant:8020/user/labdata/pessoas.txt')
cols = linhas.map(lambda linha: linha.split(';'))
dados = cols.map(lambda coluna: Row(nome=str(coluna[0]), idade=int(coluna[1]), altura=float(coluna[2])))
sqlContext = SQLContext(sc)
df = sqlContext.createDataFrame(dados)
df.registerTempTable("pessoas")
pessoas = sqlContext.sql("SELECT nome FROM pessoas WHERE idade >= 20 AND idade <= 30")
nomes = pessoas.collect()
print("Imprimindo os nomes: ")
for n in nomes:
print(n.nome)
print("Existem {} pessoas que tem entre 20 e 30 anos".format(len(nomes)))
| dinomagri/cluster-conf-labdata | testing/minhaapp.py | minhaapp.py | py | 838 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "pyspark.SparkConf",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.Row",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SQLC... |
10677267231 | import logging
from datetime import datetime
import flask_rebar
from flask_rebar import errors
from app.app import v1_registry
from app.entities.author import Author
from app.schemas.request.author import AuthorRequestSchema
from app.schemas.response.author import AuthorResponseSchema
from app.services import author as author_service
@v1_registry.handles(
rule="/author",
method="POST",
response_body_schema={201: AuthorResponseSchema()},
request_body_schema=AuthorRequestSchema()
)
def create_author():
body = flask_rebar.get_validated_body()
author = Author(**body)
author = author_service.save(author)
return author, 201
@v1_registry.handles(
rule="/author/<int:author_id>",
method="GET",
response_body_schema=AuthorResponseSchema()
)
def get_author_by_id(author_id: int):
author = author_service.get_by_id(author_id)
if author is None:
logging.error("Author is not found for [author_id=%s]", author_id)
raise errors.NotFound(msg="Author is not found for [author_id={}]".format(author_id),
additional_data={'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
return author
@v1_registry.handles(
rule="/author/<string:author_name>",
method="GET",
response_body_schema=AuthorResponseSchema(many=True)
)
def get_author_by_name(author_name: str):
authors = author_service.get_by_name(author_name)
if not authors:
logging.error("Author is not found for [author_name=%s]", author_name)
raise errors.NotFound(msg="Author is not found for [name={}]".format(author_name)
, additional_data={'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
return authors
@v1_registry.handles(
rule="/author/<int:author_id>",
method="PUT",
response_body_schema={200: AuthorResponseSchema()},
request_body_schema=AuthorRequestSchema()
)
def update_author(author_id: int):
body = flask_rebar.get_validated_body()
author = author_service.update(author_id, body)
if author is None:
logging.error("Author is not found for [author_id=%s]", author_id)
raise errors.NotFound(msg="Author is not found for [author_id={}]".format(author_id)
, additional_data={'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
return author, 200
| Sunoyon/flask-foundation-service | app/controllers/author.py | author.py | py | 2,388 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_rebar.get_validated_body",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "app.entities.author.Author",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "app.services.author.save",
"line_number": 23,
"usage_type": "call"
},
{
... |
30395166391 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
#from collections import OrderedDict
import matplotlib.pyplot as plt
#import torch.optim as optim
import math
import random
def plot3D(imgA,imgB,l1,l2,dim,mode="max"):
if mode == "max":
img2D_A,_ = torch.max(imgA[0,l1,:,:,:],dim)
img2D_B,_ = torch.max(imgB[0,l2,:,:,:],dim)
if mode == "half":
if dim == 0:
img2D_A = imgA[0,l1,imgA.shape[2]//2,:,:]
img2D_B = imgB[0,l2,imgB.shape[2]//2,:,:]
if dim == 1:
img2D_A = imgA[0,l1,:,imgA.shape[3]//2,:]
img2D_B = imgB[0,l2,:,imgB.shape[3]//2,:]
if dim == 2:
img2D_A = imgA[0,l1,:,:,imgA.shape[4]//2]
img2D_B = imgB[0,l2,:,:,imgB.shape[4]//2]
imgRGB = np.zeros(tuple(img2D_A.shape)+(3,))
imgRGB[:,:,0] = img2D_A.numpy()
imgRGB[:,:,1] = img2D_B.numpy()
return imgRGB
def sliceplot(imgA_,imgB,d=10):
imgA = torch.zeros(imgB.shape)
imgA[:imgA_.shape[0],:imgA_.shape[1],:imgA_.shape[2]] = imgA_
print(imgA.shape)
print(imgB.shape)
f = plt.figure()
#f.tight_layout()
ax = f.gca()
s = imgA.shape[-1]
ns = s // d
nr = 1
nc = ns
if ns>4:
nc = 4
nr = math.ceil(ns / 4.0)
for a in range(1,ns-1):
imgA_2D = imgA[:,:,a*d]
imgB_2D = imgB[:,:,a*d]
imgRGB = np.zeros(tuple(imgA_2D.shape)+(3,))
imgRGB[:,:,0] = imgA_2D.numpy()
imgRGB[:,:,1] = imgB_2D.numpy()
ax = plt.subplot(nr,nc,a)
ax.imshow(imgRGB)
ax.axis('off')
def plot_patch_batch(x,index,offsets,f=None):
if f is None:
f = plt.figure()
ax = f.gca()
ax.axis('off')
num_scales = len(x.patchlist)
for a in range(0,num_scales):
tensor = x.patchlist[a].tensor.cpu().numpy()
shape = tensor.shape[2:4]
ax = plt.subplot(2,num_scales,a+1)
im = ax.imshow(tensor[index,0,:,:])
ax = plt.subplot(2,num_scales,num_scales+a+1)
x0 = offsets[a,0]
y0 = offsets[a,0]
x1 = x0 + shape[0]//2**a
y1 = y0 + shape[1]//2**a
im = ax.imshow(tensor[index,0,x0:x1,y0:y1])
def plot_patch(x,offsets,f=None):
if f is None:
f = plt.figure()
ax = f.gca()
ax.axis('off')
num_scales = x.shape[1]
shape = x.shape[2:4]
for a in range(0,num_scales):
#print(num_scales)
ax = plt.subplot(2,num_scales,a+1)
im = ax.imshow(x[0,a,:,:])
ax = plt.subplot(2,num_scales,num_scales+a+1)
x0 = offsets[a,0]
y0 = offsets[a,0]
x1 = x0 + shape[0]//2**a
y1 = y0 + shape[1]//2**a
#print([x0,y0,x1,y1])
im = ax.imshow(x[0,a,x0:x1,y0:y1])
| febrianrachmadi/BIA_ATLAS2 | deep_patchwork/pw/vis.py | vis.py | py | 3,013 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.max",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 34,
... |
21200829619 | # coding: utf-8
import json
import traceback
from collections import deque
import requests
import time
from threading import Event, Thread, Lock
from datetime import datetime, timedelta
from datetime import time as datetime_time
import copy
import yaml
import os
from socket import socket, AF_INET, SOCK_DGRAM, SOCK_STREAM
class Parameters():
def __init__(self, logger):
self._logger = logger
self.strategy_file = ''
self.strategy_config_file = ''
self._strategy = {}
self._config = {}
self._strategy_class = None
# 複数のクラスからアクセスされる変数をここに置いています
self.latency_history = deque([0], maxlen=600)
self.superbusy_happend = False
self.parentorder_id_list = deque(maxlen=1000) # 発行済みの親注文ID(照合用のリスト)
self.parentorder_method_dict = {}
self.parentorder_detail_param = {}
self.childorder_id_list = deque(maxlen=1000) # 発行済みの子注文ID(照合用のリスト)
self.childorder_information = deque(maxlen=1000) # 発行済みの親注文IDとサイズ
self.order_id_list_lock = Lock()
# 約定判定済みのIDとサイズと価格 → 後でポジション管理へ回して処理
self.executed_order = deque(maxlen=500)
self.executed_order_lock = Lock()
self.executed_order_history = deque(maxlen=100) # 処理済みのデータ
# キャンセル済みのID → 後でポジション管理へ回して処理
self.canceled_child_order = deque(maxlen=500)
self.canceled_parent_order = deque(
maxlen=500) # キャンセル済みのID → 後でポジション管理へ回して処理
# 自分以外の約定を突っ込んでおく(acceptance_idをもらうよりも先にchild_orderイベントが来た場合の対応用)
self.executed_order_pending = deque(maxlen=100)
# 自分以外の約定を突っ込んでおく(acceptance_idをもらうよりも先にchild_orderイベントが来た場合の対応用)
self.executed_order_pending_detail = deque(maxlen=100)
self.executed_order_pending_rollback = False
self.executed_order_pending_rollback_lock = Lock()
self.server_accepted_time_lock = Lock()
# acceptance_id をキーにしてオーダーやキャンセルの時刻を入れておく辞書(板乗り時間計測用)
self.server_accepted_time_detail = deque(maxlen=2000)
# acceptance_id だけを突っ込んでおく(該非判定用)
self.cancel_child_id_list = deque(maxlen=200)
self.childorders = [] # 定期的にapiから取得するオーダーリスト
self._estimated_position = 0 # 上記から推定されるポジション
self.estimated_position2 = 0 # 上記から推定されるポジション
self.estimated_profit = 0 # 計算上の利益(確定利益のみ)
self.estimated_profit_unrealized = 0 # 計算上の利益(含み損益)
self.current_position_list = deque(maxlen=2000) # 建玉のリスト
self.counted_position = 0 # 建玉のリストを集計したポジション
self.counted_average_price = 0 # 建玉のリストを集計したポジション
self.ltp = 0 # LTP
self.best_ask = 0 # best_bid
self.best_bid = 0 # best_ask
# mm関連
self.execution_handler = None
self.board_updated_handler = None
self.spot_execution_handler = None
self.spot_board_updated_handler = None
self.execution_timestamp = time.time()
self.board_timestamp = time.time()
self.drive_by_executions = False
self.drive_by_spot_ticker = False
self.handle_spot_realtime_api = False
self.ordered_count = deque([0], maxlen=61) # 出した指値の数
self.order_filled_count = deque([0], maxlen=61) # 完全約定された数
self.order_not_filled_count = deque([0], maxlen=61) # 全く約定しなかった数
self.order_partial_filled_count = deque([0], maxlen=61) # 部分的に約定した数
self.order_taked = deque([0], maxlen=61) # 成り行きで約定した数
self.order_retry_count = deque([0], maxlen=61) # オーダーリトライの回数
self.executed_size = deque([0], maxlen=61) # 取引高
self.executed_size_today = 0 # 取引高
self.execution_event = Event()
self.execution_event.clear()
self.api_sendorder_speed = deque(maxlen=5000) # api速度 (プロット用・毎時クリア)
self.api_getorder_speed = deque(maxlen=5000) # api速度 (プロット用・毎時クリア)
self.api_cancel_speed = deque(maxlen=5000) # api速度 (プロット用・毎時クリア)
self.execution_counter = deque(maxlen=60) # api速度 (統計用・直近1分)
self.api_sendorder_speed_history = deque(maxlen=60) # api速度 (統計用・直近1分)
self.ordered_speed_history = deque(maxlen=60) # api速度 (統計用・直近1分)
self.api_cancel_speed_history = deque(maxlen=60) # api速度 (統計用・直近1分)
self.canceled_speed_history = deque(maxlen=60) # api速度 (統計用・直近1分)
self.execution_counter.append(0)
self.api_sendorder_speed_history.append(deque(maxlen=10))
self.ordered_speed_history.append(deque(maxlen=10))
self.api_cancel_speed_history.append(deque(maxlen=10))
self.canceled_speed_history.append(deque(maxlen=10))
self.all_latency_history = deque(maxlen=100000)
self.server_latency_history = deque(maxlen=4000)
self.server_order_delay_history = deque(maxlen=4000)
self.server_cancel_delay_history = deque(maxlen=4000)
self.slipage_history = deque(maxlen=20000) # 約定時のオーダー価格との差
# apiアクセス回数カウンター(300秒の累積アクセス数)
self.api_counter = deque([0]*300, maxlen=300)
self.api_counter_small_order = deque(
[0]*60, maxlen=60) # apiアクセス回数カウンター(60秒の累積アクセス数)
# Cryptowatch
self.cryptowatch_candle = 0
self.use_lightning_candle = False
# 現在がno_trade期間かどうか
self.no_trade_period = False
# リトライカウンター
self.order_retry = 30
self.cancel_retry = 10
# Adjust Position
self.adjust_position_with_api = False
# Check cross trade
self.check_cross_trade = True
# 約定履歴が無い区間もダミーのローソク足を生成してlogic()を呼ぶかどうか
self.renew_candle_everysecond = False
self.server_health = "OTHER"
self.collateral = {"collateral": 0, "open_position_pnl": 0,
"require_collateral": 0, "keep_rate": 0}
self.balance = {"currency_code": "JPY", "amount": 0, "available": 0}
self.position_event = Event()
self.sfd_commission = 0
self.sfd_profit = 0
self.sfd_loss = 0
# discord bot関連
self.on_message_handler = None
self.on_reaction_add_handler = None
def start_position_thread(self):
# ポジションをUDPで送信するスレッドで起動
self.position_thread = Thread(target=self.send_position)
self.position_thread.start()
def send_position(self):
self._logger.info("Start position thread")
self.socket = socket(AF_INET, SOCK_DGRAM)
while True:
self.position_event.wait(10)
self.position_event.clear()
base_position = self._config['base_position'] if 'base_position' in self._config else 0
message = "{:>10} : {:>15.8f} : {:>15.8f} : {:>+9.0f} : {:>3} : {:>3} : {}".format(
self._config['product'][:10],
self._strategy_class.current_pos,
base_position,
self._strategy_class.current_profit,
self._strategy_class.api_count,
self._strategy_class.api_count2,
self.strategy_config_file)
if 'pos_server' in self._config and self._config['pos_server'] != None:
self.socket.sendto(message.encode(
'utf-8'), (self._config['pos_server'][0], self._config['pos_server'][1]))
def load_config_file(self, config_file):
    """Load the bot YAML config, log a diff against the current one, and apply it.

    Also refreshes the derived flags (adjust_position_with_api,
    check_cross_trade) and remembers the file's mtime for auto-reload.
    """
    self._config_file = config_file
    config = yaml.safe_load(
        open(self._config_file, 'r', encoding='utf-8_sig'))
    # Default for execution_check_with_public_channel is False.
    if 'execution_check_with_public_channel' not in config:
        config['execution_check_with_public_channel'] = False
    # Log changed / newly-added parameters (secrets and strategy_* keys skipped).
    for key, value in config.items():
        if key != 'apikey' and key != 'secret' and (not 'strategy_' in key):
            if key in self._config:
                if self._config[key] != config[key]:
                    self._logger.info("{:<40} ; {} -> {}".format(
                        "Parameter Changed [{}]".format(key), self._config[key], config[key]))
            else:
                self._logger.info("{:<40} ; {}".format(
                    "Current Parameter [{}]".format(key), config[key]))
    # Log parameters that were removed from the config file.
    for key, value in self._config.items():
        if key != 'apikey' and key != 'secret' and key != 'created_at':
            if not key in config:
                self._logger.info(
                    "{:<40} ; ({}) -> none".format("Parameter Deleted [{}]".format(key), self._config[key]))
    self._logger.info(
        "Load bot configfile: config = {}".format(self._config_file))
    self._config = copy.deepcopy(config)
    self.adjust_position_with_api = False
    if 'adjust_position_with_api' in self._config:
        self.adjust_position_with_api = self._config['adjust_position_with_api']
    self.check_cross_trade = True
    if 'check_cross_trade' in self._config:
        self.check_cross_trade = self._config['check_cross_trade']
    # Strategy file paths can be overridden on the command line; only take the
    # config values if nothing was set yet.
    if self.strategy_file == '':
        self.strategy_file = self._config['strategy_py']
    if self.strategy_config_file == '':
        self.strategy_config_file = self._config['strategy_yaml']
    # Remember the config file timestamp (for auto reload).
    self._config['created_at'] = os.path.getmtime(self._config_file)
def load_strategy_config_file(self, strategy_config_file):
    """Load the strategy YAML config, log a diff, and apply it.

    Copies a handful of strategy options onto the instance (retry counts,
    candle settings, ...), applies defaults, logs parameter changes (queued
    for Discord via _parameter_message), and records the file's mtime for
    auto-reload.
    """
    self.strategy_config_file = strategy_config_file
    strategy_config = yaml.safe_load(
        open(self.strategy_config_file, 'r', encoding='utf-8_sig'))
    if 'handle_spot_realtime_api' in strategy_config:
        self.handle_spot_realtime_api = strategy_config['handle_spot_realtime_api']
    if 'use_lightning_candle' in strategy_config:
        self.use_lightning_candle = strategy_config['use_lightning_candle']
    if 'drive_by_spot_ticker' in strategy_config:
        self.drive_by_spot_ticker = strategy_config['drive_by_spot_ticker']
    if 'drive_by_executions' in strategy_config:
        self.drive_by_executions = strategy_config['drive_by_executions']
    if 'order_retry' in strategy_config:
        self.order_retry = strategy_config['order_retry']
    if 'cancel_retry' in strategy_config:
        self.cancel_retry = strategy_config['cancel_retry']
    if 'cryptowatch_candle' in strategy_config:
        self.cryptowatch_candle = strategy_config['cryptowatch_candle']
    # Default for close_position_while_no_trade is True.
    if 'close_position_while_no_trade' in strategy_config:
        self.close_position_while_no_trade = strategy_config['close_position_while_no_trade']
    else:
        self.close_position_while_no_trade = True
    # Default logic_loop_period is 1 second.
    if 'logic_loop_period' not in strategy_config:
        strategy_config['logic_loop_period'] = 1
    # Default for renew_candle_everysecond is False.
    if 'renew_candle_everysecond' in strategy_config:
        self.renew_candle_everysecond = strategy_config['renew_candle_everysecond']
    else:
        self.renew_candle_everysecond = False
    # Diff against the previously loaded strategy; the nested 'parameters'
    # dict is diffed per key so individual tuning changes are visible.
    for key, value in strategy_config.items():
        if key in self._strategy:
            if self._strategy[key] != strategy_config[key]:
                if key == 'parameters':
                    for param_key, param_value in strategy_config[key].items():
                        if param_key in self._strategy[key]:
                            if self._strategy[key][param_key] != strategy_config[key][param_key]:
                                self._parameter_message(True, "[parameters]{:<40} ; {} -> {}".format("Parameter Changed [{}]".format(
                                    param_key), self._strategy[key][param_key], strategy_config[key][param_key]))
                        else:
                            self._parameter_message(True, "[parameters]{:<40} ; {}".format(
                                "Current Parameter [{}]".format(param_key), strategy_config[key][param_key]))
                else:
                    self._parameter_message(True, "{:<40} ; {} -> {}".format(
                        "Parameter Changed [{}]".format(key), self._strategy[key], strategy_config[key]))
        else:
            self._parameter_message(True, "{:<40} ; {}".format(
                "Current Parameter [{}]".format(key), strategy_config[key]))
    # Report parameters deleted from the file.
    for key, value in self._strategy.items():
        if key != 'created_at':
            if not key in strategy_config:
                self._parameter_message(True, "{:<40} ; ({}) -> none".format(
                    "Parameter Deleted [{}]".format(key), self._strategy[key]))
    self._strategy = copy.deepcopy(strategy_config)
    # Remember the strategy config timestamp (for auto reload).
    self._strategy['created_at'] = os.path.getmtime(
        self.strategy_config_file)
    # Derive the close lot from lotsize (3x), falling back to 1.
    if 'parameters' in self._strategy and self._strategy['parameters'] != None and 'lotsize' in self._strategy['parameters']:
        self.close_lot = self._strategy['parameters']['lotsize']*3
    else:
        self.close_lot = 1
    self._logger.info("Load strategy configfile: config = {}".format(
        self.strategy_config_file))
    self._parameter_message_send()
_message = ''
def _parameter_message(self, discord_send, message):
if discord_send == True and (not 'http' in message) and (not 'discord_bot_token' in message):
self._logger.info(message)
self._message += message + '\n'
else:
self._logger.debug(message)
def _parameter_message_send(self):
    """Flush the buffered parameter messages to the configured Discord webhook.

    The buffer is always cleared, even when the POST fails.
    """
    try:
        webhooks = self._strategy['position_discord_webhooks'] if 'position_discord_webhooks' in self._strategy else ''
        if webhooks != '' and self._message != '':
            # Prefix with JST time (UTC+9) and the strategy config filename.
            payload = {'content': '{} {}\n{}'.format((datetime.utcnow(
            )+timedelta(hours=9)).strftime('%H:%M:%S'), self.strategy_config_file, self._message)}
            requests.post(webhooks, data=payload, timeout=10)
    except Exception as e:
        self._logger.error(
            'Failed sending status to Discord: {}'.format(e))
        # Brief back-off after a failed webhook call.
        time.sleep(1)
    self._message = ''
def renew(self):
    """Reload config files whose mtime changed and handle no-trade windows.

    Returns:
        True when either config file was reloaded, else False.
    """
    updated = False
    if self._config['created_at'] != os.path.getmtime(self._config_file):
        self.load_config_file(self._config_file)
        self.load_strategy_config_file(self.strategy_config_file)
        updated = True
    if self._strategy['created_at'] != os.path.getmtime(self.strategy_config_file):
        self.load_strategy_config_file(self.strategy_config_file)
        updated = True
    # While inside a no-trade window, force close_position mode.
    if self.check_no_trade_period():
        self.no_trade_period = True
        if self.close_position_while_no_trade == True:
            self._strategy['close_position'] = True
            self._strategy['emergency_wait'] = 10
    # On leaving the no-trade window, re-read the strategy parameters.
    elif self.no_trade_period == True:
        self.no_trade_period = False
        self.load_strategy_config_file(self.strategy_config_file)
    # Recompute close lot from lotsize (3x), falling back to 1.
    if 'parameters' in self._strategy and self._strategy['parameters'] != None and 'lotsize' in self._strategy['parameters']:
        self.close_lot = self._strategy['parameters']['lotsize']*3
    else:
        self.close_lot = 1
    return updated
def check_no_trade_period(self):
    """Return True when the current JST time falls inside a configured no-trade window.

    Each entry of config['no_trade'] has a 'period' string sliced as
    'HH:MM-HH:MM' (chars 0-10), optionally followed by a single weekday
    digit at index 12 (0=Monday) restricting the window to that weekday.
    """
    # Is the current time within the range?  https://codeday.me/jp/qa/20190219/264470.html
    def time_in_range(start, end, x):
        """Return true if x is in the range [start, end]"""
        if start <= end:
            return start <= x <= end
        else:
            # Range wraps around midnight.
            return start <= x or x <= end
    within_period = False
    # Current time and weekday in JST (UTC+9), truncated to the minute.
    now = datetime_time((datetime.utcnow()+timedelta(hours=9)).hour,
                        (datetime.utcnow()+timedelta(hours=9)).minute, 0)
    weekday = (datetime.utcnow()+timedelta(hours=9)).weekday()
    if 'no_trade' in self._config:
        try:
            if self._config['no_trade'] != None:
                for p in self._config['no_trade']:
                    start = datetime_time(
                        int(p['period'][0:2]), int(p['period'][3:5]), 0)
                    end = datetime_time(
                        int(p['period'][6:8]), int(p['period'][9:11]), 0)
                    # NOTE(review): only a single weekday digit is supported at
                    # index 12 — multi-day specs would need a format change.
                    if (len(p['period']) <= 11 or int(p['period'][12]) == weekday) and time_in_range(start, end, now):
                        self._logger.info(
                            'no_trade period : {}'.format(p['period']))
                        within_period = True
        except Exception as e:
            self._logger.error(
                'no_trade period is not correct: {}'.format(e))
            self._logger.info('no_trade : {}'.format(
                self._config['no_trade']))
    return within_period
@property
def estimated_position(self):
    """Most recently estimated position."""
    return self._estimated_position

@estimated_position.setter
def estimated_position(self, pos):
    # Store the new estimate and wake send_position() so it broadcasts promptly.
    self._estimated_position = pos
    self.position_event.set()
| PP-lib/BFS | BFS-X/libs/parameters.py | parameters.py | py | 19,180 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "collections.deq... |
75260503145 | import json, jsonlines
class vocab():
    """Word vocabulary: word<->index mappings plus per-word frequency counts."""

    def __init__(self):
        self.word2index = {}
        self.word2count = {}
        self.index2word = []
        self.n_words = 0  # Count word tokens
        self.num_start = 0

    def add_sen_to_vocab(self, sentence):  # add words of sentence to vocab
        """Add every (lower-cased) word of *sentence* to the vocabulary."""
        for word in sentence:
            word = word.lower()
            # Membership test against the dict is O(1); the original scanned
            # the index2word list, which is O(vocab size) per token.
            if word not in self.word2index:
                self.word2index[word] = self.n_words
                self.word2count[word] = 1
                self.index2word.append(word)
                self.n_words += 1
            else:
                # .get guards against words that survive trim() in word2index
                # but were dropped from word2count (trim empties word2count),
                # which previously raised KeyError when re-added.
                self.word2count[word] = self.word2count.get(word, 0) + 1

    def trim(self, min_count):  # trim words below a certain count threshold
        """Drop words seen fewer than *min_count* times and re-index the rest."""
        keep_words = [k for k, v in self.word2count.items() if v >= min_count]

        print('keep_words %s / %s = %.4f' % (
            len(keep_words), len(self.index2word), len(keep_words) / len(self.index2word)
        ))

        # Reinitialize dictionaries with the special tokens first.
        self.word2index = {'PAD': 0, '<s>': 1, '</s>': 2, 'UNKN': 3}
        self.word2count = {}
        self.index2word = ['PAD', '<s>', '</s>', 'UNKN']
        self.n_words = 4  # Count default tokens

        for word in keep_words:
            self.word2index[word] = self.n_words
            self.index2word.append(word)
            self.n_words += 1
def _encode_sentence(sentence, word2index, max_len, overflow_marker):
    """Encode a token list as a fixed-length list of vocabulary indices.

    The sentence is wrapped in '<s>' ... '</s>' markers, each token is
    lower-cased to match the vocabulary (vocab.add_sen_to_vocab lower-cases
    on insert; the original looked up the raw-cased word and so mapped most
    tokens to UNKN), unknown tokens map to 'UNKN', and the result is padded
    with 0 ('PAD') up to max_len.  If the encoded length reaches max_len,
    *overflow_marker* is printed instead (preserving the original
    diagnostics).
    """
    # Bug fix: the end marker used to be the literal '<\\s>', which is not in
    # the vocabulary ('</s>' is), so every sentence previously ended in UNKN.
    tokens = ['<s>'] + sentence + ['</s>']
    unk = word2index['UNKN']
    # Dict lookups are O(1); the original tested membership in index2word (a list).
    encoded = [word2index.get(tok.lower(), unk) for tok in tokens]
    if len(encoded) < max_len:
        encoded.extend([0] * (max_len - len(encoded)))
    else:
        print(overflow_marker)
    return encoded


def Data_loader(train_file, test_file, max_len, output_train, output_test):
    """Build a vocabulary from *train_file* and write encoded train/test sets.

    Args:
        train_file, test_file: jsonlines inputs; each record holds a
            'question' ({'sentence', 'edges'}) and a list of 'sample' items.
        max_len: fixed encoded sentence length (includes the <s>/</s> markers).
        output_train, output_test: jsonlines output paths.
    """
    vo = vocab()
    train_dataset = []
    test_dataset = []

    # Pass 1: collect vocabulary counts from the training questions/answers.
    with open(train_file) as f:
        for line in f:
            d = json.loads(line.strip())
            vo.add_sen_to_vocab(d['question']['sentence'])
            for item in d['sample']:
                vo.add_sen_to_vocab(item['sentence'])
    vo.trim(2)  # drop words seen fewer than twice
    word2index = vo.word2index

    # Pass 2: encode the training pairs.
    with open(train_file) as f:
        for line in f:
            d = json.loads(line.strip())
            new_question = _encode_sentence(d['question']['sentence'], word2index, max_len, 1)
            for item in d['sample']:
                train_dataset.append({
                    'question': new_question,
                    'question_edges': d['question']['edges'],
                    'answer': _encode_sentence(item['sentence'], word2index, max_len, 0),
                    'answer_edges': item['edges'],
                    'label': item['label'],
                })

    # Pass 3: encode the test pairs, keeping the raw answer for evaluation.
    with open(test_file) as f:
        for line in f:
            d = json.loads(line.strip())
            new_question = _encode_sentence(d['question']['sentence'], word2index, max_len, 0)
            for item in d['sample']:
                test_dataset.append({
                    'question': new_question,
                    'question_edges': d['question']['edges'],
                    'answer': _encode_sentence(item['sentence'], word2index, max_len, 0),
                    'answer_edges': item['edges'],
                    'label': item['label'],
                    'output_result': item['answer'],
                })

    with jsonlines.open(output_train, 'w') as writer:
        writer.write_all(train_dataset)
    with jsonlines.open(output_test, 'w') as writer:
        writer.write_all(test_dataset)
def merge(file_list, output_file='all_test.json'):
    """Concatenate the JSON-lines records of *file_list* into one output file.

    Args:
        file_list: paths of jsonlines shard files to read, in order.
        output_file: destination path; defaults to the previously hard-coded
            'all_test.json', so existing callers are unaffected.
    """
    res = []
    for file in file_list:
        with open(file) as f:
            for line in f:
                res.append(json.loads(line.strip()))
    with jsonlines.open(output_file, 'w') as writer:
        writer.write_all(res)
if __name__ == '__main__':
    # The commented calls below were used once to merge the per-range shards
    # from earlier preprocessing runs into the single all_train/all_test files.
    #file_list = ['./train1_100.json', './train101_200.json', './train201_300.json', './train301_400.json', './train401_500.json', './train501_600.json', './train601_700.json', './train701_800.json', './train801_900.json', './train901_1000.json']
    #merge(file_list)
    #test_file = ['./test1_100.json', './test101_200.json', './test168_300.json', './test301_500.json','./test501_700.json', './test701_900.json', './test901_1000.json']
    #merge(test_file)
    # Encode both splits to length-256 index sequences and write the outputs.
    Data_loader('./all_train.json', './all_test.json', 256, 'output_all_train.json', 'output_all_test.json')
| ttt-77/CS546_project | preprocess/pre_data.py | pre_data.py | py | 5,813 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "jsonlines.open",
"line_number": ... |
71804654824 | import pygame
import time
import random
pygame.init()

# Colour palette (RGB tuples).
wh = (255, 255, 255)  # White
ye = (255, 255, 102)  # Yellow
bk = (0, 0, 0)  # Black
re = (213, 50, 80)  # Red
gr = (0, 255, 0)  # Green
bl = (50, 153, 213)  # Blue

# Window dimensions in pixels.
screenWidth = 600
screenHeight = 400

display = pygame.display.set_mode((screenWidth, screenHeight))
pygame.display.set_caption("Snake")

clock = pygame.time.Clock()

# Snake segment size (px per cell) and game speed (frames per second).
snakeSize = 10
snakeSpeed = 10

# Fonts: fontS for messages/score, score font is currently unused.
fontS = pygame.font.SysFont("bahnschrift", 25)
score = pygame.font.SysFont("comicsansms", 35)
def yourScore(score):
    """Render the current score in the top-left corner.

    NOTE: the parameter shadows the module-level 'score' font (unused here).
    """
    value = fontS.render("Your score = " + str(score), True, ye)
    display.blit(value, [0, 0])
def snake(snakeSize, snakeList):
    """Draw every snake segment as a black square of snakeSize pixels."""
    for segment_x, segment_y in snakeList:
        pygame.draw.rect(display, bk, [segment_x, segment_y, snakeSize, snakeSize])
def msg(msgs, color):
    """Render a status message roughly one third down the screen."""
    message = fontS.render(msgs, True, color)
    display.blit(message, [int(screenWidth/6), int(screenHeight/3)])
def game():
    """Main game loop: move the snake, handle input, food, and collisions."""
    gameOver = False   # True -> leave the outer loop and quit pygame
    gameClose = False  # True -> show the "you lost" screen

    # Start in the middle of the screen, not moving.
    x = screenWidth / 2
    y = screenHeight / 2
    xMove = 0
    yMove = 0

    snakeList = []
    length = 1  # snake length; score shown is length - 1

    # Food position, snapped to the 10-px grid.
    pointX = round(random.randrange(0, screenWidth - snakeSize) / 10.0) * 10.0
    pointY = round(random.randrange(0, screenHeight - snakeSize) / 10.0) * 10.0

    while not gameOver:

        # Game-over screen: wait for Q (quit) or N (new game).
        while gameClose:
            display.fill(bl)
            msg("You lost. Press N to play one more time or Q to quit.", re)
            yourScore(length - 1)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        gameOver = True
                        gameClose = False
                    if event.key == pygame.K_n:
                        # NOTE(review): restarting by recursion grows the call
                        # stack with every replay.
                        game()

        # Input handling: arrows or WASD steer the snake one cell per frame.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameOver = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT or event.key == pygame.K_a:
                    xMove = -snakeSize
                    yMove = 0
                elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
                    xMove = snakeSize
                    yMove = 0
                elif event.key == pygame.K_UP or event.key == pygame.K_w:
                    xMove = 0
                    yMove = -snakeSize
                elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
                    xMove = 0
                    yMove = snakeSize

        # Wall collision (checked before this frame's move is applied).
        if x >= screenWidth or x < 0 or y >= screenHeight or y < 0:
            gameClose = True
        x += xMove
        y += yMove

        display.fill(bl)
        # Draw the food.
        pygame.draw.rect(display, gr, [int(pointX), int(pointY), int(snakeSize), int(snakeSize)])

        # Advance the snake: append the new head, drop the tail.
        snakeHead = []
        snakeHead.append(x)
        snakeHead.append(y)
        snakeList.append(snakeHead)
        if len(snakeList) > length:
            del snakeList[0]

        # Self collision: the head may not touch any body segment.
        for tmp in snakeList[:-1]:
            if tmp == snakeHead:
                gameClose = True

        snake(snakeSize, snakeList)
        yourScore(length - 1)
        pygame.display.update()

        # Eating the food grows the snake and respawns the food.
        if x == pointX and y == pointY:
            pointX = round(random.randrange(0, screenWidth - snakeSize) / 10.0) * 10.0
            pointY = round(random.randrange(0, screenHeight - snakeSize) / 10.0) * 10.0
            length += 1
            print("X")  # debug marker for each food eaten

        clock.tick(snakeSpeed)

    pygame.quit()
    quit()
def main():
    """Program entry point: run the game loop until the player quits."""
    game()


if __name__ == "__main__":
    # Guard the entry point so importing this module does not start the game.
    main()
| Feleur/Snake | main.py | main.py | py | 3,541 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
19793680016 | import logging
import gevent
from binascii import hexlify
from steam.client import SteamClient
from steam.core.msg import MsgProto
from steam.enums.emsg import EMsg
from steam.utils.proto import proto_to_dict
import vdf
LOG = logging.getLogger("Steam Worker")
class SteamWorker(object):
    """Thin wrapper around SteamClient that keeps a session alive and exposes
    a few PICS/player-count queries."""

    def __init__(self):
        self.logged_on_once = False

        self.steam = client = SteamClient()
        client.set_credential_location(".")

        @client.on("error")
        def handle_error(result):
            LOG.info("Logon result: %s", repr(result))

        @client.on("connected")
        def handle_connected():
            LOG.info("Connected to %s", client.current_server_addr)

        @client.on("channel_secured")
        def send_login():
            # Re-login automatically once we have logged on at least once.
            if self.logged_on_once and self.steam.relogin_available:
                self.steam.relogin()

        @client.on("logged_on")
        def handle_after_logon():
            self.logged_on_once = True

            LOG.info("-"*30)
            LOG.info("Logged on as: %s", client.user.name)
            LOG.info("Community profile: %s", client.steam_id.community_url)
            LOG.info("Last logon: %s", client.user.last_logon)
            LOG.info("Last logoff: %s", client.user.last_logoff)
            LOG.info("-"*30)

        @client.on("disconnected")
        def handle_disconnect():
            LOG.info("Disconnected.")
            # Only auto-reconnect after a successful logon.
            if self.logged_on_once:
                LOG.info("Reconnecting...")
                client.reconnect(maxdelay=30)

        @client.on("reconnect")
        def handle_reconnect(delay):
            LOG.info("Reconnect in %ds...", delay)

    def prompt_login(self):
        """Interactively log in via the CLI (credentials, 2FA, ...)."""
        self.steam.cli_login()

    def close(self):
        """Log out (if logged on) and drop the connection."""
        if self.steam.logged_on:
            self.logged_on_once = False
            LOG.info("Logout")
            self.steam.logout()
        if self.steam.connected:
            self.steam.disconnect()

    # NOTE(review): mutable default arguments ([]) are shared across calls;
    # harmless here because they are only iterated, never mutated.
    def get_product_info(self, appids=[], packageids=[]):
        """Fetch PICS product info for the given app/package ids as a dict.

        The vdf 'buffer' fields are parsed into 'appinfo' and the sha bytes
        are hex-encoded.  Returns {} on timeout/empty response.
        """
        # NOTE(review): map() is a lazy iterator on Python 3 — presumably the
        # protobuf layer materializes it; confirm against the steam lib.
        resp = self.steam.send_job_and_wait(MsgProto(EMsg.ClientPICSProductInfoRequest),
                                            {
                                                'apps': map(lambda x: {'appid': x}, appids),
                                                'packages': map(lambda x: {'packageid': x}, packageids),
                                            },
                                            timeout=10
                                            )
        if not resp: return {}

        resp = proto_to_dict(resp)

        for app in resp.get('apps', []):
            # App buffers are text vdf with a trailing NUL byte.
            app['appinfo'] = vdf.loads(app.pop('buffer')[:-1].decode('utf-8', 'replace'))['appinfo']
            app['sha'] = hexlify(app['sha']).decode('utf-8')
        for pkg in resp.get('packages', []):
            # Package buffers are binary vdf prefixed with a 4-byte header.
            pkg['appinfo'] = vdf.binary_loads(pkg.pop('buffer')[4:])[str(pkg['packageid'])]
            pkg['sha'] = hexlify(pkg['sha']).decode('utf-8')

        return resp

    def get_product_changes(self, since_change_number):
        """Fetch PICS app/package changes since the given change number."""
        resp = self.steam.send_job_and_wait(MsgProto(EMsg.ClientPICSChangesSinceRequest),
                                            {
                                                'since_change_number': since_change_number,
                                                'send_app_info_changes': True,
                                                'send_package_info_changes': True,
                                            },
                                            timeout=10
                                            )
        return proto_to_dict(resp) or {}

    def get_player_count(self, appid):
        """Fetch the current number of players for *appid*."""
        resp = self.steam.send_job_and_wait(MsgProto(EMsg.ClientGetNumberOfCurrentPlayersDP),
                                            {'appid': appid},
                                            timeout=10
                                            )
        return proto_to_dict(resp) or {}
| ValvePython/steam | recipes/2.SimpleWebAPI/steam_worker.py | steam_worker.py | py | 3,921 | python | en | code | 934 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "steam.client.SteamClient",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "steam.core.msg.MsgProto",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "st... |
71893420264 | #!/usr/bin/python3
""" Write a script that adds the State object
“Louisiana” to the database hbtn_0e_6_usa """
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from model_state import Base, State
if __name__ == "__main__":
    # Build the engine from the CLI args: username, password, database name.
    engine = create_engine(
        'mysql+mysqldb://{}:{}@localhost/{}'
        .format(sys.argv[1], sys.argv[2], sys.argv[3]), pool_pre_ping=True
    )
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()

    # Stage the new State; session.add() returns None, so (unlike before)
    # its result is not kept in a variable.
    session.add(State(name="Louisiana"))

    # The query autoflushes the pending INSERT, so the new row has an id here.
    state = session.query(State).filter(
        State.name == "Louisiana").first()
    if state is not None:
        print("{}".format(state.id))

    session.commit()
    session.close()
| ericpo1sh/holbertonschool-higher_level_programming | python-object_relational_mapping/11-model_state_insert.py | 11-model_state_insert.py | py | 776 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "model_state.Base.metadata.create_all",
"line_number": 14,
"usage_type": "call"
},
{
"api_n... |
38100149531 | from django.db import models
# Create your models here.
class BaseModel(models.Model):
"""
All models (in other apps) should subclass BaseModel.
This is just a convenient place to add common functionality and fields
between models.
FSM_FIELDS (if used) must be defined on models that inherit from BaseModel.
This takes care of excluding those fields when calling .save on the model.
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
| iBala/bluetie | base/models.py | models.py | py | 569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 14,
"usage_type": "call"
},
{
"api_nam... |
16354953325 | import configparser # implements a basic configuration language for Python programs
import os # provides a portable way of using operating system dependent functionality
import sys # system-specific parameters and functions
import numpy as np # the fundamental package for scientific computing with Python
from logzero import logger # robust and effective logging for Python
from torch.utils import data # used to import data.Dataset
# get config file path: config.ini lives at the src/ root, four directories
# above this generators module.
generators_dir = os.path.dirname(os.path.abspath(__file__))
nets_dir = os.path.dirname(generators_dir)
model_dir = os.path.dirname(nets_dir)
src_dir = os.path.dirname(model_dir)
config_filepath = os.path.join(src_dir, 'config.ini')

# instantiate config parser and read config file
config = configparser.ConfigParser()
config.read(config_filepath)

# get variables from config file
# NOTE(review): configparser returns these as strings; they are only used in
# filename formatting below, where either type formats identically.
training_n_samples = config['sorel20mDataset']['total_training_samples']
validation_n_samples = config['sorel20mDataset']['total_validation_samples']
test_n_samples = config['sorel20mDataset']['total_test_samples']

# instantiate key-n_samples dict mapping dataset mode -> total sample count
total_n_samples = {'train': training_n_samples,
                   'validation': validation_n_samples,
                   'test': test_n_samples}
class Dataset(data.Dataset):
    """ Pre-processed dataset class.

    Wraps the pre-processed SOREL20M .dat memory maps (features X, labels y,
    shas S) as a torch Dataset.
    """

    # list of malware tags
    tags = ["adware", "flooder", "ransomware", "dropper", "spyware", "packed",
            "crypto_miner", "file_infector", "installer", "worm", "downloader"]

    # create tag-index dictionary for the joint embedding
    tags2idx = {tag: idx for idx, tag in enumerate(tags)}
    # tags2idx = {'adware': 0, 'flooder': 1, ...}

    # create list of tag indices (tags encoding)
    encoded_tags = [idx for idx in range(len(tags))]

    def __init__(self,
                 ds_root,  # pre-processed dataset root directory (where to find .dat files)
                 mode='train',  # mode of use of the dataset object (may be 'train', 'validation' or 'test')
                 n_samples=None,  # number of samples to consider (used just to access the right pre-processed files)
                 return_malicious=True,  # whether to return the malicious label for the data point or not
                 return_counts=True,  # whether to return the counts for the data point or not
                 return_tags=True,  # whether to return the tags for the data points or not
                 return_shas=False):  # whether to return the sha256 of the data points or not
        """ Initialize Dataset class.

        Args:
            ds_root: Pre-processed dataset root directory (where to find .dat files)
            mode: Mode of use of the dataset object (it may be 'train', 'validation' or 'test') (default: 'train')
            n_samples: Number of samples to consider (used just to access the right pre-processed files) (default: None)
            return_malicious: Whether to return the malicious label for the data point or not (default: True)
            return_counts: Whether to return the counts for the data point or not (default: True)
            return_tags: Whether to return the tags for the data points or not (default: True)
            return_shas: Whether to return the sha256 of the data points or not (default: False)
        """
        self.return_counts = return_counts
        self.return_tags = return_tags
        self.return_malicious = return_malicious
        self.return_shas = return_shas

        # if mode is not in one of the expected values raise an exception
        if mode not in {'train', 'validation', 'test'}:
            raise ValueError('invalid mode {}'.format(mode))

        # if n_samples is not set or it is <= 0 -> set it to the max
        # (module-level totals; note these come from configparser as strings,
        # which only feed the filename formatting below)
        if n_samples is None or n_samples <= 0:
            n_samples = total_n_samples[mode]

        # set feature dimension (EMBER-style feature vector length)
        ndim = 2381

        # set labels dimension to 1 (malware) + 1 (count) + n_tags (tags)
        labels_dim = 1 + 1 + len(Dataset.tags)

        # generate X (features vector), y (labels vector) and S (shas) file names
        X_path = os.path.join(ds_root, "X_{}_{}.dat".format(mode, n_samples))
        y_path = os.path.join(ds_root, "y_{}_{}.dat".format(mode, n_samples))
        S_path = os.path.join(ds_root, "S_{}_{}.dat".format(mode, n_samples))

        # log error and exit if at least one of the dataset files (X, y, S) does not exist
        if not (os.path.exists(X_path) and os.path.exists(y_path) and os.path.exists(S_path)):
            logger.error("X, y, S files for mode {} and amount {} not found.".format(mode, n_samples))
            sys.exit(1)

        logger.info('Opening Dataset at {} in {} mode.'.format(ds_root, mode))

        # open S (shas) memory map in Read+ mode (+ because pytorch does not support read only ndarrays)
        self.S = np.memmap(S_path, dtype=np.dtype('U64'), mode="r+")

        # get number of elements from S vector
        self.N = self.S.shape[0]

        # open y (labels) memory map in Read+ mode (+ because pytorch does not support read only ndarrays)
        self.y = np.memmap(y_path, dtype=np.float32, mode="r+", shape=(self.N, labels_dim))

        # open X (features) memory map in Read+ mode (+ because pytorch does not support read only ndarrays)
        self.X = np.memmap(X_path, dtype=np.float32, mode="r+", shape=(self.N, ndim))

        logger.info("{} samples loaded.".format(self.N))

    def __len__(self):
        """ Get dataset total length.

        Returns:
            Dataset length.
        """
        return self.N  # return the total number of samples

    def __getitem__(self,
                    index):  # index of the item to get
        """ Get item from dataset.

        Args:
            index: Index of the item to get
        Returns:
            Sha256 (if required), features and labels associated to the sample with index 'index'.
        """
        # initialize labels set for this particular sample
        labels = {}
        # get feature vector
        features = self.X[index]

        if self.return_malicious:
            # get malware label for this sample through the index
            labels['malware'] = self.y[index][0]

        if self.return_counts:
            # get count for this sample through the index
            labels['count'] = self.y[index][1]

        if self.return_tags:
            # get tags list for this sample through the index
            labels['tags'] = self.y[index][2:]

        if self.return_shas:
            # get sha256
            sha = self.S[index]

            # return sha256, features and labels associated to the sample with index 'index'
            return sha, features, labels
        else:
            # return features and labels associated to the sample with index 'index'
            return features, labels
| cmikke97/Automatic-Malware-Signature-Generation | src/Model/nets/generators/dataset.py | dataset.py | py | 6,823 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"l... |
4778471079 | from typing import Tuple, Sequence
from functools import partial, reduce
import operator
import jax
import jax.numpy as jnp
from transformer_engine_jax import DType as TEDType
from .cpp_extensions import cast_transpose, gemm, jax_dtype_to_te_dtype
from .fp8 import FP8Helper, FP8GemmPackage
from .sharding import ShardingType, get_dot_sharding_meta, get_fp8_meta_sharding_meta
from .sharding import is_dp_enabled, is_tp_enabled, merge_axis_resources
from .sharding import xmap_runner, extend_fsdp_sharding_meta
# Enable xmap's SPMD (manual) lowering so xmap_runner can partition the FP8
# GEMM across the device mesh.
jax.config.update('experimental_xmap_spmd_lowering', True)
jax.config.update('experimental_xmap_spmd_lowering_manual', True)
def fp8_dot(fp8_gemm_pkg: FP8GemmPackage,
            fwd_dtype: TEDType,
            bwd_dtype: TEDType,
            contracting_dims: Tuple[Sequence[int], Sequence[int]] = ((-1,), (0,)),
            sharding_type: ShardingType = ShardingType.SINGLE,
            dp_dim_index: int = 0) -> jnp.ndarray:
    """
    FP8 dot wrapper

    Unpacks the single-GEMM package and dispatches either to a direct
    _fp8_dot call (single device) or to an xmap-sharded call with the
    appropriate DP/TP/FSDP sharding metadata.
    """
    assert fp8_gemm_pkg.num_of_gemm == 1
    inputs = fp8_gemm_pkg.inputs
    kernel = fp8_gemm_pkg.kernels[0]
    fp8_max = fp8_gemm_pkg.fp8_max
    amax = fp8_gemm_pkg.amax
    scale = fp8_gemm_pkg.scale
    scale_inv = fp8_gemm_pkg.scale_inv

    if sharding_type is ShardingType.SINGLE:
        # No sharding: call the custom-VJP dot directly with empty axis names.
        res = _fp8_dot(inputs,
                       kernel,
                       fp8_max,
                       amax,
                       scale,
                       scale_inv,
                       fwd_dtype=fwd_dtype,
                       bwd_dtype=bwd_dtype,
                       contracting_dims=contracting_dims,
                       sharding_type=sharding_type,
                       dp_axis_name="",
                       tp_axis_name="",
                       fsdp_axis_name="")
    else:
        dp_axis_name = "batch"
        tp_axis_name = "model"

        # Pick which kernel dimension is tensor-parallel: last for column
        # parallel, first for row parallel.
        kernel_tp_index = None
        # TODO (Ming Huang): Should we add a new argument to support general sharding to kernel?    # pylint: disable=fixme
        if sharding_type in (ShardingType.TP_COL, ShardingType.DP_TP_COL):
            kernel_tp_index = len(kernel.shape) - 1
        elif sharding_type in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW):
            kernel_tp_index = 0

        input_tp_index = len(inputs.shape) - 1
        sharding_meta = get_dot_sharding_meta(sharding_type, inputs.shape, kernel.shape,
                                              dp_dim_index, input_tp_index, kernel_tp_index,
                                              contracting_dims, dp_axis_name, tp_axis_name)
        sharding_meta, fsdp_axis_name = extend_fsdp_sharding_meta(sharding_meta, {0: dp_dim_index})
        inputs_ = jnp.reshape(inputs, sharding_meta.input_shapes[0])    # 0 for input
        kernel_ = jnp.reshape(kernel, sharding_meta.input_shapes[1])    # 1 for kernel

        num_of_fp8_meta_kind = 4    # fp8_max, amax, scale, scale_inv
        fp8_sharding_meta = get_fp8_meta_sharding_meta(sharding_type, num_of_fp8_meta_kind,
                                                       dp_axis_name, tp_axis_name)

        axis_resources = merge_axis_resources(
            [sharding_meta.axis_resources, fp8_sharding_meta.axis_resources])

        # Bind the static (non-differentiable) arguments, then run under xmap.
        partial_fp8_dot = partial(_fp8_dot,
                                  fwd_dtype=fwd_dtype,
                                  bwd_dtype=bwd_dtype,
                                  contracting_dims=contracting_dims,
                                  sharding_type=sharding_type,
                                  dp_axis_name=dp_axis_name,
                                  tp_axis_name=tp_axis_name,
                                  fsdp_axis_name=fsdp_axis_name)
        res = xmap_runner(partial_fp8_dot, (*sharding_meta.in_axes, *fp8_sharding_meta.in_axes),
                          sharding_meta.out_axes, axis_resources,
                          (inputs_, kernel_, fp8_max, amax, scale, scale_inv))

        res = jnp.reshape(res, sharding_meta.output_shapes[0])

    return res
# nondiff_argnums marks the dtype/sharding configuration arguments (positions
# 6..12) as static, non-differentiable inputs of the custom VJP.
@partial(jax.custom_vjp, nondiff_argnums=(6, 7, 8, 9, 10, 11, 12))
def _fp8_dot(inputs: jnp.ndarray, kernel: jnp.ndarray, fp8_maxs: jnp.ndarray, amax: jnp.ndarray,
             scale: jnp.ndarray, scale_inv: jnp.ndarray, fwd_dtype: TEDType, bwd_dtype: TEDType,
             contracting_dims: Tuple[Sequence[int], Sequence[int]], sharding_type: ShardingType,
             dp_axis_name: str, tp_axis_name: str, fsdp_axis_name: str):
    """Differentiable FP8 dot: primal evaluation delegates to _fp8_dot_fwd
    and discards the saved residuals."""
    res, _ = _fp8_dot_fwd(inputs,
                          kernel,
                          fp8_maxs,
                          amax,
                          scale,
                          scale_inv,
                          fwd_dtype,
                          bwd_dtype,
                          contracting_dims=contracting_dims,
                          sharding_type=sharding_type,
                          dp_axis_name=dp_axis_name,
                          tp_axis_name=tp_axis_name,
                          fsdp_axis_name=fsdp_axis_name)
    return res
def _fp8_dot_fwd(
        inputs,
        kernel,
        fp8_maxs,
        amax,
        scale,
        scale_inv,
        fwd_dtype,
        bwd_dtype,    # pylint: disable=unused-argument
        contracting_dims,
        sharding_type,
        dp_axis_name,    # pylint: disable=unused-argument
        tp_axis_name,
        fsdp_axis_name):    # pylint: disable=unused-argument
    """Forward pass: cast both operands to FP8, run the GEMM, save residuals
    (casted operands, fp8 meta, original shapes) for the backward pass."""
    lhs_contracting_dims, rhs_contracting_dims = contracting_dims
    # Split each shape into batch-like prefix and contracted suffix.
    input_shape_pre = inputs.shape[:min(lhs_contracting_dims)]
    input_shape_suf = inputs.shape[min(lhs_contracting_dims):]
    kernel_shape_pre = kernel.shape[:max(rhs_contracting_dims) + 1]
    kernel_shape_suf = kernel.shape[max(rhs_contracting_dims) + 1:]
    input_contracting_size = reduce(operator.mul, input_shape_suf)
    kernel_contracting_size = reduce(operator.mul, kernel_shape_pre)
    assert input_contracting_size == kernel_contracting_size
    # Collapse both operands to 2-D so a single GEMM covers the contraction.
    inputs_ = jnp.reshape(inputs, (-1, input_contracting_size))
    kernel_ = jnp.reshape(kernel, (kernel_contracting_size, -1))

    amax = FP8Helper.update_amax_history(amax)
    gemm_input_idx, gemm_kernel_idx, _ = FP8Helper.get_fp8_meta_indices(0)
    input_amax = amax[gemm_input_idx, 0:1]
    input_scale = scale[gemm_input_idx]
    input_scale_inv = scale_inv[gemm_input_idx]
    input_cast, input_cast_trans, input_amax = cast_transpose(inputs_, input_amax, input_scale,
                                                              input_scale_inv, fwd_dtype)
    kernel_amax = amax[gemm_kernel_idx, 0:1]
    kernel_scale = scale[gemm_kernel_idx]
    kernel_scale_inv = scale_inv[gemm_kernel_idx]
    kernel_cast, kernel_cast_trans, kernel_amax = cast_transpose(kernel_, kernel_amax, kernel_scale,
                                                                 kernel_scale_inv, fwd_dtype)
    res = gemm(kernel_cast_trans, kernel_scale_inv, fwd_dtype, True, input_cast, input_scale_inv,
               fwd_dtype, False, jax_dtype_to_te_dtype(inputs.dtype), FP8Helper.FP8_2X_ACC_FPROP)

    if sharding_type in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW):
        # Row-parallel kernels yield partial sums; reduce over the TP axis.
        res = jax.lax.psum(res, tp_axis_name)

    # (input_shape_pre, input_shape_suf)
    # x (kernel_shape_pre, kernel_shape_suf)
    # = (input_shape_pre, kernel_shape_suf)
    output_shape = input_shape_pre + kernel_shape_suf
    res = jnp.reshape(res, output_shape)
    ctx = (input_cast_trans, kernel_cast, fp8_maxs, amax, scale, scale_inv, input_amax, kernel_amax,
           inputs.shape, kernel.shape)
    return res, ctx
def _fp8_dot_bwd(
        fwd_dtype,
        bwd_dtype,
        contracting_dims,  # pylint: disable=unused-argument
        sharding_type,
        dp_axis_name,
        tp_axis_name,
        fsdp_axis_name,
        ctx,
        g):
    """Backward rule of the FP8 dot custom VJP.

    Computes the input gradient (dgrad) and weight gradient (wgrad) with FP8
    GEMMs, writes the freshly measured amax values back into the history, and
    applies the collective reductions required by the active sharding type.
    """
    # Unpack residuals saved by the forward pass.
    input_cast_trans, kernel_cast, \
        fp8_maxs, amax, scale, scale_inv, \
        input_amax, kernel_amax, \
        inputs_shape, kernel_shape = ctx
    gemm_input_idx, gemm_kernel_idx, gemm_grad_idx = FP8Helper.get_fp8_meta_indices(0)
    grad_amax = amax[gemm_grad_idx, 0:1]
    grad_scale = scale[gemm_grad_idx]
    grad_scale_inv = scale_inv[gemm_grad_idx]
    # Flatten the incoming cotangent to 2D so it matches the flattened
    # operands used by the forward GEMM.
    g = jnp.reshape(g, (input_cast_trans.shape[1], -1))
    grad_cast, grad_cast_trans, grad_amax = cast_transpose(g, grad_amax, grad_scale, grad_scale_inv,
                                                           bwd_dtype)
    input_scale_inv = scale_inv[gemm_input_idx]
    # Weight gradient GEMM (uses the transposed grad and transposed input).
    wgrad = gemm(grad_cast_trans, grad_scale_inv, bwd_dtype,
                 True, input_cast_trans, input_scale_inv, fwd_dtype, False,
                 jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_WGRAD)
    kernel_scale_inv = scale_inv[gemm_kernel_idx]
    # Input gradient GEMM.
    dgrad = gemm(kernel_cast, kernel_scale_inv, fwd_dtype, True, grad_cast, grad_scale_inv,
                 bwd_dtype, False, jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_DGRAD)
    # Record the new amax observations in slot 0 of the history.
    amax = amax.at[gemm_input_idx, 0].set(input_amax[0])
    amax = amax.at[gemm_kernel_idx, 0].set(kernel_amax[0])
    amax = amax.at[gemm_grad_idx, 0].set(grad_amax[0])
    if is_dp_enabled(sharding_type.value[0]):
        # Data parallel: sum weight grads, elementwise-max the amax stats.
        wgrad = jax.lax.psum(wgrad, dp_axis_name)
        amax = jax.lax.pmax(amax, dp_axis_name)
    if len(fsdp_axis_name) > 0:
        wgrad = jax.lax.psum(wgrad, fsdp_axis_name)
        amax = jax.lax.pmax(amax, fsdp_axis_name)
    if is_tp_enabled(sharding_type.value[0]):
        amax = jax.lax.pmax(amax, tp_axis_name)
    if sharding_type in (ShardingType.TP_COL, ShardingType.DP_TP_COL):
        # Column tensor parallelism: dgrad is a partial sum across TP ranks.
        dgrad = jax.lax.psum(dgrad, tp_axis_name)
    # Restore the callers' original (unflattened) shapes.
    dgrad = jnp.reshape(dgrad, inputs_shape)
    wgrad = jnp.reshape(wgrad, kernel_shape)
    return dgrad, wgrad, fp8_maxs, amax, scale, scale_inv
# Register the forward/backward implementations for the fp8 dot custom VJP.
_fp8_dot.defvjp(_fp8_dot_fwd, _fp8_dot_bwd)
| NVIDIA/TransformerEngine | transformer_engine/jax/dot.py | dot.py | py | 9,710 | python | en | code | 1,056 | github-code | 36 | [
{
"api_name": "jax.config.update",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "jax.config",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "jax.config.update",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jax.config",
... |
818390269 | """ Simple static vocabularies
"""
from eea.faceted.vocabularies.utils import IVocabularyFactory
from zope.interface import implements
from zope.schema.vocabulary import SimpleVocabulary
from zope.schema.vocabulary import SimpleTerm
from eea.faceted.vocabularies import EEAMessageFactory as _
#
# Use catalog
#
class UseCatalogVocabulary(object):
    """Yes/no vocabulary selecting whether queries go through portal_catalog."""
    implements(IVocabularyFactory)

    def __call__(self, context=None):
        """See IVocabularyFactory interface."""
        choices = (
            ('', _('No')),
            ('portal_catalog', _('Yes')),
        )
        terms = [SimpleTerm(token, token, title) for token, title in choices]
        return SimpleVocabulary(terms)

UseCatalogVocabularyFactory = UseCatalogVocabulary()
#
# JsTree themes
#
class JsTreeThemes(object):
    """Vocabulary listing the available jsTree widget themes."""
    implements(IVocabularyFactory)

    def __call__(self, context=None):
        """See IVocabularyFactory interface."""
        themes = (
            ('default', _('Default')),
            ('classic', _('Classic')),
            ('apple', _('Apple')),
            ('green', _('Green')),
        )
        terms = [SimpleTerm(token, token, title) for token, title in themes]
        return SimpleVocabulary(terms)

JsTreeThemesFactory = JsTreeThemes()
| RedTurtle/eea.faceted.vocabularies | eea/faceted/vocabularies/simple.py | simple.py | py | 1,247 | python | en | code | null | github-code | 36 | [
{
"api_name": "zope.interface.implements",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "eea.faceted.vocabularies.utils.IVocabularyFactory",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "zope.schema.vocabulary.SimpleTerm",
"line_number": 22,
"u... |
22985633862 | import os
import yaml
class TestConfig:
    """Reads test settings from the ``test_config.yaml`` file next to this module."""

    def get_config(self, property_name: str):
        """Return the configured value for *property_name*.

        Raises:
            RuntimeError: if the property is absent from the config file.
        """
        data = self.get_config_data()
        # Membership test instead of truthiness so legitimately falsy values
        # (0, False, "", empty list) stored in the config are returned
        # rather than rejected as "Incorrect Config".
        if property_name in data:
            return data[property_name]
        raise RuntimeError("Incorrect Config")

    @staticmethod
    def get_config_data():
        """Load and return the entire YAML config file as a dict."""
        cur_dir = os.path.dirname(os.path.realpath(__file__))
        file_name = "test_config.yaml"
        complete_path = os.path.join(cur_dir, file_name)
        with open(complete_path, 'r') as file:
            return yaml.safe_load(file)
| muthukrishnanmce/python_selenium_pom | utilities/test_config.py | test_config.py | py | 566 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
25210551689 | #!/usr/bin/env python3
"""Train several models for phone-play detector using synthetic data"""
from os import path
import random
import pickle
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from hmmlearn.hmm import MultinomialHMM
from map_pred import map_pred
from infer import infer
# Load dataset
# Resolve the project root relative to this script so it can be run from
# any working directory.
project_dir = path.dirname(path.dirname(path.abspath(__file__)))
with open(path.join(project_dir, "data/synthetic.npy"), "rb") as f:
    dataset = pickle.load(f)
# Split dataset to training set and test set (80/20 after shuffling).
random.shuffle(dataset)
split_pos = int(len(dataset) * 0.8)
feature_train = np.vstack([d["X"] for d in dataset[:split_pos]])
feature_test = np.vstack([d["X"] for d in dataset[split_pos:]])
target_train = np.hstack([d["label"] for d in dataset[:split_pos]])
target_test = np.hstack([d["label"] for d in dataset[split_pos:]])
# Train scaler on the training split only (avoids test-set leakage).
scaler = StandardScaler()
scaler.fit(feature_train)
feature_train = scaler.transform(feature_train)
# Train random forest classifier
clf = RandomForestClassifier()
clf.fit(feature_train, target_train)
# Train HMM on the discretized classifier outputs; it smooths the
# frame-level predictions over time.
pred_probs = clf.predict_proba(feature_train)[:, 1]
pred_labels = np.array([map_pred(x) for x in pred_probs], dtype=np.int64)
hmm = MultinomialHMM(n_components=2,
                     startprob_prior=np.array([0.5, 0.5]),
                     transmat_prior=np.array([
                         [0.8, 0.2],
                         [0.2, 0.8],
                     ]))
hmm.fit(pred_labels.reshape(-1, 1))
# Evaluation of the entire procedure on the held-out split.
# NOTE(review): feature_test is passed unscaled here; presumably infer()
# applies the scaler itself -- confirm against infer's implementation.
predict_results = infer(feature_test, scaler, clf, hmm)
print(classification_report(target_test, predict_results))
# Persist all three models for the inference pipeline.
pickle.dump(scaler, open(path.join(project_dir, "models/scaler.pkl"), "wb"))
pickle.dump(clf, open(path.join(project_dir, "models/clf.pkl"), "wb"))
pickle.dump(hmm, open(path.join(project_dir, "models/hmm.pkl"), "wb"))
| bxkftechteam/onnx-ml-demo | train/train.py | train.py | py | 1,981 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numb... |
44095373893 | from typing import List
from test_framework import generic_test
import math
def maximum_revenue(coins: List[int]) -> int:
    """Maximum total the first player can guarantee when two players
    alternately take a coin from either end, both playing optimally.
    """
    cache = {}

    def best(lo: int, hi: int) -> int:
        # Empty window: nothing left to collect.
        if lo > hi:
            return 0
        if (lo, hi) not in cache:
            # After our pick the opponent moves to minimize what we get next.
            take_front = coins[lo] + min(best(lo + 2, hi), best(lo + 1, hi - 1))
            take_back = coins[hi] + min(best(lo, hi - 2), best(lo + 1, hi - 1))
            cache[(lo, hi)] = max(take_front, take_back)
        return cache[(lo, hi)]

    return best(0, len(coins) - 1)
if __name__ == '__main__':
    # Run the EPI test harness against the bundled test vectors.
    exit(
        generic_test.generic_test_main('picking_up_coins.py',
                                       'picking_up_coins.tsv',
                                       maximum_revenue))
| kchen1025/Python-EPI | epi_judge_python/picking_up_coins.py | picking_up_coins.py | py | 829 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "test_framework.generic_test.generic_test_main",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "test_framework.generic_test",
"line_number": 26,
"usage_type": "name"
}
] |
4090267017 | # This code is mostly from here: https://thedatafrog.com/en/articles/word-embedding-sentiment-analysis/
# Adapted for Ling471 by Olga Zamaraeva
# May 2021
import matplotlib.pyplot as plt
import os
import math
# you must install tensorflow version 2.5.0 (the latest one)
import tensorflow as tf
from tensorflow import keras
import numpy as np
from numpy.random import seed
# DEADBEEF is a famous hexadecimal number :). See here: https://en.wikipedia.org/wiki/Deadbeef
seed(0xdeadbeef)
# In order to minimize randomness where we can, we will tell numpy and tensorflow to always start in the same place.
tf.random.set_seed(0xdeadbeef)
def plot_review(idx):
    """Scatter-plot the 2-D word embeddings of test review *idx*.

    Uses the module-level ``test_data``/``test_labels`` arrays and the
    ``get_embed_out`` function; saves the figure as ``Review<idx>.png``.
    """
    # plot the distribution of points
    enc_words = test_data[idx]
    emb_words = get_embed_out([enc_words])[0]
    plt.figure(figsize=(8, 8))
    plt.scatter(emb_words[:, 0], emb_words[:, 1])
    # use the label as title: 1 is positive,
    # 0 is negative
    plt.title(test_labels[idx])
    # for words that are far enough from (0,0),
    # print the word
    for i, (enc_word, emb_word) in enumerate(zip(enc_words, emb_words)):
        word = index[enc_word]
        x, y = emb_word
        # 0.2 is the distance threshold for labelling a point.
        if math.sqrt(x**2 + y**2) > 0.2:
            plt.annotate(word, (x, y))
    # fix the range in x and y to be able to compare
    # the distributions of different reviews
    axes = plt.gca()
    axes.set_xlim([-0.5, 0.5])
    axes.set_ylim([-0.5, 0.5])
    axes.set_aspect('equal', adjustable='box')
    plt.savefig('Review{}.format'.replace('format', '') + '')
def decode_review(text):
    """Convert an encoded review back to a human-readable string.

    Each integer in *text* is looked up in the module-level ``index``
    mapping and replaced by the corresponding word ('?' when unknown).
    """
    pieces = []
    for code in text:
        pieces.append(index.get(code, '?'))
    return ' '.join(pieces)
def plot_accuracy(history, miny=None):
    """Plot training vs validation accuracy per epoch; save as accuracy.png.

    Args:
        history: Keras ``History`` object returned by ``model.fit``.
        miny: optional lower y-axis bound; the upper bound is fixed at 1.0.
    """
    acc = history.history['accuracy']
    test_acc = history.history['val_accuracy']
    epochs = range(len(acc))
    plt.plot(epochs, acc)
    plt.plot(epochs, test_acc)
    # Compare against None explicitly so a legitimate lower bound of 0.0
    # (which is falsy) still adjusts the axis.
    if miny is not None:
        plt.ylim(miny, 1.0)
    plt.title('accuracy')
    plt.xlabel('epoch')
    plt.savefig('accuracy.png')
# Another version of the familiar dataset. It is not a pandas object but a keras object.
imdb = keras.datasets.imdb
num_words = 20000
# Fixed seed so the split is reproducible; keep only the num_words most
# frequent words.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
    seed=1, num_words=num_words)
# print(train_data[0])
# print('label:', train_labels[0])
# A dictionary mapping words to an integer index
vocabulary = imdb.get_word_index()
# The first indices are reserved
vocabulary = {k: (v+3) for k, v in vocabulary.items()}
# Smoothing and n-gram support.
vocabulary["<PAD>"] = 0
vocabulary["<START>"] = 1
vocabulary["<UNK>"] = 2  # unknown
vocabulary["<UNUSED>"] = 3
# reversing the vocabulary.
# in the index, the key is an integer,
# and the value is the corresponding word.
index = dict([(value, key) for (key, value) in vocabulary.items()])
# print(decode_review(train_data[0]))
# Pad/truncate every review to exactly 256 tokens so they batch cleanly.
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
                                                        value=vocabulary["<PAD>"],
                                                        padding='post',
                                                        maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
                                                       value=vocabulary["<PAD>"],
                                                       padding='post',
                                                       maxlen=256)
# print(train_data[1])
model = keras.Sequential()
# the first layer is the embedding layer.
# we indicate the number of possible words,
# the dimension of the embedding space,
# and the maximum size of the text.
model.add(keras.layers.Embedding(len(vocabulary), 2, input_length=256))
# the output of the embedding is multidimensional,
# with shape (256, 2)
# for each word, we obtain two values,
# the x and y coordinates
# we flatten this output to be able to
# use it in a dense layer
model.add(keras.layers.Flatten())
# dropout regularization
model.add(keras.layers.Dropout(rate=0.5))
# small dense layer. It's role is to analyze
# the distribution of points from embedding
model.add(keras.layers.Dense(5))
# final neuron, with sigmoid (similar to logistic function) activation
# for binary classification
model.add(keras.layers.Dense(1, activation='sigmoid'))
# print(model.summary())
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_data,
                    train_labels,
                    epochs=5,
                    batch_size=100,
                    validation_data=(test_data, test_labels),
                    verbose=1)
plot_accuracy(history)
# This code will return a resulting embedding given X and Y coordinates of a word.
# We mapped words to 2D!
get_embed_out = keras.backend.function(
    [model.layers[0].input],
    [model.layers[1].output])
layer_output = get_embed_out(test_data[0])
# print(type(layer_output), len(layer_output), layer_output[0].shape)
words = layer_output[0]
plt.cla()
# [:,0] is so called "slicing" notation in python.
# It means the first column in a matrix, so, all rows in column 0.
plt.scatter(words[:, 0], words[:, 1])
plt.savefig('words.png')
# Plot reviews number 15 and 17:
plot_review(15)
plot_review(17)
# A fake/dummy "test" review. We assume we've seen most of these words in training!
review = ['great', 'brilliant', 'crap', 'bad',
          'fantastic', 'movie', 'seagal']
# Use our pretrained encodings to get an encoding for this unseen review:
enc_review = tf.constant([vocabulary[word] for word in review])
# print(enc_review)
words = get_embed_out([enc_review])[0]
plt.cla()  # clear the previous plot "canvas"
plt.scatter(words[:, 0], words[:, 1])
# The loop will annotate each point in the 2D plot.
# Note the enumerate() function; it is very convenient.
# It returnes a tuple: an object from the list as well as its index in the list.
# So, at the first iteration txt will be "great" and i will be 0,
# because "great" is the first word in our review.
for i, txt in enumerate(review):
    plt.annotate(txt, (words[i, 0], words[i, 1]), ha='center')
plt.savefig('my_review1.png')
# TODO: Write your own "review" similar to above, with some words which interest you,
# how they will map out in the vector space that you trained on IMDB.
# Then uncomment the remaining code and observe what happens.
my_review = []
# enc_review = tf.constant([vocabulary[word] for word in my_review])
# # print(enc_review)
# words = get_embed_out([enc_review])[0]
# plt.cla()  # clear the previous plot "canvas"
# plt.scatter(words[:, 0], words[:, 1])
# for i, txt in enumerate(review):
#     plt.annotate(txt, (words[i, 0], words[i, 1]), ha='center')
# plt.savefig('my_review2.png')
| sam-testings/Ling471-SP2021-HW5 | imdb_neural.py | imdb_neural.py | py | 6,831 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tensorflow.random.set_seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tensorflow.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "m... |
24499826950 | from django.shortcuts import render, redirect
from .models import Notes, Teacher ,Student, Pdfbooks, Papers, User, Answer, Post
from .forms import ContributionNoteForm, SignUpForm, ContributionBookForm,SignUpFormFaculty, PostForm, AnswerForm, ContributionPaperForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import AuthenticationForm
from django.contrib import messages
from django.shortcuts import get_object_or_404
from django.db.models import F
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.views.generic import CreateView
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils import timezone
from django.db.models import Q
from django.http import HttpResponseRedirect
import requests
from bs4 import BeautifulSoup
def home(request):
    """Landing page; also processes the embedded login form on POST."""
    if request.method == 'POST':
        # Removed leftover debug print(form) from production view code.
        form = AuthenticationForm(request=request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('home')
    # NOTE(review): a fresh (unbound) form is always rendered, so login
    # validation errors are discarded -- confirm this is intentional.
    form = AuthenticationForm()
    return render(request, "Notes/home.html", {"form": form})
def notes(request):
    """Entry page asking the user to pick a year/branch for notes."""
    return render(request, 'Notes/notesyearwise.html')
def notesyrbranch(request, year, branch):
    """List notes for the given year and branch, ordered by subject."""
    notes_cs_details = Notes.objects.filter(branch=branch).filter(year=year).order_by('subject')
    return render(request, 'Notes/notes.html/', {'notes_cs_details': notes_cs_details, 'year': year, 'branch': branch})
def likes_notes(request, year, branch):
    """Like a note; a prior dislike by the same user is replaced by the like."""
    # Removed leftover debug print("H") from the dislike branch.
    notes = get_object_or_404(Notes, id=request.POST.get('notes_id'))
    if request.user in notes.disliked.all():
        # Switching from dislike to like: drop the dislike first.
        notes.disliked.remove(request.user)
    notes.liked.add(request.user)
    return redirect('notesyrbranch', year, branch)
def dislikes_notes(request, year, branch):
    """Dislike a note, removing the user's earlier like if there was one."""
    note = get_object_or_404(Notes, id=request.POST.get('notes_id'))
    voter = request.user
    if voter in note.liked.all():
        note.liked.remove(voter)
    note.disliked.add(voter)
    return redirect('notesyrbranch', year, branch)
def books(request):
    """Entry page for browsing PDF books by year."""
    return render(request, 'Notes/bookyearwise.html')
def Notes_form(request):
    """Handle uploads of contributed notes.

    On success redirects to the home view (consistent with Books_form and
    Papers_form); on validation failure re-renders the form with errors.
    """
    form = ContributionNoteForm()
    if request.method == "POST":
        # Removed leftover debug print(form).
        form = ContributionNoteForm(request.POST, request.FILES)
        if form.is_valid():
            notes = form.save(commit=False)
            notes.save()
            # redirect instead of rendering home.html directly, so the home
            # view supplies its own context (matches the sibling form views).
            return redirect('home')
    teachers = Teacher.objects.all()
    return render(request, 'Notes/contributionform.html', {'form': form, 'teachers': teachers})
class StudentSignupView(CreateView):
    """Sign-up page for students; logs the new user in and redirects home."""
    model = User
    form_class = SignUpForm
    template_name = 'Notes/signup.html'

    def form_valid(self, form):
        # Create the account, authenticate the session, then redirect.
        user = form.save()
        login(self.request, user)
        return redirect('home')
class TeacherSignupView(CreateView):
    """Sign-up page for faculty; logs the new user in and redirects home."""
    model = User
    form_class = SignUpFormFaculty
    template_name = 'Notes/signupteacher.html'

    def form_valid(self, form):
        # Create the account, authenticate the session, then redirect.
        user = form.save()
        login(self.request, user)
        return redirect('home')
def logout_account(request):
    """End the current session and return to the landing page."""
    logout(request)
    return redirect('home')
def booksyrbranch(request, year, branch):
    """List PDF books for the given year/branch, ordered by subject."""
    books_cs_details = (
        Pdfbooks.objects.filter(branch=branch, year=year).order_by('subject')
    )
    context = {'books_cs_details': books_cs_details, 'year': year, 'branch': branch}
    return render(request, 'Notes/books.html/', context)
def likes_books(request, year, branch):
    """Like a book, clearing any earlier dislike by the same user."""
    book = get_object_or_404(Pdfbooks, id=request.POST.get('books_id'))
    voter = request.user
    if voter in book.disliked.all():
        book.disliked.remove(voter)
    book.liked.add(voter)
    return redirect('booksyrbranch', year, branch)
def dislikes_books(request, year, branch):
    """Dislike a book, clearing any earlier like by the same user."""
    book = get_object_or_404(Pdfbooks, id=request.POST.get('books_id'))
    voter = request.user
    if voter in book.liked.all():
        book.liked.remove(voter)
    book.disliked.add(voter)
    return redirect('booksyrbranch', year, branch)
def Books_form(request):
    """Handle uploads of contributed PDF books."""
    if request.method != "POST":
        # First visit: show an empty upload form.
        return render(request, 'Notes/contributionbookform.html',
                      {'form': ContributionBookForm()})
    form = ContributionBookForm(request.POST, request.FILES)
    if form.is_valid():
        book = form.save(commit=False)
        book.save()
        return redirect('home')
    # Invalid submission: re-render with validation errors.
    return render(request, 'Notes/contributionbookform.html', {'form': form})
def Papers_form(request):
    """Handle uploads of contributed exam papers."""
    if request.method != "POST":
        # First visit: show an empty upload form.
        return render(request, 'Notes/contributionpaperform.html',
                      {'form': ContributionPaperForm()})
    form = ContributionPaperForm(request.POST, request.FILES)
    if form.is_valid():
        paper = form.save(commit=False)
        paper.save()
        return redirect('home')
    # Invalid submission: re-render with validation errors.
    return render(request, 'Notes/contributionpaperform.html', {'form': form})
def papers(request):
    """Entry page for browsing exam papers by year."""
    return render(request, 'Notes/paperyearwise.html')
def papersyrbranch(request, year, branch):
    """List exam papers for the given year/branch.

    Orders by upload type, then subject. Chained ``order_by`` calls replace
    one another in Django, so the original effectively ordered by subject
    only; a single multi-field call expresses the intended ordering.
    """
    papers_cs_details = Papers.objects.filter(branch=branch).filter(year=year).order_by('Type_of_upload', 'subject')
    return render(request, 'Notes/papers.html/', {'papers_cs_details': papers_cs_details, 'year': year, 'branch': branch})
def facultylist(request, branch):
    """Show all teachers belonging to *branch*."""
    staff = Teacher.objects.filter(Department=branch)
    return render(request, 'Notes/faculty_list.html/',
                  {'facultydetails': staff, 'branch': branch})
def likes_papers(request, year, branch):
    """Like a paper, clearing any earlier dislike by the same user."""
    paper = get_object_or_404(Papers, id=request.POST.get('papers_id'))
    voter = request.user
    if voter in paper.disliked.all():
        paper.disliked.remove(voter)
    paper.liked.add(voter)
    return redirect('papersyrbranch', year, branch)
def dislikes_papers(request, year, branch):
    """Dislike a paper, clearing any earlier like by the same user."""
    paper = get_object_or_404(Papers, id=request.POST.get('papers_id'))
    voter = request.user
    if voter in paper.liked.all():
        paper.liked.remove(voter)
    paper.disliked.add(voter)
    return redirect('papersyrbranch', year, branch)
def announcement(request):
    """Static announcements page."""
    return render(request, 'Notes/announcements.html')
def post_list(request):
    """List published posts, optionally filtered by space-separated tag
    names given in the ``q`` query parameter; paginated 3 per page.
    """
    # Removed leftover debug prints of the query and result set.
    post_list = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    query = request.GET.get('q')
    if query:
        tag_names = query.split(" ")
        post_list = Post.objects.filter(
            Q(tags__name__in=tag_names)).distinct().order_by('published_date')
        if not post_list:
            # No posts match the requested tags: render the empty list.
            return render(request, 'Notes/discussion_list.html', {'posts': post_list})
    paginator = Paginator(post_list, 3, allow_empty_first_page=True)
    page = request.GET.get('page', 1)
    # Handle bad page values with the exceptions this module already imports.
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        posts = paginator.page(1)
    except EmptyPage:
        posts = paginator.page(paginator.num_pages)
    return render(request, 'Notes/discussion_list.html', {'posts': posts})
def post_detail(request, pk):
    """Show a single post with its answers and an inline answer form.

    On POST a valid answer is saved and the same page is re-rendered.
    NOTE(review): there is no redirect after a successful save, so a browser
    refresh re-submits the form (no POST/redirect/GET) -- confirm intended.
    """
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = AnswerForm(request.POST)
        if form.is_valid():
            answer = form.save(commit=False)
            answer.user = request.user
            answer.created_date = timezone.now()
            answer.post = post
            answer.save()
    else:
        form = AnswerForm()
    return render(request, 'Notes/discussion_detail.html', {'post': post, 'form': form, })
def post_new(request):
    """Create a new discussion post (with tags) for the logged-in user."""
    if request.method == "POST":
        form = PostForm(request.POST, request.FILES)
        if form.is_valid():
            post = form.save(commit=False)
            post.user = request.user
            post.published_date = timezone.now()
            post.save()
            # save_m2m() persists the tags skipped by commit=False above.
            form.save_m2m()
            return redirect('disccusion_detail', pk=post.pk)
    else:
        form = PostForm()
    return render(request, 'Notes/ask_doubt.html', {'form': form})
def post_edit(request, pk):
    """Edit an existing post; on success return to its detail page."""
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            # Field name made consistent with post_new(), which assigns
            # ``post.user`` (not ``post.author``).
            post.user = request.user
            post.published_date = timezone.now()
            post.save()
            # URL name made consistent with post_new()'s redirect target.
            return redirect('disccusion_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'Notes/discussion_edit.html', {'form': form})
def add_answer_to_post(request, pk):
    """Attach a new answer to post *pk*, then return to the discussion page.

    Fixes in this revision:
    - build the Answer instance via ``form.save(commit=False)`` instead of
      using the raw cleaned text (a plain string has no ``.user``/``.save``);
    - look the post up into ``post`` (the original assigned to ``ost``,
      leaving ``post`` undefined at the point of use);
    - redirect with the named URL instead of a literal
      ``'reverse(...)'`` string, which is not a valid URL.
    """
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = AnswerForm(request.POST)
        if form.is_valid():
            answer = form.save(commit=False)
            answer.user = request.user
            answer.created_date = timezone.now()
            answer.post = post
            answer.save()
            return redirect('disccusion_detail', pk=pk)
    else:
        form = AnswerForm()
    return render(request, 'Notes/discussion_detail', {'form': form})
def likes_answer(request, pk):
    """Like an answer, removing the user's previous dislike if present."""
    ans = get_object_or_404(Answer, id=request.POST.get('answer_id'))
    voter = request.user
    if voter in ans.disliked.all():
        ans.disliked.remove(voter)
    ans.liked.add(voter)
    return redirect('disccusion_detail', pk)
def dislikes_answer(request, pk):
    """Dislike an answer, removing the user's previous like if present."""
    ans = get_object_or_404(Answer, id=request.POST.get('answer_id'))
    voter = request.user
    if voter in ans.liked.all():
        ans.liked.remove(voter)
    ans.disliked.add(voter)
    return redirect('disccusion_detail', pk)
def news(request):
    """Fetch top Indian technology headlines from NewsAPI and render them.

    Fixes in this revision:
    - iterate the articles directly and use ``dict.get`` so a missing field
      appends ``None`` instead of being skipped (skipping desynchronized the
      parallel lists that are zipped together below);
    - removed leftover debug print of the author list.

    SECURITY: the NewsAPI key is hardcoded in the URL; it should be moved to
    settings/environment configuration.
    """
    url = ('http://newsapi.org/v2/top-headlines?country=in&category=technology&apiKey=a9e121ac903943919a0490ef5a38ef75')
    response = requests.get(url).json()
    author = []
    title = []
    description = []
    urls = []
    publishedAt = []
    for article in response.get("articles", []):
        author.append(article.get("author"))
        title.append(article.get("title"))
        description.append(article.get("description"))
        urls.append(article.get("url"))
        # Keep only the date portion (YYYY-MM-DD) of the ISO timestamp.
        published = article.get("publishedAt") or ''
        publishedAt.append(published[:10])
    context = {}
    context['zipped_data'] = zip(author, title, description, urls, publishedAt)
    return render(request, 'Notes/news.html', context)
| TanuAgrawal123/StudyApp | Notes/views.py | views.py | py | 10,440 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 29,
"usage_type"... |
19780157392 | from torch_geometric.data import InMemoryDataset
import os.path as osp
import torch
from tqdm import tqdm
import argparse
import os, sys
sys.path.append('./data/graph_construction/prepare_notes')
from ConstructDatasetByNotes import *
IMDB_PATH = './data/IMDB_HCUT' # path to save output hypergraphs
PRE_PATH = './data/DATA_PRE'
RAW_PATH = './data/DATA_RAW'
class PygNotesGraphDataset(InMemoryDataset):
    """PyG dataset that builds (hyper)graphs from clinical note lists.

    Graphs are constructed with ``ConstructDatasetByNotes`` during
    ``process()`` and the collated tensors are cached under
    ``IMDB_PATH/<name>/<split>_<data_type>``.
    """

    def __init__(self, name, split, tokenizer, dictionary, data_type='hyper', transform=None, pre_transform=None):
        self.imdb_path = osp.join(IMDB_PATH, name)
        self.name = name  # task name, e.g. in-hospital-mortality
        self.split = split  # train / test
        self.tokenizer = tokenizer
        self.dictionary = dictionary
        self.data_type = data_type
        self.pre_path = osp.join(PRE_PATH)
        super(PygNotesGraphDataset, self).__init__(osp.join(self.imdb_path, f'{self.split}_{self.data_type}'), transform, pre_transform)
        # Load the collated dataset produced by process().
        self.data, self.slices = torch.load(osp.join(self.processed_dir, self.processed_file_names))

    @property
    def raw_file_names(self):
        # No raw files: everything is generated from the prepared notes.
        return []

    @property
    def processed_dir(self):
        return osp.join(self.imdb_path)

    @property
    def processed_file_names(self):
        if self.data_type == 'hyper':
            return f'{self.split}_{self.data_type}/{self.split}_{self.data_type}.pt'
        else:
            return f'{self.split}.pt'

    def process(self):
        """Build the graph data list from the note lists and save it to disk."""
        # construct graph by note list.
        cdbn = ConstructDatasetByNotes(pre_path=self.pre_path, split=self.split, dictionary=self.dictionary, task=self.name)
        # create categories.txt
        if self.split == 'train':
            cdbn.create_all_cats(path=self.pre_path)
            print("categories.txt created")
        if self.data_type == 'hyper':
            print("Data Type : hyper")
            data_list = cdbn.construct_hypergraph_datalist()
        else:
            # NOTE(review): this ValueError is constructed but never raised,
            # so execution continues with data_list undefined.
            ValueError('error type, must be one of {cooc, hyper}...')
        print('\n' + '<Collate Data List...>')
        data, slices = self.collate(data_list)
        print('\n' + '<Collate Done, Start Saving...>')
        torch.save((data, slices), osp.join(self.processed_dir, self.processed_file_names))  # '/IMDB/in-hospital-mortality' + 'train.pt'
        print('Saving Done')
        print('<<Created', self.split, 'Cutoff HyperGraph Dataset from Note List>>')
class Load_PygNotesGraphDataset(InMemoryDataset):
    """Load-only variant of ``PygNotesGraphDataset``.

    Reads a previously processed ``<split>_<data_type>.pt`` file without
    defining a ``process()`` step of its own.
    """

    def __init__(self, name, split, tokenizer, dictionary, data_type='hyper', transform=None, pre_transform=None):
        self.imdb_path = osp.join(IMDB_PATH, name)
        self.name = name  # task name, e.g. in-hospital-mortality
        self.split = split  # train / test
        self.tokenizer = tokenizer
        self.dictionary = dictionary
        self.data_type = data_type
        self.pre_path = osp.join(PRE_PATH)
        super(Load_PygNotesGraphDataset, self).__init__(self.imdb_path, transform, pre_transform)
        # Load the previously collated dataset from disk.
        self.data, self.slices = torch.load(osp.join(self.processed_dir, self.processed_file_names))

    @property
    def raw_file_names(self):
        # No raw files: the processed file is expected to exist already.
        return []

    @property
    def processed_dir(self):
        return osp.join(self.imdb_path)

    @property
    def processed_file_names(self):
        if self.data_type == 'hyper':
            return f'{self.split}_{self.data_type}/{self.split}_{self.data_type}.pt'
        else:
            return f'{self.split}.pt'
if __name__ == '__main__':
    # CLI entry point: build the hypergraph dataset for one split.
    parser = argparse.ArgumentParser(description="Create data for in-hospital mortality prediction task.")
    parser.add_argument('--name', type=str, default='in-hospital-mortality')
    parser.add_argument('--pre_path', type=str, default=PRE_PATH)
    parser.add_argument('--split', type=str, default='train')
    parser.add_argument('--action', type=str, default='create')
    args, _ = parser.parse_known_args()
    # vocabs generated from benchmark codes
    dictionary = open(os.path.join('./data/DATA_RAW/root/vocab.txt')).read().split()
    if args.action == 'create':
        PygNotesGraphDataset(name='in-hospital-mortality', split=args.split, tokenizer='word2vec', dictionary=dictionary)
    else:
        # NOTE(review): this exception is instantiated but never raised.
        NotImplementedError('error action!')
| ny1031/TM-HGNN | graph_construction/prepare_notes/PygNotesGraphDataset.py | PygNotesGraphDataset.py | py | 4,404 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch_geometric.data.InMemoryDataset",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.p... |
2465730198 | import copy
import cv2
import glob
import os
import pathlib
import sys
import time
import xml
import xml.etree.ElementTree as ET
from . import utils
from .. import procs
# Add import path for submodules
currentdir = pathlib.Path(__file__).resolve().parent
sys.path.append(str(currentdir) + "/../../submodules/separate_pages_ssd")
sys.path.append(str(currentdir) + "/../../submodules/ndl_layout")
sys.path.append(str(currentdir) + "/../../submodules/deskew_HT")
sys.path.append(str(currentdir) + "/../../submodules/text_recognition_lightning")
sys.path.append(str(currentdir) + "/../../submodules/reading_order")
# supported image type list
supported_img_ext = ['.jpg', '.jpeg', '.jp2']
class OcrInferrer:
"""
推論実行時の関数や推論の設定値を保持します。
Attributes
----------
full_proc_list : list
全推論処理のリストです。
proc_list : list
本実行処理における推論処理のリストです。
cfg : dict
本実行処理における設定情報です。
"""
def __init__(self, cfg):
"""
Parameters
----------
cfg : dict
本実行処理における設定情報です。
"""
# inference process class list in order
self.full_proc_list = [
procs.PageSeparation, # 0: ノド元分割 出力:(画像:あり、XML:なし、TXT:なし)
procs.PageDeskewProcess, # 1: 傾き補正 出力:(画像:あり、XML:なし、TXT:なし)
procs.LayoutExtractionProcess, # 2: レイアウト抽出 出力:(画像:あり、XML:あり、TXT:なし)
procs.LineOcrProcess, # 3: 文字認識(OCR) 出力:(画像:あり、XML:あり、TXT:あり)
]
self.proc_list = self._create_proc_list(cfg)
self.cfg = cfg
self.total_time_statistics = []
self.proc_time_statistics = {}
for proc in self.proc_list:
self.proc_time_statistics[proc.proc_name] = []
self.xml_template = '<?xml version="1.0" encoding="utf-8" standalone="yes"?>\n<OCRDATASET></OCRDATASET>'
def run(self):
"""
self.cfgに保存された設定に基づいた推論処理を実行します。
"""
if len(self.cfg['input_dirs']) == 0:
print('[ERROR] Input directory list is empty', file=sys.stderr)
return
# input dir loop
for input_dir in self.cfg['input_dirs']:
if self.cfg['input_structure'] in ['t']:
single_outputdir_data_list = self._get_single_dir_data_from_tosho_data(input_dir)
else:
single_outputdir_data_list = self._get_single_dir_data(input_dir)
if single_outputdir_data_list is None:
print('[ERROR] Input data list is empty', file=sys.stderr)
continue
print(single_outputdir_data_list)
# do infer with input data for single output data dir
for single_outputdir_data in single_outputdir_data_list:
if single_outputdir_data is None:
continue
if self.cfg['ruby_only']:
pred_list = self._infer_ruby_only(single_outputdir_data)
else:
pred_list = self._infer(single_outputdir_data)
# save inferenced xml in xml directory
if (self.cfg['save_xml'] or self.cfg['partial_infer']) and (self.cfg['proc_range']['end'] > 1):
self._save_pred_xml(single_outputdir_data['output_dir'], [single_data['xml'] for single_data in pred_list], self.cfg['line_order'])
if len(self.total_time_statistics) == 0:
print('================== NO VALID INFERENCE ==================')
else:
print('================== PROCESSING TIME ==================')
for proc_name, proc_time_list in self.proc_time_statistics.items():
proc_averaege = sum(proc_time_list) / len(proc_time_list)
print(f'Average processing time ({proc_name})'.ljust(45, ' ') + f': {proc_averaege:8.4f} sec / image file ')
total_average = sum(self.total_time_statistics) / len(self.total_time_statistics)
print(f'Average processing time (total)'.ljust(45, ' ') + f': {total_average:8.4f} sec / image file ')
return
def _infer_ruby_only(self, single_outputdir_data):
    """
    Run ruby (furigana) inference only, for one XML file's worth of data,
    based on the settings stored in self.cfg.

    Parameters
    ----------
    single_outputdir_data : dict
        Input-data information for one XML file (basically assumed to be
        one book). Contains the input XML data.

    Returns
    -------
    pred_list : list
        List whose elements are one page's inference result each.
        Each result is held as a dictionary.
    """
    # single_outputdir_data dictionary include [key, value] pairs as below
    # [key, value]: ['img', None], ['xml', xml_tree]
    pred_list = []
    for page_idx, page_xml in enumerate(single_outputdir_data['xml'].findall('PAGE')):
        # In ruby-only mode the page *index* (not an image path) identifies the page.
        single_image_file_data = self._get_single_image_file_data(page_idx, single_outputdir_data)
        if single_image_file_data is None:
            print('[ERROR] Failed to get single page input data.')
            continue
        print('######## START PAGE INFERENCE PROCESS ########')
        start_page = time.time()
        for proc in self.proc_list:
            start_proc = time.time()
            single_page_output = []
            for idx, single_data_input in enumerate(single_image_file_data):
                single_data_output = proc.do(idx, single_data_input)
                single_page_output.extend(single_data_output)
            single_image_file_data = single_page_output
            self.proc_time_statistics[proc.proc_name].append(time.time() - start_proc)
        single_image_file_output = single_image_file_data
        self.total_time_statistics.append(time.time() - start_page)
        # save inferenced result text for this page
        sum_main_txt = ''
        sum_cap_txt = ''
        sum_ruby_txt = ''
        # check if xml output for this image is vertical text
        vertical_text_page = 0
        for single_data_output in single_image_file_output:
            if self._is_vertical_text_xml(single_data_output['xml']):
                vertical_text_page += 1
        # reverse order of page if all pages are vertical text
        single_image_file_output_for_txt = single_image_file_output
        if vertical_text_page >= len(single_image_file_output):
            single_image_file_output_for_txt = list(reversed(single_image_file_output))
        for single_data_output in single_image_file_output_for_txt:
            main_txt, cap_txt = self._create_result_txt(single_data_output['xml'])
            sum_main_txt += main_txt + '\n'
            # BUGFIX: previously ``sum_cap_txt += sum_cap_txt + '\n'`` appended the
            # accumulator to itself, discarding every page's caption text.
            sum_cap_txt += cap_txt + '\n'
            sum_ruby_txt += single_data_output['ruby_txt'] + '\n'
        self._save_pred_txt(sum_main_txt, sum_cap_txt, sum_ruby_txt, page_xml.attrib['IMAGENAME'], single_outputdir_data['output_dir'])
        # add inference result for single image file data to pred_list, including XML data
        pred_list.extend(single_image_file_output)
        print('######## END PAGE INFERENCE PROCESS ########')
    return pred_list
def _infer(self, single_outputdir_data):
    """
    Run inference for one XML file's worth of data, based on the settings
    stored in self.cfg.

    Parameters
    ----------
    single_outputdir_data : dict
        Input-data information for one XML file (basically assumed to be
        one book). Contains the list of image file paths and the XML data
        corresponding to them.

    Returns
    -------
    pred_list : list
        List whose elements are one page's inference result each.
        Each result is held as a dictionary.
    """
    # single_outputdir_data dictionary include [key, value] pairs as below
    # (xml is not always included)
    # [key, value]: ['img', numpy.ndarray], ['xml', xml_tree]
    pred_list = []
    pred_xml_dict_for_dump = {}
    if self.cfg['dump']:
        # One dump subdirectory per process collects intermediate XML results.
        dump_dir = os.path.join(single_outputdir_data['output_dir'], 'dump')
        os.makedirs(dump_dir, exist_ok=True)
        for proc in self.proc_list:
            pred_xml_dict_for_dump[proc.proc_name] = []
            proc_dump_dir = os.path.join(dump_dir, proc.proc_name)
            os.makedirs(proc_dump_dir, exist_ok=True)
    for img_path in single_outputdir_data['img_list']:
        single_image_file_data = self._get_single_image_file_data(img_path, single_outputdir_data)
        output_dir = single_outputdir_data['output_dir']
        if single_image_file_data is None:
            print('[ERROR] Failed to get single page input data for image:{0}'.format(img_path), file=sys.stderr)
            continue
        print('######## START PAGE INFERENCE PROCESS ########')
        start_page = time.time()
        for proc in self.proc_list:
            start_proc = time.time()
            single_page_output = []
            for idx, single_data_input in enumerate(single_image_file_data):
                single_data_output = proc.do(idx, single_data_input)
                single_page_output.extend(single_data_output)
            # save inference result data to dump
            if self.cfg['dump'] and 'xml' in single_image_file_data[0].keys():
                pred_xml_dict_for_dump[proc.proc_name].append(single_image_file_data[0]['xml'])
            single_image_file_data = single_page_output
            self.proc_time_statistics[proc.proc_name].append(time.time() - start_proc)
        single_image_file_output = single_image_file_data
        self.total_time_statistics.append(time.time() - start_page)
        if self.cfg['save_image'] or self.cfg['partial_infer']:
            # save inferenced result drawn image in pred_img directory
            for single_data_output in single_image_file_output:
                # save input image while partial inference
                if self.cfg['partial_infer']:
                    img_output_dir = os.path.join(output_dir, 'img')
                    self._save_image(single_data_output['img'], single_data_output['img_file_name'], img_output_dir)
                pred_img = self._create_result_image(single_data_output, self.proc_list[-1].proc_name)
                img_output_dir = os.path.join(output_dir, 'pred_img')
                self._save_image(pred_img, single_data_output['img_file_name'], img_output_dir)
        # save inferenced result text for this page
        # (only meaningful when text recognition ran: proc_range end > 2)
        if self.cfg['proc_range']['end'] > 2:
            sum_main_txt = ''
            sum_cap_txt = ''
            sum_ruby_txt = None
            if self.cfg['ruby_read']:
                sum_ruby_txt = ''
            # check if xml output for this image is vertical text
            vertical_text_page = 0
            for single_data_output in single_image_file_output:
                if self._is_vertical_text_xml(single_data_output['xml']):
                    vertical_text_page += 1
            # reverse order of page if it's vertical text
            single_image_file_output_for_txt = single_image_file_output
            if vertical_text_page >= len(single_image_file_output):
                single_image_file_output_for_txt = list(reversed(single_image_file_output))
            for single_data_output in single_image_file_output_for_txt:
                main_txt, cap_txt = self._create_result_txt(single_data_output['xml'])
                sum_main_txt += main_txt + '\n'
                # BUGFIX: previously ``sum_cap_txt += sum_cap_txt + '\n'`` appended the
                # accumulator to itself, discarding every page's caption text.
                sum_cap_txt += cap_txt + '\n'
                if self.cfg['ruby_read']:
                    sum_ruby_txt += single_data_output['ruby_txt'] + '\n'
            self._save_pred_txt(sum_main_txt, sum_cap_txt, sum_ruby_txt, os.path.basename(img_path), single_outputdir_data['output_dir'])
        # add inference result for single image file data to pred_list, including XML data
        pred_list.extend(single_image_file_output)
        print('######## END PAGE INFERENCE PROCESS ########')
    return pred_list
def _get_single_dir_data(self, input_dir):
    """
    Collect and return the input-data information for one XML file's worth
    of data.

    Parameters
    ----------
    input_dir : str
        Directory (or, for input_structure 'f', single file) path where the
        input data for one XML file is stored.

    Returns
    -------
    single_dir_data : list of dict or None
        One-element list with the input-data information (basically one PID):
        the image file path list, optionally the parsed XML, and the prepared
        output directory. None on any validation error.
    """
    single_dir_data = {'input_dir': os.path.abspath(input_dir)}
    single_dir_data['img_list'] = []
    # get img list of input directory
    if not self.cfg['ruby_only']:
        if self.cfg['input_structure'] in ['w']:
            # 'w' layout: images sit directly in input_dir.
            for ext in supported_img_ext:
                single_dir_data['img_list'].extend(sorted(glob.glob(os.path.join(input_dir, '*{0}'.format(ext)))))
        elif self.cfg['input_structure'] in ['f']:
            # 'f' layout: input_dir is itself a single image file.
            stem, ext = os.path.splitext(os.path.basename(input_dir))
            if ext in supported_img_ext:
                single_dir_data['img_list'] = [input_dir]
            else:
                print('[ERROR] This file is not supported type : {0}'.format(input_dir), file=sys.stderr)
        elif not os.path.isdir(os.path.join(input_dir, 'img')):
            print('[ERROR] Input img diretctory not found in {}'.format(input_dir), file=sys.stderr)
            return None
        else:
            # Default layout: images live in an 'img' subdirectory.
            for ext in supported_img_ext:
                single_dir_data['img_list'].extend(sorted(glob.glob(os.path.join(input_dir, 'img/*{0}'.format(ext)))))
    # check xml file number and load xml data if needed
    # (partial inference starting after layout/ocr, or ruby-only mode, needs an input XML)
    if (self.cfg['proc_range']['start'] > 2) or self.cfg['ruby_only']:
        if self.cfg['input_structure'] in ['f']:
            print('[ERROR] Single image file input mode does not support partial inference wich need xml file input.', file=sys.stderr)
            return None
        input_xml = None
        xml_file_list = glob.glob(os.path.join(input_dir, 'xml/*.xml'))
        if len(xml_file_list) > 1:
            print('[ERROR] Input xml file must be only one, but there is {0} xml files in {1}.'.format(
                len(xml_file_list), os.path.join(self.cfg['input_root'], 'xml')), file=sys.stderr)
            return None
        elif len(xml_file_list) == 0:
            print('[ERROR] There is no input xml files in {0}.'.format(os.path.join(input_dir, 'xml')), file=sys.stderr)
            return None
        else:
            input_xml = xml_file_list[0]
        try:
            single_dir_data['xml'] = ET.parse(input_xml)
        except xml.etree.ElementTree.ParseError as err:
            print("[ERROR] XML parse error : {0}".format(input_xml), file=sys.stderr)
            return None
    # prepare output dir for inferensce result with this input dir
    if self.cfg['input_structure'] in ['f']:
        stem, ext = os.path.splitext(os.path.basename(input_dir))
        output_dir = os.path.join(self.cfg['output_root'], stem)
    elif self.cfg['input_structure'] in ['i', 's']:
        dir_name = os.path.basename(input_dir)
        output_dir = os.path.join(self.cfg['output_root'], dir_name)
    elif self.cfg['input_structure'] in ['w']:
        # 'w' layout: build the output name from the last three path components.
        input_dir_names = input_dir.split('/')
        dir_name = input_dir_names[-3][0] + input_dir_names[-2] + input_dir_names[-1]
        output_dir = os.path.join(self.cfg['output_root'], dir_name)
    else:
        print('[ERROR] Unexpected input directory structure type: {}.'.format(self.cfg['input_structure']), file=sys.stderr)
        return None
    # output directory existence check
    output_dir = utils.mkdir_with_duplication_check(output_dir)
    single_dir_data['output_dir'] = output_dir
    return [single_dir_data]
def _get_single_dir_data_from_tosho_data(self, input_dir):
    """
    Collect and return per-PID input-data information from a tosho_data
    style section directory.

    Parameters
    ----------
    input_dir : str
        Directory path of one tosho_data section.

    Returns
    -------
    single_dir_data_list : list or None
        One entry per PID found in the directory; each entry holds the image
        file path list and the prepared output directory. None in ruby-only
        mode, which this input layout does not support.
    """
    if self.cfg['ruby_only']:
        print("[ERROR] tosho_data input mode doesn't support ruby_only mode.", file=sys.stderr)
        return None
    # Gather every candidate image: all .jp2 files first, then all .jpg files,
    # each extension group sorted on its own.
    candidate_images = sorted(glob.glob(os.path.join(input_dir, '*.jp2')))
    candidate_images.extend(sorted(glob.glob(os.path.join(input_dir, '*.jpg'))))
    # The PID is the file-name prefix before the first underscore; keep the
    # PIDs in first-seen order.
    ordered_pids = []
    for image_path in candidate_images:
        candidate_pid = os.path.basename(image_path).split('_')[0]
        if candidate_pid not in ordered_pids:
            ordered_pids.append(candidate_pid)
    abs_input_dir = os.path.abspath(input_dir)
    single_dir_data_list = []
    for candidate_pid in ordered_pids:
        entry = {'input_dir': abs_input_dir,
                 'img_list': [p for p in candidate_images if os.path.basename(p).startswith(candidate_pid)]}
        # prepare output dir for inference result of this PID
        destination = os.path.join(self.cfg['output_root'], candidate_pid)
        os.makedirs(destination, exist_ok=True)
        entry['output_dir'] = destination
        single_dir_data_list.append(entry)
    return single_dir_data_list
def _get_single_image_file_data(self, img_path, single_dir_data):
    """
    Collect and return the input-data information for one page.

    Parameters
    ----------
    img_path : str or int
        Path of the input image, or — in ruby-only mode — the integer page
        index inside the input XML (no image is read in that case).
    single_dir_data : dict
        Dictionary holding one book's input-data information: the path to
        the xml file, the output directory for results, and so on.

    Returns
    -------
    single_image_file_data : list of dict or None
        One-element list with this page's input data: the image file path,
        the image as a numpy.ndarray (when img_path is a path), and the XML
        data corresponding to the page. None on read/lookup failure.
    """
    single_image_file_data = [{
        'img_path': img_path,
        'img_file_name': os.path.basename(img_path) if isinstance(img_path, str) else None,
        'output_dir': single_dir_data['output_dir']
    }]
    full_xml = None
    if 'xml' in single_dir_data.keys():
        full_xml = single_dir_data['xml']
    # get img data for single page
    if isinstance(img_path, str):
        orig_img = cv2.imread(img_path)
        if orig_img is None:
            print('[ERROR] Image read error : {0}'.format(img_path), file=sys.stderr)
            return None
        single_image_file_data[0]['img'] = orig_img
    # return if this proc needs only img data for input
    if full_xml is None:
        return single_image_file_data
    if self.cfg['ruby_only']:
        # Ruby-only mode: img_path is a page index; pick the PAGE at that position.
        tmp_idx = 0
        for page in full_xml.getroot().iter('PAGE'):
            if tmp_idx == img_path:
                # Wrap the single PAGE in a fresh document so downstream
                # processes always see a complete tree.
                node = ET.fromstring(self.xml_template)
                node.append(page)
                tree = ET.ElementTree(node)
                single_image_file_data[0]['xml'] = tree
                break
            tmp_idx += 1
        return single_image_file_data
    # get xml data for single page
    image_name = os.path.basename(img_path)
    for page in full_xml.getroot().iter('PAGE'):
        if page.attrib['IMAGENAME'] == image_name:
            node = ET.fromstring(self.xml_template)
            node.append(page)
            tree = ET.ElementTree(node)
            single_image_file_data[0]['xml'] = tree
            break
    if 'xml' not in single_image_file_data[0].keys():
        print('[ERROR] Input PAGE data for page {} not found in XML data.'.format(img_path), file=sys.stderr)
        return None
    return single_image_file_data
def _create_proc_list(self, cfg):
    """
    Create the list of inference processes to run, based on the inference
    settings.

    Parameters
    ----------
    cfg : dict
        Dictionary holding the settings used at inference time.

    Returns
    -------
    list
        Process objects in execution order.
    """
    if cfg['ruby_only']:
        # Ruby-only mode runs nothing but the ruby-reading process.
        return [procs.RubyReadingProcess(cfg, 'ex2')]
    proc_list = []
    # Core processes are selected by index range from self.full_proc_list.
    for i in range(cfg['proc_range']['start'], cfg['proc_range']['end'] + 1):
        proc_list.append(self.full_proc_list[i](cfg, i))
    # Optional post-processes; each depends on the output of an earlier stage
    # (end <= 2 means the line OCR stage did not run, so they must be skipped).
    if cfg['line_order']:
        if cfg['proc_range']['end'] <= 2:
            print('[WARNING] LineOrderProcess will be skipped(this process needs LineOcrProcess output).')
        else:
            proc_list.append(procs.LineOrderProcess(cfg, 'ex1'))
    if cfg['ruby_read']:
        # Ruby reading additionally needs the reading-order result.
        if cfg['proc_range']['end'] <= 2 or not cfg['line_order']:
            print('[WARNING] RubyReadingProcess will be skipped(this process needs LineOrderProcess output).')
        else:
            proc_list.append(procs.RubyReadingProcess(cfg, 'ex2'))
    if cfg['line_attribute']['add_title_author']:
        if cfg['proc_range']['end'] <= 2:
            print('[WARNING] LineAttributeProcess will be skipped(this process needs LineOcrProcess output).')
        else:
            proc_list.append(procs.LineAttributeProcess(cfg, 'ex3'))
    return proc_list
def _save_pred_xml(self, output_dir, pred_list, sorted):
    """
    Merge the per-page result XML data and write it out as one XML file.

    Parameters
    ----------
    output_dir : str
        Directory path where the inference result is saved.
    pred_list : list
        List whose elements are one page's inference result XML each.
    sorted : bool
        Flag telling whether reading-order recognition was executed;
        selects the '.sorted.xml' file suffix.
    """
    destination_dir = os.path.join(output_dir, 'xml')
    os.makedirs(destination_dir, exist_ok=True)
    # basically, output_dir is supposed to be PID, so it used as xml filename
    suffix = '.sorted.xml' if sorted else '.xml'
    destination_path = os.path.join(destination_dir, os.path.basename(output_dir) + suffix)
    utils.save_xml(self._parse_pred_list_to_save(pred_list), destination_path)
    return
def _save_image(self, pred_img, orig_img_name, img_output_dir, id=''):
    """
    Save image data into the given directory.

    Two kinds of image are expected here: the image used as input, and the
    image with the inference result superimposed.

    Parameters
    ----------
    pred_img : numpy.ndarray
        Image data to save.
    orig_img_name : str
        File name of the original input image; the output reuses its stem
        (always saved with a '.jpg' extension).
    img_output_dir : str
        Destination directory path for the image file.
    id : str
        Per-result id appended to the original file name; needed when one
        input image yields multiple output images.

    Raises
    ------
    OSError
        Re-raised when writing the image file fails.
    """
    os.makedirs(img_output_dir, exist_ok=True)
    # Always save as JPEG regardless of the source extension.
    stem, ext = os.path.splitext(orig_img_name)
    orig_img_name = stem + '.jpg'
    if id != '':
        stem, ext = os.path.splitext(orig_img_name)
        orig_img_name = stem + '_' + id + ext
    img_path = os.path.join(img_output_dir, orig_img_name)
    try:
        cv2.imwrite(img_path, pred_img)
    except OSError as err:
        print("[ERROR] Image save error: {0}".format(err), file=sys.stderr)
        # BUGFIX: re-raise the caught exception; ``raise OSError`` replaced it
        # with a new, message-less one and hid the real cause.
        raise
    return
def _save_pred_txt(self, main_txt, cap_txt, ruby_txt, orig_img_name, output_dir):
    """
    Save the inference result text data into the given directory.

    Parameters
    ----------
    main_txt : str
        Result text of the main body + captions.
    cap_txt : str
        Result text of the captions only.
    ruby_txt : str or None
        Result text of the ruby only; nothing is written when None.
    orig_img_name : str
        File name of the original input image; the text files reuse its stem.
    output_dir : str
        Directory under which the 'txt' subdirectory is created.

    Raises
    ------
    OSError
        Re-raised when writing any of the text files fails.
    """
    txt_dir = os.path.join(output_dir, 'txt')
    os.makedirs(txt_dir, exist_ok=True)
    stem, _ = os.path.splitext(orig_img_name)

    def _write(text, suffix, label):
        # Write one result text file; failures are reported then re-raised.
        txt_path = os.path.join(txt_dir, stem + suffix)
        try:
            with open(txt_path, 'w') as f:
                f.write(text)
        except OSError as err:
            print("[ERROR] {0} text save error: {1}".format(label, err), file=sys.stderr)
            # BUGFIX: re-raise the caught exception; ``raise OSError`` replaced
            # it with a new, message-less one and hid the real cause.
            raise

    _write(cap_txt, '_cap.txt', 'Caption')
    _write(main_txt, '_main.txt', 'Main')
    if ruby_txt is not None:
        _write(ruby_txt, '_ruby.txt', 'Ruby')
    return
def _parse_pred_list_to_save(self, pred_list):
    """
    Combine the per-page result XML trees into one XML document for saving
    to a file.

    Parameters
    ----------
    pred_list : list
        List whose elements are one page's inference result XML tree each.

    Returns
    -------
    xml.etree.ElementTree.ElementTree
        A single tree whose root gathers every page element.
    """
    ET.register_namespace('', 'NDLOCRDATASET')
    # Start from an empty document built from the shared template …
    merged_root = ET.fromstring(self.xml_template)
    # … then move every child element of every per-page tree under it.
    for page_tree in pred_list:
        for child in page_tree.getroot():
            merged_root.append(child)
    return ET.ElementTree(merged_root)
def _create_result_image(self, result, proc_name):
    """
    Create image data with the inference result superimposed on the input
    image.

    Parameters
    ----------
    result : dict
        Dictionary holding one page's inference result.
    proc_name : str
        Name of the inference process whose result is to be superimposed;
        stamped onto the image as a label.
    """
    # Prefer a process-provided dump image over the raw input image.
    if 'dump_img' in result.keys():
        dump_img = copy.deepcopy(result['dump_img'])
    else:
        dump_img = copy.deepcopy(result['img'])
    if 'xml' in result.keys() and result['xml'] is not None:
        # draw inference result on input image
        # NOTE(review): both branches currently only stamp the process name;
        # the XML-based drawing appears unimplemented (hence the ``pass``) —
        # confirm whether result drawing is intended here.
        cv2.putText(dump_img, proc_name, (0, 50),
        cv2.FONT_HERSHEY_PLAIN, 4, (0, 0, 0), 5, cv2.LINE_AA)
        pass
    else:
        cv2.putText(dump_img, proc_name, (0, 50),
        cv2.FONT_HERSHEY_PLAIN, 4, (0, 0, 0), 5, cv2.LINE_AA)
    return dump_img
def _create_result_txt(self, xml_data):
    """
    Build plain-text output from one page's inference result XML.

    Parameters
    ----------
    xml_data :
        XML data holding one page's inference result.

    Returns
    -------
    tuple of (str, str)
        First element: every LINE's text, one per line.
        Second element: only the caption ('キャプション') lines, one per line.
    """
    main_lines = []
    caption_lines = []
    for page_node in xml_data.iter('PAGE'):
        for line_node in page_node.iter('LINE'):
            text = line_node.attrib['STRING']
            # Every line goes into the main text …
            main_lines.append(text + '\n')
            # … and caption lines are additionally collected on their own.
            if line_node.attrib['TYPE'] == 'キャプション':
                caption_lines.append(text + '\n')
    return ''.join(main_lines), ''.join(caption_lines)
def _is_vertical_text_xml(self, xml_data):
    """
    Judge whether the given result XML represents vertical text.

    Parameters
    ----------
    xml_data :
        XML data holding one page's inference result.

    Returns
    -------
    bool
        True when strictly more than half of the LINE elements are taller
        than they are wide.
    """
    total_lines = 0
    vertical_lines = 0
    for page_node in xml_data.iter('PAGE'):
        for line_node in page_node.iter('LINE'):
            # A line narrower than it is tall is counted as vertical.
            if int(line_node.attrib['WIDTH']) < int(line_node.attrib['HEIGHT']):
                vertical_lines += 1
            total_lines += 1
    return (total_lines / 2) < vertical_lines
| ndl-lab/ndlocr_cli | cli/core/inference.py | inference.py | py | 30,875 | python | ja | code | 325 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"lin... |
28144596558 | """Script for picking certain number of sampels."""
from argparse import ArgumentParser
from streaming import StreamingDataset, MDSWriter
def parse_args():
    """Build and evaluate the command-line interface for sample picking."""
    parser = ArgumentParser()
    # (flag, value type, extra keyword arguments)
    option_specs = (
        ('--input_dir', str, {'required': True}),
        ('--output_dir', str, {'required': True}),
        ('--compression', str, {'default': 'zstd:16'}),
        ('--hashes', str, {'default': 'sha1,xxh3_64'}),
        ('--size_limit', int, {'default': 1 << 26}),
        ('--num_examples_to_pick', int, {'default': 10000}),
    )
    for flag, value_type, extra in option_specs:
        parser.add_argument(flag, type=value_type, **extra)
    return parser.parse_args()
def main(args):
    """Evenly subsample a streaming dataset and rewrite the picks as MDS shards.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options (see ``parse_args``).
    """
    source = StreamingDataset(local=args.input_dir, shuffle=False)
    # Every field of an example is stored as raw bytes in the MDS shards.
    field_types = {
        'input_ids': 'bytes',
        'input_mask': 'bytes',
        'segment_ids': 'bytes',
        'masked_lm_positions': 'bytes',
        'masked_lm_ids': 'bytes',
        'masked_lm_weights': 'bytes',
        'next_sentence_labels': 'bytes',
    }
    hash_algos = args.hashes.split(',') if args.hashes else []
    with MDSWriter(args.output_dir, field_types, args.compression, hash_algos, args.size_limit) as out:
        # Stride through the source so the picked samples are evenly spaced.
        stride = source.index.total_samples / args.num_examples_to_pick
        for pick_idx in range(args.num_examples_to_pick):
            out.write(source[int(pick_idx * stride)])


if __name__ == '__main__':
    main(parse_args())
| sophiawisdom/streaming | streaming/text/convert/enwiki/mds/pick_eval_samples.py | pick_eval_samples.py | py | 1,392 | python | en | code | null | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streaming.StreamingDataset",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streaming.MDSWriter",
"line_number": 31,
"usage_type": "call"
}
] |
from typing import Dict, Iterator, NewType, Tuple

# Input file with one vent segment per row, e.g. "0,9 -> 5,9".
FILEPATH = "data/data_05.txt"

# Maps an (x, y) grid point to the number of vent segments covering it.
OverlapedPoints = NewType('OverlapedPoints', Dict[Tuple[int, int], int])
def read_lines(filepath: str) -> Iterator:
    """Yield one ``(x1, y1, x2, y2)`` vent segment per line of the input file.

    Each input line has the form ``"x1,y1 -> x2,y2"``.
    """
    with open(filepath, 'r') as handle:
        for raw in handle.readlines():
            left, right = raw.split(' -> ')
            start_x, start_y = (int(part) for part in left.split(','))
            end_x, end_y = (int(part) for part in right.split(','))
            yield start_x, start_y, end_x, end_y
def add_points(matrix: OverlapedPoints, x1: int, y1: int, x2: int, y2: int) -> OverlapedPoints:
    """Mark every grid point covered by the segment ``(x1, y1)-(x2, y2)``.

    Horizontal and vertical segments cover every intermediate point; any other
    segment is assumed to be a 45-degree diagonal (star 2 of the puzzle).
    The matrix is mutated in place and also returned.
    """
    if x1 != x2 and y1 == y2:
        # Horizontal segment: walk the x axis at fixed y.
        covered = [(x, y1) for x in range(min(x1, x2), max(x1, x2) + 1)]
    elif y1 != y2 and x1 == x2:
        # Vertical segment: walk the y axis at fixed x.
        covered = [(x1, y) for y in range(min(y1, y2), max(y1, y2) + 1)]
    else:
        # Diagonal (star 2): pair the x and y ranges, each oriented start -> end.
        xs = list(range(min(x1, x2), max(x1, x2) + 1))
        ys = list(range(min(y1, y2), max(y1, y2) + 1))
        if x1 > x2:
            xs.reverse()
        if y1 > y2:
            ys.reverse()
        covered = list(zip(xs, ys))
    for point in covered:
        matrix[point] = matrix.get(point, 0) + 1
    return matrix
if __name__ == "__main__":
    # Count every grid point covered by at least two vent segments
    # (diagonals included via add_points).
    overlap: OverlapedPoints = OverlapedPoints({})
    for points in read_lines(FILEPATH):
        overlap = add_points(overlap, *points)
    counter = len([item for item in overlap.values() if item >= 2])
    print(counter)
| fabiolab/adventOfCode2021 | day05.py | day05.py | py | 1,515 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.NewType",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_numb... |
72177096103 |
import argparse
from utils.wandb import Wandb
from utils.config import config
import os, sys, time, shortuuid, pathlib, json, logging, os.path as osp
import torch
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
import random
import torch.nn as nn
import torch.nn.functional as F
import os
sys.path.insert(1, '/home/lbrahiea/OHrate')
from ipynb.fs.full.Utils import set_seed
from ipynb.fs.full.Model import build_model, model_initialize
from ipynb.fs.full.Data_loader import make_data_loader, Scaler, map_to_scale
from ipynb.fs.full.Get_fingerprints import get_rdkit_fingerprint, get_cp_fingerprint
from ipynb.fs.full.Evaluate import split_data
from sklearn.utils import shuffle
from train_model_wandb2 import train_model
from sklearn.model_selection import KFold
from torch.utils.tensorboard import SummaryWriter
import wandb
# Default hyperparameters; a wandb sweep overrides these through wandb.config.
hyperparameter_defaults = dict(
    n_ensamble = 0,
    lr = 0.001,
    seed=10,
    scheduler_param=0.5,
    n_epochs = 100,
    weights = [10,1,1,1],
    bweights = [1,1,1,1],
    scheduler_gamma = 0.1,
    batch_size =128,
    num_folds = 5,
    input_type = 'Morgan',
    model_type = 'Point',
    dropout = 0.2,
    scale_features = True,
    scale_targets = False,
    batch_norm = True,
    target = 'OH',
    teston = 'All',
    scale_k= False
)
# The resume flag must be the last command-line argument.
resume = sys.argv[-1] == "--resume"
wandb.init(config=hyperparameter_defaults, project="RRP-All", resume=resume)
# NOTE(review): this rebinds the module-level name ``config`` (imported from
# utils.config above) to the wandb run config — presumably intentional, but
# worth confirming since the original import becomes unreachable.
config = wandb.config
def main():
    """Run the full training/evaluation sweep driven by the wandb config.

    For each of 5 random seeds: split off a 20%% held-out molecule set, run
    K-fold cross-validation on the remainder, then train once on the full
    training split and evaluate on the held-out set. Mean/std of the CV and
    test losses are logged to wandb.
    """
    # setup param
    config.scheduler_step = config.scheduler_param * config.n_epochs
    # Build the hidden-layer size list from the per-layer sweep parameters.
    # NOTE(review): assumes the sweep config supplies n_layers and h1..h6 —
    # they are not in hyperparameter_defaults above; confirm against the sweep.
    hidden = np.zeros([config.n_layers],dtype=int)
    temp = [config.h1,config.h2,config.h3,config.h4,config.h5,config.h6]
    for i in range(config.n_layers):
        hidden[i] = temp[i]
    config.hidden = list(hidden)
    param = config
    # load in dataframe
    df = pd.read_csv('/home/lbrahiea/OHrate/Data/df.csv')
    df = df.join(get_rdkit_fingerprint(df))
    cp_input = pd.read_csv('/home/lbrahiea/OHrate/Data/VOCensamble{}.csv'.format(config.n_ensamble))
    cp_features = cp_input.iloc[:,cp_input.columns != 'smiles']
    df = df.join(cp_features,)
    # run ensemble
    # NOTE(review): vvss and test are assigned but never used below.
    vvss=0
    test=0
    seeds=5
    # Per-seed CV loss and per-seed test loss.
    # NOTE(review): np.ndarray allocates uninitialized storage; every slot is
    # overwritten in the loop, so this is safe but np.zeros would be clearer.
    clist = np.ndarray([seeds,1])
    vlist = np.ndarray([seeds,1])
    for i in range(seeds):
        set_seed(param.seed)
        seed = param.seed+i*10
        # Hold out 20% of the unique molecules as the test set for this seed.
        unique = df['smiles'].unique()
        K = int(unique.shape[0]*.2)
        options_test = unique[np.random.choice(unique.shape[0],size=K,replace=False)]
        train_mols, test_mols = split_data(df,options_test,param)
        train_molecules = shuffle(train_mols['smiles'].unique())
        kf = KFold(n_splits=param.num_folds)
        vvs=0
        # Do cross validation
        for j,(train_index, test_index) in enumerate(kf.split(train_molecules)):
            set_seed(param.seed)
            # Split data to train/test folds
            logging.info(f'####### Evaluating fold {j+1} in a total of {param.num_folds} folds #######')
            train_fold_mols, test_fold_mols = split_data(train_mols,train_molecules[test_index],param)
            # Do scaling if needed
            Scobj=Scaler()
            train_fold_mols,test_fold_mols = Scobj.Scale_data(train_fold_mols,test_fold_mols,param)
            # Create and run model
            model, criterion, optimizer, scheduler = model_initialize(df,param)
            vv=train_model(model,optimizer,criterion,j,param.teston,n_epochs=param.n_epochs,weight=param.weights,
                           train_loader=make_data_loader(train_fold_mols,param),
                           scheduler=scheduler,valid_loader=make_data_loader(test_fold_mols,param),scaler=Scobj,
                           seed=seed)
            vvs+=vv
        # Average validation loss over the folds for this seed.
        clist[i]=vvs/param.num_folds
        set_seed(param.seed)
        logging.info("========= Finished CV =========")
        # Evaluate model
        # Do scaling if needed
        Scobje=Scaler()
        train_mols,test_mols = Scobje.Scale_data(train_mols,test_mols,param)
        # Create and run model
        model, criterion, optimizer, scheduler = model_initialize(df,param)
        loss = train_model(model,optimizer,criterion,10,param.teston,n_epochs=param.n_epochs,weight=param.weights,
                           train_loader=make_data_loader(train_mols,param),
                           scheduler=scheduler,valid_loader=make_data_loader(test_mols,param),scaler=Scobje,
                           seed=seed)
        vlist[i]= loss
    # Aggregate over seeds and log to wandb.
    metrics = {'CV-loss': np.mean(clist), 'CV-std': np.std(clist), 'test': np.mean(vlist), 'test-std': np.std(vlist)}
    wandb.log(metrics)


if __name__ == '__main__':
    main()
| emadalibrahim/reaction_rate_prediction | Hyperoptimization/ReactionPrediction.py | ReactionPrediction.py | py | 4,712 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "wandb.init",
"line_nu... |
36808225829 | from typing import Sequence, Dict, Union
import numpy as np
from .evaluator import Evaluator
from .labeled_tensor import LabeledTensor
class LabelMapEvaluator(Evaluator):
    """ Computes statistics related to volume and shape of the structures in a label map.

    A table with per-subject stats will be included in the output dictionary
    under the key ``'subject_stats'``. The table is a ``pd.DataFrame`` where the
    first column header is ``"subject_name"`` and the other columns headers
    are ``"stat_name.label_name"``.

    A dictionary containing summary statistics will be included in the output dictionary
    under the key ``'summary_stats'``. It has the following structure:
    ``{summary_stat_name: {stat_name: {label_name: value}}}``

    The label map must have a ``Dict[str, int]`` property ``'label_values'``,
    which maps a label's name to its value.

    The supported output statistics are the following:
    ``('volume')``

    The following curve-based statistics are also supported when ``curve_params``
    and ``curve_attribute`` are given:
    ``('error', 'absolute_error', 'squared_error', 'percent_diff')``

    Summary statistics for any of the above can also be computed and output from the evaluation
    The following are supported:
    ``('mean', 'median', 'mode', 'std', 'min', 'max')``

    Args:
        label_map_name: Key to a ``tio.LabelMap`` in each ``tio.Subject``.
        curve_params: Dictionary with keys for label_names and values that contain params for curve
            (polynomial coefficients, passed to ``np.poly1d``).
        curve_attribute: Subject key attribute that is passed into the curve, an example is 'age'.
        stats_to_output: A sequence of statistic names that are output from the evaluation
        summary_stats_to_output: A sequence of summary statistic names that are output from the evaluation.
    """
    def __init__(
            self,
            label_map_name: str,
            curve_params: Union[Dict[str, np.ndarray], None] = None,
            curve_attribute: Union[str, None] = None,
            stats_to_output: Sequence[str] = ('volume',),
            summary_stats_to_output: Sequence[str] = ('mean', 'std', 'min', 'max'),
    ):
        self.label_map_name = label_map_name
        self.curve_params = curve_params
        self.curve_attribute = curve_attribute
        self.stats_to_output = stats_to_output
        self.summary_stats_to_output = summary_stats_to_output

        # arguments check: the curve-based statistics require both curve inputs.
        curve_stats = ['error', 'absolute_error', 'squared_error', 'percent_diff']
        if any(stat in curve_stats for stat in self.stats_to_output):
            if curve_params is None:
                raise ValueError("curve_params must be provided")
            if curve_attribute is None:
                raise ValueError("curve_attribute must be provided")
        # Precompile one polynomial per label; None disables curve statistics.
        if curve_params is not None and curve_attribute is not None:
            self.poly_func = {label: np.poly1d(param) for label, param in curve_params.items()}
        else:
            self.poly_func = None

    def __call__(self, subjects):
        # 'label_values' maps label name -> integer value in the label map.
        label_values = subjects[0][self.label_map_name]['label_values']
        label_names = list(label_values.keys())
        subject_names = [subject['name'] for subject in subjects]

        # 3-D labeled accumulator: (subject, label, stat) -> scalar.
        subject_stats = LabeledTensor(dim_names=['subject', 'label', 'stat'],
                                      dim_keys=[subject_names, label_names, self.stats_to_output])

        for subject in subjects:
            data = subject[self.label_map_name].data
            for label_name, label_value in label_values.items():
                # Boolean mask of the voxels belonging to this label.
                label = (data == label_value)

                # Compute tensors for each statistic. Each element corresponds to one label.
                # NOTE(review): dims (1, 2, 3) assume a (channel, x, y, z) layout
                # so the sum collapses the spatial axes — confirm against the
                # label-map tensor convention used elsewhere.
                spatial_dims = (1, 2, 3)
                volume = label.sum(dim=spatial_dims)

                stats = {
                    'volume': volume,
                }
                if self.poly_func is not None:
                    # Expected volume from the fitted curve at this subject's attribute value.
                    curve_predicted = self.poly_func[label_name](subject[self.curve_attribute])
                    error = volume - curve_predicted
                    curve_stats = {
                        'error': error,
                        'absolute_error': abs(error),
                        'squared_error': error**2,
                        'percent_diff': (error / curve_predicted) * 100
                    }
                    stats.update(curve_stats)

                for stat_name in self.stats_to_output:
                    value = stats[stat_name].item()
                    subject_stats[subject['name'], label_name, stat_name] = value

        summary_stats = subject_stats.compute_summary_stats(self.summary_stats_to_output)
        out_dict = {
            'subject_stats': subject_stats.to_dataframe(),
            'summary_stats': summary_stats
        }
        return out_dict
| efirdc/Segmentation-Pipeline | segmentation_pipeline/evaluators/label_map_evaluator.py | label_map_evaluator.py | py | 4,654 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "evaluator.Evaluator",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line... |
71949120425 | # -*- coding: utf-8 -*-
"""
This module contains the main experiments performed using the current framework.
Created on Mon Sep 30 13:42:15 2019
@author: Jorge Mario Cruz-Duarte (jcrvz.github.io), e-mail: jorge.cruz@tec.mx
"""
from . import hyperheuristic as hyp
from . import operators as op
from . import benchmark_func as bf
from . import tools as tl
import multiprocessing
from os import path
# %% PREDEFINED CONFIGURATIONS
# Use configuration files instead of predefined dictionaries
# Configuration dictionary for experiments
# Configuration dictionary for experiments.
# Index i of ex_configs pairs with index i of hh_configs and pr_configs below.
ex_configs = [
    {'experiment_name': 'demo_test', 'experiment_type': 'default', 'heuristic_collection_file': 'default.txt',
     'weights_dataset_file': 'operators_weights.json'},  # 0 - Default
    {'experiment_name': 'brute_force', 'experiment_type': 'brute_force',
     'heuristic_collection_file': 'default.txt'},  # 1 - Brute force
    {'experiment_name': 'basic_metaheuristics', 'experiment_type': 'basic_metaheuristics',
     'heuristic_collection_file': 'basicmetaheuristics.txt'},  # 2 - Basic metaheuristics
    {'experiment_name': 'short_test1', 'experiment_type': 'default', 'heuristic_collection_file': 'default.txt',
     'weights_dataset_file': 'operators_weights.json'},  # 3 - Short collection
    {'experiment_name': 'short_test2', 'experiment_type': 'default',
     'heuristic_collection_file': 'default.txt'},  # 4 - Short collection +
    {'experiment_name': 'long_test', 'experiment_type': 'default', 'heuristic_collection_file': 'test-set-21.txt',
     'auto_collection_num_vals': 21}  # 5 - Long collection
]

# Configuration dictionary for hyper-heuristics
# (cardinality = max metaheuristic length, num_replicas = repetitions per problem).
hh_configs = [
    {'cardinality': 3, 'num_replicas': 30},  # 0 - Default
    {'cardinality': 1, 'num_replicas': 30},  # 1 - Brute force
    {'cardinality': 1, 'num_replicas': 30},  # 2 - Basic metaheuristic
    {'cardinality': 3, 'num_replicas': 50},  # 3 - Short collection
    {'cardinality': 5, 'num_replicas': 50},  # 4 - Short collection +
    {'cardinality': 3, 'num_replicas': 50}  # 5 - Long collection
]

# Configuration dictionary for problems
# (benchmark functions from benchmark_func and the dimensions to test them in).
pr_configs = [
    {'dimensions': [2, 5], 'functions': ['<choose_randomly>']},  # 0 - Default
    {'dimensions': [2, 5, *range(10, 50 + 1, 10)], 'functions': bf.__all__},  # 1 - Brute force
    {'dimensions': [2, 5, *range(10, 50 + 1, 10)], 'functions': bf.__all__},  # 2 - Basic metaheuristic
    {'dimensions': [2, 5, *range(10, 50 + 1, 10)], 'functions': bf.__all__},  # 3 - Short collection
    {'dimensions': [2, 5, *range(10, 50 + 1, 10)], 'functions': bf.__all__},  # 4 - Short collection +
    {'dimensions': [2, 5, *range(10, 50 + 1, 10)], 'functions': bf.__all__},  # 5 - Long collection
]
# %% EXPERIMENT CLASS
# _ex_config_demo = {'experiment_name': 'demo_test', 'experiment_type': 'default',
# 'heuristic_collection_file': 'default.txt', 'weights_dataset_file': 'operators_weights.json'}
# _hh_config_demo = {'cardinality': 3, 'num_replicas': 30}
# _pr_config_demo = {'dimensions': [2, 5], 'functions': [bf.__all__[hyp.np.random.randint(0, len(bf.__all__))]]}
class Experiment:
    """
    Create an experiment using certain configurations.
    """
    def __init__(self, config_file=None, exp_config=None, hh_config=None, prob_config=None):
        """
        Initialise the experiment object.

        :param str config_file:
            Name of the configuration JSON file with the configuration dictionaries: exp_config, hh_config, and
            prob_config. If only the filename is provided, it is assumed that such a file is in the directory
            './exconf/'. Otherwise, the full path must be entered. The default value is None.
        :param dict exp_config:
            Configuration dictionary related to the experiment. Keys and default values are listed as follows:
                'experiment_name': 'test',                  # Name of the experiment
                'experiment_type': 'default',               # Type: 'default', 'brute_force', 'basic_metaheuristics'
                'heuristic_collection_file': 'default.txt', # Heuristic space located in /collections/
                'weights_dataset_file': None,               # Weights or probability distribution of heuristic space
                'use_parallel': True,                       # Run the experiment using a pool of processors
                'parallel_pool_size': None,                 # Number of processors available, None = Default
                'auto_collection_num_vals': 5               # Number of values for creating an automatic collection
            **NOTE 1:** 'experiment_type': 'default' or another name mean hyper-heuristic.
            **NOTE 2:** If the collection does not exist, and it is not a reserved one ('default.txt',
                'automatic.txt', 'basicmetaheuristics.txt', 'test_collection'), then an automatic heuristic space
                is generated with ``op.build_operators``.
            **NOTE 3:** 'weights_dataset_file' must be determined in a pre-processing step. For the 'default'
                heuristic space, it is provided 'operators_weights.json'.
        :param dict hh_config:
            Configuration dictionary related to the hyper-heuristic procedure, e.g. 'cardinality', 'num_agents',
            'num_iterations', 'num_replicas', and the simulated-annealing parameters ('num_steps',
            'max_temperature', 'stagnation_percentage', 'cooling_rate'), the latter only used when
            ``exp_config['experiment_type']`` is neither 'brute_force' nor 'basic_metaheuristic'.
        :param dict prob_config:
            Configuration dictionary related to the problems to solve. Keys and default values:
                'dimensions': [2, 5, 10, 20, 30, 40, 50],   # List of dimensions for the problem domains
                'functions': bf.__all__,                    # List of function names of the optimisation problems
                'is_constrained': True                      # True if the problem domain is hard constrained
        :return: None.
        """
        self.exp_config, self.hh_config, self.prob_config = read_config_file(config_file, exp_config, hh_config,
                                                                             prob_config)
        # Check if the heuristic collection exists.
        # BUG FIX: use the merged `self.exp_config` below instead of the raw
        # `exp_config` argument, which may be None when a config file was given.
        if not path.isfile('./collections/' + self.exp_config['heuristic_collection_file']):
            # Reserved collection names cannot be created automatically
            if self.exp_config['heuristic_collection_file'] in ['default.txt', 'automatic.txt',
                                                                'basicmetaheuristics.txt', 'test_collection',
                                                                'short_collection.txt']:
                raise ExperimentError('This collection name is reserved and cannot be created automatically!')
            else:
                # Build an automatic heuristic space and disable the weights dataset
                op.build_operators(op.obtain_operators(
                    num_vals=self.exp_config['auto_collection_num_vals']),
                    file_name=self.exp_config['heuristic_collection_file'].split('.')[0])
                self.exp_config['weights_dataset_file'] = None
        # Load the weights dataset when one is required (hyper-heuristic runs only)
        if self.exp_config['weights_dataset_file'] and (
                self.exp_config['experiment_type'] not in ['brute_force', 'basic_metaheuristics']):
            if path.isfile('collections/' + self.exp_config['weights_dataset_file']):
                self.weights_data = tl.read_json('collections/' + self.exp_config['weights_dataset_file'])
            else:
                raise ExperimentError('A valid weights_dataset_file must be provided in exp_config')
        else:
            self.weights_data = None

    def run(self):
        """
        Run the experiment according to the configuration variables.
        :return: None
        """
        # TODO: Create a task log for prevent interruptions
        # Create a list of problems from functions and dimensions combinations
        all_problems = create_task_list(self.prob_config['functions'], self.prob_config['dimensions'])
        # Check if the experiment will be in parallel
        if self.exp_config['use_parallel']:
            # Context manager guarantees the worker pool is cleaned up
            # (the original left the pool open).
            with multiprocessing.Pool(self.exp_config['parallel_pool_size']) as pool:
                pool.map(self._simple_run, all_problems)
        else:
            for prob_dim in all_problems:
                self._simple_run(prob_dim)

    def _simple_run(self, prob_dim):
        """
        Perform a single run, i.e., for a problem and dimension combination
        :param tuple prob_dim:
            Problem name and dimensionality ``(function_string, num_dimensions)``
        :return: None.
        """
        # Read the function name and the number of dimensions
        function_string, num_dimensions = prob_dim
        # Message to print and to store in folders
        label = '{}-{}D-{}'.format(function_string, num_dimensions, self.exp_config['experiment_name'])
        # Get and format the problem (getattr replaces the original eval-based lookup)
        problem = getattr(bf, function_string)(num_dimensions)
        problem_to_solve = problem.get_formatted_problem(self.prob_config['is_constrained'])
        # Read the particular weights array (if so)
        weights = self.weights_data[str(num_dimensions)][problem.get_features(fmt='string', wrd='1')] \
            if self.weights_data else None
        # Call the hyper-heuristic object
        hh = hyp.Hyperheuristic(heuristic_space=self.exp_config['heuristic_collection_file'],
                                problem=problem_to_solve, parameters=self.hh_config,
                                file_label=label, weights_array=weights)
        # Run the HH according to the specified type
        save_steps = self.exp_config['save_steps']
        if self.exp_config['experiment_type'] in ['brute_force', 'bf']:
            hh.brute_force(save_steps)
        elif self.exp_config['experiment_type'] in ['basic_metaheuristics', 'bmh']:
            hh.basic_metaheuristics(save_steps)
        elif self.exp_config['experiment_type'] in ['online_learning', 'dynamic']:
            hh.solve('dynamic', save_steps)
        elif self.exp_config['experiment_type'] in ['neural_network']:
            hh.solve('neural_network', save_steps)
        else:  # 'static_run'
            hh.solve('static', save_steps)
        if self.exp_config['verbose']:
            print(label + ' done!')
class ExperimentError(Exception):
    """Domain-specific error raised for invalid experiment configurations."""
    pass
def read_config_file(config_file=None, exp_config=None, hh_config=None, prob_config=None):
    """
    Return the experimental (`exp_config`), hyper-heuristic (`hh_config`), problem (`prob_config`) configuration
    variables from `config_file`, if it is supplied. Otherwise, use the `exp_config`, `hh_config`, and `prob_config`
    inputs. If there is no input, then assume the default values for these three configuration variables. Further
    information about these variables can be found in the Experiment class's `__doc__`.
    """
    # If there is a configuration file
    # NOTE(review): when config_file is given, any in-memory dicts passed
    # alongside it are ignored and replaced by the file's contents.
    if config_file:
        # Adjustments: resolve directory and enforce a JSON extension
        directory, filename = path.split(config_file)
        if directory == '':
            directory = './exconf/'  # Default directory
        basename, extension = path.splitext(filename)
        if extension not in ['.json', '']:
            raise ExperimentError("Configuration file must be JSON")
        # Existence verification and load
        full_path = path.join(directory, basename + '.json')
        if path.isfile(full_path):
            all_configs = tl.read_json(full_path)
            # Load data from json file (note the 'ex_config' key, not 'exp_config')
            exp_config = all_configs['ex_config']
            hh_config = all_configs['hh_config']
            prob_config = all_configs['prob_config']
        else:
            raise ExperimentError(f"File {full_path} does not exist!")
    else:
        # No file: fall back to the provided dicts (empty when omitted)
        if exp_config is None:
            exp_config = dict()
        if hh_config is None:
            hh_config = dict()
        if prob_config is None:
            prob_config = dict()
    # Load the default experiment configuration and compare it with exp_cfg
    exp_config = tl.check_fields(
        {
            'experiment_name': 'test',
            'experiment_type': 'default',  # 'default' -> hh, 'brute_force', 'basic_metaheuristics'
            'heuristic_collection_file': 'default.txt',
            'weights_dataset_file': None,  # 'operators_weights.json',
            'use_parallel': True,
            'parallel_pool_size': None,  # Default
            'auto_collection_num_vals': 5,
            'save_steps': True,
            'verbose': True,
        }, exp_config)
    # Load the default hyper-heuristic configuration and compare it with hh_cfg
    hh_config = tl.check_fields(
        {
            'cardinality': 3,
            'num_agents': 30,
            'num_iterations': 100,
            'num_replicas': 50,
            'num_steps': 200,
            'max_temperature': 1,
            'min_temperature': 1e-6,
            'stagnation_percentage': 0.37,
            'temperature_scheme': 'fast',
            'acceptance_scheme': 'exponential',
            'cooling_rate': 1e-3,
            'cardinality_min': 1,
            'repeat_operators': True,
            'as_mh': True,
            'verbose': False,
            'verbose_statistics': False,
            'trial_overflow': True,
            'learnt_dataset': None,
            'allow_weight_matrix': True,
            'learning_portion': 0.37,
            'solver': 'static',
            'tabu_idx': None,
            'model_params': None,
            'limit_time': None,
        }, hh_config)
    # Load the default problem configuration and compare it with prob_config
    prob_config = tl.check_fields(
        {
            'dimensions': [2, 5, *range(10, 50 + 1, 10)],
            'functions': bf.__all__,
            'is_constrained': True,
            'features': ['Differentiable', 'Separable', 'Unimodal']
        }, prob_config)
    # Check if there is a special case of function name: <choose_randomly>
    # (each placeholder is independently replaced with a random benchmark name)
    prob_config['functions'] = [
        bf.__all__[hyp.np.random.randint(0, len(bf.__all__))] if fun in ['<choose_randomly>', '<random>'] else fun
        for fun in prob_config['functions']]
    return exp_config, hh_config, prob_config
def create_task_list(function_list, dimension_list):
    """
    Return every (function, dimension) combination as a list of tuples,
    with functions varying slowest.

    :param list function_list:
        Function names from the `benchmark_func` module.
    :param list dimension_list:
        Dimensions to pair with each of these functions.
    :return: list of tuples
    """
    tasks = []
    for function_name in function_list:
        for num_dimensions in dimension_list:
            tasks.append((function_name, num_dimensions))
    return tasks
# %% Auto-run
if __name__ == '__main__':
    # Import module for calling this code from command-line
    import argparse
    import os
    from tools import preprocess_files
    DATA_FOLDER = "./data_files/raw"
    OUTPUT_FOLDER = "./data_files/exp_output"
    # Only one positional argument is allowed: the configuration file
    parser = argparse.ArgumentParser(
        description="Run certain experiment, default experiment is './exconf/demo.json'")
    parser.add_argument('-b', '--batch', action='store_true', help="carry out a batch of experiments")
    parser.add_argument('exp_config', metavar='config_filename', type=str, nargs='?', default='demo',
                        help='''Name of the configuration file in './exconf/' or its full path. Only JSON files.
                        If --batch flag is given, it is assumed that the entered file contains a list of all the
                        paths of experiment files (JSON) to carry out. It would be read as plain text.''')
    args = parser.parse_args()
    if not os.path.exists(OUTPUT_FOLDER):
        os.makedirs(OUTPUT_FOLDER)
    if args.batch:
        # Batch mode: the given file lists one experiment config path per line
        with open(args.exp_config) as configs:
            exp_filenames = configs.read()
        exp_filenames = [filename.strip() for filename in exp_filenames.splitlines()]
    else:
        exp_filenames = [args.exp_config]
    # BUG FIX: use enumerate instead of exp_filenames.index(exp_filename), which
    # scans the list on every iteration and reports the wrong position whenever
    # the batch file lists the same filename twice.
    for file_number, exp_filename in enumerate(exp_filenames, start=1):
        tail_message = f" - ({file_number}/{len(exp_filenames)})" + "\n" + ("=" * 50) \
            if args.batch else ""
        print(f"\nRunning {exp_filename.split('.')[0]}" + tail_message)
        # Create the experiment to run
        exp = Experiment(config_file=exp_filename)
        # Run the experiment et voilà
        exp.run()
        # After the run, preprocess all the raw data
        print(f"\nPreprocessing {exp_filename.split('.')[0]}" + tail_message)
        preprocess_files("data_files/raw/",
                         kind=exp.hh_config["solver"],
                         experiment=exp_filename,
                         output_name=OUTPUT_FOLDER + "/" + exp.exp_config["experiment_name"])
        # Rename the raw folder to raw-$exp_name$
        # NOTE(review): after this rename the next batch entry starts with a
        # fresh DATA_FOLDER — confirm this per-experiment archiving is intended.
        print(f"\nChanging folder name of raw results...")
        os.rename(DATA_FOLDER, DATA_FOLDER + "-" + exp.exp_config["experiment_name"])
    print("\nExperiment(s) finished!")
| jcrvz/customhys | customhys/experiment.py | experiment.py | py | 18,465 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
39556953839 | import pytest
def Fibonacci(n):
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = F(2) = 1).

    Negative input prints "Incorrect input" and returns None, preserving the
    original behaviour.
    """
    # Check if input is negative then it will print incorrect input
    if n < 0:
        print("Incorrect input")
        return None
    if n == 0:
        return 0
    # Iterative computation: the original naive recursion was O(2**n).
    prev, cur = 0, 1
    for _ in range(n - 1):
        prev, cur = cur, prev + cur
    return cur
# Spot-check small Fibonacci values: F(2) == 1 and F(3) == 2.
@pytest.mark.parametrize("n, ans", [(2,1), (3,2)])
def test_Fibonacci(n, ans):
    assert Fibonacci(n) == ans
{
"api_name": "pytest.mark.parametrize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 20,
"usage_type": "attribute"
}
] |
7958737031 | import argparse
import glob
import logging
import os
import random
import re
import shutil
import stat
import subprocess
# Path to the directory containing this source file.
SRC_DIR = os.path.dirname(os.path.realpath(__file__))
BINARIES_DIR = os.path.join(SRC_DIR, 'binaries')
def generate_shaders(no_of_files, output_dir, verbose=False):
    """Generate at least |no_of_files| number of shaders using wgslsmith and write them
    to |output_dir|. When |verbose| is set, log each written file at INFO level."""
    wgslsmith_command = [os.path.join(BINARIES_DIR, "wgslsmith"), "--preset", "tint", "--recondition"]
    i = 0
    while i < no_of_files:
        try:
            proc = subprocess.Popen(wgslsmith_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # BUG FIX: communicate() without a timeout can never raise
            # TimeoutExpired, so the except clause below was unreachable.
            # Bound the wait so a hung generator cannot stall the fuzzer.
            stdout, _ = proc.communicate(timeout=60)
            if proc.returncode != 0:
                continue
            out_path = os.path.join(output_dir, "fuzz-" + str(i) + ".wgsl")
            with open(out_path, "w") as outfile:
                outfile.write(stdout.decode('utf-8'))
        except subprocess.TimeoutExpired:
            proc.kill()
            proc.communicate()  # reap the killed process before retrying
            continue
        if verbose:
            # The `verbose` flag was previously accepted but ignored.
            logging.info('Wrote %s', out_path)
        i += 1
def main():
    """Parse arguments and generate WGSL shaders using wgslsmith."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--no_of_files',
        type=int,
        required=True,
        help='Number of WGSL shaders to generate.')
    # Accepted only for ClusterFuzz API compatibility; the value is unused.
    parser.add_argument(
        '--input_dir',
        required=False,
        help='Ignored. Needed to support the ClusterFuzz fuzzer '
        'API.')
    parser.add_argument(
        '--output_dir',
        required=True,
        help='Directory to write WGSL shaders.')
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help='Use INFO log-level.')
    parser.add_argument(
        '--random_seed',
        type=int,
        default=0,
        help='Ignored. Needed to support the ClusterFuzz fuzzer.')
    opts = parser.parse_args()
    # NOTE(review): despite the help text, a non-zero --random_seed IS applied here.
    if opts.random_seed:
        random.seed(opts.random_seed)
    if opts.verbose:
        logging.basicConfig(level=logging.INFO)
    generate_shaders(opts.no_of_files, opts.output_dir, opts.verbose)
if __name__ == '__main__':
    main()
| google/randomized-graphics-shaders | clusterfuzz-black-box-fuzzers/wgslsmith/run.py | run.py | py | 2,023 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
6484108617 | """
Data Analytics II: Simulation Study Functions.
Author: Arbian Halilaj, 16-609-828.
Spring Semester 2022.
University of St. Gallen.
"""
# load the required functions
import numpy as np
import statsmodels.api as sm
import pandas as pd
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
##############################################################################
def dgp1(dim_x, n):
    """Draw one sample of size ``n`` from DGP1 (balanced random treatment).

    :param dim_x: number of covariates
    :param n: number of observations
    :return: tuple ``(x, d, y)`` of covariates, treatment indicator and outcome
    """
    # Standard-normal covariates, one column per regressor.
    x = np.random.normal(0, 1, size=(n, dim_x))
    # Linearly increasing coefficients, scaled into (0, 1].
    betas = np.arange(1, dim_x + 1) / dim_x
    g = x @ betas
    # Randomised treatment with probability one half.
    d = np.random.binomial(1, 0.5, n)
    # Potential outcomes; only the treated outcome depends on the covariates.
    y0 = np.random.normal(0, 1, n)
    y1 = -1 + g + np.random.normal(0, 1, n)
    # Observed outcome under the realised treatment.
    y = d * y1 + (1 - d) * y0
    return x, d, y
def dgp2(dim_x, n):
    """Draw one sample of size ``n`` from DGP2 (unbalanced treatment, P(D=1)=0.8).

    :param dim_x: number of covariates
    :param n: number of observations
    :return: tuple ``(x, d, y)`` of covariates, treatment indicator and outcome
    """
    # Standard-normal covariates, one column per regressor.
    x = np.random.normal(0, 1, size=(n, dim_x))
    # Linearly increasing coefficients, scaled into (0, 1].
    betas = np.arange(1, dim_x + 1) / dim_x
    # Unbalanced random treatment: 80% treated on average.
    d = np.random.binomial(1, 0.8, n)
    g = x @ betas
    # Potential outcomes; only the treated outcome depends on the covariates.
    y0 = np.random.normal(0, 1, n)
    y1 = -1 + g + np.random.normal(0, 1, n)
    # Observed outcome under the realised treatment.
    y = d * y1 + (1 - d) * y0
    return x, d, y
def dgp3(dim_x, n):
    """Draw one sample of size ``n`` from DGP3 (treatment confounded through ``v``).

    :param dim_x: number of covariates
    :param n: number of observations
    :return: tuple ``(x, d, y)`` of covariates, treatment indicator and outcome
    """
    # Standard-normal covariates, one column per regressor.
    x = np.random.normal(0, 1, size=(n, dim_x))
    # Linearly increasing coefficients, scaled into (0, 1].
    betas = np.arange(1, dim_x + 1) / dim_x
    g = x @ betas
    # Factor v enters both the treatment probability and the outcome.
    v = np.random.uniform(0, 1, n)
    d = np.random.binomial(1, 0.5 * v, n)
    # NOTE: y1 is drawn before y0, mirroring the original sampling order.
    y1 = 1 + g + np.random.normal(0, 1, n)
    y0 = np.random.normal(0, 1, n)
    # Observed outcome under the realised treatment, shifted by v.
    y = d * y1 + (1 - d) * y0 + v
    return x, d, y
##############################################################################
def ols(x, y):
    """Estimate OLS coefficients (intercept first) via the normal equations.

    :param x: regressor matrix of shape (n, k); a constant column is prepended
    :param y: outcome vector of shape (n,)
    :return: coefficient vector of length k + 1 (intercept, then slopes)
    """
    n = y.shape[0]  # number of observations
    x_c = np.c_[np.ones(n), x]  # add constant
    # Solve (X'X) b = X'y directly: numerically more stable and cheaper than
    # forming the explicit matrix inverse as the original code did.
    return np.linalg.solve(x_c.T @ x_c, x_c.T @ y)
def dr(d, x, y, dim_x):
    """Estimate the ATE with the doubly robust (AIPW) estimator.

    :param d: binary treatment indicator, shape (n,)
    :param x: covariate matrix, shape (n, dim_x)
    :param y: observed outcome, shape (n,)
    :param dim_x: number of covariates
    :return: numpy array of shape (1, 1) holding the ATE estimate
    """
    # Step 1: Estimate the propensity score with a logit model
    ps = sm.Logit(endog=d, exog=sm.add_constant(x)).fit(disp=0).predict()
    # Step 2: Fit one outcome regression per treatment arm
    data = np.column_stack([d, x, y])
    data = pd.DataFrame(data)
    data.columns = ["V" + str(i) for i in range(1, dim_x + 3)]  # arbitrary column names
    df1 = data.query("V1==1")
    df0 = data.query("V1==0")
    x_all = data.iloc[:, 1:dim_x + 1]
    x1 = df1.iloc[:, 1:dim_x + 1]
    x0 = df0.iloc[:, 1:dim_x + 1]
    y1 = df1.iloc[:, dim_x + 1]
    y0 = df0.iloc[:, dim_x + 1]
    # BUG FIX: predict both outcome models on *all* n observations. The
    # original predicted only on each subsample, so the (n1,)/(n0,) vectors
    # silently broadcast against the length-n arrays below into (n1, n)
    # matrices — that is not the AIPW estimator.
    mu1 = LinearRegression().fit(x1, y1).predict(x_all)
    mu0 = LinearRegression().fit(x0, y0).predict(x_all)
    # Step 3: AIPW formula for the ATE
    ate = np.mean(d * (y - mu1) / ps + mu1) - np.mean((1 - d) * (y - mu0) / (1 - ps) + mu0)
    # Keep the original return shape: a (1, 1) array
    return np.c_[ate]
##############################################################################
def simulation(n_sim, n, dim_x):
    """Run the Monte Carlo study over all three DGPs.

    :param n_sim: number of simulation replications
    :param n: number of observations per draw
    :param dim_x: number of covariates
    :return: array of shape (n_sim, 2, 3); axis 1 holds [DR, OLS] estimates,
        axis 2 indexes the DGP.
    """
    all_results = np.empty((n_sim, 2, 3))
    generators = (dgp1, dgp2, dgp3)
    for rep in range(n_sim):
        # Same draw/estimate order as the original: dgp1, dgp2, dgp3 in turn.
        for dgp_idx, draw in enumerate(generators):
            x, d, y = draw(dim_x, n)
            # Row 0: doubly robust estimate; row 1: OLS treatment coefficient.
            all_results[rep, 0, dgp_idx] = dr(d, x, y, dim_x)
            all_results[rep, 1, dgp_idx] = ols(np.c_[d, x], y)[1]
    return all_results
def plot_results(results,dgp,truth):
    """Plot overlaid histograms of the OLS and DR estimates for one DGP,
    with a vertical line at the true ATE.

    :param results: array of shape (n_sim, 2, 3) as returned by ``simulation``
    :param dgp: DGP number (1-based)
    :param truth: true ATE to mark on the plot
    """
    plt.figure()
    # Axis-1 index 1 holds the OLS estimates, index 0 the doubly robust ones.
    plt.hist(x=results[:,1,dgp-1], bins='auto', color='red',alpha=0.5,label="OLS")
    plt.hist(x=results[:,0,dgp-1], bins='auto', color='blue',alpha=0.5,label="Doubly Robust")
    plt.axvline(x=truth,label="truth")
    plt.legend(loc='upper right')
    plt.title('DGP' + str(dgp))
    plt.show()
def print_results(results, dgp, truth):
    """Print bias, variance and MSE of the OLS and DR estimators for one DGP.

    :param results: array of shape (n_sim, 2, 3); axis-1 index 0 holds the DR
        estimates and index 1 the OLS estimates, axis 2 indexes the DGP
    :param dgp: DGP number (1-based)
    :param truth: true ATE used as the reference value
    :return: None
    """
    def _stats(estimates):
        # Bias, variance and MSE of a vector of estimates around `truth`
        # (factored out of the previously duplicated per-estimator code).
        bias = np.mean(estimates) - truth
        variance = np.mean((estimates - np.mean(estimates)) ** 2)
        return bias, variance, bias ** 2 + variance

    bias_ols, variance_ols, mse_ols = _stats(results[:, 1, dgp - 1])
    bias_dr, variance_dr, mse_dr = _stats(results[:, 0, dgp - 1])
    print("\n Results DGP" + str(dgp))
    print("Bias OLS: " + str(round(bias_ols, 3)))
    print("Bias DR: " + str(round(bias_dr, 3)))
    print("Variance OLS: " + str(round(variance_ols, 3)))
    print("Variance DR: " + str(round(variance_dr, 3)))
    print("MSE OLS: " + str(round(mse_ols, 3)))
    print("MSE DR: " + str(round(mse_dr, 3)))
##############################################################################
# APPENDIX: Doubly ML Algorithm
##############################################################################
'''
def dml(d, x, y, numberOfFolds, n_x):
# Get data from DGP and store into DataFrame
data = np.column_stack([d, x, y])
data = pd.DataFrame(data)
# 1) Split data
dataList = np.array_split(data.sample(frac=1), numberOfFolds)
result = []
# Get nuisance estimator
nuisanceEstimatorG = RandomForestRegressor(max_depth=30, max_features='sqrt', n_estimators=500, min_samples_leaf=2)
nuisanceEstimatorM = RandomForestRegressor(max_depth=30, max_features='sqrt', n_estimators=500, min_samples_leaf=2)
for i in range(len(dataList)):
# Prepare D (treatment effect), Y (predictor variable), X (controls)
mainData = dataList[0]
D_main = mainData.iloc[:,0].values
Y_main = pd.DataFrame(mainData.iloc[:,n_x+1])
X_main = pd.DataFrame(mainData.iloc[:,1:n_x+1])
dataList_ = dataList[:]
dataList_.pop(0)
compData = pd.concat(dataList_)
D_comp = compData.iloc[:,0].values
Y_comp = pd.DataFrame(compData.iloc[:,n_x+1])
X_comp = pd.DataFrame(compData.iloc[:,1:n_x+1])
# Compute g as a machine learning estimator, which is trained on the predictor variable
g_comp = nuisanceEstimatorG.fit(X_main, Y_main.values.ravel()).predict(X_comp)
g_main = nuisanceEstimatorG.fit(X_comp, Y_comp.values.ravel()).predict(X_main)
# Compute m as a machine learning estimator, which is trained on the treatment variable
m_comp = nuisanceEstimatorM.fit(X_main, D_main).predict(X_comp)
m_main = nuisanceEstimatorM.fit(X_comp, D_comp).predict(X_main)
# Compute V
V_comp = np.array(D_comp) - m_comp
V_main = np.array(D_main) - m_main
# We provide two different theta estimators for computing theta
theta_comp = pc.thetaEstimator(Y_comp, V_comp, D_comp, g_comp)
theta_main = pc.thetaEstimator(Y_main, V_main, D_main, g_main)
result.append((np.mean(theta_comp + theta_main)))
# Aggregate theta
theta = np.mean(result)
return theta
def thetaEstimator(Y, V, D, g):
try:
return np.mean((np.array(V)*(np.array(Y)-np.array(g))))/np.mean((np.array(V)*np.array(D)))
except ZeroDivisionError:
return 0
''' | akapedan/Causal_Econometrics | functions.py | functions.py | py | 8,216 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.normal",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.random.binomi... |
23452539852 | # coding=utf-8
from oss2.api import *
from oss2.auth import *
from flask import current_app
def get_oss_auth_sigi():
    """Build an OSS V2 signing auth object from the Flask app config
    (ACCESS_KEY_ID / ACCESS_KEY_SECRET)."""
    key_id = current_app.config["ACCESS_KEY_ID"]
    key_secret = current_app.config["ACCESS_KEY_SECRET"]
    return AuthV2(key_id, key_secret)
def get_oss_bucket(filename, input_time=60 * 60):
    """Return pre-signed PUT and GET URLs for *filename*, valid *input_time* seconds.

    Endpoint and bucket (storage space) name are read from the Flask config,
    e.g. an endpoint such as 'http://oss-cn-hangzhou.aliyuncs.com'.
    """
    auth = get_oss_auth_sigi()
    endpoint = current_app.config["ENDPOINT"]
    bucket_name = current_app.config["BUCKET_NAME"]
    bucket = Bucket(auth=auth, endpoint=endpoint, bucket_name=bucket_name)
    put_url = bucket.sign_url("PUT", filename, input_time)
    get_url = bucket.sign_url("GET", filename, input_time)
    return {
        "get": get_url,
        "put": put_url,
    }
def get_img_oss_url(filename, input_time=60 * 60):
    """Return a pre-signed GET URL for an image object, valid *input_time* seconds."""
    bucket = Bucket(auth=get_oss_auth_sigi(),
                    endpoint=current_app.config["ENDPOINT"],
                    bucket_name=current_app.config["BUCKET_NAME"])
    return bucket.sign_url("GET", filename, input_time)
if __name__ == '__main__':
    # Manual smoke test; requires a Flask application context to supply config.
    get_oss_bucket("123.jpg")
{
"api_name": "flask.current_app.config",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.current_app.config",
"line_number": 13,
"usage_type": "attribute"
},
{
"api... |
38380409709 | import pytest, os
import warnings, collections, operator, logging
import generic_utils
## see https://docs.pytest.org/en/latest/example/simple.html#incremental-testing-test-steps
from typing import Dict, Tuple
# store history of failures per test class name and per index in parametrize (if parametrize used)
##_test_failed_incremental: Dict[str, Dict[Tuple[int, ...], str]] = {}
# Per-class failure history (kept from the abandoned mark.incremental approach
# referenced in the comment above); currently unused by the hooks below.
_test_failed_incremental = {}
# When True, the report hook raises NoCallback for tests that never reported
# through a recognised annotation.
RAISE_NO_CALLBACK_EXCEPTION = False
# Record types used throughout the harness.
NT__scope = collections.namedtuple( 'scope', ['overview','identifier','description','priority','traceability'] )
# Test-case specification fields: ov(erview), id(entifier), obj(ective),
# p(riority), tr(aceability), prec(onditions), i(nputs) and expected result.
# BUG FIX: a long-field-name variant of this namedtuple was defined first and
# immediately shadowed by this short-name one; the dead duplicate is removed.
NT__test_case_spec = collections.namedtuple( 'test_case_spec', ['ov', 'id', 'obj', 'p', 'tr', 'prec', 'i', 'expected'])
NT__test_case = collections.namedtuple( 'test_case', ['spec', 'actual_results'])
NT__config = collections.namedtuple( 'config', ['project','scope','log_name'])
__overview__ = "Testing the local utilities module"
__identifier__ = "scope01"
__description__ = """Test of local utilities module, local_utilities.py, which contains functions and classes used in other modules"""
__priority__ = "MUST"
__traceability__ = "documentation is tbd"
test_scope = NT__scope( overview = __overview__, priority=__priority__,identifier=__identifier__, description=__description__, traceability=__traceability__)
project = 'rt001'
scope_id = 'sc001'
class BaseClassCheck(object):
    """Shared configuration and reporter state for the Check* helpers.

    Class-level attributes are deliberately shared across all subclasses so
    every check in a run writes to the same reporter.
    """
    config = NT__config( project='default_project', scope='default_scope', log_name='test01' )
    reporter = None
    review_template = 'Failed tests: %s, passed tests: %s'

    @classmethod
    def configure(cls, project, scope, log_name, reporter=None):
        """Install a new run configuration and, optionally, a reporter."""
        cls.config = NT__config(project=project, scope=scope, log_name=log_name)
        if reporter is not None:
            cls.reporter = reporter
class Check3(BaseClassCheck):
    """Per-test helper: attaches a TCBuild spec to a test function's
    annotations, then acts as a callable assertion against the expected value.
    Skips the test when earlier blocking failures were recorded."""
    def __init__(self,func,sfx='',reset=False,blocking=False):
        """Wire *func* into the reporting machinery.

        :param func: the test function; its 'return' annotation supplies the spec dict
        :param sfx: suffix passed to a newly created LogReporter (first use only)
        :param reset: when True, zero the shared pass/fail counters
        :param blocking: when True, a failure of this test blocks later tests
        """
        self.ee = dict()
        self.function = func
        fid = func.__name__
        self.test_name = fid
        # Traceability string: project:scope:function-name
        tr = '%s:%s:%s' % (self.config.project,self.config.scope,fid)
        defaults = {'prec':'None', 'i':'None', 'tr':tr, 'id':fid }
        ret = func.__annotations__['return']
        # Lazily create the shared reporter on first use
        if BaseClassCheck.reporter == None:
            BaseClassCheck.reporter = LogReporter(self.config.log_name,sfx=sfx)
        # Fill missing spec fields with defaults, then attach the spec object
        for k,v in defaults.items():
            if k not in ret:
                ret[k] = v
        func.__annotations__['tc'] = TCBuild( func, BaseClassCheck.reporter, ret=ret )
        if blocking:
            func.__annotations__['tc'].is_blocking = True
        # Keep a local copy of the effective spec values
        for k in ['ov','obj','p','tr','prec','i','expected','id']:
            v = ret.get(k, defaults.get(k) )
            self.ee[k] = v
        if reset:
            self._reset()
        #### print ('FAIL COUNT: %s' % fid, self.reporter.fails )
        #### Note that the mark.incremental approach was tried, but did not work with hooks implemented
        # Skip everything but wrap-up tests once a blocking failure occurred
        if self.reporter.blocking_fails > 0 and fid.find( 'wrapup' ) == -1:
            pytest.skip( 'blocking_fail count > 0' )
    @classmethod
    def _reset(cls):
        # Zero the shared pass/fail counters on the class-level reporter.
        BaseClassCheck.reporter.fails = 0
        BaseClassCheck.reporter.passes = 0
    @classmethod
    def review(cls):
        # Summary line built from the shared counters.
        return BaseClassCheck.review_template % (BaseClassCheck.reporter.fails,BaseClassCheck.reporter.passes)
    def __call__(self,value,cmt=None):
        """Record (value, cmt) as the result and assert it equals the expected value."""
        self.function.__annotations__['tc'].result = (value,cmt)
        assert value == self.ee['expected'], '%s: result [%s] does not match expected [%s]' % (self.test_name,value,self.ee['expected'])
class NoCallback(Exception):
    """Raised when a finished test produced no recognised reporting callback."""

    def __init__(self, item, rep):
        # Keep the pytest item and its report for the caller to inspect.
        self.item = item
        self.rep = rep
class BaseClassISOReports(object):
    # Marker base for report writers (e.g. LogReporter below).
    pass
class BaseClassTS(object):
    # Marker base for test-specification holders (e.g. TCBuild); checked with
    # isinstance in the pytest report hook.
    pass
class LogReporter(BaseClassISOReports):
    """Formats one tab-separated result line per test case and writes it to a
    named logger; keeps running pass/fail/blocking-fail counters."""
    def __init__(self,log_name,log_file=None,log_dir='./logs',sfx=None):
        """Set up the logger via generic_utils.LogFactory and zero the counters.

        :param log_name: logger name used for logging.getLogger in __call__
        :param log_file: log file name; defaults to '<log_name>_2020'
        :param log_dir: directory handed to the log factory
        :param sfx: optional suffix appended to every report line
        """
        self.log_name = log_name
        if log_file == None:
            log_file = "%s_2020" % log_name
        log_factory = generic_utils.LogFactory(dir=log_dir)
        log_pytest = log_factory( log_name, mode="a", logfile=log_file )
        self.sfx = sfx
        self.fails = 0
        self.blocking_fails = 0
        self.passes = 0
    def __call__(self, pytest_report=None,test_case=None,sfx=None, cls=None):
        """Build, log and return the report line for one test outcome.

        :param pytest_report: pytest report object; its .outcome picks the tag
        :param test_case: TCBuild-like object carrying .spec and .result
        :param sfx: extra per-call suffix
        :param cls: test class (its .id is used in the non-spec line format)
        """
        cmt = None
        # Map the pytest outcome onto a short tag; '-' when unavailable
        if pytest_report != None and hasattr( pytest_report, 'outcome' ) and pytest_report.outcome in ['passed', 'failed', 'skipped']:
            tag = {'passed':'OK', 'failed':'FAIL', 'skipped':'SKIP'}[pytest_report.outcome]
            ##if pytest_report != None and hasattr( pytest_report, 'failed' ) and pytest_report.failed in [True,False]:
            ##tag = {True:'OK', False:'ERROR'}[not pytest_report.failed]
        else:
            tag = '-'
        if cls != None:
            cid = '%s' % cls.id
        else:
            cid = ''
        ##mode = "a" if os.path.exists("pytest_line_log") else "w"
        ##with open("pytest_line_log", mode) as f:
        if test_case == None:
            self.report_line = '%s:%s..: ' % (tag, cid)
        else:
            self.report_line = '%s:%s.%s: %s' % (tag, cid, test_case.spec.id, test_case.spec.ov)
            # A (value, comment) tuple result is split; anything else stringified
            if hasattr( test_case, 'result' ):
                if type( test_case.result ) == type( () ) and len( test_case.result ) == 2:
                    result, cmt = [str( x) for x in test_case.result]
                else:
                    result = str( test_case.result )
            else:
                result = "****"
            cmt_item = cmt if cmt != None else ''
            # Update the running counters before composing the final line
            if tag == 'FAIL':
                self.fails += 1
                if test_case.is_blocking:
                    self.blocking_fails += 1
            elif tag == 'OK':
                self.passes += 1
            self.report_line = '\t'.join( [tag] + [str( test_case.spec._asdict()[x] ) for x in ['id','expected']] + [result,cmt_item] )
        if sfx != None:
            self.report_line += sfx
        if self.sfx != None:
            self.report_line += ' ' + self.sfx
        log = logging.getLogger( self.log_name )
        log.info( self.report_line )
        ## f.write( '%s\n' % self.report_line )
        ## f.write( ', '.join( dir(item.cls) ) )
        return self.report_line
##line_reporter = LogReporter()
def get_user_warning(data):
    """Return a fresh UserWarning subclass carrying *data* as a class attribute.

    The class name 'HERE' is kept because it appears in emitted warning text.
    """
    class HERE(UserWarning):
        pass
    HERE.data = data
    return HERE
def matching(result, expected):
    """Check *result* against *expected*.

    A string expected value of the form 'lt:<expr>', 'gt:<expr>', 'le:<expr>'
    or 'ge:<expr>' encodes an ordering comparison; anything else is compared
    for equality.
    """
    is_op_spec = (
        type(expected) == type('')
        and expected.find(':') != -1
        and expected.split(':')[0] in ['lt', 'gt', 'le', 'ge']
    )
    if not is_op_spec:
        return result == expected
    op_name = expected[:2]
    assert op_name in ['lt', 'gt', 'le', 'ge']
    # NOTE(review): eval of the spec tail — acceptable only for trusted,
    # in-repo test specifications; never feed external input here.
    threshold = eval(expected[3:])
    return getattr(operator, op_name)(result, threshold)
class TCBuild(BaseClassTS):
    """
    BaseClassTS: brings in a standard report function which assumes a spec attribute which is an instance of NT__scope
    """
    # Module-level scope record shared by every test-case spec.
    scope = test_scope
    def __init__(self, function, reporter, ret=None):
        """Build a test-case spec wrapper around *function*.

        :param function: the test function being specified
        :param reporter: callable reporter (e.g. LogReporter) used by report()
        :param ret: spec as a dict or NT__test_case_spec; defaults to the
            function's 'return' annotation
        """
        if ret == None:
            ret = function.__annotations__['return']
        self.reporter = reporter
        # Accept either a plain dict or an already-built spec namedtuple.
        # NOTE(review): any other type leaves self.spec unset and the
        # attribute accesses below will raise — confirm that is intended.
        if type(ret) == type(dict()):
            self.spec = NT__test_case_spec( **ret )
        elif isinstance(ret,NT__test_case_spec):
            self.spec = ret
        print( 'TCBuild Instantiating: %s' % function.__name__ )
        self.function = function
        expected = self.spec.expected
        # A 'regex:' prefix switches the conformance mode to pattern matching.
        if type(expected) == type('') and expected.find( 'regex:' ) == 0:
            self.conformance_mode = 'match'
        else:
            self.conformance_mode = 'equals'
        self.ov = self.spec.ov
        self.is_blocking = False
    def report(self,*args,**kwargs):
        # Delegate straight to the configured reporter.
        return self.reporter( *args,**kwargs)
    def __call__(self,*args,**kwargs):
        # Run the wrapped function, remembering its result for reporting.
        self.result = self.function(*args,**kwargs)
        return self.result
class MakeTest(object):
    """A decorator which converts a function into a pytest compatible test by appending an assertion checking the
    return value against an expected value.
    The expected value must be provided in the 'return' annotation of the function as part of a named tuple.
    """
    def __init__(self, reporter=None, log_file=None, log_name=None):
        """Pick an explicit reporter, then the shared BaseClassCheck reporter,
        or finally construct a fresh LogReporter."""
        if reporter != None:
            self.reporter = reporter
        elif BaseClassCheck.reporter != None:
            self.reporter = BaseClassCheck.reporter
        else:
            if log_name == None:
                log_name = __name__
            self.reporter = LogReporter(log_name, log_file=log_file)
    def __call__(self, f):
        """Wrap *f* so that calling it runs *f* and asserts the result matches
        the expected value from its 'return' annotation."""
        expected = f.__annotations__['return']['expected']
        # The return annotation is evaluated at definition time, deliberately
        # attaching a TCBuild spec object to the wrapper for the report hook.
        def this(*args, **kwargs) -> TCBuild(f, self.reporter):
            this.__annotations__['return'].result = result = f(*args, **kwargs)
            assert matching(result, expected), 'Result [%s] does not match expected [%s]' % (result, expected)
        # BUG FIX: the wrapper was built but never returned, so every decorated
        # function became None; return it so the decoration actually works.
        return this
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Hook wrapper that emits a report line for tests carrying a TCBuild spec.

    The spec is looked up, in order, in the function's 'tc' annotation, its
    'return' annotation, or a 'tc' fixture argument.
    """
    # we only look at actual failing test calls, not setup/teardown
    # execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()
    if rep.when == "call":
        lr = None
        if any( [x in item.function.__annotations__.keys() for x in ['return','tc']] ) or 'tc' in item.funcargs:
            # Resolve the spec source; 'tc*' marks the fixture-argument path.
            if 'tc' in item.function.__annotations__.keys():
                src = 'tc'
                ret = item.function.__annotations__['tc']
            elif 'return' in item.function.__annotations__.keys():
                ret = item.function.__annotations__['return']
                src = 'return'
            else:
                src = 'tc*'
                ret = item.funcargs['tc']
            ##
            ##objective here is to compile an ISO ... record combining information about tests and results.
            ##
            if isinstance( ret, BaseClassTS ) and hasattr( ret, 'spec' ):
                # Pass the owning test class (if any) through to the reporter.
                if hasattr( item, 'cls' ):
                    cls = item.cls
                else:
                    cls = None
                lr = ret.report( pytest_report=rep, test_case=ret, cls=cls )
            elif not isinstance( ret, BaseClassTS ):
                warnings.warn("Test function ret is not a BaseClassTS instance: %s" % item.function.__name__, get_user_warning((item,rep) ) )
            else:
                warnings.warn("Test function lacks specification: %s" % item.function.__name__, get_user_warning((item,rep) ) )
        else:
            warnings.warn("Test function lacks hook: %s" % item.function.__name__, get_user_warning((item,rep) ) )
        # No report line produced: optionally escalate to a hard failure.
        if lr == None:
            if RAISE_NO_CALLBACK_EXCEPTION:
                raise NoCallback(item,rep)
            ##lr = line_reporter( pytest_report=rep, sfx='{no spec: %s}' % item.function.__name__ )
| cp4cds/cmip6_range_check_old | scripts/local_pytest_utils.py | local_pytest_utils.py | py | 11,159 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "... |
70480534505 | import numpy as np
from tqdm import tqdm
from tools import augment_cluster
class InversionAugmentor(object):
    """Creates augmented copies of earthquake waveform/parameter pairs.

    Wraps ``tools.augment_cluster`` and keeps the inputs (waveforms and
    earthquake parameters) together with the augmentation settings
    (``new_elements``, ``parameter_variation``, ``additional_weight``) so a
    dataset can be (re)generated and the settings archived to disk.
    """

    def __init__(self, waveforms=None, earthquake_parameters=None, new_elements=2,
                 parameter_variation=0.2, additional_weight=False):
        """Store inputs and augmentation settings.

        Args:
            waveforms: array-like of waveforms, or None if set later.
            earthquake_parameters: array-like of source parameters, or None.
            new_elements: number of augmented samples to generate per call.
            parameter_variation: relative spread applied to the parameters.
            additional_weight: passed through to ``augment_cluster``.
        """
        super(InversionAugmentor, self).__init__()
        self.waveforms = waveforms
        self.earthquake_parameters = earthquake_parameters
        self.__check_variable_state()
        # assign augmentation settings (these all have defaults)
        self.new_elements = new_elements
        self.parameter_variation = parameter_variation
        self.additional_weight = additional_weight

    def __check_variable_state(self):
        # check if values for inputs are set and set the state flag accordingly
        if (self.waveforms is None) or (self.earthquake_parameters is None):
            self.all_inputs_set = False
            print("Warning: Inputs missing")
        else:
            self.all_inputs_set = True

    def __create_output_string(self):
        """Build a human-readable summary of the inputs and settings."""
        # Show array shapes when available, otherwise the raw value (e.g. None).
        try:
            waveforms_string = self.waveforms.shape
        except AttributeError:
            waveforms_string = self.waveforms
        try:
            earthquake_parameters_string = self.earthquake_parameters.shape
        except AttributeError:
            earthquake_parameters_string = self.earthquake_parameters
        output_string = \
            "---- Inputs ----\n" + \
            f"Waveforms = {waveforms_string}\n" + \
            f"Earthquake Parameters = {earthquake_parameters_string}\n" + \
            f"---- Augmentation Properties ----\n" + \
            f"New elements = {self.new_elements}\n" + \
            f"Parameter variation = {self.parameter_variation}\n" + \
            f"Additional weight = {self.additional_weight}"
        return output_string

    def create_new_dataset(self, waveforms=None, earthquake_parameters=None,
                           new_elements=None, parameter_variation=None,
                           additional_weight=None):
        """Generate augmented data; any argument given here overrides the
        stored instance value first.

        Returns:
            (new_earthquakes, new_waveforms), or (None, None) when the inputs
            are not (yet) set.
        """
        # if new inputs have been given, overwrite the instance properties
        if (waveforms is not None) or (earthquake_parameters is not None):
            self.set_input(waveforms, earthquake_parameters)
        # same for the augmentation properties
        if (new_elements is not None) or (parameter_variation is not None) or (
                additional_weight is not None):
            self.set_augmentation_parameters(new_elements, parameter_variation, additional_weight)
        self.__check_variable_state()
        # defaults for when the method is called before the inputs are defined
        new_earthquakes, new_waveforms = None, None
        if self.all_inputs_set:
            # the actual generation is delegated to tools.augment_cluster
            new_earthquakes, new_waveforms = augment_cluster(
                waveforms=self.waveforms,
                earthquake_parameters=self.earthquake_parameters,
                parameter_variation=self.parameter_variation,
                number_of_new_data=self.new_elements, clip_values=True,
                additional_weight=self.additional_weight)
        else:  # create error message
            print("Error: Not all input values are set. \n" +
                  "Please define the inputs: waveforms and/or earthquake parameters")
        return new_earthquakes, new_waveforms

    def set_input(self, waveforms=None, earthquake_parameters=None):
        """Overwrite whichever inputs are provided (non-None)."""
        if waveforms is not None:
            self.waveforms = waveforms
        if earthquake_parameters is not None:
            self.earthquake_parameters = earthquake_parameters

    def set_augmentation_parameters(self, new_elements=None, parameter_variation=None, additional_weight=None):
        """Overwrite whichever augmentation settings are provided (non-None)."""
        if new_elements is not None:
            self.new_elements = new_elements
        if parameter_variation is not None:
            self.parameter_variation = parameter_variation
        if additional_weight is not None:
            self.additional_weight = additional_weight

    def store_settings(self, filename, ID=None):
        """Write the current settings plus a creation timestamp (and an
        optional ID header) to *filename* as plain text.
        """
        # Local import fixes a NameError: datetime was never imported at
        # module level in the original file.
        from datetime import datetime
        output_string = self.__create_output_string()
        current_time = datetime.now()
        output_string += f"\n---- Time of creation ----\n{current_time}"
        if ID is not None:
            output_string = f"---- ID ----\nID ={ID}\n" + \
                            output_string
        with open(filename, 'w') as f:
            f.write(output_string)

    def display_properties(self):
        """Print the settings summary to stdout."""
        output_string = self.__create_output_string()
        print(output_string)
{
"api_name": "tools.augment_cluster",
"line_number": 73,
"usage_type": "call"
}
] |
32018992802 | # -*- coding: utf-8 -*-
"""
Author : Jason See
Date : 2022/6/9 15:50
Tool : PyCharm
Content:
"""
import json
import urllib.request
def get_response_dic(data):
    """POST a JSON-RPC payload to the Zabbix API and return the decoded reply.

    Args:
        data: JSON-RPC request body as a str.

    Returns:
        dict parsed from the JSON response.
    """
    url = 'http://192.168.30.128/api_jsonrpc.php'
    header = {'Content-Type': 'application/json'}
    payload = bytes(data, 'utf-8')
    request = urllib.request.Request(url, payload, header)
    # Close the HTTP response deterministically instead of leaking the socket.
    with urllib.request.urlopen(request) as response:
        result_str = response.read().decode('utf-8')
    return json.loads(result_str)
def get_result(raw):
    """Serialise *raw* to JSON and dispatch it to the Zabbix API, returning
    the parsed reply dict."""
    return get_response_dic(json.dumps(raw))
def get_auth():
    """Log in to Zabbix with the built-in admin account and return the auth token."""
    # Login payload (NOTE: credentials are hard-coded; move to configuration
    # before any production use).
    login_request = {
        "jsonrpc": "2.0",
        "method": "user.login",
        "params": {"user": "Admin", "password": "zabbix"},
        "id": 0,
    }
    return get_response_dic(json.dumps(login_request))['result']
def get_host_list(auth):
    """Fetch every host interface known to Zabbix.

    Args:
        auth: session token obtained from get_auth().

    Returns:
        The 'result' list of host-interface dicts.
    """
    request_body = {
        "jsonrpc": "2.0",
        "method": "hostinterface.get",
        "params": {
            "output": "extend",
            # "hostids": "30057"
        },
        "auth": auth,
        "id": 1,
    }
    response = get_result(request_body)
    return response['result']
def creat_host(auth, hostid, ip, dns='', main_host=0, port='10050', type_host=1, useip=1):
    """Create a host interface via the Zabbix ``hostinterface.create`` call.

    Bug fix: the original built the request dict but never sent it (and
    implicitly returned None); the request is now dispatched and the API
    reply returned.

    Args:
        auth: session token from get_auth().
        hostid: id of the host the interface belongs to.
        ip: interface IP address.
        dns: interface DNS name (used when useip == 0).
        main_host: 1 for the default interface, 0 otherwise.
        port: interface port as a string.
        type_host: interface type (1 = agent).
        useip: 1 to connect via IP, 0 via DNS.

    Returns:
        The parsed JSON-RPC response dict.
    """
    raw = {
        "jsonrpc": "2.0",
        "method": "hostinterface.create",
        "params": {
            "hostid": hostid,
            "dns": dns,
            "ip": ip,
            "main": main_host,
            "port": port,
            "type": type_host,
            "useip": useip
        },
        "auth": auth,
        "id": 1
    }
    return get_result(raw)
def update_host_port(auth, host_port, hostid, host_type=1):
    """Update the port of an existing host interface.

    ``host_type`` is accepted for signature compatibility but is not used by
    the underlying ``hostinterface.update`` call.
    """
    payload = {
        "jsonrpc": "2.0",
        "method": "hostinterface.update",
        "params": {
            "interfaceid": hostid,
            "port": host_port
        },
        "auth": auth,
        "id": 1,
    }
    return get_result(payload)
def main():
    """Demo driver: authenticate, list interfaces, then change one port."""
    # Authenticate against the Zabbix JSON-RPC endpoint and show the token.
    auth = get_auth()
    print(auth)
    # List all known host interfaces.
    host_list = get_host_list(auth)
    print(host_list)
    # creat_host(auth, hostid='10318', ip='192.168.7.7', type_host=1)
    # Change the agent port of interface 10318 and show the API reply.
    result = update_host_port(auth, hostid='10318', host_port=30050)
    print(result)
if __name__ == '__main__':
main()
| zcsee/pythonPra | zabbix_pra/demo2.py | demo2.py | py | 2,611 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.request.Request",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 16,
"usage_type": "name"
},
{
"api_nam... |
41524333829 | import cv2 # working with, mainly resizing, images
import numpy as np # dealing with arrays
import os # dealing with directories
from random import shuffle # mixing up or currently ordered data that might lead our network astray in training.
from tqdm import tqdm # a nice pretty percentage bar for tasks. Thanks to viewer Daniel BA1/4hler for this suggestion
from code.env_variables import *
def label_img(cls):
    """One-hot encode a class name: 'face' -> [1, 0], 'notFace' -> [0, 1].

    Any other value yields None, matching the original implicit fall-through.
    """
    one_hot = {'face': [1, 0], 'notFace': [0, 1]}
    return one_hot.get(cls)
def _build_dataset(root_dir, out_path):
    """Load every image under ``root_dir/<class>``, label, shuffle and save it.

    Shared worker for create_train_data/process_test_data — the two originals
    were line-for-line duplicates apart from the directory and output file.
    Each sample is [grayscale image array, one-hot label array].
    """
    samples = []
    for cls in CLASSES:
        class_dir = "{}/{}".format(root_dir, cls)
        # The label depends only on the class, so compute it once per class
        # instead of once per image as the original did.
        label = label_img(cls)
        for img_name in tqdm(os.listdir(class_dir)):
            path = os.path.join(class_dir, img_name)
            image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            image = cv2.resize(image, (IMG_SIZE_WIDTH, IMG_SIZE_HEIGHT))
            samples.append([np.array(image), np.array(label)])
    shuffle(samples)
    np.save(out_path, samples)
    return samples

def create_train_data():
    """Build, shuffle and persist the labelled training set (see _build_dataset)."""
    return _build_dataset(TRAIN_DIR, TRAIN_DB)

def process_test_data():
    """Build, shuffle and persist the labelled test set (see _build_dataset)."""
    return _build_dataset(TEST_DIR, TEST_DB)
if __name__ == "__main__":
    # Build and persist both datasets when run as a script.
    train_data = create_train_data()
    test_data = process_test_data()
{
"api_name": "tqdm.tqdm",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
... |
69816265383 | """Stream type classes for tap-sharepointsites."""
from singer_sdk.typing import (
DateTimeType,
ObjectType,
PropertiesList,
Property,
StringType,
)
from tap_sharepointsites.client import sharepointsitesStream
class ListStream(sharepointsitesStream):
    """Define custom stream."""

    # Each Graph list item carries a unique "id"; with no replication key the
    # stream is a full-table refresh on every sync.
    primary_keys = ["id"]
    replication_key = None
    # Static schema for the Microsoft Graph listItem payload; nested objects
    # (createdBy, fields, ...) are deliberately left free-form (ObjectType).
    schema = PropertiesList(
        Property("@odata.etag", StringType),
        Property("createdDateTime", StringType),
        Property("eTag", StringType),
        Property("id", StringType),
        Property("lastModifiedDateTime", StringType),
        Property("webUrl", StringType),
        Property("createdBy", ObjectType()),
        Property("lastModifiedBy", ObjectType()),
        Property("parentReference", ObjectType()),
        Property("contentType", ObjectType()),
        Property("fields@odata.context", StringType),
        Property("fields", ObjectType()),
        # presumably injected by the tap at extraction time — TODO confirm.
        Property("_loaded_at", DateTimeType),
    ).to_dict()
| storebrand/tap-sharepointsites | tap_sharepointsites/streams.py | streams.py | py | 1,003 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "tap_sharepointsites.client.sharepointsitesStream",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "singer_sdk.typing.PropertiesList",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "singer_sdk.typing.Property",
"line_number": 20,
"usage_... |
14415686598 | #!/usr/bin/env python3
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
class Surrogate:
    """Surrogate models.

    Thin wrapper unifying the scikit-learn regressors used as surrogates.
    Supported ``hparams['name']`` values: 'GP' (Gaussian process), 'SVR'
    (support vector regression) and 'RF' (random forest).
    """
    def __init__(self, hparams):
        """Build the underlying estimator from the hyper-parameter dict.

        Raises:
            ValueError: for an unsupported model name (previously this left
                ``self.model`` unset and failed later with AttributeError).
        """
        self.model_name = hparams['name']
        self.normalize = hparams['normalize']
        if self.model_name == 'GP':
            if 'normalize_y' not in hparams:
                hparams['normalize_y'] = False
            self.model = GaussianProcessRegressor(kernel=hparams['kernel'],
                                                  n_restarts_optimizer=hparams['n_restarts_optimizer'],
                                                  normalize_y=hparams['normalize_y'])
        elif self.model_name == 'SVR':
            self.model = SVR()
        elif self.model_name == 'RF':
            self.model = RandomForestRegressor(max_depth=2, random_state=0,
                                               n_estimators=hparams['n_estimators'])
        else:
            raise ValueError('Unsupported surrogate model: %s' % self.model_name)

    def fit(self, X, y):
        """
        Fit surrogate models

        Args:
            X: array-like, shape = (n, d)
                Training data
            y: array-like, shape = (n, [num_lsf])
                Soft output values

        Returns:
            self: returns an instance of self
        """
        if self.normalize:
            # Only X is standardised; y normalisation stays disabled.
            self.scaler_X = StandardScaler()
            self.scaler_X.fit(X)
            X = self.scaler_X.transform(X)
        if self.model_name == 'RF':
            # RandomForestRegressor expects a 1-D target.
            y = y.flatten()
        self.model.fit(X, y)
        return self

    def predict(self, X, *args):
        """Predict at *X* (standardised first when normalize is on).

        For 'GP' the optional positional args are (return_std, return_cov);
        they now default to False instead of raising IndexError when omitted.
        SVR/RF predictions are reshaped to a column vector.
        """
        if self.normalize:
            X = self.scaler_X.transform(X)
        if self.model_name == 'GP':
            return_std = args[0] if len(args) > 0 else False
            return_cov = args[1] if len(args) > 1 else False
            return self.model.predict(X, return_std, return_cov)
        # SVR and RF both return a flat array; reshape to (n, 1).
        return self.model.predict(X).reshape(-1, 1)
| RobinSeaside/S4IS | src/Surrogate.py | Surrogate.py | py | 2,259 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sklearn.gaussian_process.GaussianProcessRegressor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVR",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 25,
"usage_type... |
75071113384 | import requests
import pandas as pd
import numpy as np
import seaborn as sns
from bs4 import BeautifulSoup
import warnings
import nltk
#import surprise
import scipy as sp
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from nltk import word_tokenize, RegexpTokenizer
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from datetime import datetime, time
class Tags():
    """Tag-based movie recommender over MovieLens-style CSV inputs.

    Loads movies/ratings/tags plus a synthetic "user 0" ratings file, builds
    merged rating/tag frames and a user-by-title pivot table, and offers
    tag-driven similarity recommendations and rating predictions.
    """
    def __init__(self):
        self.cargaDocumentos()
    def cargaDocumentos(self):
        """Load the CSV inputs and precompute the merged/pivoted frames."""
        # Synthetic user 0: locally entered ratings, stamped with "now".
        self.df_usuaarioO = pd.read_csv('csv/Usuario_0.csv', sep=';')
        self.df_usuaarioO = self.df_usuaarioO.drop(columns=["title"])
        # NOTE(review): this loop rewrites the whole userId/timestamp columns
        # on every iteration, so a single assignment would suffice — confirm.
        for usuario_nuevo in range(len(self.df_usuaarioO["movieId"])):
            self.df_usuaarioO["userId"] = 0
            self.df_usuaarioO["timestamp"] = datetime.now()
        self.df_movies = pd.read_csv('csv/movies.csv')
        self.df_movies = self.df_movies.dropna()
        self.df_ratings = pd.read_csv('csv/ratings.csv')
        self.df_ratings = self.df_ratings.dropna()
        self.df_tags = pd.read_csv('csv/tags.csv')
        self.df_tags = self.df_tags.dropna()
        # "_old" frames exclude user 0 and are kept for reference.
        self.df_movies_ratings_old = self.df_ratings.merge(self.df_movies)[
            ['userId', 'movieId', 'title', 'rating', 'genres']]
        self.df_movies_ratings_tags_old = pd.merge(self.df_movies_ratings_old, self.df_tags, how='outer')[
            ['userId', 'movieId', 'title', 'rating', 'genres', 'tag']]
        self.df_movies_ratings_tags_old["tag"] = self.df_movies_ratings_tags_old["tag"].str.lower()
        # Prepend user 0's ratings and rebuild the merged frames with them.
        self.df_ratings = pd.concat([self.df_usuaarioO, self.df_ratings], axis=0)
        self.df_movies_ratings = self.df_ratings.merge(self.df_movies)[
            ['userId', 'movieId', 'title', 'rating', 'genres']]
        self.df_movies_ratings_tags = pd.merge(self.df_movies_ratings, self.df_tags, how='outer')[
            ['userId', 'movieId', 'title', 'rating', 'genres', 'tag']]
        self.df_movies_ratings_tags["tag"] = self.df_movies_ratings_tags["tag"].str.lower()
        # self.df_movies_ratings_tags.fillna("vacio", inplace = True)
        self.ratings_table = self.df_movies_ratings.pivot_table(index='userId', columns='title', values='rating')
        # replace the NaN ratings with 0:
        self.ratings_table.fillna(0, inplace=True)
    def recomedacionPorTags(self, nombrePelicula, n_similares):
        """Return up to *n_similares* titles whose tag profile is most similar
        (cosine similarity) to *nombrePelicula*; empty list on any failure."""
        listaPeliculasMostrar = []
        try:
            n_similares=int(n_similares)
            count_matrix = self.df_movies_ratings_tags.pivot_table(index='movieId', columns='tag', values='userId')
            #count_matrix = self.df_movies_ratings_tags.pivot_table(index='movieId', columns='tag', values='rating')
            count_matrix.fillna(0, inplace=True)
            sparse_rating = sp.sparse.csr_matrix(count_matrix)
            selected_movie = self.df_movies[self.df_movies["title"] == nombrePelicula]["movieId"].values[0]
            # locate the movie's row index inside the pivot matrix
            selected_movie_index = count_matrix.index.get_loc(selected_movie)
            similarities = cosine_similarity(sparse_rating, sparse_rating[selected_movie_index])
            # NOTE(review): `index` here is a positional row of the pivot, but
            # it is later used with df_movies.iloc — confirm the two line up.
            movie_list = [(index, similarity) for index, similarity in enumerate(similarities)]
            movie_list.sort(key=lambda x: x[1], reverse=True)
            if(n_similares>len(movie_list)):
                n_similares=len(movie_list)-1
            # bandera flags that the query movie itself appeared in the top-n;
            # one extra candidate is then appended as a replacement.
            bandera=False
            contador = 1
            for movie in movie_list[0:n_similares]:
                if(nombrePelicula != self.df_movies.iloc[movie[0]]["title"]):
                    listaPeliculasMostrar.append(self.df_movies.iloc[movie[0]]["title"])
                    contador+=1
                else:
                    bandera=True
            if(bandera):
                mov=movie_list[n_similares][0]
                listaPeliculasMostrar.append(self.df_movies.iloc[mov]["title"])
        except Exception as e:
            # Swallow lookup failures (e.g. unknown title) and return what we have.
            print("")
        return listaPeliculasMostrar
    def predecirRatingDeUserAPeliculaPorSusTags(self, nombrePelicula, user_id):
        """Predict *user_id*'s rating for *nombrePelicula* from tag overlap.

        Returns the existing rating (as str) if the user already voted,
        the sentinel string "Vacia" when no overlapping tags exist, otherwise
        the mean rating over the user's tag-matching rows formatted '.3f'.
        """
        user_id=int(user_id)
        yaVotado = self.df_movies_ratings[(self.df_movies_ratings['title']==nombrePelicula) & (self.df_movies_ratings['userId']==user_id)]["rating"].unique()
        if(len(yaVotado)!=0):
            prediction = yaVotado[0]
            return str(prediction)
        else:
            # collect the tags of the movie to predict
            tagsPeli = []
            movie_tags = self.df_movies_ratings_tags[self.df_movies_ratings_tags['title']==nombrePelicula]["tag"].unique()
            for m in movie_tags:
                tagsPeli.append(m)
            filtroMergeandoTags = self.df_movies_ratings_tags[['userId','movieId','title', 'rating', 'tag']]
            filtroEnBaseUserId = filtroMergeandoTags[filtroMergeandoTags['userId']==user_id]
            #similitud = [distance.cosine(tagsPeli, j['tag']) for i,j in filtroEnBaseUserId.iterrows()]
            user_ratings = filtroEnBaseUserId[filtroEnBaseUserId['tag'].isin(tagsPeli)]
            # if the user has tags in common with the searched movie, drop the
            # null rows; otherwise (all their tags NaN) keep the frame as-is,
            # since dropna() would empty it entirely
            if user_ratings.dropna().size != 0:
                user_ratings = user_ratings.dropna()
            # average the user's ratings over the tag-matching movies
            if user_ratings.empty:
                print()
                return "Vacia"
            else:
                #prediction = user_ratings_ID['rating'].mean()
                prediction = format(user_ratings['rating'].mean(), '.3f')
                return str(prediction)
    def recomedacionPorTagsUser(self, user_id, n_similares):
        """Recommend *n_similares* unseen titles for *user_id*, ranked by the
        tag-based rating prediction above."""
        user_id=int(user_id)
        n_similares=int(n_similares)
        df_movies_rating_user = self.df_movies_ratings[self.df_movies_ratings['userId']==user_id]
        # NOTE(review): the next line computes a filter but discards it (no-op).
        self.df_movies[~self.df_movies.movieId.isin(df_movies_rating_user["movieId"])]
        df = pd.DataFrame()
        # Candidate pool: movies the user has not rated yet.
        movies = self.df_movies[~self.df_movies.movieId.isin(df_movies_rating_user["movieId"])]
        df["movieId"] = movies["movieId"]
        df["title"] = movies["title"]
        df["ratingPredict"] = [self.predecirRatingDeUserAPeliculaPorSusTags(j["title"], user_id) for i,j in df.iterrows()]
        # NOTE(review): ratingPredict holds strings, so this sort is
        # lexicographic, not numeric — confirm intended.
        df = df.sort_values(by='ratingPredict', ascending = False)
        df_resultados = df["title"].head(n_similares)
        listaPeliculasRecomendadas = []
        for i in df_resultados:
            listaPeliculasRecomendadas.append(i)
        return listaPeliculasRecomendadas
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pandas.read_c... |
74194893543 | from torchvision import models
import torch.nn as nn
import torch.nn.functional as F
import torch
from initialize_delf import init_delf, init_densenet_TL, init_delf_TL, init_resnet101gem, init_delf_pca
def initialize_model(model_name, num_classes, freeze_layers, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
if model_name == "resnet50":
""" Resnet50
"""
model_ft = models.resnet50(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
elif model_name == "resnet101":
""" Resnet101
"""
model_ft = models.resnet101(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
elif model_name == "resnet152":
""" Resnet152
"""
model_ft = models.resnet152(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
elif model_name == "densenet121":
""" Densenet121
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
elif model_name == "densenet169":
""" Densenet169
"""
model_ft = init_densenet169()
set_parameter_requires_grad(model_ft, freeze_layers)
elif model_name == "densenet201":
""" Densenet201
"""
model_ft = models.densenet201(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, freeze_layers)
# Handle the auxilary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
elif model_name == "delf":
""" DELF using our pretrained Densenet169 features """
model_ft = init_delf(num_classes)
set_parameter_requires_grad(model_ft, 2)
elif model_name == "delf_TL":
""" DELF using our pretrained Densenet169 features, without FC layer """
model_ft = init_delf_TL()
set_parameter_requires_grad(model_ft, 2)
elif model_name == "our_densenet_TL":
""" Our pretrained Densenet169 without FC layer """
model_ft = init_densenet_TL()
set_parameter_requires_grad(model_ft, freeze_layers)
elif model_name == "resnet101gem":
model_ft = init_resnet101gem()
set_parameter_requires_grad(model_ft, 0)
elif model_name == "delf_pca":
model_ft = init_delf_pca()
set_parameter_requires_grad(model_ft, 1)
else:
print("Invalid model name, exiting...")
exit()
# model_ft = nn.Sequential(*list(model_ft.children()))
return model_ft
def set_parameter_requires_grad(model, freeze_layers):
child_counter = 0
for child in model.children():
if child_counter < freeze_layers:
for param in child.parameters():
param.requires_grad = False
child_counter += 1
class Pool(nn.Module):
def __init__(self, dim):
super(Pool,self).__init__()
self.dim = dim
def forward(self, x):
out = F.adaptive_avg_pool2d(x, (1, 1)).view(-1, self.dim)
return out
def init_densenet169():
model_ft = models.densenet169(pretrained=True)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, 2000)
features = list(model_ft.children())[:-1]
features.append(nn.ReLU(inplace=True))
features.append(Pool(1664))
features.append(list(model_ft.children())[-1])
model_ft = nn.Sequential(*features)
return model_ft
| kauterry/cs231n-retrieval | Kaushik/pretrain/model.py | model.py | py | 5,634 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torchvision.models.resnet50",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.... |
36048548399 | import os
from uuid import uuid4
from datetime import timedelta
# Flask session key; regenerated on every process start, so sessions do not
# survive a restart.
SECRET_KEY = os.urandom(32)

# Grabs the folder where the script runs.
basedir = os.path.abspath(os.path.dirname(__file__))

# JWT signing secret and token lifetime.
# SECURITY NOTE(review): the JWT secret and the database credentials below are
# hard-coded in source control — load them from environment variables instead.
JWT_SECRET_KEY = 'm.f.ragab5890@gmail.comtafi_5890_TAFI'
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=12)

# Enable debug mode.
DEBUG = True

# DATABASE URL (local PostgreSQL, default database).
SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:tafiTAFI@127.0.0.1:5432/'
SQLALCHEMY_TRACK_MODIFICATIONS = False
{
"api_name": "os.urandom",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_numb... |
35098607020 | import numpy as np
from numpy import genfromtxt
from functools import reduce
#--- mushroom preprocessing ---#
# Load the raw mushroom CSV as strings; column 0 is the class ('p'/'e').
mushroom = genfromtxt('dane/mushroom.csv', delimiter=',' ,dtype = str)
classes = mushroom[:,0].copy()
last = mushroom[:,-1].copy()
# NOTE: training_set aliases `mushroom` (no copy) — the column swap below
# mutates the original array too. The swap moves the class column to the end.
training_set= mushroom
training_set[:,0]= last
training_set[:,-1]=classes
def f(x):
    """Map mushroom class labels: 'p' (poisonous) -> 1, 'e' (edible) -> 0.

    Any other value yields None, matching the original implicit fall-through.
    """
    label_map = {'p': 1, 'e': 0}
    return label_map.get(x)
# Binary target vector derived from the class column, reshaped to a column.
# NOTE(review): the row count 8124 is hard-coded to this dataset — confirm.
binary_classes = [f(x) for x in classes]
b = np.reshape(binary_classes,(8124, 1))
def toBinaryFeatures(features):
    """One-hot encode a 2-D array of categorical string features.

    Each distinct (value, column) pair is assigned its own bit; the bits of a
    row are OR-ed together and then expanded back into a fixed-width 0/1
    vector, one column per distinct pair.
    """
    n_cols = features.shape[1]
    # Tag every value with its column index so equal values in different
    # columns stay distinct.
    tagged = [value + str(idx % n_cols) for idx, value in enumerate(features.flatten())]
    unique_tags = list(set(tagged))
    bit_for_tag = {tag: 1 << position for position, tag in enumerate(unique_tags)}
    width = len(bit_for_tag)
    encoded_rows = []
    for row in features.tolist():
        bits = 0
        for col, value in enumerate(row):
            bits |= bit_for_tag[value + str(col)]
        encoded_rows.append([int(ch) for ch in format(bits, '0{}b'.format(width))])
    return np.array(encoded_rows)
# One-hot encode the mushroom features and append the binary targets as the
# final column to form the training matrix.
a= toBinaryFeatures(training_set)
grzyby_zestaw_uczacy = np.append(a,b,axis=1)
#--- end of mushroom preprocessing ---#

#--- data loading ---#
training_set = np.array([[0, 1, 0, 1, 0, 1],[0, 0, 0, 1, 0, 0],[0, 0, 0, 0, 1, 0],[1, 0, 1, 0, 1, 0],[0, 1, 1, 1, 0, 1],[0, 1, 0, 0, 1, 1],[1, 1, 1, 0, 0, 0],[1, 1, 1, 1, 0, 1],[0, 1, 1, 0, 1, 0],[1, 1, 0, 0, 0, 1],[1, 0, 0, 0, 1, 0]])
test1 = genfromtxt('dane/test1.csv', delimiter=',')
dane1 = genfromtxt('dane/dane1.csv', delimiter=',')
udacity_set1 = np.array(
[[1,1,1,0],
[1,0,1,0],
[0,1,0,1],
[1,0,0,1]])
#--- end of data loading ---#
#--- entropy computation ---#
def H(X):
    """Binary Shannon entropy of the last (label) column of X.

    Assumes labels are encoded 0/1.  Returns 0 for an empty matrix or a pure
    (single-class) label column.  The original also computed a `ones` count
    that was never used; that redundant pass is removed.
    """
    if X.shape[0] == 0:
        # Entropy of an empty set is defined here as 0.
        return 0
    labels = X[:, -1]
    p0 = np.count_nonzero(labels == 0) / len(labels)
    p1 = 1 - p0
    if p0 == 0 or p0 == 1:
        return 0
    return -p0 * np.log2(p0) - p1 * np.log2(p1)
def select_attribute(X, i, v):
    """Return the rows of X whose i-th column equals v.

    (The local was renamed from ``list``, which shadowed the builtin.)
    """
    selected = X[np.where(X[:, i] == v)]
    return selected
def Q(X, i, v):
    """Fraction of X's rows whose i-th attribute equals v."""
    matching_rows = select_attribute(X, i, v)
    return matching_rows.shape[0] / X.shape[0]
def IG(X, i):
    """Information gain of splitting X on the binary attribute i."""
    gain = H(X)
    for v in (0, 1):
        gain -= Q(X, i, v) * H(select_attribute(X, i, v))
    return gain
def ID3(S, recursion = 1, tree = None):
    """Build an ID3 decision tree over the binary dataset S.

    The tree is a dict keyed by heap-style node indices (root = 1, children
    2k / 2k+1): internal nodes store the chosen attribute index as a string,
    leaves store the class label 0 or 1.

    Bug fix: ``tree`` previously defaulted to a shared mutable dict, so
    successive top-level calls accumulated nodes from earlier trees.  The
    leaf base cases also now return the tree instead of None.
    """
    if tree is None:
        tree = {}
    decision_column = S[:, -1]
    # Pure node: emit a leaf label and stop recursing.
    if np.all(decision_column == 0):
        tree[recursion] = 0
        return tree
    if np.all(decision_column == 1):
        tree[recursion] = 1
        return tree
    # Pick the attribute with maximal information gain (last column is the label).
    gains = np.array([IG(S, i) for i in range(S.shape[1] - 1)])
    j = gains.argmax()
    tree[recursion] = str(int(j))
    # Recurse on the two attribute-value partitions.
    ID3(select_attribute(S, j, 0), recursion * 2, tree)
    ID3(select_attribute(S, j, 1), recursion * 2 + 1, tree)
    return tree
def zwroc_wierzcholki(X):
    """Return the sorted keys of the internal (non-leaf) nodes of tree X,
    i.e. the entries whose value is neither the leaf label 0 nor 1."""
    internal_nodes = {key: value for key, value in X.items() if value != 1 and value != 0}
    return sorted(internal_nodes.keys())
def poddrzewa(drzewo):
    """List the prefix subtrees of *drzewo*: one per internal node (except the
    first), each holding every key strictly before that node in sorted key
    order; the full tree itself is appended last."""
    subtrees = []
    sorted_keys = sorted(drzewo.keys())
    for node in zwroc_wierzcholki(drzewo)[1:]:
        cutoff = sorted_keys.index(node)
        prefix = {key: drzewo[key] for key in sorted_keys[:cutoff]}
        subtrees.append(prefix)
    subtrees.append(drzewo)
    return subtrees
def zapis2(drzewo):
    """Flatten a 1-indexed {position: value} tree into a dense list whose
    length is the largest position, padding missing slots with None."""
    dense = [None] * max(drzewo.keys())
    for position, value in drzewo.items():
        dense[position - 1] = value
    return dense
def drzewa_zapisy(drzewo):
    """Return the tree in four representations: item pairs, keys, values and
    the dense-list layout produced by zapis2()."""
    as_items = list(drzewo.items())
    as_keys = list(drzewo.keys())
    as_values = list(drzewo.values())
    return as_items, as_keys, as_values, zapis2(drzewo)
def poddrzewa2(drzewo):
    """Walk the dense-list layout of *drzewo* and snapshot the accumulated
    prefix each time an int leaf label (0/1) is reached."""
    snapshots = []
    prefix = []
    for node in zapis2(drzewo):
        prefix.append(node)
        # `type(...) is int` (not isinstance) preserves the original's
        # exact-type check, e.g. bools would not match.
        if type(node) is int:
            snapshots.append(prefix.copy())
    return snapshots
def blad_drugiego_rodzaju(drzewa, training_set):
    """Type-II error count: for each flattened tree in *drzewa* (as produced
    by poddrzewa2), walk its nodes, repeatedly filtering *training_set*, and
    record the number of misclassified rows at the final leaf.

    Prints intermediate sets/counts for debugging.
    NOTE(review): the walk only runs to len(drzewo)-1, so the last element is
    never visited directly — confirm that is intended.
    """
    wynik = []
    iloscbledow=0
    for drzewo in drzewa:
        test_set = training_set.copy()
        for i in range(0,len(drzewo)-1):
            if i%2 == 1:  # negative branch (odd node index): attribute value 1
                if type(drzewo[i]) == str:
                    # Internal node: keep rows where this attribute equals 1.
                    atrybut = int(drzewo[i])
                    test_set = select_attribute(test_set,atrybut,1)
                elif drzewo[i] == 1:
                    # Leaf 1: count rows labelled 1 as the error tally.
                    iloscbledow = len([j for j in test_set[:,-1] if j==1])
                    print(test_set)
                    print(iloscbledow)
                elif drzewo[i] == 0:
                    iloscbledow = len([j for j in test_set[:,-1] if j==0])
                    print(test_set)
                    print(iloscbledow)
            else:  # positive branch (even node index): attribute value 0
                if type(drzewo[i]) == str:
                    atrybut = int(drzewo[i])
                    test_set = select_attribute(test_set,atrybut,0)
                elif drzewo[i] == 1:
                    iloscbledow = len([j for j in test_set[:,-1] if j==1])
                    print(test_set)
                    print(iloscbledow)
                elif drzewo[i] == 0:
                    iloscbledow = len([j for j in test_set[:,-1] if j==0])
                    print(test_set)
                    print(iloscbledow)
        wynik.append(iloscbledow)
    return wynik
#def treetoarray(tree):
# a=0
# array = max(tree.keys())
# treearray = np.empty([2,len])
# for i in tree.keys():
# treearray[1]
# a = a+1
#
#
#
#def liczbalisci(drzewo):
# z = list(drzewo.values())
# f = z.count(1)
# g = z.count(0)
# return f+g
#
#def blad(drzewo,S):
#
#def alfai(T,Ti,S):
# return blad(Ti,S)-blad(T,S)/liczbalisci(T)-liczbalisci(Ti)
#
#def wybierzdrzewo(drzewa,S):
# wyniki= []
# glowne_drzewo = drzewa[len(drzewa)-1]
# for drzewo in drzewa:
# wyniki.append(alfai(glowne_drzewo,drzewo,S))
# indexdrzewa= wyniki.index(max(wyniki))
# return drzewa[indexdrzewa]
#
#
#from sklearn.feature_extraction import DictVectorizer
#dvec = DictVectorizer(sparse=False)
#
#X = dvec.fit_transform(mushroom.transpose().to_dict().values())
#
#data = pd.DataFrame({'0': ['u']})
#res = pd.get_dummies(mushroom)
#res.to_csv('output.csv')
#print(res) | iSeptio/archive_ML_mushroom_clasification_tree | lista1.py | lista1.py | py | 7,049 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.genfromtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"lin... |
24921295985 | #SETUP
#Import Modules
import discord
from discord.ext import commands
from discord.ext import tasks
import os
from dotenv import load_dotenv
import math
import datetime
import _pickle
import xlwings
#Load the Token and Create a Bot
load_dotenv();
TOKEN = os.getenv('ULTIMATEPC_TOKEN'); #load token from .env file
bot = commands.Bot(command_prefix = ';');
#Set Global Variables
authorizedList = ["BlockMaster", "TheGreatMurloc"]; #used for some command permissions \
guild = None; #set global variables (than they are defined in on_ready event so we I don't have to getting them in every event repeatedly over and over again)
biTimeRange = 30; #this variable represents from what time range of the image channel history will be the best images chosen
timeOffset = 2; #just for displaying time information purposes (CET is shifted by 2 hours from UTC at the moment)
#Set Server's Specific Naming
#File names of the pickled database / backups / Excel leaderboard / DM log;
#channel, emoji and role names must exactly match the guild's objects.
dataFileName = 'Ultimate PC - DataFile';
dataFileBackupName = 'Ultimate PC - DataFile.txt Backup';
excelFileName = 'Ultimate PC - Leaderboard';
dmHistoryFileName = 'Ultimate PC - DM History';
channelImagesName = '🎨u-wot-made';
channelGalleryName = '👀gallery';
channelPcName = '👀pc-gallery';
channelBiName = '🏆best-images';
emojiGeneralPointsName = 'PointGeneral';
#Medal emojis double as 1/2/3-point reaction emojis.
emoji1pointName = 'MedalBronze';
emoji2pointName = 'MedalSilver';
emoji3pointName = 'MedalGold';
emojiMedalBronzeName = 'MedalBronze';
emojiMedalSilverName = 'MedalSilver';
emojiMedalGoldName = 'MedalGold';
roleCommand = 'Pretty High'; #role required by the privileged admin commands
roleRank1Name = 'PhotoEdit Novice';
roleRank2Name = 'PhotoEdit Adept';
roleRank3Name = 'PhotoEdit Master';
roleRank4Name = 'PhotoEdit Wizard';
roleRank5Name = 'PhotoEdit Guru';
#Remove the 'help' Command To Be Set Later
bot.remove_command('help'); #replaced by the custom show_help command
#GENERAL FUNCTIONALITY
#Tell When Bot is Connected to the Server and Set Global Variables
@bot.event
async def on_ready():
    """Initialise globals once connected, set presence, print BI-update timing."""
    #Set the Global Variables
    global guild;
    guild = bot.guilds[0];
    global authorizedList;
    print(f"Members authorized to use some of the commands: {authorizedList}.");
    #Convert the configured names into discord.Member objects — this can only
    #be done after the bot has connected and the member cache is populated.
    tempAuthorizedList = authorizedList.copy();
    authorizedList.clear();
    for personName in tempAuthorizedList:
        authorizedList.append(discord.utils.get(guild.members, name = personName));
    #Set Bot's Status
    await bot.change_presence(status = discord.Status.online, activity = discord.Game("Roblox Deluxe Edition"));
    #Print Time Until the Next Best Images Update
    global biTimeRange;
    global timeOffset;
    #FIX: use 'with' so the DataFile handle is closed (the original leaked it).
    with open(f'{dataFileName}.txt', 'rb') as dataFile:
        dataList = _pickle.load(dataFile);
    lastUpdateDateBi = dataList[1][0];
    currentTime = datetime.datetime.utcnow();
    print("Last BI update: " + str(lastUpdateDateBi));
    #Next update = last BI update + time range + 7 extra days, shifted for display.
    nextUpdateTime = lastUpdateDateBi + datetime.timedelta(days = biTimeRange + 7) + datetime.timedelta(hours = timeOffset);
    zeroBeforeMinute = "0" * (nextUpdateTime.minute <= 9); #pad single-digit minutes
    print(f"Time until the next BI update: {(nextUpdateTime - currentTime).days} d {(nextUpdateTime - currentTime).seconds // 3600} h {((nextUpdateTime - currentTime).seconds % 3600) // 60} min [{nextUpdateTime.date()}, {nextUpdateTime.hour}:{zeroBeforeMinute}{nextUpdateTime.minute}].");
    #Send a Message
    print(f"{bot.user.name} connected to {guild.name}!\n___________________________________\n");
"""
channelImages = discord.utils.get(guild.channels, name = 'main-chat');
messageHistory = await channelImages.history(limit = None).flatten();
print(len(messageHistory));
"""
#Send a Message to a Certain User
@bot.command(name = "sendMessage")
@commands.has_role(roleCommand)
async def send_message_to_user(ctx, user: discord.Member, *, message):
    """DM *message* to *user* and append a timestamped record to the DM log file."""
    await user.create_dm();
    await user.dm_channel.send(message);
    messagePostDate = ctx.message.created_at + datetime.timedelta(hours = timeOffset); #shift UTC for display
    zeroBeforeMinute = "0" * (messagePostDate.minute <= 9); #pad single-digit minutes
    messageTimeInfo = f'{messagePostDate.date()}, {messagePostDate.hour}:{zeroBeforeMinute}{messagePostDate.minute}';
    print(f"[{messageTimeInfo}] to {user.name}: {message}");
    #FIX: 'with' guarantees the log file handle is closed even on error
    #(the original open/close pair leaked on exceptions).
    with open(f'{dmHistoryFileName}.txt', 'a') as textFile:
        textFile.write(f"[{messageTimeInfo}] to {user.name}: {message}\n");
#Save a Message Someone Sent to the Bot's DM
@bot.event
async def on_message(message):
    """Log any DM sent to the bot, then hand the message to command processing."""
    global guild;
    #Only record messages outside the guild's channels that aren't our own.
    if (message.channel not in guild.channels) and (message.author != bot.user):
        messagePostDate = message.created_at + datetime.timedelta(hours = timeOffset); #shift UTC for display
        zeroBeforeMinute = "0" * (messagePostDate.minute <= 9);
        messageTimeInfo = f'{messagePostDate.date()}, {messagePostDate.hour}:{zeroBeforeMinute}{messagePostDate.minute}';
        print(f"[{messageTimeInfo}] from {message.author.name}: {message.content}");
        #FIX: 'with' guarantees the log file handle is closed.
        with open(f'{dmHistoryFileName}.txt', 'a') as textFile:
            textFile.write(f"[{messageTimeInfo}] from {message.author.name}: {message.content}\n");
    #Required whenever on_message is overridden, otherwise commands stop firing.
    await bot.process_commands(message);
#Delete Certain Amount of Messages
@bot.command(name = "deleteMessage")
@commands.has_role(roleCommand)
async def delete_messages(ctx, amount = 2): #this will take as a argument amount of messages (if it's not specified it will be 2) and delete them
    #NOTE(review): the invoking command message itself counts toward 'amount' — confirm intended.
    await ctx.channel.purge(limit = amount);
#Compute "Simple" Math
@bot.command(name = "quickMath")
@commands.has_role(roleCommand)
async def quick_math(ctx, *, toCompute):
    #SECURITY: eval() executes arbitrary Python received from chat.  It is
    #only reachable by holders of roleCommand, but it should still be
    #replaced with a safe expression evaluator (e.g. ast.literal_eval or a
    #dedicated math parser) before widening access.
    await ctx.send(eval(toCompute));
#Send Bot's server-info Article Part
@bot.command(name = "sendText")
@commands.has_role(roleCommand)
async def send_mainInfo_text(ctx):
    """Post the static server-info article describing the bot and its commands."""
    #The adjacent string literals below are concatenated into one message.
    await ctx.send("**Ultimate PC Bot**"
        "\nHi! My major function here is to write your images data to the database used to update server's channels. Beside that I can afford you some pretty cool commands. (Prefix for the commands is ';'. You can find it next to the 'L' letter on most of the keyboards.)"
        "\n\n**;help**\n```This shows you a list of commands with a brief explanation what they do.```"
        "\n**;info** @<member>\n```This command will show you an info embed with some data of the tagged user. (If no one is tagged, it will send your info embed.)```"
        "\n;**images** (pc)<number>```Typing ';images' will show you a list of all your registered images. Then you can get the url link of a specific image by typing ';images <number>' (for any image) or ';images pc<number>' (for a PC image).```"
        "\nThe bot is still under construction and it's not running 24/7 at the moment. (You can access it at least on Saturday from 16:00 to 19:00 UTC). I'm not an experienced programmer so in case you're really bored and want to improve the way the bot is working Python code is available here: https://github.com/BlockMaster320/Ultimate-PC-Discord-Bot");
#MEMBER DATABASE AND MESSAGE MANIPULATION
#Handle Command Exceptions
@bot.event
async def on_command_error(ctx, error):
    """Translate common command errors into friendly chat replies; re-raise the rest."""
    errorCommand = ctx.command;
    if isinstance(error, commands.errors.CheckFailure):
        await ctx.send("You don't have permission to use this command. :( (Perhaps git gut?)");
    if isinstance(error, commands.errors.CommandNotFound):
        await ctx.send("Entered command doesn't exist. (But there are some which actually does!)");
    if isinstance(error, commands.errors.BadArgument):
        #show_member_info / show_member_images are commands defined elsewhere in this file.
        if (errorCommand == show_member_info):
            await ctx.send("You entered an invalid user as the argument. (Next time try to look for a person from *this* universe.)");
        if (errorCommand == show_member_images):
            await ctx.send("You entered an invalid image number as the argument. (Check your num lock, just in case. ;))");
    else:
        #NOTE(review): indentation reconstructed — as read, this 'else' belongs
        #to the BadArgument check, so CheckFailure/CommandNotFound errors are
        #ALSO re-raised after being answered above.  Confirm against the
        #original file before changing.
        raise error;
@bot.command('help')
async def show_help(ctx):
    """Send an embed listing the bot's public commands with short descriptions."""
    commandPrefix = bot.command_prefix;
    commandSummaries = (
        (f'{commandPrefix}help', "Shows this embed."),
        (f'{commandPrefix}info', "Shows your info embed."),
        (f'{commandPrefix}images', "Gets links to your images."),
    );
    helpEmbed = discord.Embed(title = "Ultimate PC Bot.py | Commands", colour = discord.Colour.blue());
    for commandName, summary in commandSummaries:
        helpEmbed.add_field(name = commandName, value = summary, inline = False);
    helpEmbed.set_thumbnail(url = bot.user.avatar_url);
    await ctx.send(embed = helpEmbed);
#Class Representing a Member
class EditMember():
    """Per-member record of registered images and earned points.

    Attributes:
        memberID: Discord user id of the member.
        memberName: name kept locally so it survives the member leaving the
            server (a departed member's name can't be resolved any more).
        memberTag: the member's discriminator.
        imageNormalList: message ids of ordinary (non-PC) images.
        imagePcDict: PhotoChallenge number -> list of message ids.
        pointsGeneral: reaction-based points total.
        pointsPc: PhotoChallenge participation points total.
    """
    def __init__(self, memberID, memberName, memberTag):
        self.memberID = memberID;
        self.memberName = memberName;
        self.memberTag = memberTag;
        self.imageNormalList = [];
        self.imagePcDict = {};
        self.pointsGeneral = 0;
        self.pointsPc = 0;
    def get_editMember_info(self):
        """Return [total image count, PC image count, general points, PC points]."""
        imagePcNumber = sum(len(pcImages) for pcImages in self.imagePcDict.values());
        totalImages = len(self.imageNormalList) + imagePcNumber;
        return [totalImages, imagePcNumber, self.pointsGeneral, self.pointsPc];
    def print_editMember_data(self, memberName, memberID, imageNormalList, imagePcDict, pointsGeneral, pointsPc):
        """Print the attributes whose matching flag argument is truthy."""
        #str * falsy-flag yields "" so disabled segments collapse to nothing.
        segments = (f"memberName: {self.memberName}" * memberName,
                    f"\nmemberID: {self.memberID}" * memberID,
                    f"\nimageNormalList: {self.imageNormalList}" * imageNormalList,
                    f"\nimagePcDict: {self.imagePcDict}" * imagePcDict,
                    f"\npointsGeneral: {self.pointsGeneral}" * pointsGeneral,
                    f"\npointsPc: {self.pointsPc}" * pointsPc);
        print(*segments);
#Prepare the DataFile
def create_dataFile():
    """(Re)create the pickled DataFile with default timestamps and empty structures.

    Layout of the pickled list (indices used throughout the bot):
    [0] lastUpdateDate, [1] [lastUpdateDateBi, biMonth], [2] memberDict,
    [3] imageNormalList, [4] imagePcDict, [5] embedGalleryDict,
    [6] embedPcDict, [7] embedBiDict.
    """
    lastUpdateDate = datetime.datetime(2019, 3, 29, 16, 57, 50, 0);
    lastUpdateDateBi = [datetime.datetime(2020, 5, 29, 17, 0, 0, 0), 0];
    memberDict = {};
    imageNormalList = [];
    imagePcDict = {};
    embedGalleryDict = {};
    embedPcDict = {};
    embedBiDict = {};
    dataList = [lastUpdateDate, lastUpdateDateBi, memberDict, imageNormalList, imagePcDict, embedGalleryDict, embedPcDict, embedBiDict];
    #FIX: 'with' closes the handle — the original opened the file and never closed it.
    with open(f'{dataFileName}.txt', 'wb') as dataFile:
        _pickle.dump(dataList, dataFile);
#Create an Embed for Embed Channel
def create_embed(message, attachment, messagePoints, emojiGeneralPoints, title = None):
    """Build a discord.Embed showing one attachment of *message* with its points.

    attachment is the index into message.attachments; title is optional
    (used e.g. for "PhotoChallenge N" headers).
    """
    author = message.author;
    posted = message.created_at;
    imageEmbed = discord.Embed(title = title, description = f"\n{messagePoints} \u200b {emojiGeneralPoints} \u200b \u200b | \u200b \u200b [jump to the message]({message.jump_url})\n\n{message.content}");
    imageEmbed.set_author(name = f"{author.name}", icon_url = author.avatar_url);
    imageEmbed.set_image(url = message.attachments[attachment].url);
    #:02d zero-pads single-digit minutes, matching the original manual padding
    imageEmbed.set_footer(text = f"{posted.date()} | {posted.hour}:{posted.minute:02d} UTC");
    return imageEmbed;
#Get Points by Finding Specific Reactions
async def get_generalPoints(message):
    """Sum the point value of medal reactions on *message*.

    Bronze/silver/gold medal reactions are worth 1/2/3 points each; reactions
    added by the message's own author are not counted.  Returns an int.
    FIX: the original duplicated the counting loop three times and compared
    the author inconsistently (member != author vs member.id != author.id);
    unified here via a name -> value lookup with a single id comparison.
    """
    pointValueByEmojiName = {emoji1pointName: 1, emoji2pointName: 2, emoji3pointName: 3};
    messagePoints = 0;
    for reaction in message.reactions:
        #UNICODE emojis arrive as plain strings with no .name attribute —
        #only the server's custom emojis (discord.Emoji) can award points.
        if (type(reaction.emoji) == str):
            continue;
        pointValue = pointValueByEmojiName.get(reaction.emoji.name);
        if (pointValue is None):
            continue;
        async for member in reaction.users():
            if (member.id != message.author.id): #self-reactions don't count
                messagePoints += pointValue;
    return messagePoints;
"""
@bot.command(name = 'reactionTest')
async def reaction_test(ctx):
global guild;
channelImages = discord.utils.get(guild.channels, name = 'u-wot-made-test-2');
messageHistory = await channelImages.history(limit = None).flatten();
for message in messageHistory:
generalPoints = await get_generalPoints(message);
print(generalPoints);
"""
#Go Trough All the Images and Register Them to the DataFile
@bot.command(name = 'registerImages')
@commands.has_role(roleCommand)
async def register_images(ctx, createDataFile = False):
    """Register every image posted in the image channel since the last update.

    For each new attachment the operator is prompted on STDIN: 'x' skips the
    image, a number assigns it to that PhotoChallenge, anything else stores it
    as a normal image.  Updated points/images are written to the EditMember
    records, mirrored as embeds into the gallery/PC channels, and finally the
    DataFile plus a dated backup are rewritten.
    NOTE(review): indentation reconstructed from a flattened source — verify
    the nesting against the original file before relying on it.
    """
    #Get the Channels
    global guild;
    global biTimeRange;
    global timeOffset;
    global emojiGeneralPointsName;
    channelImages = discord.utils.get(guild.channels, name = channelImagesName);
    channelGallery = discord.utils.get(guild.channels, name = channelGalleryName);
    channelPc = discord.utils.get(guild.channels, name = channelPcName);
    emojiGeneralPoints = discord.utils.get(guild.emojis, name = emojiGeneralPointsName)
    #Load Data from the DataFile
    if (createDataFile):
        create_dataFile();
    dataFile = open(f'{dataFileName}.txt', 'rb');
    dataList = _pickle.load(dataFile);
    lastUpdateDate = dataList[0];
    lastUpdateDateBi = dataList[1];
    memberDict = dataList[2];
    imageNormalList = dataList[3];
    imagePcDict = dataList[4];
    embedGalleryDict = dataList[5];
    embedPcDict = dataList[6];
    embedBiDict = dataList[7];
    #Register the Images
    messageHistory = await channelImages.history(limit = None, after = lastUpdateDate, oldest_first = True).flatten();
    print(f"new messages in the channel: {len(messageHistory)}");
    for message in messageHistory:
        if (message.attachments != []):
            #Get Message Information
            messageID = message.id;
            messageAuthor = message.author;
            messagePostDate = message.created_at + datetime.timedelta(hours = timeOffset);
            zeroBeforeMinute = "0" * (messagePostDate.minute <= 9);
            try:
                pcNumber = input(f"{messageAuthor.name} ({messagePostDate.hour}:{zeroBeforeMinute}{messagePostDate.minute}, {messagePostDate.day}/{messagePostDate.month}/{messagePostDate.year}): ");
            except: #if there's any character that cannot be encoded in the member's name
                pcNumber = input(f"[name not readable] ({messagePostDate.hour}:{zeroBeforeMinute}{messagePostDate.minute}, {messagePostDate.day}/{messagePostDate.month}/{messagePostDate.year}): ");
            if (pcNumber == 'x'): #don't save the image if 'x' is entered as the pcNumber
                continue;
            elif (pcNumber != '') and (pcNumber[0] not in '123456789'): #if by a mistake something else than 'x' or a number is entered, save the image as a normal image
                pcNumber = '';
            #Save the Image
            if (messageAuthor.id not in memberDict.keys()): #search for the MemberEdit object or create a new one
                memberDict[messageAuthor.id] = EditMember(messageAuthor.id, messageAuthor.name.encode('utf-8'), messageAuthor.discriminator);
            editMember = memberDict[messageAuthor.id];
            for attachmentCount in range(len(message.attachments)): #in case that there are multiple attachments in the message
                if (pcNumber != ''): #add PC image to the MemberEdit object
                    pcNumber = int(pcNumber);
                    if (pcNumber not in editMember.imagePcDict.keys()):
                        editMember.imagePcDict[pcNumber] = [];
                    editMember.imagePcDict[pcNumber].append(messageID);
                    #first image of a PC earns 1 PC point, each further one 0.5
                    editMember.pointsPc += 1 - 0.5 * (len(editMember.imagePcDict[pcNumber]) > 1);
                    if (pcNumber not in imagePcDict.keys()): #add PC image to the general PC image dictionary
                        imagePcDict[pcNumber] = [];
                    imagePcDict[pcNumber].append(messageID);
                else:
                    editMember.imageNormalList.append(messageID); #add normal image to the MemberEdit object and general normal image list
                    imageNormalList.append(messageID);
            #Add Image's General Points According to Its Reactions
            messagePoints = await get_generalPoints(message);
            editMember.pointsGeneral += messagePoints;
            #Send the Image to Channels
            for attachmentCount in range(len(message.attachments)): #in case that there are multiple attachments in the message
                galleryEmbed = create_embed(message, attachmentCount, messagePoints, emojiGeneralPoints); #send the image to the Gallery Channel
                galleryEmbedMessage = await channelGallery.send(embed = galleryEmbed);
                embedGalleryDict[galleryEmbedMessage.id] = [messageID, messagePoints];
                if (pcNumber != ''): #send the image to the PhotoChallenges Channel
                    pcEmbed = create_embed(message, attachmentCount, messagePoints, emojiGeneralPoints, f"PhotoChallenge {pcNumber}");
                    pcEmbedMessage = await channelPc.send(embed = pcEmbed);
                    embedPcDict[pcEmbedMessage.id] = [messageID, messagePoints];
    #Save Data to the DataFile
    dataFile = open(f'{dataFileName}.txt', 'wb');
    lastUpdateDate = datetime.datetime.utcnow();
    dataList.clear();
    dataList = [lastUpdateDate, lastUpdateDateBi, memberDict, imageNormalList, imagePcDict, embedGalleryDict, embedPcDict, embedBiDict];
    _pickle.dump(dataList, dataFile);
    dataFile.close();
    #Make a Backup Save File
    dataFileBackup = open(f'{dataFileBackupName} [{lastUpdateDate.date()}].txt', 'bw'); #create a backup copy of the dataFile just in case its content gets lost
    _pickle.dump(dataList, dataFileBackup);
    dataFileBackup.close();
    print("\033[4m" + "\nImages have been registered." + "\033[0m");
#Post Last Month's Best Images from the Image Channel
@bot.command(name = 'updateBestImages')
@commands.has_role(roleCommand)
async def update_bestImages(ctx):
    """Post a monthly summary and the top-10 (plus ties) images to the BI channel.

    Scans the image channel over the last biTimeRange days, ranks images by
    medal-reaction points, posts an announcement embed plus one embed per
    winning image, then advances lastUpdateDateBi/biMonth in the DataFile.
    NOTE(review): indentation reconstructed from a flattened source — in this
    reading messageCount/lastMessagePoints only advance for images that were
    actually posted; confirm against the original file.
    """
    #Get the Channels
    global guild;
    global biTimeRange;
    global emojiMedalBronzeName;
    global emojiMedalSilverName;
    global emojiMedalGoldName;
    channelImages = discord.utils.get(guild.channels, name = channelImagesName);
    channelBi = discord.utils.get(guild.channels, name = channelBiName);
    emojiGeneralPoints = discord.utils.get(guild.emojis, name = emojiGeneralPointsName);
    emojiMedalBronze = discord.utils.get(guild.emojis, name = emojiMedalBronzeName);
    emojiMedalSilver = discord.utils.get(guild.emojis, name = emojiMedalSilverName);
    emojiMedalGold = discord.utils.get(guild.emojis, name = emojiMedalGoldName);
    #Load Data from the DataFile
    dataFile = open(f'{dataFileName}.txt', 'rb');
    dataList = _pickle.load(dataFile);
    lastUpdateDateBi = dataList[1][0];
    biMonth = dataList[1][1];
    embedBiDict = dataList[7];
    #Create a List of Messages and Their General Points
    biMonth += 1;
    timeRange = datetime.timedelta(days = biTimeRange);
    messageHistory = await channelImages.history(limit = None, before = lastUpdateDateBi + timeRange, after = lastUpdateDateBi, oldest_first = True).flatten();
    messageList = [];
    messagePointsTotal = 0;
    for message in messageHistory:
        if (message.attachments != []):
            messagePoints = await get_generalPoints(message);
            messagePointsTotal += messagePoints;
            messageList.append([message, messagePoints]);
    #Get the Best Images and Send Them to BestImages Channel #send an announcement
    biMonthEmbed = discord.Embed(title = f"BEST IMAGES OF THE MONTH {biMonth}", description = None, colour = discord.Colour.blue());
    biMonthEmbed.add_field(name = "Images Posted", value = f"{len(messageList)}");
    biMonthEmbed.add_field(name = "Total Points Earned", value = f"{messagePointsTotal}");
    biMonthEmbed.set_footer(text = f"\u200b \n from {lastUpdateDateBi.date()} to {(lastUpdateDateBi + timeRange).date()}");
    await channelBi.send(embed = biMonthEmbed);
    messageList.sort(key = lambda x: x[1], reverse = True); #sort the list by general points (second element of each sublist)
    lastMessagePoints = 0;
    messageCount = 1;
    for message in messageList: #get top 5 (or more) images from the list and send them to the channelBi
        #top 10 plus anything tied with the 10th image's score
        if (messageCount <= 10) or (lastMessagePoints == message[1]):
            if (messageCount == 1):
                trophy = emojiMedalGold;
            elif (messageCount == 2):
                trophy = emojiMedalSilver;
            elif (messageCount == 3):
                trophy = emojiMedalBronze;
            else:
                trophy = "";
            biEmbed = create_embed(message[0], 0, message[1], emojiGeneralPoints, f"#{messageCount} Image {trophy}");
            biEmbedMessage = await channelBi.send(embed = biEmbed);
            embedBiDict[biEmbedMessage.id] = [message[0].id, message[1]];
            lastMessagePoints = message[1];
            messageCount += 1;
    #Save Data to the DataFile
    dataFile = open(f'{dataFileName}.txt', 'wb');
    lastUpdateDateBi = lastUpdateDateBi + timeRange;
    dataList[1] = [lastUpdateDateBi, biMonth];
    dataList[7] = embedBiDict;
    _pickle.dump(dataList, dataFile);
    dataFile.close();
    print("\033[4m" + "\nBest images have been updated." + "\033[0m");
#Update Embeds in a Certain Channel
async def update_embedChannel(embedChannel, imageChannel, imageNumber, memberDict, embedDict, embedDictType, dataList, updateAuthor, emojiGeneralPoints):
    """Refresh the newest *imageNumber* embeds in *embedChannel* from their source messages.

    embedDictType is one of 'gallery'/'pc'/'bi' and selects which slot of
    dataList receives the updated embedDict.  Returns the updated dataList.
    NOTE(review): indentation reconstructed from a flattened source — in this
    reading the embed itself is always re-edited and only the member point
    totals are gated by updateAuthor (and counted once, on the gallery pass).
    Confirm against the original file.
    """
    #Get Message Information
    messageHistoryGallery = await embedChannel.history(limit = imageNumber).flatten();
    for embedMessage in messageHistoryGallery:
        if (embedMessage.id in embedDict.keys()): #the check is there just in case the embed message isn't in the embedDict (but it should be there (like it really should))
            imageMessageID = embedDict[embedMessage.id][0]; #get message information from the EmbedDict
            imageMessageOldPoints = embedDict[embedMessage.id][1];
            try:
                imageMessage = await imageChannel.fetch_message(imageMessageID); #get the Message from its ID
            except: #continue if the message doesn't exist
                print("\033[91m" + f"\n[{embedDictType} message not found (update_embeds)]" + "\033[0m");
                continue;
        else:
            continue;
        #Update the Embed of the Message
        imageMessagePoints = await get_generalPoints(imageMessage);
        embedEdit = embedMessage.embeds[0];
        if (updateAuthor):
            if (embedDictType == "gallery"):
                memberDict[imageMessage.author.id].pointsGeneral += imageMessagePoints - imageMessageOldPoints; #add points to the memberEdit object (it has to happen only once, that's why there's the "gallery" check)
        embedDict[embedMessage.id][1] = imageMessagePoints; #update the points in the embedDict
        #change points and update author info in the embed
        embedEdit.description = f"\n{imageMessagePoints} \u200b {emojiGeneralPoints} \u200b \u200b | \u200b \u200b [jump to the message]({imageMessage.jump_url})\n\n{imageMessage.content}";
        embedEdit.set_author(name = imageMessage.author.name, icon_url = imageMessage.author.avatar_url);
        await embedMessage.edit(embed = embedEdit);
    dataList[2] = memberDict;
    if (embedDictType == 'gallery'):
        dataList[5] = embedDict;
    if (embedDictType == 'pc'):
        dataList[6] = embedDict;
    if (embedDictType == 'bi'):
        dataList[7] = embedDict;
    return dataList;
#Update the Embeds Data (general points)
@bot.command(name = 'updateEmbeds')
@commands.has_role(roleCommand)
async def update_embeds(ctx, imageNumber = 200, updateAuthor = True):
    """Refresh points/author info on the newest embeds and persist the DataFile.

    imageNumber limits how many embeds per channel are re-checked;
    updateAuthor is forwarded to update_embedChannel().
    """
    #Get the Channels
    global guild;
    channelImages = discord.utils.get(guild.channels, name = channelImagesName);
    channelGallery = discord.utils.get(guild.channels, name = channelGalleryName);
    channelPc = discord.utils.get(guild.channels, name = channelPcName);
    channelBi = discord.utils.get(guild.channels, name = channelBiName);
    emojiGeneralPoints = discord.utils.get(guild.emojis, name = emojiGeneralPointsName);
    #Load Data from the DataFile
    dataFile = open(f'{dataFileName}.txt', 'rb');
    dataList = _pickle.load(dataFile);
    memberDict = dataList[2];
    embedGalleryDict = dataList[5];
    embedPcDict = dataList[6];
    embedBiDict = dataList[7];
    #Update the Embeds
    #NOTE: gallery and PC passes are currently disabled — only the best-images
    #channel is refreshed.
    #dataList = await update_embedChannel(channelGallery, channelImages, imageNumber, memberDict, embedGalleryDict, "gallery", dataList, updateAuthor, emojiGeneralPoints);
    #dataList = await update_embedChannel(channelPc, channelImages, imageNumber, memberDict, embedPcDict, "pc", dataList, updateAuthor, emojiGeneralPoints);
    dataList = await update_embedChannel(channelBi, channelImages, imageNumber, memberDict, embedBiDict, "bi", dataList, updateAuthor, emojiGeneralPoints);
    #Save Data to the DataFile
    dataFile = open(f'{dataFileName}.txt', 'wb');
    _pickle.dump(dataList, dataFile);
    dataFile.close();
    print("\033[4m" + "\nEmbeds have been updated." + "\033[0m");
#Update Rank Roles of All the EditMembers
@bot.command(name = 'updateRoles')
@commands.has_role(roleCommand)
async def update_roles(ctx):
    """Give every registered member the single rank role matching their general points.

    Thresholds: <=10 Novice, <=25 Adept, <=50 Master, <=100 Wizard, >100 Guru.
    All other rank roles are removed so each member holds exactly one.
    """
    #Get the Rank Roles
    global guild;
    global roleRank1Name;
    global roleRank2Name;
    global roleRank3Name;
    global roleRank4Name;
    global roleRank5Name;
    roleRank1 = discord.utils.get(guild.roles, name = roleRank1Name);
    roleRank2 = discord.utils.get(guild.roles, name = roleRank2Name);
    roleRank3 = discord.utils.get(guild.roles, name = roleRank3Name);
    roleRank4 = discord.utils.get(guild.roles, name = roleRank4Name);
    roleRank5 = discord.utils.get(guild.roles, name = roleRank5Name);
    allRankRoles = (roleRank1, roleRank2, roleRank3, roleRank4, roleRank5);
    #Load Data from the DataFile ('with' closes the handle)
    with open(f'{dataFileName}.txt', 'br') as dataFile:
        dataList = _pickle.load(dataFile);
    memberDict = dataList[2];
    #Update the Rank Roles
    for editMember in memberDict.values():
        print(editMember.memberName);
        pointsGeneral = editMember.pointsGeneral;
        member = discord.utils.get(guild.members, id = editMember.memberID);
        if (member is not None): #skip members who left the server
            if (pointsGeneral <= 10):
                newRole = roleRank1;
            elif (pointsGeneral <= 25):
                newRole = roleRank2;
            elif (pointsGeneral <= 50):
                newRole = roleRank3;
            elif (pointsGeneral <= 100):
                newRole = roleRank4;
            else:
                #BUG FIX: this branch previously added roleRank4 and then the
                #remove call stripped roleRank4 again, leaving 100+-point
                #members with NO rank role at all; >100 points is Guru.
                newRole = roleRank5;
            await member.add_roles(newRole);
            await member.remove_roles(*[role for role in allRankRoles if role is not newRole]);
    print("\033[4m" + "\nRank roles has been updated." + "\033[0m");
#Update the Excel File Data
@bot.command(name = 'updateExcel')
@commands.has_role(roleCommand)
async def update_excel(ctx, lastPcNumber: int):
    """Write the member leaderboard into the Excel workbook via xlwings.

    lastPcNumber is the highest PhotoChallenge number so far; it sizes the
    per-member PC columns.  Columns written: A-D member rows (index, name,
    #tag, general points), G normal-image counts, J+ per-PC image counts.
    NOTE(review): indentation reconstructed from a flattened source.
    """
    #Load Data from the DataFile
    dataFile = open(f'{dataFileName}.txt', 'br');
    dataList = _pickle.load(dataFile);
    memberDict = dataList[2];
    #Load the Excel File
    excelFile = xlwings.Book(f'{excelFileName}.xlsx');
    excelSheet = excelFile.sheets[0];
    #Create Excel Table With Member Data
    global guild
    memberList = []; #list containing EditMember's name, tag and pointsGeneral
    postedNormalImagesList = []; #list containing EditMember's total images posted
    pcList = []; #list containing EditMember's PhotoChallenges
    memberCount = 0;
    for editMember in memberDict.values(): #write data to the lists
        memberCount += 1;
        member = discord.utils.get(guild.members, id = editMember.memberID);
        if (member is not None): #in case the member left the server (or was kicked or banned) we get its name from editMember object
            memberName = member.name;
            memberTag = member.discriminator
        else:
            #memberName was stored UTF-8-encoded by register_images
            memberName = editMember.memberName.decode('utf-8');
            memberTag = editMember.memberTag;
        memberList.append([memberCount, memberName, f"#{memberTag}", editMember.pointsGeneral]);
        postedNormalImagesList.append([len(editMember.imageNormalList)]);
        pcTempList = [None] * lastPcNumber;
        for pcNumber, pcImageList in editMember.imagePcDict.items():
            pcTempList[pcNumber - 1] = len(pcImageList);
        pcList.append(pcTempList);
    #Write the Data to the Excel File
    excelSheet.cells(2, 'A').value = memberList;
    excelSheet.cells(2, 'G').value = postedNormalImagesList;
    excelSheet.cells(2, 'J').value = pcList;
    print("\033[4m" + "\nThe Excel file has been updated." + "\033[0m");
#Move an EditMember's Image to a Different Location in the Object #USE ONLY IF THE EMBEDS ARE UPDATED! (otherwise it could be subtracting wrong values from the EditMember)
@bot.command(name = 'editImage')
@commands.has_role(roleCommand)
async def edit_image(ctx, messageID: int, action):
#Get the Channels
global guild;
channelImages = discord.utils.get(guild.channels, name = channelImagesName);
channelGallery = discord.utils.get(guild.channels, name = channelGalleryName);
channelPc = discord.utils.get(guild.channels, name = channelPcName);
emojiGeneralPoints = discord.utils.get(guild.emojis, name = emojiGeneralPointsName);
#Load Data from the DataFile
dataFile = open(f'{dataFileName}.txt', 'br');
dataList = _pickle.load(dataFile);
memberDict = dataList[2];
embedGalleryDict = dataList[5];
embedPcDict = dataList[6];
#Get EditMember From the Message ID
imageMessage = None;
imageMessagePoints = None;
try: #get imageMessage and its points
imageMessage = await channelImages.fetch_message(messageID);
imageMessagePoints = await get_generalPoints(imageMessage);
except:
print("\033[91m" + "\n[message not found (edit_message)]" + "\033[0m");
editMember = memberDict[imageMessage.author.id];
editMember.print_editMember_data(False, False, True, True, True, True);
#Get the Image's Embeds
embedGalleryMessage = None;
embedPcMessage = None;
for embedMessageID, imageMessageList in embedGalleryDict.items(): #get gallery embedMessage
if (messageID == imageMessageList[0]):
try:
embedGalleryMessage = await channelGallery.fetch_message(embedMessageID);
except:
print("\033[91m" + "\n[message not found (edit_message)]" + "\033[0m");
break;
for embedMessageID, imageMessageList in embedPcDict.items(): #get PC embedMessage
if (messageID == imageMessageList[0]):
try:
embedPcMessage = await channelPc.fetch_message(embedMessageID);
except:
print("\033[91m" + "\n[message not found (edit_message)]" + "\033[0m");
break;
#Remove the Image from the EditMember
subtractGeneralPoints = False; #this variable will ensure that if there's no imageMessage with the ID it will not subtract general points from the EditMember
if (messageID in editMember.imageNormalList): #look for the image in EditMember's imageNormalList
editMember.imageNormalList.remove(messageID);
subtractGeneralPoints = True;
else:
pcKeyToDelete = None;
for pcNumber, pcList in editMember.imagePcDict.items(): #look for the image in EditMember's imagePcDict
if (messageID in pcList):
pcList.remove(messageID);
subtractGeneralPoints = True;
if (len(pcList) == 0):
editMember.pointsPc -= 1;
pcKeyToDelete = pcNumber;
else:
editMember.pointsPc -= 0.5;
break;
if (pcKeyToDelete is not None): #delete imagePcDict's pcNumber key if there's no PC image remaining
editMember.imagePcDict.pop(pcKeyToDelete);
if (subtractGeneralPoints):
messagePoints = await get_generalPoints(imageMessage);
editMember.pointsGeneral -= messagePoints;
#Add the Image to a New Location in the EditMember
if (action[0] in '123456789'): #add the image to EditMember's imagePcDict
pcNumber = int(action);
if (pcNumber not in editMember.imagePcDict.keys()):
editMember.imagePcDict[pcNumber] = [];
editMember.imagePcDict[pcNumber].append(messageID);
messagePoints = await get_generalPoints(imageMessage); #add general and PC points according to the image
editMember.pointsGeneral += messagePoints;
editMember.pointsPc += 1 - 0.5 * (len(editMember.imagePcDict[pcNumber]) > 1);
if (embedPcMessage is not None): #when moving a PC image to a different PC (just update the existing PC embed)
embedEdit = embedPcMessage.embeds[0];
embedEdit.title = f"PhotoChallenge {pcNumber}";
await embedPcMessage.edit(embed = embedEdit);
else: #when moving a normal image to the imagePcDict (send a new PC embed)
pcEmbed = create_embed(imageMessage, 0, imageMessagePoints, emojiGeneralPoints, f"PhotoChallenge {pcNumber}");
embedPcMessageNew = await channelPc.send(embed = pcEmbed);
embedPcDict[embedPcMessageNew.id] = [messageID, imageMessagePoints];
if (embedGalleryMessage is None): #when adding the image to a new EditMember object (send a new gallery embed and PC embed)
galleryEmbed = create_embed(imageMessage, 0, imageMessagePoints, emojiGeneralPoints);
embedGalleryMessageNew = await channelGallery.send(embed = galleryEmbed);
embedGalleryDict[embedGalleryMessageNew.id] = [messageID, imageMessagePoints];
elif (action == 'n'): #add the image to EditMember's imageNormalDict
editMember.imageNormalList.append(messageID);
messagePoints = await get_generalPoints(imageMessage); #add general points according to the image
editMember.pointsGeneral += messagePoints;
if (embedPcMessage is not None): #when moving a PC image to the imageNormalList (delete image's PC embed)
embedPcDict.pop(embedPcMessage.id);
await embedPcMessage.delete();
elif (embedGalleryMessage is None): #when adding a normal image to a new EditMember object (send a new gallery embed) (the elif is there just for the case we're trying to move a normal image to the imageNormalList for some reason - it has no effect then)
galleryEmbed = create_embed(imageMessage, 0, imageMessagePoints, emojiGeneralPoints);
embedGalleryMessageNew = await channelGallery.send(embed = galleryEmbed);
embedGalleryDict[embedGalleryMessageNew.id] = [messageID, imageMessagePoints];
elif (action == 'x'): #remove the image from the gallery
if (embedGalleryMessage is not None): #delete embed message id key (the embed was deleted)
embedGalleryDict.pop(embedGalleryMessage.id);
await embedGalleryMessage.delete();
if (embedPcMessage is not None):
embedPcDict.pop(embedPcMessage.id);
await embedPcMessage.delete();
#Save Data to the DataFile
editMember.print_editMember_data(False, False, True, True, True, True);
dataFile = open(f'{dataFileName}.txt', 'bw');
dataList[2] = memberDict;
dataList[5] = embedGalleryDict;
dataList[6] = embedPcDict;
_pickle.dump(dataList, dataFile);
dataFile.close();
print("\033[4m" + "\nThe image has been relocated." + "\033[0m");
#Give or Remove a Role from All the Members
@bot.command(name = 'manageRole')
@commands.has_role(roleCommand)
async def manage_role(ctx, action = 'a', editMembersOnly: bool = False): #possible action variable values are 'a' (add) and 'r' (remove)
    """Add ('a') or remove ('r') the "Microsoft Paint" role.

    When ``editMembersOnly`` is True only the members recorded in the pickled
    data file are touched; otherwise every member of the guild is updated.
    """
    #Set Variables
    global guild;
    roleName = "Microsoft Paint";
    role = discord.utils.get(guild.roles, name = roleName);
    #Add or Remove the Role from EditMembers Only
    if (editMembersOnly):
        #Load Data from the DataFile (the member table lives at index 2 of the pickled list)
        dataFile = open(f'{dataFileName}.txt', 'br');
        dataList = _pickle.load(dataFile);
        memberDict = dataList[2];
        dataFile.close();
        #Add or Remove the Role
        for editMember in memberDict.values():
            print(editMember.memberName);
            member = discord.utils.get(guild.members, id = editMember.memberID);
            if (member is not None): #the stored member may have left the guild since
                if (action == 'a'):
                    await member.add_roles(role);
                elif (action == 'r'):
                    await member.remove_roles(role);
    #Add or Remove the Role from All the Members
    else:
        #Add or Remove the Role
        for member in guild.members:
            print(member.name);
            if (member is not None): #NOTE(review): guild.members never yields None; this guard looks redundant -- confirm
                if (action == 'a'):
                    await member.add_roles(role);
                elif (action == 'r'):
                    await member.remove_roles(role);
    print("\033[4m" + "\nThe role has been updated." + "\033[0m");
#MEMBER COMMANDS
#Send an Info Embed of a Specific Member
@bot.command(name = 'info')
async def show_member_info(ctx, member: discord.Member = None, printData = False):
    """Show gallery statistics for a member.

    Sends an embed with the member's image counts and point totals, or, when
    ``printData`` is truthy, dumps the stored EditMember record to the
    console instead of replying in Discord.  ``member`` defaults to the
    command's author.
    """
    # Load the persisted state.  Use a context manager so the handle is always
    # closed (the original opened the file and never closed it).  The member
    # table lives at index 2 of the pickled data list, as in the other commands.
    with open(f'{dataFileName}.txt', 'rb') as dataFile:
        dataList = _pickle.load(dataFile)
    memberDict = dataList[2]
    if member is None:
        member = ctx.author
    editMember = memberDict[member.id]
    # Ask the EditMember object for its aggregate numbers.
    infoList = editMember.get_editMember_info()
    infoAllImageNumber = infoList[0]
    infoPcImageNumber = infoList[1]
    infoGeneralPoints = infoList[2]
    infoPcPoints = infoList[3]
    if not printData:
        # Build the embed; the zero-width-space filler fields force the layout
        # into a 2x2 grid (not achievable any other way).
        infoEmbed = discord.Embed(title = f"{member.name} | Member Info", colour = discord.Colour.blue())
        infoEmbed.set_thumbnail(url = member.avatar_url)
        infoEmbed.add_field(name = "Total Images Sent", value = infoAllImageNumber, inline = True)
        infoEmbed.add_field(name = "PC Images Sent", value = infoPcImageNumber, inline = True)
        infoEmbed.add_field(name = "\u200b", value = "\u200b", inline=True)
        infoEmbed.add_field(name = "General Points", value = infoGeneralPoints, inline = True)
        infoEmbed.add_field(name = "PC Points", value = infoPcPoints, inline = True)
        infoEmbed.add_field(name = "\u200b", value = "\u200b", inline=True)
        infoEmbed.set_footer(text = f"\u200b \njoined {member.joined_at.date()} | {member.display_name}")
        await ctx.send(embed=infoEmbed)
    else:
        # Console-only debug output of the stored record.
        editMember.print_editMember_data(False, False, True, True, True, True)
#Send an Embed Containg Member's Images
@bot.command(name = 'images')
async def show_member_images(ctx, imageNumber = None, member: discord.Member = None):
    """Send an embed listing a member's gallery images.

    ``imageNumber`` selects what to show: ``None`` lists everything, a plain
    number links that single image (1-based, over all images sorted by id),
    and ``pc<N>`` links the images of PhotoChallenge N.
    """
    # Get the channel the original image messages live in.
    global guild
    channelImages = discord.utils.get(guild.channels, name = channelImagesName)
    # Load the persisted member table (index 2 of the pickled data list).
    with open(f'{dataFileName}.txt', 'rb') as dataFile:
        dataList = _pickle.load(dataFile)
    memberDict = dataList[2]
    if member is None:
        member = ctx.author
    editMember = memberDict[member.id]
    # Strings accumulating the embed field contents.
    allImageFieldName = ''
    allImageString = ''
    pcImageString = ''
    sendEmbed = True
    # Build a sorted list of ALL image ids (normal + PC).  Copy the normal
    # list first: the original code aliased editMember.imageNormalList and
    # appended the PC ids straight into it, mutating the loaded record.
    normalImageList = editMember.imageNormalList
    pcImageList = list(editMember.imagePcDict.values())
    allImageList = list(normalImageList)
    for pcList in pcImageList:
        for pcImage in pcList:
            allImageList.append(pcImage)
    allImageList.sort()
    if imageNumber is not None:
        if imageNumber.startswith('pc'):
            # Link every image of one PhotoChallenge.
            pcNumber = int(imageNumber[2:])
            if pcNumber not in editMember.imagePcDict.keys():
                print("\033[91m" + "\n[message out of range (show_member_images)]" + "\033[0m")
                await ctx.send(f"You haven't posted any images for PhotoChallenge {pcNumber} yet.")
                sendEmbed = False
            else:
                for pcImage in editMember.imagePcDict[pcNumber]:
                    try:
                        imageMessage = await channelImages.fetch_message(pcImage)
                        pcImageString += f"[PC {pcNumber}]({imageMessage.jump_url})\n"
                    except Exception:
                        # Message was deleted: show plain text instead of a link.
                        pcImageString += f"PC {pcNumber}\n"
                        print("\033[91m" + "\n[message not found (show_member_images)]" + "\033[0m")
        elif imageNumber[0] in '123456789':
            # Link a single image by its 1-based index in the sorted list.
            allImageFieldName = "Image"
            # Bounds-check against the combined list.  (The original compared
            # against imageNormalList, which only gave the right length
            # because of the aliasing bug fixed above.)
            if int(imageNumber) > len(allImageList):
                print("\033[91m" + "\n[message out of range (show_member_images)]" + "\033[0m")
                sendEmbed = False
                await ctx.send(f"You haven't posted that many images.")  # fixed typo "posed"
            else:
                image = allImageList[int(imageNumber) - 1]
                try:
                    imageMessage = await channelImages.fetch_message(image)
                    allImageString += f"[image {imageNumber}]({imageMessage.jump_url})"
                except Exception:
                    allImageString += f"image {imageNumber}\n"
                    print("\033[91m" + "\n[message not found (show_member_images)]" + "\033[0m")
        else:
            # Anything other than a number or 'pc<N>' is a usage error.
            raise commands.errors.BadArgument
    else:
        # No selector: list all images, then all PC images.
        allImageFieldName = "All Images"
        imageCount = 1
        for image in allImageList:
            try:
                imageMessage = await channelImages.fetch_message(image)
                allImageString += f"image {imageCount}\n"
            except Exception:
                allImageString += f"*[image {imageCount}]*\n"  # italics mark deleted messages
                print("\033[91m" + "\n[message not found (show_member_images)]" + "\033[0m")
            imageCount += 1
        # A dict cannot be sorted directly, so iterate its sorted keys (the PC
        # numbers) and look the images up per number.
        pcImageDict = editMember.imagePcDict
        pcNumberList = sorted(pcImageDict.keys())
        for pcNumber in pcNumberList:
            for pcImage in pcImageDict[pcNumber]:
                try:
                    imageMessage = await channelImages.fetch_message(pcImage)
                    pcImageString += f"PC {pcNumber}\n"
                except Exception:
                    pcImageString += f"*[PC {pcNumber}]*\n"
                    print("\033[91m" + "\n[message not found (show_member_images)]" + "\033[0m")
    if sendEmbed:
        imagesEmbed = discord.Embed(title = f"{member.name} | Images", colour = discord.Colour.blue())
        imagesEmbed.set_thumbnail(url = member.avatar_url)
        if allImageString != '':
            imagesEmbed.add_field(name = allImageFieldName, value = allImageString)
        if pcImageString != '':
            imagesEmbed.add_field(name = "PC Images", value = pcImageString)
        await ctx.send(embed = imagesEmbed)
#Run The Bot
bot.run(TOKEN);
| BlockMaster320/Ultimate-PC-Discord-Bot | Ultimate PC Bot.py | Ultimate PC Bot.py | py | 51,121 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "discord.ext.com... |
39916650041 | import scrapy
class QuotesSpider(scrapy.Spider):
    """Crawl quotes.toscrape.com and yield one item per quote on every page."""
    name="quotes"
    start_urls=[
        'http://quotes.toscrape.com/page/1/'
    ]
    def parse(self,response):
        """Parse one listing page: emit quote items, then follow pagination."""
        for quote in response.xpath('//div[@class="quote"]'):
            yield {
                'page':response.url,
                'text':quote.xpath('./span/text()').get(),
                'author':quote.xpath('./span/small/text()').get(),
                'tags':quote.xpath('./div[@class="tags"]//a/text()').getall(),
            }
        # Follow the "next" link; on the last page the xpath returns None and
        # the crawl stops.
        next_page=response.xpath('//nav/ul/li/a/@href').get()
        if next_page is not None:
            next_page=response.urljoin(next_page)
            yield scrapy.Request(next_page,callback=self.parse)
{
"api_name": "scrapy.Spider",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 20,
"usage_type": "call"
}
] |
2412659284 | from flask import Flask, Blueprint
from app.controllers.animes_controller import get_animes, get_animes_by_id, create_animes, delete, update
# Blueprint grouping every /animes route; registered on the app elsewhere.
bp_animes = Blueprint('animes', __name__, url_prefix='/animes')
# CRUD wiring -- the controller functions contain the actual request logic.
bp_animes.post('')(create_animes)                    # POST   /animes
bp_animes.get('')(get_animes)                        # GET    /animes
bp_animes.get('/<int:anime_id>')(get_animes_by_id)   # GET    /animes/<id>
bp_animes.patch('/<int:anime_id>')(update)           # PATCH  /animes/<id>
bp_animes.delete('/<int:anime_id>')(delete)          # DELETE /animes/<id>
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "app.controllers.animes_controller.create_animes",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "app.controllers.animes_controller.get_animes",
"line_number": 10,
"usag... |
40569343475 | from PySide6.QtWidgets import (
QComboBox,
QGroupBox,
QHBoxLayout,
QLabel,
QVBoxLayout,
)
from game.ato.flight import Flight
from game.ato.starttype import StartType
from game.theater import OffMapSpawn
from qt_ui.models import PackageModel
class QFlightStartType(QGroupBox):
    """Group box letting the user pick a flight's start type."""

    def __init__(self, package_model: PackageModel, flight: Flight):
        super().__init__()
        self.package_model = package_model
        self.flight = flight
        self.layout = QVBoxLayout()
        self.main_row = QHBoxLayout()
        self.start_type_label = QLabel("Start type:")
        self.start_type = QComboBox()
        # One combo entry per StartType enum member; the enum itself is stored
        # as the item's userData so it can be read back in the change handler.
        for start_type in StartType:
            self.start_type.addItem(start_type.value, start_type)
        self.start_type.setCurrentText(flight.start_type.value)
        # Off-map spawns have no airfield, so the start type cannot be changed.
        if isinstance(self.flight.departure, OffMapSpawn):
            self.start_type.setEnabled(False)
        self.start_type.currentTextChanged.connect(self._on_start_type_selected)
        self.main_row.addWidget(self.start_type_label)
        self.main_row.addWidget(self.start_type)
        self.layout.addLayout(self.main_row)
        self.layout.addWidget(
            QLabel(
                "Any option other than Cold will make this flight non-targetable "
                + "by OCA/Aircraft missions. This will affect game balance."
            )
        )
        self.setLayout(self.layout)

    def _on_start_type_selected(self):
        """Push the combo selection onto the flight and refresh the package TOT."""
        selected = self.start_type.currentData()
        self.flight.start_type = selected
        self.package_model.update_tot()
| dcs-liberation/dcs_liberation | qt_ui/windows/mission/flight/settings/QFlightStartType.py | QFlightStartType.py | py | 1,582 | python | en | code | 647 | github-code | 36 | [
{
"api_name": "PySide6.QtWidgets.QGroupBox",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "qt_ui.models.PackageModel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "game.ato.flight.Flight",
"line_number": 16,
"usage_type": "name"
},
{
"api_... |
36120731703 | import yaml
from termcolor import colored
import torch
from fortex.nltk import NLTKSentenceSegmenter, NLTKWordTokenizer, NLTKPOSTagger
from forte.common.configuration import Config
from forte.data.multi_pack import MultiPack
from forte.data.readers import MultiPackTerminalReader
from forte.common.resources import Resources
from forte.pipeline import Pipeline
from forte.processors.third_party import MicrosoftBingTranslator
from forte.processors.nlp.srl_predictor import SRLPredictor
from forte.processors.ir import SearchProcessor, BertBasedQueryCreator
from forte.data.selector import NameMatchSelector
from ft.onto.base_ontology import PredicateLink, Sentence
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def setup(config: Config) -> Pipeline:
    """Assemble and initialize the chatbot query pipeline.

    Flow: read a query from the terminal -> translate it to English ->
    embed it with BERT -> retrieve documents -> run sentence/word/POS/SRL
    processing on the top-ranked response only -> back-translate the reply.
    """
    resource = Resources()
    query_pipeline = Pipeline[MultiPack](resource=resource)
    query_pipeline.set_reader(
        reader=MultiPackTerminalReader(), config=config.reader
    )
    query_pipeline.add(
        component=MicrosoftBingTranslator(), config=config.translator
    )
    query_pipeline.add(
        component=BertBasedQueryCreator(), config=config.query_creator
    )
    query_pipeline.add(component=SearchProcessor(), config=config.searcher)
    # All NLP processors below are restricted (via NameMatchSelector) to the
    # highest-ranked retrieved pack only.
    top_response_pack_name = config.indexer.response_pack_name_prefix + "_0"
    query_pipeline.add(
        component=NLTKSentenceSegmenter(),
        selector=NameMatchSelector(select_name=top_response_pack_name),
    )
    query_pipeline.add(
        component=NLTKWordTokenizer(),
        selector=NameMatchSelector(select_name=top_response_pack_name),
    )
    query_pipeline.add(
        component=NLTKPOSTagger(),
        selector=NameMatchSelector(select_name=top_response_pack_name),
    )
    query_pipeline.add(
        component=SRLPredictor(),
        config=config.SRL,
        selector=NameMatchSelector(select_name=top_response_pack_name),
    )
    query_pipeline.add(
        component=MicrosoftBingTranslator(), config=config.back_translator
    )
    query_pipeline.initialize()
    return query_pipeline
def main(config: Config):
    """Run the interactive chatbot loop.

    Each iteration processes one terminal query, appends both utterances to
    the shared resource (conversation history used by later turns) and prints
    the retrieved document, its German translation and per-sentence semantic
    role labels.
    """
    query_pipeline = setup(config)
    resource = query_pipeline.resource
    m_pack: MultiPack
    for m_pack in query_pipeline.process_dataset():
        # update resource to be used in the next conversation
        query_pack = m_pack.get_pack(config.translator.in_pack_name)
        if resource.get("user_utterance"):
            resource.get("user_utterance").append(query_pack)
        else:
            resource.update(user_utterance=[query_pack])
        response_pack = m_pack.get_pack(config.back_translator.in_pack_name)
        if resource.get("bot_utterance"):
            resource.get("bot_utterance").append(response_pack)
        else:
            resource.update(bot_utterance=[response_pack])
        english_pack = m_pack.get_pack("pack")
        print(
            colored("English Translation of the query: ", "green"),
            english_pack.text,
            "\n",
        )
        # Just take the first (top-ranked) retrieved pack.
        pack = m_pack.get_pack(config.indexer.response_pack_name_prefix + "_0")
        print(colored("Retrieved Document", "green"), pack.text, "\n")
        print(
            colored("German Translation", "green"),
            m_pack.get_pack("response").text,
            "\n",
        )
        # Show the semantic-role-labeling output sentence by sentence.
        for sentence in pack.get(Sentence):
            sent_text = sentence.text
            print(colored("Sentence:", "red"), sent_text, "\n")
            print(colored("Semantic role labels:", "red"))
            for link in pack.get(PredicateLink, sentence):
                parent = link.get_parent()
                child = link.get_child()
                print(
                    f' - "{child.text}" is role '
                    f"{link.arg_type} of "
                    f'predicate "{parent.text}"'
                )
            print()
        input(colored("Press ENTER to continue...\n", "green"))
if __name__ == "__main__":
    # config.yml holds the per-component settings (reader, translator,
    # indexer, SRL, ...).  Use a context manager so the handle is closed
    # (the original leaked the file object returned by open()).
    with open("config.yml", "r") as config_file:
        all_config = Config(yaml.safe_load(config_file), None)
    main(all_config)
| asyml/forte | examples/chatbot/chatbot_example.py | chatbot_example.py | py | 4,076 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "forte.common.con... |
17096699473 | import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
input_file = ('data_perf.txt')

# Load data: one comma-separated point per line.
x = []
with open(input_file, 'r') as f:
    for line in f.readlines():
        data = [float(i) for i in line.split(',')]
        x.append(data)
X = np.array(x)

# Grid-search epsilon: keep the model with the best silhouette score.
eps_grid = np.linspace(0.3, 1.2, num=10)
silhouette_scores = []
eps_best = eps_grid[0]
silhouette_score_max = -1
labels_best = None
for eps in eps_grid:
    # Train a DBSCAN clustering model for this epsilon.
    model = DBSCAN(eps=eps, min_samples=5).fit(X)
    # Extract labels (-1 marks noise points).
    labels = model.labels_
    # Silhouette score measures how well separated the clusters are.
    silhouette_score = round(metrics.silhouette_score(X, labels), 4)
    silhouette_scores.append(silhouette_score)
    print("Epsilon:", eps, " --> silhouette score:", silhouette_score)
    if silhouette_score > silhouette_score_max:
        silhouette_score_max = silhouette_score
        eps_best = eps
        model_best = model
        labels_best = labels

# Best params
print("Best epsilon =", eps_best)

# Associated model and labels for best epsilon
model = model_best
labels = labels_best

# DBSCAN labels unassigned (noise) datapoints -1; they are not a cluster.
offset = 0
if -1 in labels:
    offset = 1

# Number of clusters in the data
num_clusters = len(set(labels)) - offset
print("Estimated number of clusters =", num_clusters)

# Extract the core samples from the trained model.
# Fixed: dtype must be the builtin ``bool`` -- the ``np.bool`` alias was
# deprecated in NumPy 1.20 and removed in 1.24 (AttributeError there).
mask_core = np.zeros(labels.shape, dtype=bool)
mask_core[model.core_sample_indices_] = True
| neochen2701/TQCPans | 機器學習Python 3答案檔/MLA202.py | MLA202.py | py | 1,602 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.DBSCAN",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.sil... |
37458267107 | from pprint import pprint
from fukushima_population import data
import json
# Load the periodic table data (expects an "elements" list of dicts with at
# least "name" and "symbol" keys).
with open('periodic_table.json', 'r') as f:
    t = json.load(f)

# Print one line per element: "<name> is written as <symbol>" (in Japanese).
for i in range(len(t["elements"])):
    n = t["elements"][i]["name"]
    sym = t["elements"][i]["symbol"]
    print(f"{n}という元素は{sym}と表記する")

# Build a symbol -> name lookup table with a dict comprehension.
lookup = {e["symbol"]:e["name"] for e in t["elements"]}
symbol = "Cu"
print(f"{symbol} is the symbol for {lookup[symbol]}")
{
"api_name": "json.load",
"line_number": 9,
"usage_type": "call"
}
] |
19589266829 | import json
import argparse
from sklearn.metrics import classification_report
from eval_F1 import exact_match_score, f1_score, metric_max_over_ground_truths
import numpy as np
def compute_span_f1(pred_span, gold_span):
    """Token-level F1 between two inclusive ``[start, end]`` spans.

    Returns a ``(f1, precision, recall)`` tuple; all three are 0.0 when the
    spans do not overlap.
    """
    gold_start, gold_end = gold_span
    pred_start, pred_end = pred_span
    # Tokens shared by both spans, clamped at zero for disjoint spans.
    overlap = max(min(pred_end, gold_end) - max(pred_start, gold_start) + 1, 0)
    pred_len = pred_end - pred_start + 1
    gold_len = gold_end - gold_start + 1
    precision = overlap / pred_len
    recall = overlap / gold_len
    if precision == 0.0 and recall == 0.0:
        return 0.0, precision, recall
    f1 = 2 * precision * recall / (precision + recall)
    return f1, precision, recall
def score_claim(args):
    """Evaluate claim (sentence) detection with micro precision/recall/F1.

    ``gold`` maps doc_id -> {sentence_id: annotation}; ``predictions`` maps
    doc_id -> [sentence_id, ...].  A true positive is a predicted sentence id
    that also appears in the gold annotations of the same document.
    """
    print("Evaluating Claim Detection Task")  # fixed "Evalutating" typo
    # Context managers close the handles that json.load(open(...)) leaked.
    with open(args.gold_file) as f:
        gold = json.load(f)
    with open(args.predictions_file) as f:
        predictions = json.load(f)
    tp = 0.0
    total_prec = 0.0  # number of predicted sentences (precision denominator)
    total_rec = 0.0   # number of gold sentences (recall denominator)
    for doc_id in gold:
        gold_sents = set(gold[doc_id].keys())
        if doc_id in predictions:
            pred_sents = set(predictions[doc_id])
            tp += len(gold_sents.intersection(pred_sents))
            total_prec += len(pred_sents)
        # Gold sentences count toward recall whether or not the document was
        # predicted at all.
        total_rec += len(gold_sents)
    if total_prec != 0:
        prec = float(tp) / total_prec
    else:
        prec = 0
    rec = float(tp) / total_rec
    print("Prec: ", prec, tp, total_prec)
    print("Rec: ", rec, tp, total_rec)
    if tp != 0:
        print("F1: ", 2 * prec * rec / (prec + rec))
    else:
        print("F1: ", 0.0)
def score_topic(args):
    """Evaluate topic classification; prints a sklearn classification report.

    Every gold (doc, segment) pair must also be present in the predictions.
    """
    print("Evaluating Topic Classification Task")  # fixed "Evalutating" typo
    # Context managers close the handles that json.load(open(...)) leaked.
    with open(args.gold_file) as f:
        gold = json.load(f)
    with open(args.predictions_file) as f:
        predictions = json.load(f)
    gold_topics = list()
    predicted_topics = list()
    for doc_id in gold:
        assert doc_id in predictions
        for segment_id in gold[doc_id]:
            assert segment_id in predictions[doc_id]
            gold_topics.append(gold[doc_id][segment_id]["topic"])
            predicted_topics.append(predictions[doc_id][segment_id]["topic"])
    print(classification_report(gold_topics, predicted_topics, digits=4))
def score_claim_object(args):
    """Evaluate claim-object extraction with SQuAD-style EM and token F1."""
    print("Evaluating Claim Object Extraction Task")  # fixed "Evalutating" typo
    # Context managers close the handles that json.load(open(...)) leaked.
    with open(args.gold_file) as f:
        gold = json.load(f)
    with open(args.predictions_file) as f:
        predictions = json.load(f)
    EM = 0.0
    F1 = 0.0
    count = 0
    for doc_id in gold:
        for segment_id in gold[doc_id]:
            # Only segments annotated with a claim object are scored.
            if "claim_object" in gold[doc_id][segment_id]:
                assert doc_id in predictions
                assert segment_id in predictions[doc_id]
                EM += metric_max_over_ground_truths(exact_match_score, predictions[doc_id][segment_id]["claim_object"], [gold[doc_id][segment_id]["claim_object"]])
                F1 += metric_max_over_ground_truths(f1_score, predictions[doc_id][segment_id]["claim_object"], [gold[doc_id][segment_id]["claim_object"]])
                count += 1
    print("EM: ", EM/count, "F1: ", F1/count, "Count: ", count)
def score_stance(args):
    """Evaluate stance detection; prints a sklearn classification report."""
    print("Evaluating Stance Detection Task")  # fixed "Evalutating" typo
    # Context managers close the handles that json.load(open(...)) leaked.
    with open(args.gold_file) as f:
        gold = json.load(f)
    with open(args.predictions_file) as f:
        predictions = json.load(f)
    gold_stance = list()
    predicted_stance = list()
    for doc_id in gold:
        for segment_id in gold[doc_id]:
            # Only segments with a gold stance annotation are scored.
            if "stance" in gold[doc_id][segment_id]:
                assert doc_id in predictions
                assert segment_id in predictions[doc_id]
                gold_stance.append(gold[doc_id][segment_id]["stance"])
                predicted_stance.append(predictions[doc_id][segment_id]["stance"])
    print(classification_report(gold_stance, predicted_stance, digits=4))
def score_claim_span(args):
    """Evaluate claim span detection with macro-averaged precision/recall/F1.

    Per-example token precision/recall come from ``compute_span_f1``; the
    reported F1 is computed from the averaged precision and recall.
    """
    print("Evaluating Claim Span Detection Task")  # fixed "Evalutating" typo
    # Context managers close the handles that json.load(open(...)) leaked.
    with open(args.gold_file) as f:
        gold = json.load(f)
    with open(args.predictions_file) as f:
        predictions = json.load(f)
    prec = list()
    recall = list()
    for doc_id in gold:
        for segment_id in gold[doc_id]:
            if "claim_span" in gold[doc_id][segment_id]:
                _, ex_prec, ex_recall = compute_span_f1(predictions[doc_id][segment_id]["claim_span"], gold[doc_id][segment_id]["claim_span"])
                prec.append(ex_prec)
                recall.append(ex_recall)
    final_prec = np.mean(prec)
    final_recall = np.mean(recall)
    print("P: ", final_prec, "R: ", final_recall, "F1: ", (2*final_prec*final_recall)/(final_prec+final_recall), "Count: ", len(prec))
def score_claimer(args):
    """Evaluate claimer extraction.

    Segments with a gold claimer are scored with token F1 (reported overall
    and split into in-sentence vs out-of-sentence claimers).  Segments
    without one are scored as a detection problem: predicting
    ``has_claimer == False`` (i.e. the author, "<AUTHOR>") is the positive
    class.  The final F1 is a count-weighted mean of the two parts.
    """
    print("Evaluating Claimer Extraction Task")
    # Context managers close the handles that json.load(open(...)) leaked.
    with open(args.gold_file) as f:
        gold = json.load(f)
    with open(args.predictions_file) as f:
        predictions = json.load(f)
    in_sent_f1 = list()
    out_of_sent_f1 = list()
    ans_f1 = list()
    no_claimer_tp = 0.0
    no_claimer_total_prec = 0.0  # times the system predicted "no claimer"
    no_claimer_total_rec = 0.0   # gold segments without a claimer
    for doc_id in gold:
        for segment_id in gold[doc_id]:
            if "has_claimer" in gold[doc_id][segment_id]:
                assert doc_id in predictions
                assert segment_id in predictions[doc_id]
                if predictions[doc_id][segment_id]["has_claimer"] == False:
                    no_claimer_total_prec += 1.0
                    # A "no claimer" prediction is scored as naming the author.
                    pred_claimer = "<AUTHOR>"
                else:
                    pred_claimer = predictions[doc_id][segment_id]["claimer"]
                if gold[doc_id][segment_id]["has_claimer"]:
                    if gold[doc_id][segment_id]["claimer_in_sentence"]:
                        in_sent_f1.append(metric_max_over_ground_truths(f1_score, pred_claimer, [gold[doc_id][segment_id]["claimer"]]))
                    else:
                        out_of_sent_f1.append(metric_max_over_ground_truths(f1_score, pred_claimer, [gold[doc_id][segment_id]["claimer"]]))
                    ans_f1.append(metric_max_over_ground_truths(f1_score, pred_claimer, [gold[doc_id][segment_id]["claimer"]]))
                else:
                    if predictions[doc_id][segment_id]["has_claimer"] == False:
                        no_claimer_tp += 1.0
                    no_claimer_total_rec += 1.0
    print("Ans F1: ", np.mean(ans_f1), " Count: ", len(ans_f1))
    prec = no_claimer_tp/no_claimer_total_prec
    recall = no_claimer_tp/no_claimer_total_rec
    no_ans_f1 = 2*prec*recall/(prec + recall)
    print("No ANS P:", prec, "No ANS R:", recall, "No ANS F1:", no_ans_f1, "Count: ", no_claimer_total_rec)
    # Weighted mean of answer-F1 and no-answer-F1 by their example counts.
    print("Final F1: ", (len(ans_f1)*np.mean(ans_f1) + no_claimer_total_rec*no_ans_f1)/(len(ans_f1) + no_claimer_total_rec))
    print("In Sent F1: ", np.mean(in_sent_f1), "Count: ", len(in_sent_f1))
    print("Out of Sent F1: ", np.mean(out_of_sent_f1), "Count: ", len(out_of_sent_f1))
if __name__ == "__main__":
    # Each --eval_* flag independently enables one scoring routine; several
    # may be combined in one run.  Fixed the copy-pasted help strings that
    # all claimed to be about "topic detection".
    parser = argparse.ArgumentParser(description='Parser')
    parser.add_argument('--gold_file', type=str, help="Path to gold data")
    parser.add_argument('--predictions_file', type=str, help="path to predictions")
    parser.add_argument("--eval_claim", action='store_true', help="Whether to evaluate claim detection performance")
    parser.add_argument("--eval_topic", action='store_true', help="Whether to evaluate topic detection performance")
    parser.add_argument("--eval_claim_object", action='store_true', help="Whether to evaluate claim object extraction performance")
    parser.add_argument("--eval_claim_span", action='store_true', help="Whether to evaluate claim span detection performance")
    parser.add_argument("--eval_stance", action='store_true', help="Whether to evaluate stance detection performance")
    parser.add_argument("--eval_claimer", action='store_true', help="Whether to evaluate claimer extraction performance")
    args = parser.parse_args()
    if args.eval_claim:
        score_claim(args)
    if args.eval_topic:
        score_topic(args)
    if args.eval_claim_object:
        score_claim_object(args)
    if args.eval_claim_span:
        score_claim_span(args)
    if args.eval_stance:
        score_stance(args)
    if args.eval_claimer:
        score_claimer(args)
{
"api_name": "json.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 59,
... |
18369494327 | import pandas as pd
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix
#Function importing data from .txt file
def gatherData(directory):
    """Load a space-separated numeric table from *directory* as a 2-D array."""
    frame = pd.read_csv(directory, sep=" ", header=None)
    return frame.values
#Data importing (UCI HAR-style layout: features in X_*.txt, labels in Y_*.txt)
train_features = gatherData("train/X_train.txt")
train_labels = gatherData("train/Y_train.txt")
test_features = gatherData("test/X_test.txt")
test_labels = gatherData("test/Y_test.txt")
print("Data loaded")

#Model training (labels load as an (n, 1) column, hence .ravel())
model = GaussianNB()
model.fit(train_features, train_labels.ravel())
print("Trained model 1")
model2 = LinearSVC(multi_class="ovr") #I'm using One vs Rest method for multi-class classification
model2.fit(train_features, train_labels.ravel())
print("Trained model 2")

#Making predictions
predictions = model.predict(test_features)
confusion = confusion_matrix(test_labels, predictions)
counter = 0
print("Predicted for model 1")
predictions2 = model2.predict(test_features)
confusion2 = confusion_matrix(test_labels, predictions2)
counter2 = 0
print("Predicted for model 2")

#Checking accuracy: correct predictions are the confusion-matrix diagonal
for i in range(np.size(confusion, 0)):
    counter += confusion[i][i]
    counter2 += confusion2[i][i]
print("{0:.2f} percent of test values predicted well with GaussianNB".format(float(counter/len(predictions))*100))
print("{0:.2f} percent of test values predicted well with LinearSVC".format(float(counter2/len(predictions))*100))
| trcz/human-activity-prediction | activity_prediction.py | activity_prediction.py | py | 1,585 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVC",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "... |
37319398459 | import torch
import numpy as np
from utils.misc import soft_update
from model.BCAgent import BCAgent
from model.utils.model import *
class BahaviorClone(object):
    """Behavior-cloning trainer wrapping one ``BCAgent`` per agent index.

    Trains each agent's policy with a cross-entropy loss on (discrete)
    actions sampled from a replay buffer.  (Class name "BahaviorClone" is
    kept as-is: it is part of the public interface.)
    """

    def __init__(self, name, params):
        self.name = name
        self.lr = params.lr
        self.gamma = params.gamma
        self.tau = params.tau
        self.obs_dim = params.obs_dim
        self.action_dim = params.action_dim
        # NOTE(review): halved because update() splits every joint sample
        # into two single-agent trajectories -- confirm.
        self.batch_size = params.batch_size // 2
        self.device = params.device
        self.discrete_action = params.discrete_action_space
        self.agent_index = params.agent_index
        self.num_agents = len(self.agent_index)
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
        # Critic input is the concatenation of observation and action.
        params.critic.obs_dim = (self.obs_dim + self.action_dim)
        self.agents = [BCAgent(params) for _ in range(self.num_agents)]
        [agent.to(self.device) for agent in self.agents]

    def act(self, observations, sample=False):
        """Return one action per agent for the given per-agent observations."""
        observations = torch.Tensor(observations).to(self.device)
        actions = []
        for agent, obs in zip(self.agents, observations):
            agent.eval()  # switch to eval mode while acting
            actions.append(agent.act(obs, explore=sample).squeeze())
            agent.train()
        return np.array(actions)

    def update(self, replay_buffer, logger, step):
        """One behavior-cloning step: cross-entropy of policy logits vs actions.

        Assumes the sampled batch holds exactly two agents along axis 1 of
        every tensor (indices 0 and 1) -- TODO confirm against the buffer.
        """
        sample = replay_buffer.sample(self.batch_size, nth=self.agent_index)
        obses, actions, rewards, next_obses, dones = sample
        # split each joint into two single trajectories
        # need to check
        obses = (torch.cat([obses[:, 0], obses[:, 1]], dim=0))
        actions = (torch.cat([actions[:, 0], actions[:, 1]], dim=0))
        rewards = (torch.cat([rewards[:, 0], rewards[:, 1]], dim=0))
        next_obses = (torch.cat([next_obses[:, 0], next_obses[:, 1]], dim=0))
        dones = (torch.cat([dones[:, 0], dones[:, 1]], dim=0))
        if self.discrete_action:
            # one-hot -> class indices, the target format CrossEntropyLoss expects
            actions = number_to_onehot(actions)
            actions = torch.max(actions.long(), 1)[1]
        for agent_i, agent in enumerate(self.agents):
            agent.policy_optimizer.zero_grad()
            agent_actions = agent.policy(obses)
            loss = self.cross_entropy_loss(agent_actions, actions)
            loss.backward()
            agent.policy_optimizer.step()

    def save(self, filename):
        """Persisting the model is not implemented yet."""
        raise NotImplementedError

    def load(self, filename):
        """Loading a saved model is not implemented yet."""
        raise NotImplementedError
| bic4907/Overcooked-AI | model/bc.py | bc.py | py | 2,420 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "model.BCAgent.BCAgent",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.T... |
74537281383 | #!/usr/bin/env python
# -*- coding: utf_8 -*
__author__ = "Benoit Delbosc"
__copyright__ = "Copyright (C) 2012 Nuxeo SA <http://nuxeo.com>"
"""
jenkviz.command
~~~~~~~~~~~~~~~~
Crawl a Jenkins to extract builds flow.
"""
import os
import logging
from model import open_db, close_db, list_builds
from crawl import Crawl
from graphviz import graphviz
def cmd_list(args, options):
db = open_db(options)
list_builds(db)
close_db(db)
return 0
def cmd_crawl(args, options):
if len(args) != 1:
logging.error("Missing build URL")
return 1
if options.to_file and not os.path.exists(options.to_file):
os.mkdir(options.to_file)
if options.from_file and not os.path.exists(options.from_file):
os.mkdir(options.from_file)
db = open_db(options)
crawl = Crawl(db, options)
if options.reverse:
roots = crawl.reverse_crawl(args[0])
else:
roots = crawl.crawl(args[0])
close_db(db)
stat = roots[0].extra
logging.info("Started: %s\n\tend: %s\n\telapsed: %s\n\tduration: %ss\n\tNb builds: %s\n\ttrhoughput: %s\n" % (
stat['start'], stat['stop'], stat['elapsed'], stat['duration'], stat['count'], stat['throughput']))
if not options.output:
svg_file = roots[0].getId() + ".svg"
else:
svg_file = options.output
graphviz(roots, svg_file)
logging.info("%s generated." % svg_file)
return 0
def cmd_info(args, options):
if len(args) != 1:
logging.error('Missing bid')
return 1
db = open_db(options)
# bencher = Bencher.getBencherForBid(db, options, args[0])
# print """bid: %(bid)s, from %(start)s to %(end)s, samples: %(count)d, errors: %(error)d""" % bencher.getInfo(args[0])
close_db(db)
return 0
def cmd_report(args, options):
if len(args) != 1:
logging.error('Missing bid')
return 1
if not options.output:
logging.error('Missing --output option')
return 1
db = open_db(options)
# report = Report(db, options)
# report.buildReport(args[0])
close_db(db)
return 0
| bdelbosc/jenkviz | jenkviz/command.py | command.py | py | 2,109 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "model.open_db",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "model.list_builds",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "model.close_db",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.error",
"l... |
22196508979 | import math
def lyapunov_exponent(d):
lamb = 0
for i in range(len(d)-2):
lamb += math.log((abs(d[i+2]-d[i+1])+1e-17)/(abs(d[i+1]-d[i])+1e-17))
lamb /= len(d)
return lamb
def logistic_f(alpfa, x0, n):
y = [0] * n
y[0] = x0
x = x0
for i in range(1, n):
x = alpfa*x*(1-x)
y[i] = x
return y
import matplotlib.pyplot as plt
import numpy as np
r = 1000
x = np.linspace(1, 4, r)
y = [0] * r
for i in range(r):
y[i] = lyapunov_exponent(logistic_f(x[i], 0.001, 10000))
plt.xlabel('alpha')
plt.ylabel('lyapunov_exponent')
plt.plot(list(x), y)
plt.savefig('image/lyapunov_exponent_logistic.png') | jecht1014/book | shumi/chaos/lyapunov_exponent.py | lyapunov_exponent.py | py | 649 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.log",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
846856008 | import itertools
from subprocess import call
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from chainer import serializers
from chainer.backends import cuda
from util import invLIDAR
class RecoNet(chainer.Chain):
def __init__(self, n_hid1=100, n_hid2=50):
super().__init__()
with self.init_scope():
self.l0 = L.Linear(None, n_hid1)
self.l1 = L.Linear(None, n_hid2)
self.l2 = L.Linear(None, 15)
def __call__(self, x, test=False):
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return self.l2(h)
def predict(self, x, test=False):
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return F.sigmoid(self.l2(h))
class ThreatEstimator:
def __init__(self, model, filename, gpu):
self.model = model
serializers.load_npz(filename, self.model)
self.gpu = gpu
if gpu:
self.model.to_gpu(0)
acc_list = [-0.02, 0, 0.02]
omega_list = [-0.15, -0.05, 0, 0.05, 0.15]
self.action_list = list(itertools.product(
acc_list, omega_list))
def threat(self, obs):
xp = cuda.get_array_module(obs)
r = invLIDAR(obs[:, :360][0], xp)
v_val = obs[:, 363][0]
rx, ry = r.real, r.imag
mask = (-0.5 < rx) & (rx < 0.7) & (-0.3 < ry) & (ry < 0.3)
r = r[mask]
num = len(r)
v = xp.ones((num, 1)) * v_val
data = xp.c_[r.real, r.imag, v].astype(np.float32)
if self.gpu:
data = cuda.cupy.asarray(data)
result = self.model.predict(data).array.sum(axis=0)
if self.gpu:
result = cuda.cupy.asnumpy(result)
return result
| pfnet-research/rp-safe-rl | circuit/estimator.py | estimator.py | py | 1,753 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "chainer.Chain",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "chainer.links.Linear",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "chainer.links",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "chainer.links.L... |
18457796268 | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
import Image as PILImage
from uds.core.util.stats import charts
from uds.core.auths.auth import webLoginRequired
from uds.core.util.decorators import denyBrowsers
import io
import six
from geraldo.generators.pdf import PDFGenerator
from geraldo import Report, landscape, ReportBand, ObjectValue, SystemField, BAND_WIDTH, Label
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_RIGHT, TA_CENTER
import logging
logger = logging.getLogger(__name__)
__updated__ = '2015-04-27'
class TestReport(Report):
title = 'Test report'
author = 'UDS Enterprise'
print_if_empty = True
page_size = A4
margin_left = 2 * cm
margin_top = 0.5 * cm
margin_right = 0.5 * cm
margin_bottom = 0.5 * cm
class band_detail(ReportBand):
height = 0.5 * cm
elements = (
ObjectValue(attribute_name='name', left=0.5 * cm),
ObjectValue(attribute_name='age', left=3 * cm,
get_value=lambda instance: six.text_type(instance['age'])),
)
class band_page_header(ReportBand):
height = 1.3 * cm
elements = [
SystemField(expression='%(report_title)s', top=0.1 * cm, left=0, width=BAND_WIDTH,
style={'fontName': 'Helvetica-Bold', 'fontSize': 14, 'alignment': TA_CENTER}),
Label(text="ID", top=0.8 * cm, left=0.5 * cm),
Label(text=u"Creation Date", top=0.8 * cm, left=3 * cm),
SystemField(expression=_('Page %(page_number)d of %(page_count)d'), top=0.1 * cm,
width=BAND_WIDTH, style={'alignment': TA_RIGHT}),
]
borders = {'bottom': True}
class band_page_footer(ReportBand):
height = 0.5 * cm
elements = [
Label(text='Geraldo Reports', top=0.1 * cm),
SystemField(expression=_('Printed in %(now:%Y, %b %d)s at %(now:%H:%M)s'), top=0.1 * cm,
width=BAND_WIDTH, style={'alignment': TA_RIGHT}),
]
borders = {'top': True}
@denyBrowsers(browsers=['ie<9'])
@webLoginRequired(admin='admin')
def usage(request):
resp = HttpResponse(content_type='application/pdf')
family = [
{'name': 'Leticia', 'age': 29, 'weight': 55.7, 'genre': 'female', 'status': 'parent'},
{'name': 'Marinho', 'age': 28, 'weight': 76, 'genre': 'male', 'status': 'parent'},
{'name': 'Tarsila', 'age': 4, 'weight': 16.2, 'genre': 'female', 'status': 'child'},
{'name': 'Linus', 'age': 0, 'weight': 1.5, 'genre': 'male', 'status': 'child'},
{'name': 'Mychelle', 'age': 19, 'weight': 50, 'genre': 'female', 'status': 'nephew'},
{'name': 'Mychell', 'age': 17, 'weight': 55, 'genre': 'male', 'status': 'niece'},
]
report = TestReport(queryset=family)
report.generate_by(PDFGenerator, filename=resp)
return resp
# return HttpResponse(pdf, content_type='application/pdf')
| karthik-arjunan/testuds | server/src/uds/admin/views/reporting/usage.py | usage.py | py | 3,158 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "geraldo.Report",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.pagesizes.A4",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "reportlab... |
12573117304 | from tkinter import *
from datetime import date
from tkinter import messagebox
from PIL import Image
from tkinter.ttk import Combobox
import openpyxl
from openpyxl import Workbook
import pathlib
import tkinter as tk
import customtkinter
from tkinter import ttk
#initial
customtkinter.set_appearance_mode("System")
textcolor = "#333333"
side_frame_col = "#F2DFD7"
buttoncolor = "#FF5722"
buttoncolorlite = "#FF855F"
background = "#D4C1EC"
obj_frame_col = "#9F9FED"
mainbackground = "#736ced"
framebg = "#EDEDED"
framefg = "#06283D"
root = Tk()
maincol1 = "pink"
maincol2 = "pink"
maincol3 = "pink"
width = root.winfo_screenwidth()
height = root.winfo_screenheight()
root.title("Clinic Managment System")
root.state('zoomed')
# root.geometry ("%dx%d"%(width,height))
root.config(bg=mainbackground)
# pathmain = "//LAPTOP-F1A0LRP8/Users/aman/Student_data.xlsx"
pathmain = "Student_data.xlsx"
file = pathlib.Path(pathmain)
patient_detail_from_doc = "patient.xlsx"
doc_med_old = "docmed.xlsx"
file = pathlib.Path(patient_detail_from_doc)
# ----------------------------------------------------------------------------------------------------------------------
# Declare
fontmain = "Dotum"
Name = StringVar()
age = StringVar()
radio = IntVar()
weight = StringVar()
height = StringVar()
temprature = StringVar()
pulse = StringVar()
respiration = StringVar()
bp = StringVar()
obj = None
mobile = StringVar()
village = StringVar()
Search = StringVar()
Gender = StringVar()
my_list = Listbox
my_entry = Entry
name_lable = customtkinter.CTkLabel
med_no = IntVar()
med_name = StringVar()
pnt_dis = StringVar()
dose = StringVar()
days = StringVar()
Registration = IntVar()
Date = StringVar()
docmedslst = []
#-----------------------------------------------------------------------------------------------------------------------
# file creation if not exist
if file.exists():
pass
else:
file = Workbook()
sheet1 = file.active
file.save(patient_detail_from_doc)
file = pathlib.Path(doc_med_old)
if file.exists():
pass
else:
file = Workbook()
sheet1 = file.active
sheet1['A1'] = "Name"
file.save(doc_med_old)
if file.exists():
pass
else:
file = Workbook()
sheet = file.active
sheet['A1'] = "Registration No."
sheet['B1'] = "Name"
sheet['C1'] = "Class"
sheet['D1'] = "Gender"
sheet['E1'] = "D0B"
sheet['F1'] = "Date Of Registration"
sheet['G1'] = "weight"
sheet['H1'] = "height"
sheet['I1'] = "Temperature"
sheet['J1'] = "Respiration"
sheet['K1'] = "Pulse"
sheet['L1'] = "BP"
sheet['M1'] = "Village"
sheet['N1'] = "Mobile"
file.save(pathmain)
def Exit():
root.destroy()
# def registration_no():
# file = openpyxl.load_workbook(pathmain)
# sheet = file.active
# row = sheet.max_row
#
# max_row_value = sheet.cell(row=row, column=1).value
#
# try:
# Registration.set(max_row_value + 1)
#
# except:
# Registration.set("1")
def Clear():
global img
Name.set('')
age.set('')
weight.set('')
Gender.set('')
height.set('')
temprature.set('')
respiration.set('')
pulse.set('')
bp.set('')
village.set('')
mobile.set('')
# registration_no()
def Clear2():
global img
Name.set('')
age.set('')
weight.set('')
Gender.set('')
height.set('')
village.set('')
mobile.set('')
temprature.set('')
respiration.set('')
pulse.set('')
bp.set('')
# registration_no()
def Save():
print("abcd")
def search():
text = Search.get() # taking input from entry box
Clear()
file = openpyxl.load_workbook(pathmain)
sheet = file.active
for row in sheet.rows:
if row[0].value == int(text):
name = row[0]
reg_no_position = str(name)[14:-1]
reg_number = str(name)[15:-1]
try:
print(str(name))
except:
messagebox.showerror("Invalid", "Invalid registration number! !!")
x1 = sheet.cell(row=int(reg_number), column=1).value
x2 = sheet.cell(row=int(reg_number), column=2).value
x3 = sheet.cell(row=int(reg_number), column=3).value
x4 = sheet.cell(row=int(reg_number), column=4).value
x5 = sheet.cell(row=int(reg_number), column=5).value
x6 = sheet.cell(row=int(reg_number), column=6).value
x7 = sheet.cell(row=int(reg_number), column=7).value
x8 = sheet.cell(row=int(reg_number), column=8).value
x9 = sheet.cell(row=int(reg_number), column=9).value
x10 = sheet.cell(row=int(reg_number), column=10).value
x11 = sheet.cell(row=int(reg_number), column=11).value
x12 = sheet.cell(row=int(reg_number), column=12).value
x13 = sheet.cell(row=int(reg_number), column=13).value
x14 = sheet.cell(row=int(reg_number), column=14).value
Registration.set(x1)
Name.set(x2)
if x4 == "Female":
Gender.set("Female")
else:
Gender.set("male")
age.set(x5)
Date.set(x6)
weight.set(x7)
height.set(x8)
temprature.set(x9)
respiration.set(x10)
pulse.set(x11)
bp.set(x12)
village.set(x13)
mobile.set(x14)
remove_all()
data.clear()
updatetree()
####################################gender#####################################
def selection():
global gender
value = radio.get()
if value == 1:
gender = "Male"
else:
gender = "Female"
data = []
def updatetree():
file = openpyxl.load_workbook(patient_detail_from_doc)
# sheet.cell(column=1, row=sheet.max_row + 1, value=med_no)
sheet1 = file.active
for row in sheet1.rows:
lst = []
if Search.get() == row[0].value:
lst.append(row[0].value)
lst.append(row[1].value)
lst.append(row[2].value)
lst.append(row[3].value)
data.append(lst)
global count
count = 0
for record in data:
if count % 2 == 0:
my_tree.insert(parent='', index='end', iid=count, text="", values=(record[0], record[1], record[2],record[3]),
tags=('evenrow',))
else:
my_tree.insert(parent='', index='end', iid=count, text="", values=(record[0], record[1], record[2],record[3]),
tags=('oddrow',))
count += 1
def filldata():
file = openpyxl.load_workbook(patient_detail_from_doc)
# sheet.cell(column=1, row=sheet.max_row + 1, value=med_no)
sheet5 = file.active
setdata = StringVar()
count = 1
rows_to_delete = []
for rowcheck in sheet5.rows:
if(Search.get() == rowcheck[0].value):
rows_to_delete.append(count)
count = count +1
# for item in my_tree.get_children():
# values = my_tree.item(item)["values"]
#
# for row1 in sheet5.iter_rows():
# count += 1
#
# print(values[0])
# if Search.get() == str(values[0]):
# rows_to_delete.append(count)
# Delete the rows in reverse order
print(rows_to_delete)
for row_index in sorted(rows_to_delete, reverse=True):
sheet5.delete_rows(row_index)
count = 0
for item in my_tree.get_children():
values = my_tree.item(item)["values"]
row = sheet5.max_row + 1
for col, value in enumerate(values, start=1):
setdata.set(value)
sheet5.cell(row=row, column=col, value=setdata.get())
count =0
file.save(patient_detail_from_doc)
# Close the workbook
file.close()
def remove_all():
for record in my_tree.get_children():
my_tree.delete(record)
def reg_page():
file = openpyxl.load_workbook("Stock.xlsx")
sheet3 = file.active
for i in sheet3.rows:
docmedslst.append(i[2].value)
# Labels
customtkinter.CTkLabel(obj, text="Full Name:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=55)
customtkinter.CTkLabel(obj, text="Age:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=105)
customtkinter.CTkLabel(obj, text="Gender:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=155)
customtkinter.CTkLabel(obj, text="Weight:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=355)
customtkinter.CTkLabel(obj, text="Height:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=405)
customtkinter.CTkLabel(obj, text="Temperature:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=205)
customtkinter.CTkLabel(obj, text="Pulse:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=255)
customtkinter.CTkLabel(obj, text="Respiration:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=455)
customtkinter.CTkLabel(obj, text="BP:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=505)
customtkinter.CTkLabel(obj, text="Village Name:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=305)
customtkinter.CTkLabel(obj, text="Mobile:", text_color=textcolor,font=(fontmain, 20)).place(x=30, y=555)
# Entry
customtkinter.CTkLabel(obj, textvariable = Name, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=50)
customtkinter.CTkLabel(obj, textvariable = age, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=100)
customtkinter.CTkLabel(obj, textvariable = weight, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=350)
customtkinter.CTkLabel(obj, textvariable = height, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=400)
customtkinter.CTkLabel(obj, textvariable = temprature, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=200)
customtkinter.CTkLabel(obj, textvariable = pulse, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=250)
customtkinter.CTkLabel(obj, textvariable = respiration, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=450)
customtkinter.CTkLabel(obj, textvariable = bp, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=500)
customtkinter.CTkLabel(obj, textvariable = village, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=550)
customtkinter.CTkLabel(obj, textvariable = mobile, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=300)
customtkinter.CTkLabel(obj, textvariable= Gender, text_color=textcolor, font=(fontmain, 20)).place(x=170, y=160)
# Doc
def clear_textbox():
textbox.delete(1.0,3.0)
def get_text():
global pnt_dis
pnt_dis = textbox.get(1.0, 3.0)
customtkinter.CTkLabel(obj2, text="Patient Description", text_color=textcolor, font=(fontmain, 20)).place(x=430, y=10)
textbox = customtkinter.CTkTextbox(obj2, fg_color=mainbackground,width=400,height=200, corner_radius=10, border_width=2,
border_color="black", border_spacing=2,text_color="black",activate_scrollbars=True,scrollbar_button_color=background)
textbox.place(x=430, y=50)
def update(data):
# Clear the listbox
my_list.delete(0, END)
# Add toppings to listbox
for item in data:
my_list.insert(END, item)
def fillout(e):
my_entry.delete(0, END)
selected_item = my_list.get(ANCHOR)
my_entry.insert(0, selected_item)
my_list.selection_clear(0, END)
def check(e):
# grab what was typed
typed = my_entry.get()
if typed == '':
data = toppings
else:
data = []
for item in toppings:
if typed.lower() in item.lower():
data.append(item)
# update our listbox with selected items
update(data)
customtkinter.CTkLabel(obj2, text="Search", text_color=textcolor, font=(fontmain, 20)).place(x=10, y=10)
my_entry = customtkinter.CTkEntry(master=obj2, text_color=textcolor,fg_color=background,corner_radius=10, textvariable=med_name, height=30,
font=(fontmain, 20), width=240)
my_entry.place(x=10, y=50)
my_list = Listbox(obj2, width=40,bd=0,background=background)
my_list.place(x=10, y=90)
# Create a list of pizza toppings
toppings = []
file = openpyxl.load_workbook("Stock.xlsx")
sheet = file.active
for row in sheet.rows:
if(row[2].value != None):
toppings.append(row[2].value)
# Add the toppings to our list
update(toppings)
# Create a binding on the listbox onclick
my_list.bind("<<ListboxSelect>>", fillout)
# Create a binding on the entry box
my_entry.bind("<KeyRelease>", check)
# addition med
my_entry_mg = customtkinter.CTkEntry(master=obj2, text_color=textcolor, fg_color=background, corner_radius=8,
textvariable=dose, height=30,
font=(fontmain, 20), width=50)
my_entry_mg.place(x=330, y=130)
customtkinter.CTkLabel(obj2, text="Dose : ", text_color=textcolor, font=(fontmain, 20)).place(x=260, y=130)
my_entry_days = customtkinter.CTkEntry(master=obj2, text_color=textcolor, fg_color=background, corner_radius=8,
textvariable=days, height=30,
font=(fontmain, 20), width=50)
my_entry_days.place(x=330, y=190)
customtkinter.CTkLabel(obj2, text="Days : ", text_color=textcolor, font=(fontmain, 20)).place(x=260, y=190)
# button delete ans add
def Clear():
global pnt_dis
med_no.set('')
days.set('')
dose.set('')
pnt_dis = ''
med_name.set('')
# registration_no()
def add_record():
sno =1
for i in docmedslst:
print(i)
print(med_name.get())
if(med_name.get() != i):
print("------------------------------")
print(i)
print(med_name.get())
sno = sno +1
print("--------------------------------")
else:
break
file = openpyxl.load_workbook("Stock.xlsx")
sheet = file.active
col = "B"
main_drug_name = sheet[f"{col}{sno}"].value
my_tree.tag_configure('oddrow', background="white")
my_tree.tag_configure('evenrow', background="lightblue")
global count
if count % 2 == 0:
my_tree.insert(parent='', index='end', iid=count, text="",
values=(Registration.get(), main_drug_name, days.get(),dose.get()), tags=('evenrow',))
else:
my_tree.insert(parent='', index='end', iid=count, text="",
values=(Registration.get(), main_drug_name, days.get(),dose.get()), tags=('oddrow',))
count += 1
def find_item_in_list(lst, item):
try:
index = lst.index(item)
return False
except ValueError:
return True
def addmed():
if(Search.get() == ''):
messagebox.showerror("“error", "Please find the Patient!")
return
get_text()
n = med_name.get()
med_d = days.get()
med_dose = dose.get()
if n == "" or med_d == "" or med_dose == "":
messagebox.showerror("“error", "Few Data is missing!")
else:
# file = openpyxl.load_workbook(patient_detail_from_doc)
# # sheet.cell(column=1, row=sheet.max_row + 1, value=med_no)
# sheet1 = file.active
# sheet1.cell(column=1, row=sheet1.max_row + 1, value=Search.get())
# sheet1.cell(column=2, row=sheet1.max_row, value=n)
# sheet1.cell(column=3, row=sheet1.max_row, value=med_d)
# sheet1.cell(column=4, row=sheet1.max_row, value=med_dose)
# sheet1.cell(column=5, row=sheet1.max_row, value=pnt_dis)
#
# file.save(patient_detail_from_doc)
add_record()
a = find_item_in_list(docmedslst,med_name.get())
if(a):
file2 = openpyxl.load_workbook(doc_med_old)
sheet3 = file2.active
sheet3.cell(column=1, row=sheet3.max_row + 1, value=med_name.get())
file2.save(doc_med_old)
for row in sheet3.rows:
toppings.append(row[0].value)
# Add the toppings to our list
Clear() # clear entry box and image section
def delmed():
# x = my_tree.selection()[0]
#
# print(x)
# values = my_tree.item(x, 'values')
# print(values)
# file3 = openpyxl.load_workbook(patient_detail_from_doc)
# sheet4 = file3.active
# print(values[1])
# count =0
# for row1 in sheet4.rows:
# count = count+1
# print(row1)
# if str(values[1]) == row1[1].value:
# sheet4.delete_rows(count)
# # Save the changes
# file3.save(patient_detail_from_doc)
# count =0
# # Close the workbook
# file3.close()
# my_tree.delete(x)
x = my_tree.selection()[0]
my_tree.delete(x)
add_btn = customtkinter.CTkButton(obj2, text='ADD', hover="disable",
fg_color=buttoncolorlite, width=80, corner_radius=10, border_width=2,
border_color="black", border_spacing=2, height=40,command=lambda: addmed()
)
add_btn.place(x=30, y=280)
delete_btn = customtkinter.CTkButton(obj2, text='DELETE', hover="disable",
fg_color=buttoncolorlite, width=100, corner_radius=10, border_width=2,
border_color="black", border_spacing=2, height=40,
command=lambda: delmed()
)
delete_btn.place(x=140, y=280)
# excel
style = ttk.Style()
# Pick a theme
style.theme_use("clam")
# Configure our treeview colors
style.configure("Treeview",
background=background,
foreground="black",
rowheight=20,
fieldbackground=mainbackground
)
# Change selected color
style.map('Treeview',
background=[('selected', 'blue')])
# Create Treeview Frame
tree_frame = Frame(obj2)
tree_frame.place(x = 20, y = 340)
# Treeview Scrollbar
tree_scroll = customtkinter.CTkScrollbar(tree_frame,corner_radius=9,fg_color=mainbackground,button_color=background)
tree_scroll.pack(side=RIGHT, fill=Y)
# Create Treeview
global my_tree
my_tree = ttk.Treeview(tree_frame, yscrollcommand=tree_scroll.set, selectmode="extended")
# Pack to the screen
my_tree.pack()
# Configure the scrollbar
tree_scroll.configure(command=my_tree.yview)
# Define Our Columns
my_tree['columns'] = ("Token", "Name", "Dose","Days")
# Formate Our Columns
my_tree.column("#0", width=0, stretch=NO)
my_tree.column("Name", anchor=W, width=140)
my_tree.column("Dose", anchor=CENTER, width=100)
my_tree.column("Days", anchor=W, width=140)
my_tree.column("Token", anchor=W, width=140)
# Create Headings
my_tree.heading("#0", text="", anchor=W)
my_tree.heading("Name", text="Name", anchor=W)
my_tree.heading("Dose", text="Dose", anchor=CENTER)
my_tree.heading("Days", text="Days", anchor=W)
my_tree.heading("Token", text="Token", anchor=W)
updatetree()
# Create striped row tags
my_tree.tag_configure('oddrow', background="white")
my_tree.tag_configure('evenrow', background="lightblue")
def stock_page():
s_p = tk.Frame(obj)
tk.Label(s_p, text='stock Page\n\nPage:3', font='Bold,30').place(x=1,y=1)
def del_page():
for frame in obj.winfo_children():
frame.destroy()
for frame in obj2.winfo_children():
frame.destroy()
def hideindicate():
reg_indicate.config(bg=buttoncolorlite)
stock_indicate.config(bg=buttoncolorlite)
reg_btn.configure(fg_color=buttoncolorlite)
stock_btn.configure(fg_color=buttoncolorlite)
def indicate(lb, btn, page):
hideindicate()
if btn == 1:
reg_btn.configure(fg_color=buttoncolor)
else:
stock_btn.configure(fg_color=buttoncolor)
lb.config(bg=buttoncolor)
del_page()
page()
# top frames
obj = customtkinter.CTkFrame(master=root, corner_radius=15, width=400, height=600, fg_color=obj_frame_col, border_width=4,
border_color="black")
obj.place(x=230, y=130)
obj2 = customtkinter.CTkFrame(master=root, corner_radius=15, width=850, height=600, fg_color=obj_frame_col, border_width=4,
border_color="black")
obj2.place(x=650, y=130)
Label(root, text="Clinic Management", width=10, height=2, bg="#c36464", fg='#fff', font='arial 20 bold').pack(side=TOP,
fill=X)
customtkinter.CTkLabel(master=root, text="Date:", text_color=textcolor,font=(fontmain, 20)).place(x=230, y=77)
# registration_no()
today = date.today()
d1 = today.strftime("%d/%m/%Y")
customtkinter.CTkLabel(root, textvariable=Date, text_color=textcolor, font=(fontmain, 20)).place(x=290, y=75)
Date.set(d1)
reg_page()
option_frame = customtkinter.CTkFrame(master=root, corner_radius=0, fg_color=side_frame_col)
my_image = customtkinter.CTkImage(light_image=Image.open("stock-removebg-preview.png"),
dark_image=Image.open("stock-removebg-preview.png"),
size=(40, 40))
reg_btn = customtkinter.CTkButton(option_frame, image=my_image, fg_color=buttoncolor, hover="disable", text='Check-Up',
width=150, corner_radius=10, border_width=2, border_color="black", border_spacing=2,
height=40, command=lambda: indicate(reg_indicate, 1, reg_page))
reg_btn.place(x=15, y=50)
reg_indicate = tk.Label(option_frame, text='', bg=buttoncolor)
reg_indicate.place(x=3, y=55, width=5, height=40)
stock_btn = customtkinter.CTkButton(option_frame, text='Stock ', hover="disable", image=my_image,
fg_color=buttoncolorlite, width=150, corner_radius=10, border_width=2,
border_color="black", border_spacing=2, height=40,
command=lambda: indicate(stock_indicate, 3, stock_page))
stock_btn.place(x=15, y=160)
stock_indicate = tk.Label(option_frame, text='', bg=buttoncolor)
stock_indicate.place(x=3, y=165, width=5, height=40)
option_frame.pack(side=tk.LEFT)
option_frame.pack_propagate(False)
option_frame.configure(width=200, height=730)
main_frame = tk.Frame(root, highlightbackground='black', highlightthickness=10)
# button
customtkinter.CTkEntry(master=root, corner_radius=15,text_color=textcolor, fg_color=background,textvariable=Search, placeholder_text="search", height=40,
font=(fontmain, 20), width=220).place(x=1110, y=75)
imageicon3 = PhotoImage(file="Images/search.png")
srchimage = customtkinter.CTkImage(light_image=Image.open("stock-removebg-preview.png"),
dark_image=Image.open("stock-removebg-preview.png"),
size=(40, 40))
Srch = customtkinter.CTkButton(root, text="Search", command=search, image=srchimage, fg_color=buttoncolor, hover="disable",
width=150, corner_radius=10, border_width=2, border_color="black", border_spacing=2,
height=40)
Srch.place(x=1350, y=70)
save2Button = customtkinter.CTkButton(obj2, text="Save", image=srchimage, fg_color=buttoncolor, hover="disable", width=150,
corner_radius=10, border_width=2, border_color="black", border_spacing=2,
height=40, command=filldata)
save2Button.place(x=600, y=300)
customtkinter.CTkButton(obj2, text="Reset", image=srchimage, fg_color=buttoncolor, hover="disable", width=150,
corner_radius=10, border_width=2, border_color="black", border_spacing=2, height=40,
command=Clear2).place(x=600, y=400)
customtkinter.CTkButton(obj2, text="Exit", image=srchimage, fg_color=buttoncolor, hover="disable", width=150,
corner_radius=10, border_width=2, border_color="black", border_spacing=2, height=40,
command=Exit).place(x=600, y=500)
root.mainloop() | agarg1107/healthcare | doc.py | doc.py | py | 24,634 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "customtkinter.set_appearance_mode",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "customtki... |
26821754249 | from django.urls import path
from . import views
app_name = 'myids'
urlpatterns = [
path('', views.view_index, name='index'),
path('stat', views.view_stat, name='stat'),
path('conn', views.view_conn, name='conn'),
path('query_conn', views.query_conn, name='query_conn'),
path('query_stat', views.query_stat, name='query_stat'),
] | ponedo/Network-intrusion-detection-system-based-on-heuristic-downsampling-and-random-forest | web_module/myids/urls.py | urls.py | py | 351 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
23620664915 | import math
import sys
import os
import itertools
import traceback
import copy
import datetime
from collections import deque
from collections import Counter
from random import shuffle, choice
import json
import multiprocessing
"""
Hexagons are handled in the QRS coordinate system.
Refer to https://www.redblobgames.com/grids/hexagons/
for a detailed overview.
All hexagons in "pointy top" orientation.
PIL coordinate system origin is top-left.
All dimensions in mm, except when drawing on PIL's canvas.
We want a DeBruijn-like 2D map of hexagons, ie. each
hexagonal sliding window should show a pattern of
points (one center, 6 in the ring) that is unique in
the whole map.
Wording: "pattern" = 7 points, all points = "map".
A perfect map would contain unique patterns that can only
be found once and have no duplicates if rotated. A good map
contains only unique patterns or patterns that need to be
rotated more than one position forward or backward to create
a duplicate.
General procedure:
* make a list of a all possible combinations of colors that
can be a pattern (ie. 2 colors = 2**7=128 patterns)
* shuffle that list
* pick the first pattern from the list and assign to center hex
* iteratively test and assign patterns from the list to all
neighbouring hexes that match up with the already assigned
patterns
* if all hexes are filled successful calculate penalty (low
penalty for duplicated that require more than one rotation,
very high penalty for +1/-1 rot duplicates. Increase penatly
for duplicates close to the center of the map
* shuffle the pattern list and repeat
# Execute with PyPy for a 20-25% speedup.
pypy3 generate.py
"""
DIMENSIONS = [40, 40] # [60, 60]
IMAGE_SCALE = 50
OUTPUT_DIR = os.path.join("unique_color_{}", "output_radius_{}")
OUTPUT_IMAGE = "{penalties}_output_{prefix}_i-{iteration}.png"
OUTPUT_JSON = "{penalties}_data_{prefix}_i-{iteration}.json"
OUTPUT_REPORT = "report.txt"
# hexagon shape: pointy side up
HEX_SIZE = 1.0
HEX_HORIZONTAL = math.sqrt(3) * HEX_SIZE
HEX_VERTICAL = 3/2 * HEX_SIZE
INNER_CIRCLE_DIAM = 28
NUM_COLORS = 2
COLORS = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
GRID_RADIUS = 6
NUM_ITERATIONS = 1e9
WRITE_JSON = True
WRITE_IMAGE = True
ABORT_FAST = False
# UNIQUE_PATTERN = [1] * 7
UNIQUE_PATTERN = [0, 0, 0, 0, 1, 1, 1]
if WRITE_IMAGE:
from PIL import Image, ImageDraw, ImageFont
font = ImageFont.load_default()
font_large = ImageFont.truetype("FiraMono-Regular.ttf", 32)
font_large_bold = ImageFont.truetype("FiraMono-Bold.ttf", 32)
# ----------------------------------------------------------------------
def distance_from_center(q, r, s):
    """Hex distance of a cell from the origin in cube coordinates.

    For cube coordinates (q + r + s == 0) the distance to the center is
    the largest absolute component.
    """
    return max(abs(component) for component in (q, r, s))
def sanity_check(h, lookup):
    """Raise if the finished map violates the uniqueness guarantees.

    Walks every fully-surrounded cell, rebuilds its 7-value pattern
    (center + 6 neighbours) and verifies that (a) the reserved
    UNIQUE_PATTERN, or any rotation of it, appears only at 0|0|0 and
    (b) no pattern occurs twice anywhere on the map.
    """
    known_patterns = []
    unique_patterns = []
    # Precompute every rotation of the reserved pattern.
    # NOTE(review): this rotates all 7 values including the center slot,
    # mirroring the removal loop in __main__ -- confirm that is intended
    # (ring rotation elsewhere keeps the center fixed).
    for i in range(0, 6):
        p_rot = UNIQUE_PATTERN[i:] + UNIQUE_PATTERN[:i]
        unique_patterns.append(make_lookup_key(p_rot))
    # for q, r, s in reversed(get_all_hexagons()):
    for key in h.keys():
        q, r, s = [int(x) for x in key.split("|")]
        p = [h["{}|{}|{}".format(q, r, s)]]
        neighbours = get_neighbours(q, r, s)
        for n in neighbours:
            p.append(h["{}|{}|{}".format(*n)])
        # ignore the outermost ring of hexagons (they have no neighbours)
        if len(p) != 7:
            continue
        lookup_key = make_lookup_key(p)
        # print("{} {} {} --- {}".format(q, r, s, lookup_key))
        # check if the unique pattern (or any rotational duplicate of it) is found anywhere except 0|0|0
        if lookup_key in unique_patterns and [q, r, s] != [0, 0, 0]:
            raise Exception("unique pattern (rotation) duplicate! [{},{},{}]".format(q, r, s))
        if lookup_key in known_patterns:
            raise Exception("duplicate! [{},{},{}]".format(q, r, s))
        known_patterns.append(lookup_key)
def calculate_penalties(h, lookup):
    """Score a completed map; lower is better.

    A pattern whose +1/-1 ring rotation already exists on the map is
    penalized 10x harder than rotations by more than one step, and
    duplicates closer to the center weigh more. A failed sanity check
    renders the broken map with prefix "error" and exits the process.

    Returns (penalty_points, rot_1, rot_n), where rot_1/rot_n are flat
    lists of "q|r|s" keys involved in 1-step / n-step rotational
    duplicates (each collision contributes both cells).
    """
    try:
        sanity_check(h, lookup)
    except Exception as e:
        print(e)
        info = {
            "penalties": -1,
            "iteration": 1,
            "rot_1": [],
            "rot_n": [],
            "prefix": "error"
        }
        draw_image(h, info)
        sys.exit(-1)
    penalty_points = 0
    rot_1 = []
    rot_n = []
    for key in h.keys():
        q, r, s = [int(x) for x in key.split("|")]
        # Rebuild the 7-value pattern: center followed by its ring.
        p = [h["{}|{}|{}".format(q, r, s)]]
        neighbours = get_neighbours(q, r, s)
        for n in neighbours:
            p.append(h["{}|{}|{}".format(*n)])
        # ignore the outermost ring of hexagons
        if len(p) != 7:
            continue
        # find the pattern rotated (ring rotated by `rot` steps, center fixed)
        for rot in range(1, 6):
            p_rot = [p[0]] + p[-rot:] + p[1:-rot]
            key_for_rotated_values = make_lookup_key(p_rot)
            if key_for_rotated_values == make_lookup_key(p):
                # false positive (pattern has only a single color on the ring)
                continue
            if key_for_rotated_values in lookup:
                pos_of_rotated_hex = lookup[key_for_rotated_values]
                if rot == 1 or rot == 5:
                    # one rotation step forward/backward: heavy penalty
                    penalty_points += 10 * (GRID_RADIUS - distance_from_center(q, r, s))
                    rot_1.append("{}|{}|{}".format(q, r, s))
                    rot_1.append("{}|{}|{}".format(*pos_of_rotated_hex))
                else:
                    penalty_points += 1 * (GRID_RADIUS - distance_from_center(q, r, s))
                    rot_n.append("{}|{}|{}".format(q, r, s))
                    rot_n.append("{}|{}|{}".format(*pos_of_rotated_hex))
    print("penalty: {}".format(penalty_points))
    return penalty_points, rot_1, rot_n
# def calculate_penalties(h, lookup):
# penalty_points = 0
# rot_1 = []
# rot_n = []
# # sanity check:
# print("h keys: {} | lookup keys {}".format(len(h.keys()), len(lookup.keys())))
# for key in h.keys():
# q, r, s = [int(x) for x in key.split("|")]
# center = deque([h["{}|{}|{}".format(q, r, s)]])
# p = deque()
# neighbours = get_neighbours(q, r, s)
# for n in neighbours:
# p.append(h["{}|{}|{}".format(*n)])
# # ignore the outermost ring of hexagons
# if len(p) != 6:
# continue
# key_unrotated = make_lookup_key(center + p)
# # find the pattern rotated
# for rot in range(1, 6):
# p.rotate(1)
# key_for_rotated_values = make_lookup_key(center + p)
# if key_for_rotated_values == key_unrotated:
# # false positive (pattern has only a single color on the ring)
# continue
# if key_for_rotated_values in lookup:
# pos_of_rotated_hex = lookup[key_for_rotated_values]
# if rot == 1 or rot == 5:
# penalty_points += 10 * (GRID_RADIUS - distance_from_center(q, r, s))
# rot_1.append("{}|{}|{}".format(q, r, s))
# rot_1.append("{}|{}|{}".format(*pos_of_rotated_hex))
# else:
# penalty_points += 1 * (GRID_RADIUS - distance_from_center(q, r, s))
# rot_n.append("{}|{}|{}".format(q, r, s))
# rot_n.append("{}|{}|{}".format(*pos_of_rotated_hex))
# return penalty_points, rot_1, rot_n
def get_all_hexagons_rec(l, q, r, s):
    """Depth-first collection of all hexagon coordinates reachable from (q, r, s).

    Appends [q, r, s] to *l*, then recurses into each not-yet-visited
    neighbour. Returns *l* for convenience.
    """
    l.append([q, r, s])
    for candidate in get_neighbours(q, r, s):
        if candidate in l:
            continue
        get_all_hexagons_rec(l, *candidate)
    return l
def get_all_hexagons():
    """Return all map coordinates in the order cells should be filled.

    Several traversal strategies were tried (circular from the center,
    depth-first, linear sweep); the active one alternates along the
    q axis starting at the center, e.g. q = 0, 1, -1, 2, -2, ...
    """
    l = []
    # ---
    # extend circular from center
    # l.append([0, 0, 0]) # center
    # for radius in range(1, GRID_RADIUS+1):
    #     # bottom (6 o'clock)
    #     for i in range(0, radius):
    #         l.append([-radius+i, radius, -i])
    #     # bottom right (4 o'clock)
    #     for i in range(0, radius):
    #         l.append([i, radius-i, -radius])
    #     # ...
    #     for i in range(0, radius):
    #         l.append([radius, -i, -radius+i])
    #     for i in range(0, radius):
    #         l.append([radius-i, -radius, i])
    #     for i in range(0, radius):
    #         l.append([-i, -radius+i, radius])
    #     for i in range(0, radius):
    #         l.append([-radius, i, radius-i])
    # ---
    # depth first
    # l = get_all_hexagons_rec([], 0, 0, 0)
    # ---
    # linear sweep
    # for r in range(-GRID_RADIUS+1, +GRID_RADIUS):
    #     for q in range(-GRID_RADIUS+1, +GRID_RADIUS):
    #         if r+q<-GRID_RADIUS+1 or r+q>GRID_RADIUS-1:
    #             continue
    #         s = -q -r
    #         l.append([q, r, s])
    # ---
    # custom (alternating along the q axis from center first)
    indices = [[x, -x] for x in range(1, +GRID_RADIUS)]
    indices = [0] + list(itertools.chain(*indices))
    for q in indices:
        for r in indices:
            # skip coordinates outside the (inner) hexagonal boundary
            if r+q<-GRID_RADIUS+1 or r+q>GRID_RADIUS-1:
                continue
            s = -q -r
            l.append([q, r, s])
    return l
def init_h(radius=None):
    """Create the empty hexagon map.

    Returns a dict mapping "q|r|s" coordinate keys to None (no color
    assigned yet) for every cell with hex distance <= *radius*.

    *radius* defaults to the module-level GRID_RADIUS, so existing
    callers are unaffected; passing it explicitly makes the function
    reusable (and testable) for other map sizes.
    """
    if radius is None:
        radius = GRID_RADIUS
    h = {}
    for r in range(-radius, +radius + 1):
        for q in range(-radius, +radius + 1):
            # skip coordinates outside the hexagonal boundary
            if r + q < -radius or r + q > radius:
                continue
            s = -q - r
            h["{}|{}|{}".format(q, r, s)] = None
    return h
def build_pattern_list(num_colors=None):
    """Return every possible 7-cell color pattern as a list of lists.

    Each pattern holds one color index per slot (center + 6 ring cells),
    giving num_colors**7 patterns in total.

    *num_colors* defaults to the module-level NUM_COLORS, keeping the
    original zero-argument call working while allowing other color
    counts to be generated explicitly.
    """
    if num_colors is None:
        num_colors = NUM_COLORS
    combos = itertools.product(range(num_colors), repeat=7)
    return [list(c) for c in combos]  # lists, not tuples: callers mutate/remove
def match_pattern(a, b):
    """Element-wise pattern comparison where None acts as a wildcard.

    Raises if the patterns differ in length; otherwise returns True when
    every position is equal or at least one side is None (don't care).
    """
    if len(a) != len(b):
        raise Exception("comparison failed, pattern length do not match ({}, {})".format(len(a), len(b)))
    return all(
        x is None or y is None or x == y
        for x, y in zip(a, b)
    )
def fill(h, lookup, q, r, s, patterns, lookup_keys):
    """Assign the first acceptable pattern to cell (q, r, s).

    A candidate must match the already-fixed values of the cell and its
    neighbours (None = still free), must not already exist on the map
    (`lookup`), and ideally neither of its one-step ring rotations may
    exist either. If only rotation-colliding candidates remain and
    ABORT_FAST is off, the first such candidate is used as a fallback.

    `lookup_keys[i]` is the precomputed lookup key of `patterns[i]`.
    Returns True when a pattern was written, False otherwise.
    """
    values = get(h, q, r, s)
    rot_collision_patterns = []
    for i in range(0, len(patterns)):
        p = patterns[i]
        # check if proposed pattern matches non-None parts of existing patterns
        if not match_pattern(p, values):
            continue
        # collision
        if lookup_keys[i] in lookup:
            continue
        # rotate pattern by -1 and check for collisions
        p_rot = [p[0]] + p[-5:] + p[1:-5]
        if make_lookup_key(p_rot) in lookup:
            rot_collision_patterns.append(p)
            continue
        # rotate pattern by +1 and check for collisions
        p_rot = [p[0]] + p[-1:] + p[1:-1]
        if make_lookup_key(p_rot) in lookup:
            rot_collision_patterns.append(p)
            continue
        # no collisions and no +1/-1 rotated collisions, all good
        # NOTE: `set` is this module's helper (shadows the builtin `set`).
        set(h, lookup, q, r, s, p)
        return True
    # no perfect pattern found
    if not ABORT_FAST and len(rot_collision_patterns) > 0:
        set(h, lookup, q, r, s, rot_collision_patterns[0])
        return True
    return False
def is_filled(h, q, r, s):
    """Return True when the cell and all of its neighbours already have a color."""
    return None not in get(h, q, r, s)
def fill_all(h, lookup, all_hexagons, patterns):
    """Try to assign a pattern to every hexagon; True on full success.

    The lookup keys of all candidate patterns are computed once up front
    so the matching loop inside fill() does not have to rebuild them.
    Stops and returns False as soon as any cell cannot be filled.
    """
    lookup_keys = [make_lookup_key(p) for p in patterns]
    for q, r, s in all_hexagons:
        if is_filled(h, q, r, s):
            continue
        if not fill(h, lookup, q, r, s, patterns, lookup_keys):
            return False
    return True
def fill_random(h, lookup):
    """Debug helper: color every cell at random, then check for duplicates.

    Fills the inner map with random 0/1 values, rebuilds each complete
    7-value pattern and registers it in `lookup`. Returns False as soon
    as any pattern occurs twice, True if the random map happens to be
    duplicate-free.
    """
    for r in range(-GRID_RADIUS, +(GRID_RADIUS+1)):
        for q in range(-GRID_RADIUS, +(GRID_RADIUS+1)):
            # only cells strictly inside the outer ring get random colors
            if r+q<-GRID_RADIUS+1 or r+q>GRID_RADIUS-1:
                continue
            s = -q -r
            h["{}|{}|{}".format(q, r, s)] = choice([0, 1])
    for key in h.keys():
        q, r, s = [int(x) for x in key.split("|")]
        p = [h["{}|{}|{}".format(q, r, s)]]
        neighbours = get_neighbours(q, r, s)
        for n in neighbours:
            p.append(h["{}|{}|{}".format(*n)])
        # ignore the outermost ring of hexagons
        if len(p) != 7:
            continue
        lookup_key = make_lookup_key(p)
        if lookup_key in lookup:
            return False
        else:
            lookup[lookup_key] = [q, r, s]
    return True
def get(h, q, r, s):
    """Return the colors of a cell followed by its (up to 6) neighbours."""
    cells = [[q, r, s]] + get_neighbours(q, r, s)
    return [h["{}|{}|{}".format(*cell)] for cell in cells]
def make_lookup_key(values):
    """Serialize a pattern into its canonical space-separated string key."""
    return " ".join(str(v) for v in values)
def set(h, lookup, q, r, s, values):
    """Write a 7-value pattern onto cell (q, r, s) and its six neighbours.

    values[0] is the center; values[1..6] go to the neighbours in the
    order returned by get_neighbours(). The pattern is also registered
    in `lookup` (key -> [q, r, s]).

    NOTE(review): the name shadows the builtin `set`; callers throughout
    this module rely on it, so the name is kept.
    """
    # if None in values:
    #     raise Exception("setting None values: ", values)
    n = get_neighbours(q, r, s)
    h["{}|{}|{}".format(q, r, s)] = values[0]
    h["{}|{}|{}".format(*n[0])] = values[1]
    h["{}|{}|{}".format(*n[1])] = values[2]
    h["{}|{}|{}".format(*n[2])] = values[3]
    h["{}|{}|{}".format(*n[3])] = values[4]
    h["{}|{}|{}".format(*n[4])] = values[5]
    h["{}|{}|{}".format(*n[5])] = values[6]
    key = make_lookup_key(values)
    # if key in lookup:
    #     raise Exception("creating collision")
    lookup[key] = [q, r, s]
def get_neighbours(q, r, s, radius=None):
    """Return the six neighbour coordinates of a cell, clockwise from 1 o'clock.

    Cells on the outermost ring (any cube coordinate at +/-radius)
    report no neighbours, so border patterns are never treated as
    complete 7-cell windows.

    *radius* defaults to the module-level GRID_RADIUS, keeping the
    original three-argument call working.
    """
    if radius is None:
        radius = GRID_RADIUS
    if abs(q) == radius or abs(r) == radius or abs(s) == radius:
        return []
    return [
        [q+1, r-1, s+0], # top right (1 o'clock)
        [q+1, r+0, s-1], # right (3 o'clock)
        [q+0, r+1, s-1], # bottom right (5 o'clock)
        [q-1, r+1, s+0], # bottom left (7 o'clock)
        [q-1, r+0, s+1], # left (9 o'clock)
        [q+0, r-1, s+1], # top left (11 o'clock)
    ]
def pointy_hex_to_pixel(q, r, s, center=(0, 0), size=None):
    """Convert cube hex coordinates to 2D canvas coordinates (pointy-top layout).

    *s* is redundant in cube coordinates and unused; it is kept so the
    signature matches the rest of the module. *center* is an (x, y)
    offset added to the result. *size* defaults to the module-level
    HEX_SIZE (backward compatible).

    The default for *center* was changed from a mutable list to a tuple
    to avoid the shared-mutable-default pitfall; it is only indexed, so
    behaviour is identical.
    """
    if size is None:
        size = HEX_SIZE
    x = size * (math.sqrt(3) * q + math.sqrt(3) / 2 * r)
    y = size * (3. / 2 * r)
    return (x + center[0], y + center[1])
def convert(coords, scale=None):
    """Scale drawing coordinates (mm) into PIL canvas pixels.

    *scale* defaults to the module-level IMAGE_SCALE (backward
    compatible). Axis flipping is deliberately disabled: PIL's origin is
    top-left and the drawing code already works in that orientation.
    """
    if scale is None:
        scale = IMAGE_SCALE
    # flip axis for PIL's top-left coordinate system (intentionally off):
    # coords_scaled[1] = DIMENSIONS[1]*IMAGE_SCALE - coords_scaled[1]
    # if len(coords) == 4:
    #     coords_scaled[3] = DIMENSIONS[1]*IMAGE_SCALE - coords_scaled[3]
    return tuple(c * scale for c in coords)
def draw_image(h, info):
    """Render the map, duplicate markers and a stats legend to a PNG.

    `info` must provide "penalties", "iteration", "prefix" plus the
    rot_1 / rot_n key lists from calculate_penalties(). The file goes to
    OUTPUT_DIR, named via OUTPUT_IMAGE.
    """
    with Image.new(mode="RGB", size=[DIMENSIONS[0]*IMAGE_SCALE, DIMENSIONS[1]*IMAGE_SCALE]) as im:
        draw = ImageDraw.Draw(im, "RGBA")
        # rot_marker_radius = 0.7
        rot_1 = info["rot_1"]
        rot_n = info["rot_n"]
        # # mark all hexagons that have a rotation duplicate that require more than one rotation step
        # for item in rot_n:
        #     q, r, s = item
        #     x, y = pointy_hex_to_pixel(q, r, s, center=[DIMENSIONS[0]/2, DIMENSIONS[1]/2])
        #     draw.ellipse(
        #         convert([x-rot_marker_radius, y+rot_marker_radius, x+rot_marker_radius, y-rot_marker_radius]),
        #         fill=None, outline=(100, 100, 100), width=3)
        # # mark all hexagons that have a rotation duplicate with only a single rotation step difference
        # for item in rot_1:
        #     q, r, s = item
        #     x, y = pointy_hex_to_pixel(q, r, s, center=[DIMENSIONS[0]/2, DIMENSIONS[1]/2])
        #     draw.ellipse(
        #         convert([x-rot_marker_radius, y+rot_marker_radius, x+rot_marker_radius, y-rot_marker_radius]),
        #         fill=None, outline=(255, 0, 0), width=3)
        # how often each cell participates in a rotational duplicate
        rot_n_occurences = Counter(rot_n)
        rot_1_occurences = Counter(rot_1)
        for r in range(-GRID_RADIUS, +(GRID_RADIUS+1)):
            for q in range(-GRID_RADIUS, +(GRID_RADIUS+1)):
                if r+q<-GRID_RADIUS or r+q>GRID_RADIUS:
                    continue
                s = -q -r
                x, y = pointy_hex_to_pixel(q, r, s, center=[DIMENSIONS[0]/2, DIMENSIONS[1]/2])
                # hexagon fill (sides are drawn separately since PIL creates fat, overlapping lines when the polygons have an outline)
                f = None
                c = None
                rot_marker_radius = 0.7
                key = "{}|{}|{}".format(q, r, s)
                # marker color: gray scaled by n-step count, red for 1-step
                if key in rot_n_occurences:
                    c = (40*rot_n_occurences[key], 40*rot_n_occurences[key], 40*rot_n_occurences[key])
                if key in rot_1_occurences:
                    c = (120*rot_1_occurences[key], 0, 0)
                if c is not None:
                    draw.ellipse(
                        convert([x-rot_marker_radius, y-rot_marker_radius, x+rot_marker_radius, y+rot_marker_radius]),
                        fill=None, outline=c, width=4)
                draw.polygon([
                    convert([x, y+HEX_SIZE, ]),
                    convert([x+HEX_HORIZONTAL/2, y+HEX_VERTICAL/3]),
                    convert([x+HEX_HORIZONTAL/2, y-HEX_VERTICAL/3]),
                    convert([x, y-HEX_SIZE, ]),
                    convert([x-HEX_HORIZONTAL/2, y-HEX_VERTICAL/3]),
                    convert([x-HEX_HORIZONTAL/2, y+HEX_VERTICAL/3])
                ], outline=None, fill=f)
                # rot_marker_radius = 0.8
                # draw.ellipse(convert([x-rot_marker_radius, y+rot_marker_radius, x+rot_marker_radius, y-rot_marker_radius]), fill=(0, 0, 0))
                # hexagon sides
                c = (60, 60, 60)
                draw.line(convert([x, y+HEX_SIZE, x+HEX_HORIZONTAL/2, y+HEX_VERTICAL/3]), width=3, fill=c)
                draw.line(convert([x+HEX_HORIZONTAL/2, y+HEX_VERTICAL/3, x+HEX_HORIZONTAL/2, y-HEX_VERTICAL/3]), width=3, fill=c)
                draw.line(convert([x+HEX_HORIZONTAL/2, y-HEX_VERTICAL/3, x, y-HEX_SIZE]), width=3, fill=c)
                draw.line(convert([x, y-HEX_SIZE, x-HEX_HORIZONTAL/2, y-HEX_VERTICAL/3]), width=3, fill=c)
                draw.line(convert([x-HEX_HORIZONTAL/2, y-HEX_VERTICAL/3, x-HEX_HORIZONTAL/2, y+HEX_VERTICAL/3]), width=3, fill=c)
                draw.line(convert([x-HEX_HORIZONTAL/2, y+HEX_VERTICAL/3, x, y+HEX_SIZE]), width=3, fill=c)
                # center dot: gray when unassigned, otherwise the cell color
                c = (25, 25, 25)
                try:
                    val = h["{}|{}|{}".format(q, r, s)]
                    if val is None:
                        c = (100, 100, 100)
                    else:
                        c = COLORS[val]
                except KeyError as ke:
                    pass
                # assuming 0.8mm drill size
                draw.ellipse(convert([x-.4, y-.4, x+.4, y+.4]), fill=c, width=10)
                # QRS coordinates for each hexagon
                draw.text(convert([x-0.75, y-0.75]), "q: {}".format(q), (120, 120, 120), font=font)
                draw.text(convert([x+0.25, y-0.25]), "r: {}".format(r), (120, 120, 120), font=font)
                draw.text(convert([x-0.75, y+0.5]), "s: {}".format(s), (120, 120, 120), font=font)
        # legend block (absolute pixel coordinates, top-left corner)
        draw.text((50, 5+50), "HEX SIZE:", (255, 255, 255), font=font_large)
        draw.text((50+300, 5+50), " {:2.3f} mm".format(HEX_SIZE), (255, 255, 255), font=font_large_bold)
        draw.text((50, 5+50*2), "INNER CIRCLE:", (255, 255, 255), font=font_large)
        draw.text((50+300, 5+50*2), " {:2.2f} mm".format(INNER_CIRCLE_DIAM), (255, 255, 255), font=font_large_bold)
        draw.text((50, 5+50*3), "NUM COLORS:", (255, 255, 255), font=font_large)
        draw.text((50+300, 5+50*3), " {}".format(NUM_COLORS), (255, 255, 255), font=font_large_bold)
        draw.text((50, 5+50*4), "GRID RADIUS:", (255, 255, 255), font=font_large)
        draw.text((50+300, 5+50*4), " {}".format(GRID_RADIUS), (255, 255, 255), font=font_large_bold)
        draw.line([50, 20+50*5, 350, 20+50*5], width=1, fill=(80, 80, 80))
        draw.text((50, 5+50*6), "total points:", (255, 255, 255), font=font_large)
        draw.text((50+300, 5+50*6), " {}".format(len(h.keys())), (255, 255, 255), font=font_large_bold)
        draw.text((50, 5+50*7), "penalties:", (255, 255, 255), font=font_large)
        draw.text((50+300, 5+50*7), " {}".format(info["penalties"]), (255, 255, 255), font=font_large_bold)
        draw.text((50, 5+50*8), "iteration:", (255, 255, 255), font=font_large)
        draw.text((50+300, 5+50*8), " {}".format(info["iteration"]), (255, 255, 255), font=font_large_bold)
        # inner circle outline centered on the canvas
        draw.ellipse(convert([
            DIMENSIONS[0]/2-INNER_CIRCLE_DIAM/2, DIMENSIONS[1]/2-INNER_CIRCLE_DIAM/2,
            DIMENSIONS[0]/2+INNER_CIRCLE_DIAM/2, DIMENSIONS[1]/2+INNER_CIRCLE_DIAM/2]), fill=None, outline=(100, 100, 100), width=3)
        filename = OUTPUT_IMAGE.format(prefix=info["prefix"], iteration=info["iteration"], penalties=info["penalties"])
        filename = os.path.join(OUTPUT_DIR.format(NUM_COLORS, GRID_RADIUS), filename)
        im.save(filename)
    # print("image written to: {}".format(filename))
def save_to_file(h, lookup, info):
    """Dump the map, its lookup table and the grid parameters to JSON.

    The output filename is built from the "prefix", "iteration" and
    "penalties" entries of `info` and written into OUTPUT_DIR.
    """
    filename = OUTPUT_JSON.format(prefix=info["prefix"], iteration=info["iteration"], penalties=info["penalties"])
    filename = os.path.join(OUTPUT_DIR.format(NUM_COLORS, GRID_RADIUS), filename)
    with open(filename, "w") as f:
        data = {
            "HEX_SIZE": HEX_SIZE,
            "HEX_HORIZONTAL": HEX_HORIZONTAL,
            "HEX_VERTICAL": HEX_VERTICAL,
            "NUM_COLORS": NUM_COLORS,
            "GRID_RADIUS": GRID_RADIUS,
            "data": {}, # "-2|1|1" --> 1
            "lookup_table": lookup # "0 1 0 0 1 0 0" --> "-2|1|1"
        }
        for key in h.keys():
            data["data"][key] = h[key]
        json.dump(data, f)
    print("json written to: {}".format(filename))
def run(process_name, pattern_list, iterations):
    """Worker loop: repeatedly shuffle patterns and try to build a map.

    Each iteration re-initializes the map, pins UNIQUE_PATTERN at the
    center, and attempts to fill every cell. Valid results are scored;
    a new best (lowest) penalty is written to image/JSON. Exits the
    process with status 0 when a perfect (penalty 0) map is found.

    NOTE(review): the `len(penalties) > 0 and ...` guard means the very
    first valid result is never written to disk -- confirm whether that
    is intended. Also, any exception aborts the whole worker process.
    """
    h = {}
    lookup = {}
    all_hexagons = get_all_hexagons()
    valid_results = 0
    penalties = []
    for i in range(0, int(iterations)):
        shuffle(pattern_list)
        # beware of shuffling the order of looking at the hexagons
        # if filling the hexes is randomized, filled hexes may
        # "encircle" an unfilled one and fill it passively (without
        # collision checks and lookup table entries). Results in
        # undetected duplicates.
        # shuffle(all_hexagons)
        try:
            h = init_h()
            lookup = {}
            # pre-fill center hexagon and neighbours
            set(h, lookup, 0, 0, 0, UNIQUE_PATTERN)
            success = fill_all(h, lookup, all_hexagons, pattern_list)
            # success = fill_random(h, lookup)
            if success:
                valid_results += 1
                penalty_points, rot_1, rot_n = calculate_penalties(h, lookup)
                # print("{} | {:6} | generated pattern {} | penalty_points: {}".format(process_name, i, valid_results, penalty_points))
                if len(penalties) > 0 and penalty_points < min(penalties):
                    # new best-so-far: persist it
                    info = {
                        "penalties": penalty_points,
                        "iteration": i,
                        "rot_1": rot_1,
                        "rot_n": rot_n,
                        "prefix": process_name
                    }
                    if WRITE_IMAGE:
                        draw_image(h, info)
                    if WRITE_JSON:
                        save_to_file(h, lookup, info)
                penalties.append(penalty_points)
                if penalty_points == 0:
                    print("{} DONE. min penalty: {}, avg penalty {:5.2f}".format(process_name, min(penalties), sum(penalties)/len(penalties)))
                    sys.exit(0)
        except Exception as e:
            print("filling failed: {}".format(e))
            exc_info = sys.exc_info()
            traceback.print_exception(*exc_info)
            sys.exit(-1)
if __name__ == '__main__':
    # Ensure the output directory exists (ignore "already exists").
    try:
        os.makedirs(OUTPUT_DIR.format(NUM_COLORS, GRID_RADIUS))
    except Exception as e:
        pass
    print("total number of hexagons: {}".format(int(1 + 6 * (GRID_RADIUS**2 + GRID_RADIUS)/2)))
    print("total number of patterns: {} / {}".format(int(1 + 6 * ((GRID_RADIUS-1)**2 + (GRID_RADIUS-1))/2), NUM_COLORS**7))
    # print("total number of permutations: {}".format(math.factorial(NUM_COLORS**7)))
    pattern_list = build_pattern_list()
    # remove unique pattern (and all rotational duplicates) from pattern_list
    for i in range(0, 6):
        p_rot = UNIQUE_PATTERN[i:] + UNIQUE_PATTERN[:i]
        try:
            pattern_list.remove(p_rot)
        except Exception as e:
            pass
    timer_start = datetime.datetime.now()
    #run("0", pattern_list, NUM_ITERATIONS)
    # Fan the search out over worker processes; each gets its own copy of
    # the pattern list so in-place shuffles do not interfere.
    pool = multiprocessing.Pool()
    num_processes = 4
    iterations_per_process = int(NUM_ITERATIONS/num_processes)
    print("running processes: {} / iterations per process: {}".format(num_processes, iterations_per_process))
    results = []
    for name in [str(i) for i in range(0, num_processes)]:
        results.append(pool.apply_async(run, [name, copy.deepcopy(pattern_list), iterations_per_process]))
    for res in results:
        res.get()
    timer_end = datetime.datetime.now()
    diff = timer_end - timer_start
    print("total time: {}s".format(diff.total_seconds()))
    # Append a one-line run summary to the shared report file.
    with open(OUTPUT_REPORT, "a") as f:
        f.write("started: {} | finished {} | iterations: {} | duration: {:10.2f}s\n".format(timer_start, timer_end, int(NUM_ITERATIONS), diff.total_seconds()))
| volzotan/LensLeech | pattern/generate.py | generate.py | py | 26,043 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont.load_default",
... |
25305990197 | import math
import os
import pygame
import random
from pygame.locals import *
import subprocess
directory = os.getcwd()
#window
pygame.init()
WIDTH = 800
HEIGHT = 500
win = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Tic Tac Toe")
#button vars
RADIUS = 20
GAP = 20
letters = []
startx = round((WIDTH - (RADIUS * 2 + GAP) * 13) / 2)
starty = 370
#font
LETTER_FONT = pygame.font.SysFont("comicsans", 30)
WORD_FONT = pygame.font.SysFont("comicsans", 50)
TITLE_FONT = pygame.font.SysFont("comicsans", 60)
#colors
WHITE = (255,255,255)
BLACK = (0, 0, 0,)
GREEN = (0, 255, 0)
#game loop
FPS = 60
clock = pygame.time.Clock()
run = True
#game vars
turn_count = -1
board = [["h", "h", "h"],
["h", "h", "h"],
["h", "h", "h"]]
class Button:
    """A clickable rectangular cell that can display a text label."""
    def __init__(self, x, y, width, height, color, text):
        self.rect = pygame.Rect(x, y, width, height)
        self.color = color
        self.text = text
    def drawButton(self, screen):
        """Draw the button (with hover shading) and its centered label."""
        # Check if the mouse is hovering over the button
        if self.rect.collidepoint(pygame.mouse.get_pos()):
            # NOTE(review): `c + 0` adds nothing before clamping to 200,
            # so WHITE buttons darken to (200, 200, 200) on hover rather
            # than lighten -- presumably a placeholder offset; confirm.
            hover_color = tuple(min(c + 0, 200) for c in self.color)
            pygame.draw.rect(screen, hover_color, self.rect)
        else:
            pygame.draw.rect(screen, self.color, self.rect)
        font = pygame.font.Font(None, 80)
        text = font.render(str(self.text), True, BLACK)
        text_rect = text.get_rect(center=self.rect.center)
        screen.blit(text, text_rect)
    def setText(self, screen, msg):
        """Store *msg* as the label and immediately blit it onto *screen*."""
        self.text = msg
        font = pygame.font.Font(None, 80)
        text = font.render(str(msg), True, BLACK)
        text_rect = text.get_rect(center=self.rect.center)
        screen.blit(text, text_rect)
    def getText(self):
        """Return the current label text."""
        return self.text
#buttons
b_1 = Button(190, 110, 120, 120, WHITE, "")
b_2 = Button(190+120+5, 110, 120, 120, WHITE, "")
b_3 = Button(190+240, 110, 120, 120, WHITE, "")
b_4 = Button(190, 110+120+5, 120, 120, WHITE, "")
b_5 = Button(190+120+5, 110+120+5, 120, 120, WHITE, "")
b_6 = Button(190+240, 110+120+5, 120, 120, WHITE, "")
b_7 = Button(190, 110+240, 120, 120, WHITE, "")
b_8 = Button(190+120+5, 110+240, 120, 120, WHITE, "")
b_9 = Button(190+240, 110+240, 120, 120, WHITE, "")
buttons = [b_1, b_2, b_3, b_4, b_5, b_6, b_7, b_8, b_9]
#example funcs
def start_game():
    """Placeholder hook: announce that the game is starting."""
    message = "Starting the game!"
    print(message)
def quit_game():
    """Placeholder hook: announce quitting and shut down all pygame modules."""
    print("Quitting the game!")
    pygame.quit()
def draw():
    """Redraw the whole frame: title, turn indicator, buttons and grid lines."""
    win.fill(WHITE)
    #draw title
    text = TITLE_FONT.render("TIC TAC TOE", 1, BLACK)
    win.blit(text, (WIDTH/2 - text.get_width()/2, 20))
    #draw player turn
    text = WORD_FONT.render("TURN:", 1, BLACK)
    win.blit(text, (600, 155))
    # NOTE(review): turn_count is incremented before button_clicked, so
    # after a move an odd count shows "Player 1" here -- verify the
    # odd/even mapping matches the marks placed in button_clicked.
    if turn_count == -1:
        text = LETTER_FONT.render("Player 1", 1, BLACK)
    elif turn_count % 2 != 0: #odd
        text = LETTER_FONT.render("Player 1", 1, BLACK)
    else:
        text = LETTER_FONT.render("Player 2", 1, BLACK)
    win.blit(text, (610, 230))
    #draw lines
    pygame.draw.rect(win, BLACK, Rect(220, 94, 375, 5)) #underline
    #draw buttons
    for button in buttons:
        button.drawButton(win)
        if button.text:
            button.setText(win, button.text)
    #draw board grid (two vertical, two horizontal separators)
    pygame.draw.rect(win, BLACK, Rect(310, 110, 5, 120*3))
    pygame.draw.rect(win, BLACK, Rect(310+120, 110, 5, 120*3))
    pygame.draw.rect(win, BLACK, Rect(310-120, 110+120, 120*3, 5))
    pygame.draw.rect(win, BLACK, Rect(310-120, 110+120*2, 120*3, 5))
    pygame.display.update()
def button_clicked(button):
    """Place the current player's mark on the clicked cell.

    Maps the clicked button to its (row, col) position on the 3x3 board
    via its index in `buttons` and -- only when the cell is still empty
    -- writes the mark both to the on-screen button ("X"/"O") and to the
    board matrix ("x"/"o" lowercase). Even `turn_count` values play "X",
    odd values play "O", exactly matching the nine per-button branches
    this consolidated version replaces. Always refreshes the display.
    """
    if button in buttons and not button.text:
        row, col = divmod(buttons.index(button), 3)
        if turn_count % 2 == 0:  # even: "X" moves
            button.setText(win, "X")
            board[row][col] = "x"
        else:
            button.setText(win, "O")
            board[row][col] = "o"
    pygame.display.update()
def display_message(message):
    """Pause, then show *message* centered on a blank screen for 3 seconds.

    NOTE(review): this definition is shadowed by a near-identical
    function of the same name later in the file (1000 ms lead-in there);
    at runtime the later definition wins.
    """
    pygame.time.delay(1200) # lead-in delay (1.2 s)
    win.fill(WHITE) #fill screen with blank white
    text = WORD_FONT.render(message, 1, BLACK) #render param 'message'
    win.blit(text, (WIDTH/2 - text.get_width()/2, HEIGHT/2 - text.get_height()/2)) # blit/put text in middle of screen
    pygame.display.update() #update screen
    pygame.time.delay(3000) #wait 3 seconds
def winCheckX(matrix):
    """Return True if 'x' occupies any full row, column, or diagonal."""
    target = ['x'] * 3
    rows = [matrix[i] for i in range(3)]
    cols = [[matrix[i][j] for i in range(3)] for j in range(3)]
    diagonals = [
        [matrix[i][i] for i in range(3)],        # main diagonal
        [matrix[i][2 - i] for i in range(3)],    # anti-diagonal
    ]
    return any(line == target for line in rows + cols + diagonals)
def winCheckO(matrix):
    """Return True if 'o' occupies any full row, column, or diagonal."""
    lines = [list(row) for row in matrix]                 # three rows
    lines += [list(col) for col in zip(*matrix)]          # three columns
    lines.append([matrix[i][i] for i in range(3)])        # main diagonal
    lines.append([matrix[i][2 - i] for i in range(3)])    # anti-diagonal
    return any(line == ['o', 'o', 'o'] for line in lines)
def checkNotH(matrix):
    """Return True if any row, column, or diagonal is still entirely 'h'.

    'h' marks an empty cell, so a True result means at least one full
    line remains completely unplayed. (Despite the name, this does NOT
    verify that the board contains no 'h' cells.)
    """
    empty_line = ['h', 'h', 'h']
    candidates = [matrix[0], matrix[1], matrix[2]]
    for j in range(3):
        candidates.append([matrix[i][j] for i in range(3)])
    candidates.append([matrix[i][i] for i in range(3)])
    candidates.append([matrix[i][2 - i] for i in range(3)])
    return any(line == empty_line for line in candidates)
def display_message(message):
    """Pause, then show *message* centered on a blank screen for 3 seconds.

    NOTE(review): duplicate of the earlier display_message (1200 ms
    lead-in there); this later definition is the one actually used.
    """
    pygame.time.delay(1000) #wait 1 second
    win.fill(WHITE) #fill screen with blank white
    text = WORD_FONT.render(message, 1, BLACK) #render param 'message'
    win.blit(text, (WIDTH/2 - text.get_width()/2, HEIGHT/2 - text.get_height()/2)) # blit/put text in middle of screen
    pygame.display.update() #update screen
    pygame.time.delay(3000) #wait 3 seconds
# Main event loop: dispatch clicks, redraw, and test for a winner.
while run:
    clock.tick(FPS)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            pos = pygame.mouse.get_pos()
            for button in buttons:
                if button.rect.collidepoint(pos):
                    # Button clicked: advance the turn, then place the mark.
                    turn_count += 1
                    #print(turn_count)
                    button_clicked(button)
                    print(pygame.mouse.get_pos())
    print(board)
    #pygame.draw.rect(win, BLACK, Rect(425, 130, 300, 90)) # top right
    draw()
    if winCheckX(board):
        display_message('Player 1 wins!')
        break
    if winCheckO(board):
        display_message('Player 2 wins!')
        break
pygame.quit()
{
"api_name": "os.getcwd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"li... |
31046654638 | import asyncio
import uuid
from pycaret.datasets import get_data
from sqlalchemy.sql import text
from .app.database import async_engine
from .app.models import (
Base,
Client,
Payment,
)
def split_data_client_payments():
    """Load the pycaret "credit" dataset and split it into two DataFrames.

    The shared "ID" column (from the reset index) links the two frames.
    Payments additionally get a generated "trans_id" UUID column, and
    both frames are renamed to the column names declared on the
    Client / Payment SQLAlchemy models so they can be written directly
    to those tables.

    NOTE: uuid1 embeds host MAC address and timestamp; if that leakage
    matters, uuid4 would be the anonymous alternative.

    Returns (payments_df, clients_df).
    """
    df = get_data("credit").reset_index().rename(columns={"index": "ID"})
    clients_df = df.loc[:, ["ID", "SEX", "EDUCATION", "MARRIAGE", "AGE"]]
    payments_df = df.loc[
        :, ["ID", "LIMIT_BAL", "PAY_1", "BILL_AMT1", "PAY_AMT1", "default"]
    ]
    # one synthetic transaction id per payment row
    transaction_id = payments_df.apply(lambda _: str(uuid.uuid1()), axis=1)
    payments_df.insert(0, "trans_id", transaction_id)
    clients_df.columns = [c.name for c in Client.__table__.columns]
    payments_df.columns = [c.name for c in Payment.__table__.columns]
    return payments_df, clients_df
async def async_main():
    """Recreate the schema, load both tables, and print the row count.

    Runs inside a single transaction (engine.begin): drops and recreates
    all model tables, truncates clients (cascading to payments), then
    appends both DataFrames via the sync to_sql bridge. A second
    connection verifies the load before the engine is disposed.
    """
    payments_df, clients_df = split_data_client_payments()
    async with async_engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
        await conn.run_sync(Base.metadata.create_all)
        await conn.execute(text("TRUNCATE TABLE clients CASCADE"))
        # run_sync bridges pandas' synchronous to_sql onto the async connection
        await conn.run_sync(_write_sql, clients_df, "clients")
        await conn.run_sync(_write_sql, payments_df, "payments")
    async with async_engine.connect() as conn:
        results = await conn.execute(text("SELECT COUNT(*) FROM payments"))
        print(f"Number of rows: {results.fetchall()[0][0]}")
    await async_engine.dispose()
def _write_sql(con, df, stmt):
df.to_sql(stmt, con=con, if_exists="append", index=False)
if __name__ == "__main__":
asyncio.run(async_main())
| ryankarlos/FastAPI-example-ml | src/load_data_into_tables.py | load_data_into_tables.py | py | 1,599 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pycaret.datasets.get_data",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "uuid.uuid1",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "app.models.Client.__table__",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": ... |
31279302042 | import json
import ray
import requests
from fastapi import Body, FastAPI
from typing import List, Dict
from ray import serve
from ray.serve.handle import RayServeDeploymentHandle
from fi.service.lastpx import LastpxService
from fi.service.side import SideService
from fi.service.trddate import TrddateService
from fi.service.volume import VolumeService
from fi.service.yfinance import YfinanceService
from fi.service.marginBalance import MarginBalanceService
from fi.service.marketdata import MarketdataService
from fi.model.post import PostStr, PostItem
app = FastAPI()
@serve.deployment()
@serve.ingress(app)
class Finra:
def __init__(
self,
lastpx_handle: RayServeDeploymentHandle,
side_handle: RayServeDeploymentHandle,
trddate_handle: RayServeDeploymentHandle,
volume_handle: RayServeDeploymentHandle,
yfinance_handle: RayServeDeploymentHandle,
marginBalance_handle: RayServeDeploymentHandle,
marketdataService_handle: RayServeDeploymentHandle,
):
self._lastpx_handle = lastpx_handle
self._side_handle = side_handle
self._trddate_handle = trddate_handle
self._volume_handle = volume_handle
self._yfinance_handle = yfinance_handle
self._marginBalance_handle = marginBalance_handle
self._marketdataService_handle = marketdataService_handle
@app.post("/fi/lastpx")
async def root1(self, body: Dict):
result = await self._lastpx_handle.Lastpx.remote(body)
r = json.dumps(await result)
print(await result)
return r
@app.post("/fi/side")
async def root2(self, body: Dict):
result = await self._side_handle.Side.remote(body)
r = json.dumps(await result)
print(await result)
return r
@app.post("/fi/trddate")
async def root3(self, body: Dict):
result = await self._trddate_handle.Trddate.remote(body)
r = json.dumps(await result)
print(await result)
return r
@app.post("/fi/volume")
async def root4(self, body: Dict):
result = await self._volume_handle.Volume.remote(body)
r = json.dumps(await result)
print(await result)
return r
@app.post("/fi/yfinance")
async def root5(self, body: Dict):
result = await self._yfinance_handle.Yfinance.remote(body)
r = json.dumps(await result)
print(await result)
return r
@app.post("/fi/volume")
async def root6(self, body: Dict):
result = await self._volume_handle.Volume.remote(body)
r = json.dumps(await result)
print(await result)
return r
@app.post("/fi/marginBalance")
async def root7(self, dic: List):
result = await self._marginBalance_handle.MarginBalance.remote(dic)
r = json.dumps(await result)
print(await result)
return r
@app.post("/fi/marketdata")
async def root8(self, dic: Dict):
result = await self._marketdataService_handle.Marketdata.remote(dic)
r = json.dumps(await result)
print(await result)
return r
# @app.post("/so/lastpx")
# async def root(self, body: PostItem = Body(embed=True)):
# result = await self._lastpx_handle.Lastpx.remote(body)
# print(result)
# return {"status ": 200}
#
# @app.post("/so/side")
# async def root(self, body: PostItem = Body(embed=True)):
# result = await self._side_handle.Side.remote(body)
# print(result)
# return {"status ": 200}
#
# @app.post("/so/trddate")
# async def root(self, body: PostItem = Body(embed=True)):
# result = await self._trddate_handle.Trddate.remote(body)
# print(result)
# return {"status ": 200}
#
# @app.post("/so/volume")
# async def root(self, body: PostItem = Body(embed=True)):
# result = await self._volume_handle.Volume.remote(body)
# print(result)
# return {"status ": 200}
#
# @app.post("/so/yfinance")
# async def root(self, req: PostStr):
# result = await self._lastpx_handle.Lastpx.remote(req)
# print(result)
# return {"status ": 200}
@app.post("/fi/test1")
async def root(self, body: PostItem = Body(embed=True)):
results = {"item": body.portfolioType, "aaa": body.portfolio}
return results
# return "hello"+ body.portfolioType
@app.get("/fi/test2")
async def root(self, body: Dict):
    """Debug endpoint: print the request body's values and return a fixed status.

    NOTE(review): a GET endpoint with a required JSON body is unusual — confirm
    clients actually send one. Several handlers in this class share the name
    `root`; FastAPI routes by decorator, so this works, but each def shadows
    the previous attribute.
    """
    print(123)  # debug marker
    print(body.values())
    return {"status ": 200}  # key intentionally kept as-is (contains a trailing space)
# Build one Ray Serve deployment handle per backend service and hand them all
# to the Finra ingress (the FastAPI class above) so its routes can forward
# incoming requests to the right deployment.
lastpx_handle = LastpxService.bind()
side_handle = SideService.bind()
trddate_handle = TrddateService.bind()
volume_handle = VolumeService.bind()
yfinance_handle = YfinanceService.bind()
marginBalance_handle = MarginBalanceService.bind()
marketdataService_handle = MarketdataService.bind()
ingress = Finra.bind(lastpx_handle,side_handle,trddate_handle,volume_handle,yfinance_handle,marginBalance_handle,marketdataService_handle)
| tju-hwh/Yet-Another-Serverless-Benchmark | finra/ray/fi/main.py | main.py | py | 5,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ray.serve.handle.RayServeDeploymentHandle",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "ray.serve.handle.RayServeDeploymentHandle",
"line_number": 28,
"usage_type": "n... |
39955757095 | #!/usr/bin/env python
#This script is to sort ArXiv papers according to your interest using Machine Learning algorithms.
import os
import urllib
import itertools
import feedparser
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
#from sklearn.linear_model import Lasso # ElasticNet
import numpy as np
# For the HTTP Server
from wsgiref.simple_server import make_server
from cgi import parse_qs, escape
# Location to save files
DIR = os.path.expanduser("~/ArXivSorter/")
class TopicSet:
    """One independent Archive Sorter instance for a single topic.

    Holds the ArXiv category filter, the files listing interesting /
    uninteresting paper ids, and the TF-IDF model trained from them.
    """

    def __init__(self, name, categories):
        """Input : name str, the name of this Topic Archive Sorter.
                   categories: tuple of ArXiv category strings from which papers are retrieved."""
        # BUG FIX: method was misspelled `__int__`, so the constructor body never ran.
        self.name = name
        self.category = '%28cat:'+'+OR+cat:'.join(categories)+'%29+AND+'  # %28/%29 are url-encoded ( and )
        # File with ids of Interested papers
        self.InterestedArXivIDfile = os.path.join(DIR, self.name+'_Interested.list')
        # File with ids of Not Interested papers
        self.NotInterestedArXivIDfile = os.path.join(DIR, self.name+'_NotInterested.list')
        self.NoofInt = 0     # number of Interested papers trained on
        self.NoofNotInt = 0  # number of Not Interested papers trained on
        self.vectorizer = TfidfVectorizer(min_df=1)

    def RetrieveArXivbyId(self, paperId):
        """Retrieve papers' title, abstract, authors etc. via ArXiv's API.

        Input: paperId, string of ArXiv paper ids (comma separated for multiple).
        Output: str, the full XML output of the ArXiv API."""
        BasicURL = 'http://export.arxiv.org/api/query?'
        SearchQuery = 'id_list='+paperId
        url = BasicURL+SearchQuery
        print('Retrieving: '+url)
        # BUG FIX: `urllib.urlopen` is Python 2 only; use the `urlopen`
        # imported from urllib.request at the top of the file.
        return urlopen(url).read()

    def RetrieveArXivbyDate(self, StartDate=20140129, EndDate=20140131, MaxResults=50):
        """Retrieve all papers in the date range via ArXiv's API.

        Input : StartDate int, search start date, format YYYYMMDD[HHMM]
                EndDate int, search end date, format YYYYMMDD[HHMM]
                MaxResults int, safe upper bound on papers to retrieve
        Output: str, the full XML output of the ArXiv API."""
        BasicURL = 'http://export.arxiv.org/api/query?'
        Maxcondition = '&max_results={0:d}'.format(MaxResults)
        SearchQuery = 'search_query='
        DateCondition = 'submittedDate:[{0:d}+TO+{1:d}]'.format(StartDate, EndDate)
        SearchQuery = SearchQuery+self.category+DateCondition+Maxcondition
        # Example: http://export.arxiv.org/api/query?search_query=submittedDate:[200901130630+TO+200901131645]&max_results=200
        url = BasicURL+SearchQuery
        print('Retrieving: '+url)
        return urlopen(url).read()  # BUG FIX: was Python 2 `urllib.urlopen`

    def LoadIDsfromFile(self):
        """Load Interested and Not Interested paper id lists from their files."""
        with open(self.InterestedArXivIDfile, 'r') as f:
            self.InterestedIds = [line.rstrip().split()[0] for line in f]
        with open(self.NotInterestedArXivIDfile, 'r') as f:
            self.NotInterestedIds = [line.rstrip().split()[0] for line in f]
        return

    def RetrieveDataToTrain(self, SampleData=None):
        """Retrieve data from ArXiv to train the model.

        Input: SampleData : feedparser output; if given, used as the Typical data."""
        self.Interested_data = feedparser.parse(self.RetrieveArXivbyId(','.join(self.InterestedIds)))
        self.NotInterested_data = feedparser.parse(self.RetrieveArXivbyId(','.join(self.NotInterestedIds)))
        # `or` means any falsy SampleData triggers a fresh download.
        self.Typical_data = SampleData or feedparser.parse(self.RetrieveArXivbyDate(StartDate=20140201, EndDate=20140207, MaxResults=100))

    def TrainOnData(self, SampleData=None):
        """Fit TF-IDF and compute the mean vectors representing Interested
        and Not Interested papers.

        Input: SampleData : feedparser output; if given, used as the Typical data."""
        self.LoadIDsfromFile()
        self.RetrieveDataToTrain(SampleData=SampleData)
        # Generators of "title + summary" text for each corpus.
        Text_interested = (entry.title + entry.summary for entry in self.Interested_data.entries)
        Text_Notinterested = (entry.title + entry.summary for entry in self.NotInterested_data.entries)
        Text_Typical = (entry.title + entry.summary for entry in self.Typical_data.entries)
        # BUG FIX: third argument was the undefined name `Text_2Rank`.
        Data_2train = itertools.chain(Text_interested, Text_Notinterested, Text_Typical)
        Vectors_2train = self.vectorizer.fit_transform(Data_2train)  # fit TF-IDF on all data
        self.NoofInt = len(self.Interested_data.entries)
        self.NoofNotInt = len(self.NotInterested_data.entries)
        print('Trained with a sample Vectors of size '+str(Vectors_2train.shape))
        print('Sample size of Interested papers {0:d}'.format(self.NoofInt))
        print('Sample size of Not Interested papers {0:d}'.format(self.NoofNotInt))
        # Mean of the Interested and NotInterested vectors, later used for cosine similarity.
        self.InterestedVector = Vectors_2train[:self.NoofInt, :].mean(axis=0)
        self.NotInterestedVector = Vectors_2train[self.NoofInt:self.NoofInt+self.NoofNotInt, :].mean(axis=0)

    def ReturnRank(self, Data_2Rank):
        """Return rank indexes [-1, +1] based on cosine similarity with the
        Interested and NotInterested vectors.

        Input: Data_2Rank, feedparser output of the data to rank.
        Output: ranks in [-1, +1]; -1 is the highest rank, +1 the lowest."""
        Text_2Rank = (entry.title + entry.summary for entry in Data_2Rank.entries)
        Vectors_2Rank = self.vectorizer.transform(Text_2Rank)
        InterestedCosineRank = cosine_similarity(self.InterestedVector, Vectors_2Rank)[0]
        NotInterestedCosineRank = cosine_similarity(self.NotInterestedVector, Vectors_2Rank)[0]
        return NotInterestedCosineRank - InterestedCosineRank

    def RetrieveAndRank(self, StartDate=20140129, EndDate=20140131, MaxResults=200):
        """Return papers in the input date range plus the rank to sort them.

        Input : StartDate int, format YYYYMMDD[HHMM]
                EndDate int, format YYYYMMDD[HHMM]
                MaxResults int, safe upper bound on papers to retrieve
        Output: tuple(full retrieved data as feedparser output, list of ranks)."""
        # BUG FIX: EndDate was mistakenly passed StartDate, collapsing the range.
        Data_2Rank = feedparser.parse(self.RetrieveArXivbyDate(StartDate=StartDate, EndDate=EndDate, MaxResults=MaxResults))
        return Data_2Rank, self.ReturnRank(Data_2Rank)
# Full-page template for the ArXiv Sorter UI. The {Named} placeholders are
# filled from FormsDict; the two %s slots near the end are filled by
# application() below.
# NOTE(review): the second <form> section after the first </html> (with the
# duplicated </body></html>) looks like leftover tutorial markup — confirm it
# is intended before cleaning the HTML up.
Mainhtml = """
<html>
<title>ArXive Sorter</title>
<body bgcolor="#ffffff">
</head>
<a name="top"></a>
<center>
<h1>ArXive Sorter</h1> <p>
<h3> <i> Your intelegent assistent to sort ArXive Papers to your taste </i> </h3> <br>
</center>
<a href="#Forms">[Go down to inputs]</a>
<!---All the Rank based Sorted Papers Starts Here --->
<p>
{SortedPapers}
</p>
<a href="#top">[Back to top]</a>
<hr size=5>
<!---All the Rank based Sorted Papers Ends Here --->
<!---Different input forms below --->
<a name="Forms"></a>
<p>
{ToChooseTopic}
</p>
<p>
{ToChooseStartEndDate}
</p>
<p>
{ToCreateNewTopic}
</p>
<p>
{ToEditTopic}
</p>
<p>
{ToTrain}
</p>
<a href="#top">[Back to top]</a> <p>
Github Code Repository : <a href="http://indiajoe.github.io/ArXivSorter/">ArXivSorter</a>
<!---End of the document--->
</body>
</html>
<form method="post" action="parsing_post.wsgi">
<p>
Age: <input type="text" name="age" value="99">
</p>
<p>
Hobbies:
<input name="hobbies" type="checkbox" value="software"> Software
<input name="hobbies" type="checkbox" value="tunning"> Auto Tunning
</p>
<p>
<input type="submit" value="Submit">
</p>
</form>
<p>
Age: %s<br>
Hobbies: %s
</p>
</body>
</html>
"""
# Template for html forms, which has to be inserted in the Main html page.
htmlform = """
<form method="post" action="parsing_post.wsgi">
<p>
{0}
</p>
</form>
"""

# Dictionary of form snippets used to format the Main html page; the keys
# match the {Named} placeholders inside Mainhtml.
# BUG FIX: the keys were bare undefined names (FormsDict[SortedPapers], ...)
# which raised NameError at import time; they must be strings.
FormsDict = dict()
FormsDict['SortedPapers'] = """ No Papers Loaded Yet!!. \n To Load Papers, Choose settings below.."""
FormsDict['ToChooseTopic'] = """ """
FormsDict['ToChooseStartEndDate'] = htmlform.format(""" Start Date: <input type="text" name="StartDate" placeholder="YYYYMMDD" > End Date: <input type="text" name="EndDate" placeholder="YYYYMMDD" >
<input type="submit" value="Submit">
""")
FormsDict['ToCreateNewTopic'] = """ """
FormsDict['ToEditTopic'] = """ """
FormsDict['ToTrain'] = """ """
def ReturnhtmlForInput(inpdata):
    """Return the html page to display based on the user input from the page.

    Input: inpdata, the output of parse_qs.
    Output: html string to display.

    NOTE(review): unimplemented stub — the body is only this docstring, so the
    function currently returns None; application() below overwrites the value
    before it is used.
    """
def application(environ, start_response):
    """WSGI entry point; loosely based on http://webpython.codepoint.net/wsgi_tutorial"""
    # the environment variable CONTENT_LENGTH may be empty or missing
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:
        request_body_size = 0
    # When the method is POST the query string is sent in the HTTP request
    # body, which the WSGI server exposes as the file-like wsgi.input.
    request_body = environ['wsgi.input'].read(request_body_size)
    # BUG FIX: wsgi.input yields bytes in Python 3; decode before parsing so
    # the parsed dict has str keys.
    inpdata = parse_qs(request_body.decode('utf-8'))
    # NOTE(review): this value is immediately overwritten below — the
    # ReturnhtmlForInput stub is not wired up yet.
    response_body = ReturnhtmlForInput(inpdata)
    # BUG FIX: these lookups used the undefined name `d`.
    age = inpdata.get('age', [''])[0]  # first submitted age value
    hobbies = inpdata.get('hobbies', [])  # list of submitted hobbies
    # Always escape user input to avoid script injection
    age = escape(age)
    hobbies = [escape(hobby) for hobby in hobbies]
    # BUG FIX: `html` was undefined; Mainhtml carries the two %s slots.
    response_body = Mainhtml % (age or 'Empty',
                                ', '.join(hobbies or ['No Hobbies']))
    # BUG FIX: PEP 3333 requires the response body to be bytes in Python 3.
    response_bytes = response_body.encode('utf-8')
    status = '200 OK'
    response_headers = [('Content-Type', 'text/html'),
                        ('Content-Length', str(len(response_bytes)))]
    start_response(status, response_headers)
    return [response_bytes]
# Start a development WSGI server on localhost:8051.
httpd = make_server('localhost', 8051, application)
# NOTE(review): serve_forever() blocks, so everything below this line is
# unreachable. The tail also calls RetrieveArXivbyId / RetrieveArXivbyDate as
# free functions, but they are now TopicSet methods — it would raise NameError
# if it ever ran. This looks like leftover pre-refactor script code.
httpd.serve_forever()
#-------------------------- End of the Code------ indiajoe@gmail.com
InterestedArXivIDfile = 'Interested.list'
NotInterestedArXivIDfile = 'NotInterested.list'
with open(InterestedArXivIDfile,'r') as f :
    InterestedIds = [line.rstrip() for line in f]
with open(NotInterestedArXivIDfile,'r') as f :
    NotInterestedIds = [line.rstrip() for line in f]
Interested_data = feedparser.parse(RetrieveArXivbyId(','.join(InterestedIds)))
NotInterested_data = feedparser.parse(RetrieveArXivbyId(','.join(NotInterestedIds)))
Data_2Rank = feedparser.parse(RetrieveArXivbyDate(StartDate=20140205,EndDate=20140207,MaxResults=100))
#Rank_2train=np.array([0.0]*len(InterestedIds) + [1.0]*len(NotInterestedIds))
# Generators of "title + summary" text for each corpus.
Text_interested = (entry.title + entry.summary for entry in Interested_data.entries )
Text_Notinterested = (entry.title + entry.summary for entry in NotInterested_data.entries )
Text_2Rank = (entry.title + entry.summary for entry in Data_2Rank.entries)
Data_2train = itertools.chain(Text_interested,Text_Notinterested,Text_2Rank)
vectorizer = TfidfVectorizer(min_df=1)
Vectors_2train = vectorizer.fit_transform(Data_2train)
print(Vectors_2train.shape)
NoOfIntIds = len(InterestedIds)
NoOfNotIntIds = len(NotInterestedIds)
# Mean TF-IDF vectors representing each labelled corpus.
InterestedVector = Vectors_2train[:NoOfIntIds,:].mean(axis=0)
NotInterestedVector = Vectors_2train[NoOfIntIds:NoOfIntIds+NoOfNotIntIds,:].mean(axis=0)
#Model=ElasticNet()
#Model=Lasso()
#Model.fit(Vectors_2train,Rank_2train)
Text_2Rank = (entry.title + entry.summary for entry in Data_2Rank.entries)
Vectors_2Rank = vectorizer.transform(Text_2Rank)
InterestedCosineRank = cosine_similarity(InterestedVector,Vectors_2Rank)[0]
NotInterestedCosineRank = cosine_similarity(NotInterestedVector,Vectors_2Rank)[0]
# Lower FinalRank means closer to the Interested corpus.
FinalRank = NotInterestedCosineRank - InterestedCosineRank
#PredictedRank= Model.predict(Vector_2Rank)
SortOrder = np.argsort(FinalRank)
print(SortOrder)
for i in SortOrder:
    print('*'*30)
    print(FinalRank[i])
    print(Data_2Rank.entries[i].title)
    print(Data_2Rank.entries[i].authors)
#    print(Data_2Rank.entries[i].summary)
    print(Data_2Rank.entries[i].id.split('/abs/')[-1])
# for entry in data.entries:
# print 'arxiv-id: %s' % entry.id.split('/abs/')[-1]
# print 'Title: %s' % entry.title
# # feedparser v4.1 only grabs the first author
# print 'First Author: %s' % entry.authors
#titles=(entry.title for entry in data.entries)
#vectorizer = TfidfVectorizer(min_df=1)
#Vectors=vectorizer.fit_transform(titles)
#print(Vectors.toarray())
| indiajoe/ArXivSorter | ArXivSorter.py | ArXivSorter.py | py | 13,331 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.expanduser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
11263854107 | from dataclasses import dataclass
from enum import Enum
from typing import Optional
@dataclass
class Objective:
    """An investment objective (savings goal) belonging to a user."""
    id: Optional[int]           # database key; presumably None before persistence — TODO confirm
    user_id: int                # owner of the objective
    name: str
    initial_date: str           # NOTE(review): dates and money amounts are kept as strings here;
    final_date: str             # the expected format/currency is not visible in this file — verify against callers
    initial_investment: str
    recurring_investment: str
    goal_value: str
@dataclass
class User:
    """An application user."""
    user_id: Optional[str]  # presumably None until an id is assigned — TODO confirm
    name: str
    email: str
class InvestmentClass(Enum):
    """Asset-class buckets used by Investment.classification."""
    UNKNOWN = 0
    VARIABLE_INCOME = 1
    POST_FIXED = 2
    MULTI_MARKET = 3
    GLOBAL = 4
    INFLATION = 5
@dataclass
class Investment:
    """A single investment position held at a bank."""
    identity: str                    # identifier of the position
    bankId: str                      # identifier of the holding bank
    description: str
    type: str                        # NOTE(review): free-form product type string — allowed values not visible here
    classification: InvestmentClass  # asset-class bucket (see InvestmentClass)
    value: float
    dueDate: Optional[str]           # None for products without a maturity date
    profitability: Optional[float]
    risk: int                        # NOTE(review): risk scale/range not visible in this file — confirm
    acquisitionDate: str
@dataclass
class Transaction:
    """A single account transaction."""
    amount: float
    description: str
    date: str  # NOTE(review): date kept as a string — format not visible in this file
@dataclass
class UserTransactions:
    """A user's transaction history together with their suitability score."""
    user_id: str
    suitability: float               # NOTE(review): scale/range not visible in this file — confirm
    transactions: list[Transaction]  # builtin-generic syntax requires Python 3.9+
| brunotsantos1997/robson-api | app/data/model.py | model.py | py | 964 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "dataclasses.datac... |
35217612682 | from enum import IntEnum
import requests
from urllib.request import urlopen
import urllib
from selenium import webdriver
from bs4 import BeautifulSoup
import http.client
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
import cfscrape
import json
import string
import re
import time
import math
import numpy as np
http.client._MAXHEADERS = 1000
def urllib_download(IMAGE_URL, pName):
    """Download IMAGE_URL into '<pName>.jpg' in the working directory.

    Path separators are stripped from pName so it stays a plain file name.
    Failures are reported (printed) but never raised, keeping the original
    best-effort behaviour.
    """
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        urllib.request.install_opener(opener)
        # strip path separators so pName cannot become a nested path
        safe_name = pName.replace("/", "").replace("\\", "")
        urllib.request.urlretrieve(IMAGE_URL, safe_name + '.jpg')
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit.
        print('no')
def getNodeText(node):
    """Return the stripped text of a BeautifulSoup node, or "" for None."""
    return "" if node is None else node.get_text().strip()
def getHtmlFromUrl(url, type="get", para={}):
    """Fetch `url` through cfscrape (Cloudflare bypass) and return a parsed
    BeautifulSoup document.

    NOTE(review): the `type` and `para` parameters are accepted but never used;
    the hard-coded cookie below is session-specific and has likely expired —
    confirm whether it is still required by the target site.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36",
        "cookie":"_ga=GA1.2.1846208171.1605273760; href=https%3A%2F%2Fwww.sinobiological.com%2Fresearch%2Ftargeted-therapy; accessId=5aff5fb0-84db-11e8-a3b3-d368cce40a8e; _gcl_au=1.1.1660157260.1645016298; Hm_lvt_2d911fad88dfe99ff8bbf610824f3e67=1645016298; sbb=%252be43ohTbVTr09K%252bxQlr1%252bK0onQvF%252bMIXgZM%252bveGXMHU%253dXTaJopSyq01ihC4cD5jOfAa8AEgsWX792EAllASK%252bEKohz0p3JxSEJFavoBnvUmw4fhBpwMcWGZ2Qy%252bRRF2U%252bMSxcQdMfdwOcT%252bR%252bo7qyEU%252br8SBQuGE8GJWgDFeSTZ4QS0HvJFVazETAoyuKMwGHYRoD68%252f7qno5Bg%252bEH9sSXM4upMLtz%252f4IdNkjX6GD0JYHbiUh%252blGTwi25Iz3IKocTDD58DE1yYiY3DxeifN7Qz6OxtXX21lrBpnvgDu9ANN%252f7TTxWWMmOIjxVG772o%252bYGkE9AMxcU5O4cIrT9cubm6dAdgw6n%252fQRZpTVxNv2TGHdHZblPNcfu4dTWVsL3aqaag%253d%253d; _gid=GA1.2.832211649.1645016298; _ce.s=v11.rlc~1645016301520; pageViewNum=13; Hm_lpvt_2d911fad88dfe99ff8bbf610824f3e67=1645017042; Currency=RMB; LocationCode=CN"
    }
    scraper = cfscrape.create_scraper()  # handles Cloudflare challenge pages
    html_code = scraper.get(url,headers=headers).text
    return BeautifulSoup(html_code, "html.parser",from_encoding="utf-8")
def requestJson(url):
    """POST a fixed form payload to `url` and return a parsed BeautifulSoup
    document.

    NOTE(review): despite the name, this returns parsed HTML, not JSON; the
    hard-coded cookie is session-specific and has likely expired — confirm.
    """
    r = requests.post(url,data={"input":"atcc"}, headers={
        'Content-Type': 'application/x-www-form-urlencoded',
        'cookie':'visid_incap_2255650=4oBBaRPnQfCVoYEiTmjTq/NVAWEAAAAAQUIPAAAAAAD69PQHUoB0KplKq7/j0+gH; nlbi_2255650=CJKhHYlMm17tpKyoBzOViAAAAACDEjp3gL6bj6YL8j9XE0d/; incap_ses_893_2255650=m1tJIuDRUEp3FE/5GpNkDPRVAWEAAAAAM2KkDpvtARtZral+cMXSVw==; _gcl_au=1.1.76703404.1627477493; _gid=GA1.2.730047202.1627477493; BCSessionID=83af10b8-9488-4b7b-a3b1-3640f178dca2; categoryView=grid; _ga_S46FST9X1M=GS1.1.1627477492.1.1.1627478562.0; _ga=GA1.2.31731397.1627477493; _gat_UA-139934-1=1; _uetsid=69fc2d30efa411eb8818eb045f8760e5; _uetvid=69fc3a70efa411ebba3a23c153f6e477; .Nop.Customer=d664d529-d14a-44b1-86b3-cbf5373277b4',
        "User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36"
    })
    return BeautifulSoup(r.text, "html.parser",from_encoding="utf-8")
def getRenderdHtmlFromUrl(url):
    """Render `url` in headless Chrome (JavaScript executed) and return the
    resulting DOM as a BeautifulSoup document.

    NOTE(review): the `chrome_options=` keyword was renamed `options=` in
    Selenium 4 — confirm the installed Selenium version; the browser is never
    quit, so each call leaks a Chrome process.
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument("window-size=1024,768")
    chrome_options.add_argument("--no-sandbox")
    browser = webdriver.Chrome(chrome_options=chrome_options)
    browser.get(url)
    return BeautifulSoup(browser.page_source, "html.parser")
def writeExcel(workSheet, headers, rowIndex, info):
    """Write one row of `info` into `workSheet` at `rowIndex`, one column per
    entry of `headers` (columns are 1-based, in header order).

    Missing keys produce empty cells; a failing cell is reported but does not
    abort the row.
    """
    for cellIndex, head in enumerate(headers, start=1):
        try:
            if head in info:
                # strip characters openpyxl refuses to store
                content = ILLEGAL_CHARACTERS_RE.sub(r'', info[head])
                workSheet.cell(rowIndex, cellIndex).value = content.strip()
            else:
                workSheet.cell(rowIndex, cellIndex).value = ""
        except Exception:
            # BUG FIX: the old bare `except:` sat around the column-index
            # increment too, so one bad cell shifted every later value one
            # column left; enumerate keeps the columns aligned.
            print(rowIndex)
def getProductInfo(url, type):
    """Scrape one product page and append its fields to the global `products`
    list.

    NOTE(review): `type` shadows the builtin and is stored as the Category;
    the parsing below is tied to the site's current markup (class names like
    j-prod-info_title) — any layout change silently drops fields.
    """
    print(str(len(products))+":"+url)  # progress: how many products so far
    sope = getHtmlFromUrl(url)
    # turn <br> into newlines so get_text() keeps line structure
    for br in sope.find_all("br"):
        br.replaceWith("\n")
    pInfo = {"link":url,"Category": type}
    pName = sope.find("h1", attrs={"class":"j-prod-info_title"})
    pInfo["Product Name"] = getNodeText(pName)
    # spec table: each <dt> label maps to the following <dd> value
    dts = sope.find_all("dt")
    for dt in dts:
        title = getNodeText(dt)
        value = getNodeText(dt.findNextSibling("dd"))
        pInfo[title] = value
    # description block: "key: value" text fragments
    descs = sope.find("div", attrs={"class":"j-prod-info_desc"}).children
    for desc in descs:
        if desc.name != "br":
            descValues = desc.split(':')
            if len(descValues) == 2:
                pInfo[descValues[0].replace('\n','').strip()] = descValues[1]
    # Introduction / Function / Application sections live in <h2>/<p> headings
    h2s = sope.find_all("h2")
    ps = sope.find_all("p")
    h2s.extend(ps)
    for strong in h2s:
        title = getNodeText(strong.find("strong"))
        value = getNodeText(strong)
        if title.find("Introduction") == 0:
            # heading-only node: the body text is in the next <p>
            if title == value:
                pInfo["Introduction"] = getNodeText(strong.findNextSibling("p"))
            else:
                pInfo["Introduction"] = getNodeText(strong)
            pInfo["Introduction"] = pInfo["Introduction"].replace(title, "")
        if title.find("Function") == 0:
            if title == value:
                pInfo["Function"] = getNodeText(strong.findNextSibling("p"))
            else:
                pInfo["Function"] = getNodeText(strong)
            pInfo["Function"] = pInfo["Function"].replace(title, "")
        if title.find("Application") == 0:
            if title == value:
                pInfo["Application"] = getNodeText(strong.findNextSibling("p"))
            else:
                pInfo["Application"] = getNodeText(strong)
            pInfo["Application"] = pInfo["Application"].replace(title, "")
    products.append(pInfo.copy());
    print(pInfo)  # debug dump of the scraped record
def getProductList(url, type):
    """Scrape a listing page and collect the info of every linked product."""
    soup = getHtmlFromUrl(url)
    for item in soup.find_all("div", attrs={"class": "j-prod-list-itemdivs"}):
        link = item.find("a")
        getProductInfo(link["href"], type)
# Global accumulator filled by getProductInfo().
products = []
# getProductList('https://www.herealth.com/page1/organic-extract.html','Organic Product')
# getProductInfo("https://www.herealth.com/products/organic-reishi-mushroom-extract.html", "ttt")
for pIndex in range(1, 3):
    getProductList('https://www.herealth.com/page'+str(pIndex)+'/organic-extract.html','Organic Product')
# NOTE(review): in every loop below pIndex is never interpolated into the URL,
# so the same listing page is fetched repeatedly — probably missing the
# 'page'+str(pIndex) segment used above. Confirm against the site structure.
for pIndex in range(1, 9):
    getProductList('https://www.herealth.com/plant-extract.html','Plant Extract')
for pIndex in range(1, 2):
    getProductList('https://www.herealth.com/ginseng-product.html','Ginseng Family')
for pIndex in range(1, 2):
    getProductList('https://www.herealth.com/mushroom-product.html','Mushroom Family')
for pIndex in range(1, 3):
    getProductList('https://www.herealth.com/fruit-powder.html','Fruit Powder')
for pIndex in range(1, 2):
    getProductList('https://www.herealth.com/protein-family.html','Protein Family')
for pIndex in range(1, 2):
    getProductList('https://www.herealth.com/spices-powder.html','Spices Powder')
# Dump everything scraped into a spreadsheet: header row, then one row per product.
excelFileName="herealth.xlsx"
wb = Workbook()
workSheet = wb.active
headers=[
    'link','Category','Product Name','Introduction',
    'Function','Application','Model NO.','Delivery:','Minimum order quantity:','Supply Ability:','Country of Origin:'
    ,'Stock Time:','Product Code','Specification'
    ,'Assay Method','Botanical Source','Botanical Part Used','Character','Certification','Process Flow'
]
for index,head in enumerate(headers):
    workSheet.cell(1, index+1).value = head.strip()
for index,p in enumerate(products):
    writeExcel(workSheet, headers, index + 2, p)
print("flish")  # (sic) completion marker
wb.save(excelFileName)
{
"api_name": "http.client.client",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "urllib.request.build_opener",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "urlli... |
40656824900 | # python train_particleTest.py -gpu 2 -ep 20 -bs 128 -vSize 22 -vm 10 -zdim 30 -hdim 64 -enc plain -dec plain -log log_particleTest -name Plain_Plain_bs128_z30h64_gs8_gm22 MDSets/2560_smallGrid/
import tensorflow as tf
import numpy as np
import scipy
import time
import math
import argparse
import random
import sys
import os
from termcolor import colored, cprint
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
sys.path.append(os.path.join(ROOT_DIR, 'data_prep'))
sys.path.append(os.path.join(ROOT_DIR, '../'))
import model_fc_upconv as model_net
# import model_graph as model
# from model_graph import model_particles as model_net
# import dataLoad_particleTest as dataLoad # Legacy method, strongly disagree with i.i.d. distribution among batch(epoch)es.
import dataLoad_graph as dataLoad # New method, shuffle & mixed randomly
from time import gmtime, strftime
import progressbar
from tensorflow.python import debug as tf_debug
from tensorflow.python.client import timeline
from tensorflow.contrib.tensorboard.plugins import projector
# Command-line interface for the particle-simulation trainer. Note the
# store_const tricks: -l2 swaps the loss fn (tf.abs -> tf.square), -maxpool
# swaps the pooling op, and -fp16 swaps the dtype constant.
parser = argparse.ArgumentParser(description="Run the NN for particle simulation")
parser.add_argument('datapath')
parser.add_argument('-gpu', '--cuda-gpus')
parser.add_argument('-ep', '--epochs', type = int, default = 20)
parser.add_argument('-bs', '--batch-size', type = int, default = 16)
parser.add_argument('-vLen', '--voxel-length', type = int, default = 96, help = "Size of voxel (0 to use voxel length stored in file)")
parser.add_argument('-vSize', '--voxel-size', type = int, default = 2560, help = "Max amount of particles in a voxel")
parser.add_argument('-vm', '--velocity-multiplier', type = float, default = 1.0, help = "Multiplies the velocity (input[..., 3:]) by this factor")
parser.add_argument('-norm', '--normalize', type = float, default = 1.0, help = "stddev of input data")
parser.add_argument('-zdim', '--latent-dim', type = int, default = 512, help = "Length of the latent vector")
parser.add_argument('-hdim', '--hidden-dim', type = int, default = 64, help = "Length of the hidden vector inside network")
parser.add_argument('-cdim', '--cluster-dim', type = int, default = 128, help = "How many neighbors should be considered in the graph network")
parser.add_argument('-ccnt', '--cluster-count', type = int, default = 256, help = "How many neighbors should be considered in the graph network")
parser.add_argument('-odim', '--output-dim', type = int, default = 6, help = "What kind of data should we output?")
parser.add_argument('-knnk', '--nearest-neighbor', type = int, default = 16, help = "How many neighbors should be considered in the graph network")
parser.add_argument('-loop', '--loop-sim', type = int, default = 5, help = "Loop simulation sim count")
parser.add_argument('-lr', '--learning-rate', type = float, default = 0.0003, help = "learning rate")
parser.add_argument('-beta1', '--beta1', type = float, default = 0.9, help = "beta1")
parser.add_argument('-beta2', '--beta2', type = float, default = 0.999, help = "beta2")
parser.add_argument('-l2', '--l2-loss', dest = 'loss_func', action='store_const', default = tf.abs, const = tf.square, help = "use L2 Loss")
parser.add_argument('-maxpool', '--maxpool', dest = 'combine_method', action='store_const', default = tf.reduce_mean, const = tf.reduce_max, help = "use Max pooling instead of sum up for permutation invariance")
parser.add_argument('-adam', '--adam', dest = 'adam', action='store_const', default = False, const = True, help = "Use Adam optimizer")
parser.add_argument('-fp16', '--fp16', dest = 'dtype', action='store_const', default = tf.float32, const = tf.float16, help = "Use FP16 instead of FP32")
parser.add_argument('-nloop', '--no-loop', dest = 'doloop', action='store_const', default = True, const = False, help = "Don't loop simulation regularization")
parser.add_argument('-nsim', '--no-sim', dest = 'dosim', action='store_const', default = True, const = False, help = "Don't do Simulation")
parser.add_argument('-log', '--log', type = str, default = "logs", help = "Path to log dir")
parser.add_argument('-name', '--name', type = str, default = "NoName", help = "Name to show on tensor board")
parser.add_argument('-preview', '--previewName', type = str, default = "unnamed", help = "Name for save preview point clouds")
parser.add_argument('-save', '--save', type = str, default = "model", help = "Path to store trained model")
parser.add_argument('-load', '--load', type = str, default = "None", help = "File to load to continue training")
parser.add_argument('-debug', '--debug', dest = "enable_debug", action = 'store_const', default = False, const = True, help = "Enable debugging")
parser.add_argument('-prof', '--profile', dest = "profile", action = 'store_const', default = False, const = True, help = "Enable profiling (at step 10)")
# parser.add_argument('-prof', '--profile', type = str, default = "None", help = "Path to store profiling timeline (at step 100)")
args = parser.parse_args()
def write_models(array, meta, dirc, name):
    """Dump an (N, C) point array as an ASCII point cloud at dirc/name.

    Each output row holds the C float columns of one point; if `meta` is
    given, one extra integer group-label column is appended per entry of
    meta (points are split into meta[mi] equal-sized consecutive groups).
    """
    # Race-free replacement for the old exists()+makedirs() pair.
    os.makedirs(dirc, exist_ok=True)
    with open(os.path.join(dirc, name), 'w') as model_file:
        for pi in range(array.shape[0]):
            for ci in range(array.shape[1]):
                model_file.write('%f ' % array[pi, ci])
            if meta is not None:
                for mi in range(len(meta)):
                    pCount = array.shape[0] // meta[mi]  # points per group
                    model_file.write('%d ' % (pi // pCount))
            model_file.write('\n')
# Configure the data loader from the CLI arguments.
dataLoad.maxParticlesPerGrid = args.voxel_size
if args.voxel_length == 0:
    dataLoad.overrideGrid = False
else:
    dataLoad.overrideGrid = True
    dataLoad.overrideGridSize = args.voxel_length
# Auto-generate a run name encoding the hyper-parameters when none was given.
if args.name == "NoName":
    args.name = "[NPY][NoCard][1st2ndmomentEdges(edgeMask,[u;v;edg])][NoPosInVertFeature] E(%s)-D(%s)-%d^3(%d)g%dh%dz-bs%dlr%f-%s" % ("graph", "graph", args.voxel_length, args.voxel_size, args.hidden_dim, args.latent_dim, args.batch_size, args.learning_rate, 'Adam' if args.adam else 'mSGD')
if args.previewName == 'unnamed':
    args.previewName = args.name
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_gpus
logPath = os.path.join(args.log, args.name + "(" + strftime("%Y-%m-%d %H-%Mm-%Ss", gmtime()) + ")/")
# Create the model (TF1 static graph).
_, _, normalize = dataLoad.get_fileNames(args.datapath)  # dataset mean/std for normalization
oD = args.output_dim
bs = args.batch_size
gN = args.voxel_size
ph_X = tf.placeholder(tf.float32, [bs, gN, 4])  # input particles: [batch, particle, xyz+w]
# Normalize the first oD channels with the dataset statistics.
normalized_X = (ph_X[:, :, 0:oD] - tf.broadcast_to(normalize['mean'], [bs, gN, oD])) / tf.broadcast_to(normalize['std'], [bs, gN, oD])
# Shared weights between the train and validation graphs via variable reuse.
with tf.variable_scope('net', reuse = False):
    train_rec, train_ep = model_net.get_model(ph_X, True, 0.98)
with tf.variable_scope('net', reuse = True):
    val_rec, val_ep = model_net.get_model(ph_X, False, 0.98)
train_loss, train_ep = model_net.get_loss(train_rec, normalized_X, train_ep)
val_loss, val_ep = model_net.get_loss(val_rec, normalized_X, val_ep)
optimizer = tf.train.AdamOptimizer(learning_rate = args.learning_rate, beta1 = args.beta1, beta2 = args.beta2, epsilon=1e-8)
train_op = optimizer.minimize(train_loss)
# De-normalize the validation reconstruction back to data space for previews.
val_rec_out = val_rec * tf.broadcast_to(normalize['std'], [bs, gN, oD]) + tf.broadcast_to(normalize['mean'], [bs, gN, oD])
# Summary the variables
tras = tf.summary.scalar('Training Loss', train_loss)
vals = tf.summary.scalar('Validation Loss', val_loss)
merged_train = tf.summary.merge([tras])
merged_val = tf.summary.merge([vals])
# Create session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grab GPU memory on demand, not all at once
sess = tf.Session(config = config)
if args.enable_debug:
    sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
train_writer = tf.summary.FileWriter(logPath + '/train', sess.graph)
val_writer = tf.summary.FileWriter(logPath + '/validation', sess.graph)
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
save_path = "savedModels/" + args.name + "/"
if not os.path.exists(save_path):
    os.makedirs(save_path)
# Save & Load: "auto" resumes from the newest checkpoint in save_path.
saver = tf.train.Saver()
if args.load == "auto" or args.load == "Auto":
    latest_ckpt = tf.train.latest_checkpoint(save_path)
    if latest_ckpt is not None:
        saver.restore(sess, latest_ckpt)
        print("Check point loaded: %s" % latest_ckpt)
elif args.load != "None":
    saver.restore(sess, args.load)
# Training driver: outer loop over epochs, inner loop over batches; one
# validation batch is evaluated after every training step.
batch_idx_train = 0
batch_idx_test = 0
epoch_idx = 0
iteration = 0
epCount = dataLoad.fileCount(args.datapath)
stepFactor = 9
epochs = dataLoad.gen_epochs(args.epochs, args.datapath, args.batch_size, args.velocity_multiplier, True, args.output_dim)
while True:
    batch_train, batch_validate = next(epochs, [None, None])
    epoch_idx += 1
    if batch_train == None:  # NOTE(review): `is None` would be the safer comparison
        break
    print(colored("Epoch %03d" % (epoch_idx), 'yellow'))
    # Training loop
    while True:
        _x, _x_size = next(batch_train, [None, None])
        if _x == None:  # generator exhausted -> epoch done
            break
        if batch_idx_train == 10 and args.profile:
            raise NotImplementedError  # profiling path was never implemented
        else:
            feed_dict = { ph_X: _x[0] }
            _, n_loss, summary = sess.run([train_op, train_loss, merged_train], feed_dict = feed_dict)
            train_writer.add_summary(summary, batch_idx_train)
        batch_idx_train += 1
        print(colored("Ep %04d" % epoch_idx, 'yellow') + ' - ' + colored("It %08d" % batch_idx_train, 'magenta') + ' - ', end = '')
        print(colored("Train =%7.4f" % (n_loss), 'green'), end = ' ')
        # One validation batch per training step.
        _vx, _vx_size = next(batch_validate, [None, None])
        feed_dict = { ph_X: _vx[0] }
        if batch_idx_test % 100 == 0:
            # Every 100 validation steps also dump a preview point cloud
            # (reconstruction + ground truth) for visual inspection.
            n_loss, summary, _rec = sess.run([val_loss, merged_val, val_rec_out[0, :, :]], feed_dict = feed_dict)
            _gt = _vx[0][0, :, 0:3]
            val_writer.add_summary(summary, batch_idx_test)
            write_models(_rec, None, './previews/%s' % args.previewName, 'validation-%d-rec.asc' % batch_idx_test)
            write_models(_gt, None, './previews/%s' % args.previewName, 'validation-%d-gt.asc' % batch_idx_test)
        else:
            n_loss, summary = sess.run([val_loss, merged_val], feed_dict = feed_dict)
            val_writer.add_summary(summary, batch_idx_test)
        batch_idx_test += 1
        print(colored("(val =%7.4f)" % n_loss, 'blue'))
        # Periodic checkpoint roughly every 16000 samples.
        if batch_idx_train % (16000 // args.batch_size) == 0:
            sav = saver.save(sess, save_path + args.save + ".ckpt", global_step = batch_idx_train)
            print("Checkpoint saved in %s" % (sav))
# Save the network
if(args.save != "None"):
    save_path = saver.save(sess, "savedModels/" + args.save + ".ckpt")
    print("Model saved in %s" % (save_path))
| betairylia/NNParticles | Comparision/train_betairya.py | train_betairya.py | py | 10,993 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"l... |
22682169641 | import sys
import time
import numpy as np
import cv2
import blazeface_utils as but
# import original modules
sys.path.append('../../util')
from utils import get_base_parser, update_parser, get_savepath # noqa: E402
from model_utils import check_and_download_models # noqa: E402
from image_utils import load_image # noqa: E402
import webcamera_utils # noqa: E402
# logger
from logging import getLogger # noqa: E402
logger = getLogger(__name__)
# ======================
# PARAMETERS
# ======================
MODEL_NAME = 'blazeface'
MODEL_FLOAT_PATH = 'face_detection_front.tflite'
MODEL_INT_PATH = 'face_detection_front_128_full_integer_quant.tflite'
REMOTE_PATH = f'https://storage.googleapis.com/ailia-models-tflite/{MODEL_NAME}/'
IMAGE_PATH = 'input.png'
SAVE_IMAGE_PATH = 'result.png'
IMAGE_HEIGHT = 128
IMAGE_WIDTH = 128
# ======================
# Argument Parser Config
# ======================
parser = get_base_parser(
'BlazeFace is a fast and light-weight face detector.',
IMAGE_PATH,
SAVE_IMAGE_PATH,
)
args = update_parser(parser)
if args.tflite:
import tensorflow as tf
else:
import ailia_tflite
if args.shape:
IMAGE_HEIGHT = args.shape
IMAGE_WIDTH = args.shape
# ======================
# Main functions
# ======================
def recognize_from_image():
    """Detect faces in every image listed in args.input and save annotated copies.

    The float or quantized BlazeFace model is chosen via args.float, and the
    interpreter backend is TensorFlow Lite (args.tflite) or ailia_tflite.
    Results are written to the path derived from args.savepath.
    """
    # net initialize
    if args.float:
        MODEL_PATH = MODEL_FLOAT_PATH
    else:
        MODEL_PATH = MODEL_INT_PATH
    if args.tflite:
        interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
    else:
        # Only pass the optional runtime settings when at least one is set.
        if args.flags or args.memory_mode or args.env_id:
            interpreter = ailia_tflite.Interpreter(model_path=MODEL_PATH, memory_mode = args.memory_mode, flags = args.flags, env_id = args.env_id)
        else:
            interpreter = ailia_tflite.Interpreter(model_path=MODEL_PATH)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    if args.shape:
        # A custom square input size was requested; resize the input tensor
        # and re-allocate before inference.
        logger.info(f"update input shape {[1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]}")
        interpreter.resize_tensor_input(input_details[0]["index"], [1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])
        interpreter.allocate_tensors()
    for image_path in args.input:
        # prepare input data
        org_img = load_image(
            image_path,
            (IMAGE_HEIGHT, IMAGE_WIDTH),
        )
        # Same image again, normalized to [-1, 1] and shaped for the network.
        input_data = load_image(
            image_path,
            (IMAGE_HEIGHT, IMAGE_WIDTH),
            normalize_type='127.5',
            gen_input_ailia_tflite=True
        )
        # inference
        logger.info('Start inference...')
        if args.benchmark:
            # Repeat inference args.benchmark_count times and report latencies.
            logger.info('BENCHMARK mode')
            average_time = 0
            for i in range(args.benchmark_count):
                start = int(round(time.time() * 1000))
                interpreter.set_tensor(input_details[0]['index'], input_data)
                interpreter.invoke()
                end = int(round(time.time() * 1000))
                average_time = average_time + (end - start)
                logger.info(f'\tailia processing time {end - start} ms')
            logger.info(f'\taverage time {average_time / args.benchmark_count} ms')
        else:
            interpreter.set_tensor(input_details[0]['index'], input_data)
            interpreter.invoke()
        preds_tf_lite = {}
        # The float and quantized models expose their two outputs in opposite
        # order, so output indices 0/1 are swapped between the branches.
        if args.float:
            preds_tf_lite[0] = interpreter.get_tensor(output_details[0]['index']) #1x896x16 regressors
            preds_tf_lite[1] = interpreter.get_tensor(output_details[1]['index']) #1x896x1 classificators
        else:
            preds_tf_lite[0] = interpreter.get_tensor(output_details[1]['index']) #1x896x16 regressors
            preds_tf_lite[1] = interpreter.get_tensor(output_details[0]['index']) #1x896x1 classificators
        # postprocessing
        detections = but.postprocess(preds_tf_lite)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        # generate detections
        for detection in detections:
            logger.info(f'Found {detection.shape[0]} faces')
            but.plot_detections(org_img, detection, save_image_path=savepath)
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Detect faces on a webcam/video stream, display the preview and optionally save it.

    Press 'q' in the preview window to stop.  A video writer is only created
    when args.savepath differs from the default still-image path.
    """
    # net initialize
    if args.float:
        MODEL_PATH = "face_detection_front.tflite"
    else:
        MODEL_PATH = "face_detection_front_128_full_integer_quant.tflite"
    if args.tflite:
        interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
    else:
        # Only pass the optional runtime settings when at least one is set.
        if args.flags or args.memory_mode or args.env_id:
            interpreter = ailia_tflite.Interpreter(model_path=MODEL_PATH, memory_mode = args.memory_mode, flags = args.flags, env_id = args.env_id)
        else:
            interpreter = ailia_tflite.Interpreter(model_path=MODEL_PATH)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    capture = webcamera_utils.get_capture(args.video)
    # create video writer if savepath is specified as video format
    if args.savepath != SAVE_IMAGE_PATH:
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        save_h, save_w = webcamera_utils.calc_adjust_fsize(
            f_h, f_w, IMAGE_HEIGHT, IMAGE_WIDTH
        )
        writer = webcamera_utils.get_writer(args.savepath, save_h, save_w)
    else:
        writer = None
    while(True):
        ret, frame = capture.read()
        # Stop on a 'q' key press or when the stream runs out of frames.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break
        input_image, input_data = webcamera_utils.preprocess_frame(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH, normalize_type='127.5'
        )
        # inference
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        preds_tf_lite = {}
        # The float and quantized models expose their two outputs in opposite
        # order, so output indices 0/1 are swapped between the branches.
        if args.float:
            preds_tf_lite[0] = interpreter.get_tensor(output_details[0]['index']) #1x896x16 regressors
            preds_tf_lite[1] = interpreter.get_tensor(output_details[1]['index']) #1x896x1 classificators
        else:
            preds_tf_lite[0] = interpreter.get_tensor(output_details[1]['index']) #1x896x16 regressors
            preds_tf_lite[1] = interpreter.get_tensor(output_details[0]['index']) #1x896x1 classificators
        # postprocessing
        detections = but.postprocess(preds_tf_lite)
        but.show_result(input_image, detections)
        cv2.imshow('frame', input_image)
        # save results
        if writer is not None:
            writer.write(input_image)
    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def main():
    """Entry point: ensure both model files exist, then dispatch by input mode."""
    # Fetch the float and the quantized model if they are not present locally.
    for model_file in (MODEL_FLOAT_PATH, MODEL_INT_PATH):
        check_and_download_models(model_file, REMOTE_PATH)
    # A --video argument selects the stream pipeline; otherwise process images.
    if args.video is None:
        recognize_from_image()
    else:
        recognize_from_video()
if __name__ == '__main__':
main()
| axinc-ai/ailia-models-tflite | face_detection/blazeface/blazeface.py | blazeface.py | py | 7,170 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "utils.get_base_parser... |
11537413568 | import cv2
def getdifference(a, b):
    """Return the Euclidean distance between the grayscale histograms of two frames.

    a, b -- frame file names (e.g. 'image.jpeg') located under ./frames/.
    Returns a plain float.  (The original element-wise Python loop returned a
    1-element numpy array; a float behaves identically in numeric contexts.)
    """
    def _gray_hist(name):
        # Load the frame, convert to grayscale, build a 256-bin intensity histogram.
        image = cv2.imread('./frames/' + str(name))
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return cv2.calcHist([gray], [0], None, [256], [0, 256])
    histogram = _gray_hist(a)
    histogram1 = _gray_hist(b)
    # Vectorized Euclidean distance; cv2.calcHist returns float32 numpy arrays,
    # so array arithmetic replaces the original O(n) while-loop.
    n = min(len(histogram), len(histogram1))
    diff = histogram[:n] - histogram1[:n]
    return float((diff * diff).sum() ** 0.5)
| pa-kh039/nlp_video_descriptor | diff.py | diff.py | py | 618 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.calcHist",
"line... |
26453541576 | import gmpy2
# Project Euler 3: largest prime factor of 600851475143.
# Plain trial division replaces the gmpy2.next_prime() scan: each divisor is
# divided out completely, so every divisor found is automatically prime, and
# the loop can stop once prime * prime exceeds the remaining cofactor.
thenumber = 600851475143
prime = 2
answer = 0
while prime * prime <= thenumber:
    if thenumber % prime == 0:
        thenumber = thenumber // prime
        answer = prime
        continue
    prime = prime + 1 if prime == 2 else prime + 2  # skip even candidates after 2
if thenumber > 1:
    # The remaining cofactor is itself prime and larger than any factor so far.
    answer = thenumber
print(answer)
"""
gmpy2 에 있는 next_prime 을 썼음. 다음에 소수에 대한 함수들을 만들어봅시다요
""" | hojin-kim/projectEuler | prob003.py | prob003.py | py | 348 | python | ko | code | 2 | github-code | 36 | [
{
"api_name": "gmpy2.next_prime",
"line_number": 10,
"usage_type": "call"
}
] |
18399557832 | from fastapi import APIRouter, Request, HTTPException, Body, Path
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import StickerSendMessage, TextSendMessage
from machine_leanning.model_text_classifire import intent_model
from models.token_line import TokenLINE
from random import randint
from bson import ObjectId
from config.object_str import CutId
from typing import Optional
from config.db import MongoDB
from numpy import random
import uuid
import json
import os
client = os.environ.get('MONGODB_URI')
# client = 'mongodb://127.0.0.1:27017'
db = MongoDB(database_name='Mango', uri=client)
collection = 'line_bot'
router = APIRouter()
@router.post('/save')
async def save(item: TokenLINE):
    """Persist a LINE bot token record and return it with generated id, token and webhook URL."""
    result = item.dict()
    # Short document id derived from a fresh ObjectId, plus a random hex token
    # that doubles as the webhook path segment.
    result['id'] = CutId(_id=ObjectId()).dict()['id']
    token = uuid.uuid4().hex
    result['token'] = token
    result['webhook'] = f'https://mango2smartmarketing.herokuapp.com/callback/{token}'
    db.insert_one(collection=collection, data=result)
    # insert_one adds a non-serializable '_id'; strip it before returning JSON.
    del result['_id']
    return result
def get_profile(user_id, q):
    """Fetch a LINE user profile via the bot API and return it as a plain dict.

    q -- the bot's configuration record; q['ACCESS_TOKEN'] authenticates the call.
    """
    api = LineBotApi(q['ACCESS_TOKEN'])
    profile = api.get_profile(user_id)
    return {
        'displayName': profile.display_name,
        'userId': profile.user_id,
        'img': profile.picture_url,
        'status': profile.status_message,
    }
@router.post('/{token}')
async def webhook(
        request: Request,
        token: Optional[str] = Path(...),
        raw_json: Optional[dict] = Body(None)
):
    """LINE webhook endpoint; the path token identifies the bot configuration.

    Looks up the bot record by token, dumps the raw payload to disk, then
    dispatches the first event by its type (follow / unfollow / postback /
    message).  Any other type falls through to the sticker fallback handler
    for every event in the batch.  Returns the raw payload unchanged.
    """
    # Resolve the bot configuration (access token / channel secret) by token.
    q = db.find_one(collection=collection, query={'token': token})
    q = dict(q)
    handler = q['SECRET_LINE']
    handler = WebhookHandler(handler)
    # Keep the latest raw payload on disk for debugging.
    with open('static/line_log.json', 'w') as log_line:
        json.dump(raw_json, log_line)
    try:
        signature = request.headers['X-Line-Signature']
        body = await request.body()
        # Only the first event determines which branch runs below.
        events = raw_json['events'][0]
        _type = events['type']
        if _type == 'follow':
            # New follower: store their LINE profile.
            userId = events['source']['userId']
            profile = get_profile(userId, q)
            inserted = {'displayName': profile['displayName'], 'userId': userId, 'img': profile['img'],
                        'status': profile['status']}
            db.insert_one(collection='line_follower', data=inserted)
        elif _type == 'unfollow':
            userId = events['source']['userId']
            db.delete_one('line_follower', query={'userId': userId})
        elif _type == 'postback':
            event_postback(events, q)
        elif _type == 'message':
            message_type = events['message']['type']
            if message_type == 'text':
                try:
                    # Persist the incoming message together with the sender's
                    # profile, verify the LINE signature, then answer it.
                    userId = events['source']['userId']
                    message = events['message']['text']
                    profile = get_profile(userId, q)
                    push_message = {'user_id': userId, 'message': message, 'display_name': profile['displayName'],
                                    'img': profile['img'],
                                    'status': profile['status']}
                    db.insert_one(collection='message_user', data=push_message)
                    handler.handle(str(body, encoding='utf8'), signature)
                    handler_message(events, q)
                except InvalidSignatureError as v:
                    api_error = {'status_code': v.status_code, 'message': v.message}
                    raise HTTPException(status_code=400, detail=api_error)
        else:
            # Unknown event type: answer every event with the fallback handler.
            no_event = len(raw_json['events'])
            for i in range(no_event):
                events = raw_json['events'][i]
                event_handler(events, q)
    except IndexError:
        # Payload without events (e.g. webhook verification ping) — acknowledge.
        raise HTTPException(status_code=200, detail={'Index': 'null'})
    return raw_json
def event_handler(events, q):
    """Fallback handler: answer an unclassified event with a random sticker."""
    api = LineBotApi(q['ACCESS_TOKEN'])
    # Sticker package '446' — pick one sticker id from the 1988..2027 range.
    sticker = StickerSendMessage('446', str(randint(1988, 2027)))
    api.reply_message(events['replyToken'], sticker)
def event_postback(events, q):
    """Placeholder for LINE postback events; intentionally does nothing yet."""
    pass
def handler_message(events, q):
    """Classify an incoming text message and reply with a canned answer.

    Runs the intent model on the message text.  If the model requests more
    input ('require'), that prompt is sent and handling stops.  Otherwise the
    predicted label selects an answer pool and a random answer is sent when
    the confidence exceeds 69%, else a fallback "I don't understand" reply.
    """
    line_bot_api = LineBotApi(q['ACCESS_TOKEN'])
    text = events['message']['text']
    replyToken = events['replyToken']
    data = intent_model(text, q['ACCESS_TOKEN'])
    if data.get('require'):
        line_bot_api.reply_message(replyToken, TextSendMessage(text=data.get('require')))
        # BUGFIX: a LINE reply token is single-use; stop here so the code
        # below does not attempt a second reply with the same token.
        return
    label = data['predict']
    choice_answers = data['answers']
    confident = data['confident'][0] * 100  # model confidence as a percentage
    if confident > 69:
        choice = random.choice(choice_answers[int(label)])
        line_bot_api.reply_message(replyToken, TextSendMessage(text=choice))
    else:
        # Low confidence: Thai for "I don't understand".
        line_bot_api.reply_message(replyToken, TextSendMessage(text='ฉันไม่เข้าใจ'))
| watcharap0n/m-business | routers/wh_client.py | wh_client.py | py | 5,144 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "config.db.MongoDB",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "fastapi.APIRouter",
... |
38164483571 | import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec
from scipy import linalg
from scipy.optimize import basinhopping
from skimage.filters import gabor_kernel
from skimage.transform import resize
from sklearn import feature_selection
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, explained_variance_score
from sklearn.model_selection import ParameterGrid
from torch import nn
from base_models import get_model
from plot.plot_data import plot_subplots_histograms, plot_3d, plot_heatmap, plot_2d, plot_histogram, \
plot_matrixImage
from utils.correlation import generate_correlation_map, pca, fit_data, multivariate_gaussian
from utils.distributions import mixture_gaussian
from utils.gabors import gabor_kernel_3, normalize, similarity
def score_kernel(X, theta, frequency, sigma, offset, stds):
    """Score how well a Gabor kernel with the given parameters explains X.

    Builds the real part of a skimage Gabor kernel, resizes it to X's side
    length, normalizes it, clamps non-finite values, and returns the
    explained-variance score of the kernel against X.
    """
    side = X.shape[0]
    raw = gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma,
                       n_stds=stds, offset=offset)
    kernel = resize(np.real(raw), (side, side), anti_aliasing=True)
    kernel = np.nan_to_num(normalize(kernel), posinf=1, neginf=-1, nan=0)
    return explained_variance_score(kernel, X)
def objective_function_2(beta, X, size=3):
    """Mean squared error between X and a normalized gabor_kernel_3 built from beta.

    beta layout: (frequency, theta, sigma_x, sigma_y, offset, x_c, y_c, scale).
    """
    frequency, theta, sigma_x, sigma_y, offset, x_c, y_c, scale = beta[:8]
    kernel = normalize(gabor_kernel_3(
        frequency, theta=theta, sigma_x=sigma_x, sigma_y=sigma_y,
        offset=offset, x_c=x_c, y_c=y_c, scale=scale, ks=size))
    return mean_squared_error(kernel, X)
def objective_function(beta, X):
    """Mean squared error between X and a resized, normalized skimage Gabor kernel.

    beta layout: (frequency, theta, sigma_x, sigma_y, offset, n_stds).
    """
    side = X.shape[0]
    kernel = np.real(gabor_kernel(beta[0], theta=beta[1], sigma_x=beta[2],
                                  sigma_y=beta[3], offset=beta[4], n_stds=beta[5]))
    kernel = resize(kernel, (side, side), anti_aliasing=True)
    kernel = np.nan_to_num(normalize(kernel), posinf=1, neginf=-1, nan=0)
    return mean_squared_error(kernel, X)
def linear_regression(x, y, lin: LinearRegression):
    """Return 1 minus the explained variance of lin's predictions on (x, y)."""
    prediction = lin.predict(x)
    score = explained_variance_score(y, prediction)
    return 1 - score
def fit_linear_regression():
input = np.random.random([1, 8])
output = np.random.random([1, 9])
lin = LinearRegression()
lin.fit(input, output)
model = get_model('CORnet-S_base', True)
counter = 0
np.random.seed(1)
def print_fun(x, f, accepted):
print("at minima %.4f accepted %d" % (f, int(accepted)))
fun = lambda x, y: linear_regression(x, y, lin)
for name, m in model.named_modules():
if type(m) == nn.Conv2d and counter == 0:
weights = m.weight.data.cpu().numpy()
lin_params = np.zeros([weights.shape[0], weights.shape[1], 8])
for i in range(0, weights.shape[0]):
for j in range(0, weights.shape[1]):
kernel = weights[i, j].flatten()
minimizer_kwargs = {"method": "L-BFGS-B", 'args': (kernel),
'options': {'maxiter': 200000, 'gtol': 1e-25}}
result = basinhopping(fun, [0, 0, 0, 0, 0, 0, 0, 0], niter=15,
minimizer_kwargs=minimizer_kwargs,
callback=print_fun, T=0.00001)
y_hat = result.x
lin_params[i, j] = y_hat
def hyperparam_gabor():
model = get_model('CORnet-S_base', True)
counter = 0
gabor_params = np.zeros([64, 3, 5])
np.random.seed(1)
for name, m in model.named_modules():
if type(m) == nn.Conv2d and counter == 0:
weights = m.weight.data.cpu().numpy()
for i in range(0, 10):
for j in range(0, 3):
kernel = weights[i, j]
kernel = normalize(kernel)
tuned_params = {"theta": np.arange(0, 1, 0.05),
"frequency": np.arange(0, np.pi, np.pi / 12),
"sigma": np.arange(0, 4, 0.25),
"offset": np.arange(-2, 2, 0.5),
"stds": np.arange(1, 4, 0.5),
}
best_score = np.NINF
best_params = {}
for g in ParameterGrid(tuned_params):
score = score_kernel(kernel, **g)
if score > best_score:
best_score = score
print(f'Update best score: {score}')
best_params = g
print(f'Best grid:{best_params} for kernel {i}, filter {j}')
gabor_params[i, j] = np.fromiter(best_params.values(), dtype=float)
np.save('gabor_params_grid_search_long.npy', gabor_params)
return
def fit_gabors(version='V1', file='gabor_params_basinhopping', layers=('V1.conv1', 'V2.conv2')):
    """Fit Gabor parameters to the conv kernels of the given layers via basin-hopping.

    version -- 'V1' fits the 6-parameter skimage Gabor (objective_function);
               anything else fits the 8-parameter gabor_kernel_3 (objective_function_2).
    file    -- output stem; results are written to '<file>.npy'.
    layers  -- module names whose Conv2d weights are fitted; the first match
               is processed and the function returns.  (Changed from a mutable
               list default to a tuple; membership tests are unaffected.)

    Each fitted entry stores the optimized parameter vector plus the final
    objective value as its last element.
    """
    model = get_model('CORnet-S_full_epoch_43', True)
    counter = 0
    # BUGFIX: the original compared `version is 'V1'` (identity against a
    # string literal, which is implementation-dependent); use equality.
    length = 7 if version == 'V1' else 9
    np.random.seed(1)
    for name, m in model.named_modules():
        if type(m) == nn.Conv2d:
            if name in layers:
                weights = m.weight.data.cpu().numpy()
                gabor_params = np.zeros([weights.shape[0], weights.shape[1], length])
                for i in range(0, weights.shape[0]):
                    for j in range(0, weights.shape[1]):
                        kernel = normalize(weights[i, j])

                        def print_fun(x, f, accepted):
                            # Progress callback for basinhopping.
                            print("at minima %.4f accepted %d" % (f, int(accepted)))

                        # (Dead pre-assignments of bnds/params removed: both
                        # branches below set their own bounds and start point.)
                        if version == 'V1':
                            print('Use sklearn version')
                            bnds = (
                                (-0.5, 1.5), (-np.pi, 2 * np.pi), (-4, 4), (-4, 4), (-3, 3), (-5, 5))
                            params = [0.5, np.pi / 2, 2, 2, 0, 3]
                            # note: `args` is a bare array; scipy wraps non-tuple args.
                            minimizer_kwargs = {"method": "L-BFGS-B", "bounds": bnds,
                                                'args': (kernel),
                                                'options': {'maxiter': 200000, 'gtol': 1e-25}}
                            result = basinhopping(objective_function, params, niter=15,
                                                  minimizer_kwargs=minimizer_kwargs,
                                                  callback=print_fun, T=0.00001)
                        else:
                            print('Use Tiagos version')
                            bnds = (
                                (1 / 14, 0.5), (-2 * np.pi, 2 * np.pi), (2, 14), (2, 14),
                                (-2 * np.pi, 2 * np.pi),
                                (-2, 2),
                                (-2, 2), (1e-5, 2))
                            params = [0.2, 0, 4, 4, 0, 0, 0, 1]
                            minimizer_kwargs = {"method": "L-BFGS-B", "bounds": bnds,
                                                'args': (kernel),
                                                'options': {'maxiter': 200000, 'gtol': 1e-25}}
                            result = basinhopping(objective_function_2, params, niter=15,
                                                  minimizer_kwargs=minimizer_kwargs,
                                                  callback=print_fun, T=0.00001)
                        beta_hat = result.x
                        # Store the fitted parameters plus the final objective value.
                        gabor_params[i, j] = np.append(beta_hat, result.fun)
                        print(f'Kernel {i}, filter {j}:')
                        print(beta_hat)
                np.save(f'{file}.npy', gabor_params)
                return
            counter += 1
def get_fist_layer_weights():
model = get_model('CORnet-S_full_epoch_43', True)
counter = 0
plt.figure(figsize=(20, 20))
gs = gridspec.GridSpec(10, 3, width_ratios=[1] * 3,
wspace=0.5, hspace=0.5, top=0.95, bottom=0.05, left=0.1, right=0.95)
for name, m in model.named_modules():
if type(m) == nn.Conv2d and counter == 0:
weights = m.weight.data.squeeze().numpy()
idx = 1
for i in range(0, 10):
kernel1 = weights[i, 0]
kernel2 = weights[i, 1]
kernel3 = weights[i, 2]
ax = plt.subplot(gs[i, 0])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(f'Samples parameter K1', pad=2, fontsize=5)
idx += 1
plt.imshow(kernel2, cmap='gray')
ax = plt.subplot(gs[i, 1])
ax.set_title(f'Samples parameter K2', pad=2, fontsize=5)
idx += 1
plt.imshow(kernel1, cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(gs[i, 2])
ax.set_title(f'Samples parameter K3', pad=2, fontsize=5)
plt.imshow(kernel3, cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
idx += 1
plt.tight_layout()
plt.savefig('full_kernels.png')
plt.show()
return
def compare_gabors(version='V1', file='gabor_params_basinhopping_bound_6_params'):
    """Plot the first conv layer's kernels next to their fitted Gabor kernels.

    version -- 'V1' rebuilds the 6-parameter skimage Gabor fit; anything else
               rebuilds the 8-parameter gabor_kernel_3 fit.
    file    -- stem of the '.npy' file produced by fit_gabors; the figure is
               saved to '<file>.png'.
    """
    gabor_params = np.load(f'{file}.npy')
    model = get_model('CORnet-S_full_epoch_43', True)
    counter = 0
    plt.figure(figsize=(4, 25))
    gs = gridspec.GridSpec(20, 3, width_ratios=[1] * 3,
                           wspace=0.5, hspace=0.5, top=0.95, bottom=0.05, left=0.1, right=0.95)
    for name, m in model.named_modules():
        if type(m) == nn.Conv2d and counter == 0:
            weights = m.weight.data.squeeze().numpy()
            index = 1
            for i in range(0, 10):
                for j in range(0, 3):
                    beta = gabor_params[i, j]
                    kernel2 = normalize(weights[i, j])
                    # BUGFIX: the original compared `version is "V1"` (identity
                    # against a string literal); use equality instead.
                    if version == "V1":
                        kernel1 = np.real(gabor_kernel(beta[0], theta=beta[1],
                                                       sigma_x=beta[2], sigma_y=beta[3],
                                                       offset=beta[4],
                                                       n_stds=beta[5]))
                        kernel1 = np.nan_to_num(kernel1).astype(np.float32)
                        kernel1 = resize(kernel1, (kernel2.shape[0], kernel2.shape[0]),
                                         anti_aliasing=True, preserve_range=True)
                        kernel1 = normalize(kernel1)
                    else:
                        kernel1 = gabor_kernel_3(beta[0], theta=beta[1],
                                                 sigma_x=beta[2], sigma_y=beta[3], offset=beta[4],
                                                 x_c=beta[5],
                                                 y_c=beta[6], scale=beta[7], ks=7)
                        kernel1 = normalize(kernel1)
                    # Original kernel in the even row, its fitted Gabor below it.
                    ax = plt.subplot(gs[i * 2, j])
                    ax.set_xticks([])
                    ax.set_yticks([])
                    ax.set_title(f'K {i}, F {j}', pad=3)
                    plt.imshow(kernel2, cmap='gray')
                    ax = plt.subplot(gs[(i * 2) + 1, j])
                    ax.set_title(f'Error {beta[-1]:.3}', pad=10)
                    plt.imshow(kernel1, cmap='gray')
                    index += 1
                index += 3
            plt.savefig(f'{file}.png')
            plt.show()
            return
def rank_errors(name1, name2):
    """Print the mean fit error (last column) of two saved parameter arrays."""
    mean_err_1 = np.mean(np.load(f'{name1}.npy')[:, -1])
    mean_err_2 = np.mean(np.load(f'{name2}.npy')[:, -1])
    print(f'Mean error of fit {name1} is {mean_err_1}, mean error of fit {name2} is {mean_err_2}')
def plot_parameter_distribution(name):
    """Plot per-parameter histograms of fitted Gabor parameters loaded from '<name>.npy'."""
    params = np.load(f'{name}.npy')
    data = {}
    # One label per fitted Gabor parameter; the array's last channel is the
    # fit error and is excluded below via shape[2] - 1.
    names = ['Frequency', 'Theta', 'Sigma X', 'Sigma Y', 'Offset', 'Center X', 'Center Y']
    # NOTE(review): assumes each parameter flattens to 192 values
    # (64 kernels x 3 channels) and at most 7 parameters — confirm for the
    # 8-parameter fit files, where `names`/`variables` would be too short.
    variables = np.zeros((7, 192))
    for i in range(params.shape[2] - 1):
        param = params[:, :, i]
        data[names[i]] = param.flatten()
        variables[i] = param.flatten()
    # Axis bounds matching the basin-hopping optimization bounds.
    bnds = (
        (1 / 14, 0.5), (-2 * np.pi, 2 * np.pi), (2, 14), (2, 14), (-2 * np.pi, 2 * np.pi), (-2, 2),
        (-2, 2))
    plot_subplots_histograms(data, 'Gabor parameter distributions', bins=10, bounds=bnds)
def mutual_information():
model = get_model('CORnet-S_base', True)
plt.figure(figsize=(4, 25))
gs = gridspec.GridSpec(20, 3, width_ratios=[1] * 3,
wspace=0.5, hspace=0.5, top=0.95, bottom=0.05, left=0.1, right=0.95)
for name, m in model.named_modules():
if type(m) == nn.Conv2d and 'V1' not in name:
weights = m.weight.data.squeeze().numpy()
scores = np.zeros([weights.shape[0], weights.shape[0]])
kernels = weights.shape[0]
for i in range(kernels):
for j in range(kernels):
scores[i, j] = feature_selection.mutual_info_regression(
weights[i].flatten().reshape(-1, 1),
weights[j].flatten())
print(f'Kernel mean mutual information {np.mean(scores)}')
plot_heatmap(scores, title=f'Kernel mutual information {name}', col_labels='Kernels',
row_labels='Kernels')
if len(weights.shape) > 2:
channels = weights.shape[1]
scores = np.zeros([kernels, channels, channels])
for i in range(kernels):
for j in range(channels):
for k in range(channels):
scores[i, j, k] = feature_selection.mutual_info_regression(
weights[i, j].flatten().reshape(-1, 1), weights[i, k].flatten())
scores = np.mean(scores, axis=0)
plot_heatmap(scores, title=f'Channel mean mutual information {name}',
col_labels='Channel',
row_labels='Channel')
def kernel_similarity():
model = get_model('CORnet-S_train_second_kernel_conv_epoch_00', True)
counter = 0
plt.figure(figsize=(4, 25))
gs = gridspec.GridSpec(20, 3, width_ratios=[1] * 3,
wspace=0.5, hspace=0.5, top=0.95, bottom=0.05, left=0.1, right=0.95)
kernel_avgs = []
for name, m in model.named_modules():
if type(m) == nn.Conv2d:
if counter == 1:
weights = m.weight.data.squeeze().numpy().transpose(1, 0, 2, 3)
for i in range(0, 64):
avgs = []
for j in range(0, 64):
for k in range(j, 64):
if k != j:
c1 = weights[i, j]
c2 = weights[i, k]
avgs.append(similarity(c2, c1))
kernel_avgs.append(np.mean(avgs))
plot_histogram(kernel_avgs, 'Channel similarity within a kernel', bins=20)
print('Similarity avg:' + str(np.mean(kernel_avgs)))
return
counter += 1
def mixture_analysis(pi, mu, cov):
    """Print the shapes of mixture weights, means and covariances.

    For k components: pi has shape (k,), mu (k,), cov (k, k, k).
    """
    shapes = (pi.shape, mu.shape, cov.shape)
    print(*shapes)
def gaussian_mixture_channels(name):
    """Fit a Gaussian mixture over per-channel Gabor parameters from '<name>.npy'."""
    params = np.load(f'{name}.npy')
    # Drop the trailing fit-error column, then treat each channel as one
    # 8-parameter sample row.
    samples = params[:, :, :-1].reshape(-1, 8)
    mixture_base(samples, [(0, 10)], [4096, 1])
def gaussian_mixture(name, name2=None):
    """Fit a Gaussian mixture over per-kernel Gabor parameters.

    Loads '<name>.npy', drops the trailing fit-error column and flattens each
    of the 64 kernels into one sample row.  When name2 is given, the samples
    from '<name2>.npy' are stacked below the first set before fitting.
    """
    params = np.load(f'{name}.npy')
    shape = [64, 64, 3, 3]
    param = params[:, :, :-1]
    param = param.reshape(64, -1)
    # One (start, end) slot of width 10 per flattened parameter block; the
    # widening from 8 to 10 entries per block happens inside mixture_base.
    tuples = []
    for i in range(param.shape[1]):
        tuples.append((i * 10, (i + 1) * 10))
    if name2 is not None:
        # BUGFIX: the original reloaded '<name>.npy' and re-sliced `params`,
        # so the second file was never actually used.
        params2 = np.load(f'{name2}.npy')
        p2 = params2[:, :, :-1]
        p2 = p2.reshape(64, -1)
        param = np.concatenate((param, p2), axis=0)
    mixture_base(param, tuples, shape)
def mixture_base(param, tuples, shape, size=3):
new_param = np.zeros((shape[0], 10 * shape[1]))
for i in range(shape[0]):
for s, e in tuples:
p = param[i, int(s * 8 / 10):int(e * 8 / 10)]
new_param[i, s:e] = (
p[0], np.sin(2 * p[1]), np.cos(2 * p[1]), p[2], p[3], np.sin(p[4]), np.cos(p[4]),
p[5], p[6], p[7])
# plot_bic(best_gmm, param)
best_gmm = mixture_gaussian(new_param)
samples = best_gmm.sample(new_param.shape[0])[0]
idx = 1
plt.figure(figsize=(10, 20))
gs = gridspec.GridSpec(22, 3, width_ratios=[1] * 3,
wspace=0.5, hspace=0.5, top=0.95, bottom=0.05, left=0.1, right=0.95)
for i in range(shape[0]):
for s, e in tuples:
beta = samples[i, s:e]
kernel2 = gabor_kernel_3(beta[0], theta=np.arctan2(beta[1], beta[2]),
sigma_x=beta[3], sigma_y=beta[4],
offset=np.arctan2(beta[5], beta[6]), x_c=beta[7],
y_c=beta[8],
scale=beta[9], ks=size)
ax = plt.subplot(gs[i, int(s / 10)])
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(kernel2, cmap='gray')
idx += 1
plt.figure(figsize=(15, 15))
length = 64 * 3
matrix = np.empty([length, 0])
weights = samples.reshape(64, 64, 10)
for i in range(0, 64):
row = np.empty([0, 3])
for j in range(0, 64):
beta = weights[i, j]
channel = gabor_kernel_3(beta[0], theta=np.arctan2(beta[1], beta[2]),
sigma_x=beta[3], sigma_y=beta[4],
offset=np.arctan2(beta[5], beta[6]), x_c=beta[7],
y_c=beta[8],
scale=beta[9], ks=size)
row = np.concatenate((row, channel), axis=0)
f_min, f_max = np.min(row), np.max(row)
row = (row - f_min) / (f_max - f_min)
matrix = np.concatenate((matrix, row), axis=1)
plot_matrixImage(matrix, 'Gabor_conv2')
plt.tight_layout()
plt.show()
def plot_bic(clf, X):
plt.figure(figsize=(20, 20))
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange', 'red', 'yellow'])
size = X.shape[1]
Y_ = clf.predict(X)
idx = 1
for l in range(size):
for j in range(X.shape[1]):
splot = plt.subplot(size, size, idx)
idx += 1
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
color_iter)):
v, w = linalg.eigh(cov)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, l], X[Y_ == i, j], 1, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][j], w[0][l])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[l], v[j], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title(f'Dimension {l} and {j}')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.savefig('Mixture gaussians')
plt.show()
def analyze_param_dist(name, plot=False):
params = np.load(f'{name}.npy')
names = ['Frequency', 'Theta', 'Sigma X', 'Sigma Y', 'Offset', 'Center X', 'Center Y']
variables = np.zeros((7, 192))
param = params[:, :, :-1]
param = param.reshape(64, -1)
pca_res = pca(param, n_components=21)
principal_components = pca_res.transform(param)
if plot:
plot_2d(principal_components[:, 0], principal_components[:, 1], 'PC 1 & 2')
plot_2d(principal_components[:, 1], principal_components[:, 2], 'PC 2 & 3')
plot_2d(principal_components[:, 0], principal_components[:, 2], 'PC 1 & 3')
plot_2d(principal_components[:, 0], principal_components[:, 3], 'PC 1 & 4')
plot_2d(principal_components[:, 0], principal_components[:, 4], 'PC 1 & 5')
plot_3d(principal_components[:, 0], principal_components[:, 1], principal_components[:, 2],
'Principal components of gabor filter params')
reg = fit_data(principal_components, variables.T)
small_samples = multivariate_gaussian(principal_components.T, 10)
full_params = reg.predict(small_samples.T) # shape (10, 7)
small_samples_hat = pca_res.transform(full_params) # shape (10,3)
full_params_hat = reg.predict(small_samples_hat)
print(mean_squared_error(small_samples.T, small_samples_hat))
idx = 0
plt.figure(figsize=(5, 10))
gs = gridspec.GridSpec(20, 3, width_ratios=[1] * 3,
wspace=0.5, hspace=0.5, top=0.95, bottom=0.05, left=0.1, right=0.95)
for i in range(10):
alpha = full_params[i]
beta = full_params_hat[i]
kernel2 = gabor_kernel_3(beta[0], theta=beta[1],
sigma_x=beta[2], sigma_y=beta[3], offset=beta[4], x_c=beta[5],
y_c=beta[6], ks=7)
kernel1 = gabor_kernel_3(alpha[0], theta=alpha[1],
sigma_x=alpha[2], sigma_y=alpha[3], offset=alpha[4], x_c=alpha[5],
y_c=alpha[6], ks=7)
ax = plt.subplot(gs[i, 0])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(f'Samples parameter set', pad=3, fontsize=5)
idx += 1
plt.imshow(kernel2, cmap='gray')
ax = plt.subplot(gs[i, 1])
ax.set_title(f'Reconstruction', pad=10, fontsize=5)
plt.imshow(kernel1, cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
idx += 1
plt.savefig(f'reconstructions.png')
plt.show()
if plot:
principal_components = principal_components.T
corr_pca = generate_correlation_map(principal_components, principal_components)
corr_map = generate_correlation_map(variables, variables)
mask = np.zeros_like(corr_map)
mask[np.triu_indices_from(mask)] = True
plot_heatmap(corr_map, names, names, title='Gabor parameter correlation', mask=mask)
mask = np.zeros_like(corr_pca)
mask[np.triu_indices_from(mask)] = True
plot_heatmap(corr_pca, names, names, title='PCA correlations', mask=mask)
def compare_two_values():
    """Build Gabor kernels for two parameterizations meant to be equivalent.

    The second parameter set rewrites the first's negative frequency/angle
    into the bounded form ((-0.5, 1.5), (-pi, 2*pi), (-4, 4), (-4, 4), (-3, 3), (-5, 5)).
    BUGFIX: the original overwrote the first set before using it, so only one
    kernel was ever built; build both and return them for comparison.
    """
    params_a = [-0.5, -2 * np.pi / 8, 0, 0, 0, 0]
    params_b = [0.5, np.pi + ((-2 * np.pi / 8) % np.pi), 0, 0, 0, 0]
    kernel_a = gabor_kernel_3(*params_a)
    kernel_b = gabor_kernel_3(*params_b)
    return kernel_a, kernel_b
if __name__ == '__main__':
fit_gabors('V2', "output_file_name")
# name = 'gabors_tiago'
# rank_errors(name, 'gabors_sklearn')
# show_options()
# compare_gabors()
# name = 'gabors_V2.conv2'
# # name = 'gabors_tiago_scaled'
# np.random.seed(0)
# fit_gabors('V2', name)
# compare_gabors('V2', name)
# analyze_param_dist(name, True)
# gaussian_mixture(name)
# gaussian_mixture_channels(name)
# kernel_similarity()
# get_fist_layer_weights()
# mutual_information()
fit_linear_regression()
| franzigeiger/training_reductions | analysis/fit_gabor_filters.py | fit_gabor_filters.py | py | 23,923 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.real",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "skimage.filters.gabor_kernel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "skimage.transform.resize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "util... |
28328861625 | from asyncio.windows_events import NULL
import pstats
import sys
import tkinter as tk
import tkinter.ttk as ttk
import time
class Window(tk.Frame):
    """Operator control window for talking to the measurement device.

    Builds four button groups (data transfer, measurement start/stop,
    air injection, mask/debug-window control) and forwards user actions
    to the injected serial / CSV / vision helper objects.

    Bug fixes vs. the previous revision:
    * ``InputDeltaTime`` setter assigned ``self.__pressure`` instead of
      ``self.__deltatime`` when the 9999 pause code was entered.
    * handlers returned ``NULL`` (imported from ``asyncio.windows_events``)
      instead of ``None``.
    * the close-button handler was the *string* ``'pass'`` (the lambda was
      invoked immediately); a real no-op callable is registered now.
    * button states mixed ``"disable"`` and ``"disabled"``; the canonical
      Tk value ``"disabled"`` is used everywhere so the
      ``== "disabled"`` guards in the handlers actually match.
    """

    FRAME_PADX: int = 30
    FRAME_PADY: int = 15
    FRAME_SIZEX: int = 1000
    FRAME_SIZEY: int = 300
    BUTTON_SIZEX: int = 10
    BUTTON_SIZEY: int = 30
    BUTTON_DISABLED_BG_COLOR: str = "whitesmoke"
    INJECTEAIR_BUTTON_BG_COLOR: str = "lightblue"
    SENDDATA_BUTTON_BG_COLOR: str = "lightpink"
    START_BUTTON_BG_COLOR: str = "lightgreen"
    STOP_BUTTON_BG_COLOR: str = "lemonchiffon"
    SET_BUTTON_BG_COLOR: str = "lightpink"
    RESET_BUTTON_BG_COLOR: str = "lightblue"

    @property
    def InputPressure(self) -> int:
        """Last validated target pressure [Pa]; -1 flags invalid input."""
        return int(self.__pressure)

    @InputPressure.setter
    def InputPressure(self, value):
        """Validate *value* (string from the entry box) and store it.

        Invalid input stores -1 and appends an error message to the
        status label; 9999 is the device's magic "pause" code.
        """
        if value == '':
            self.__pressure = -1
            self.sendDataStateLabel["text"] += '[設定圧力に無効な入力]'
        elif int(value) == 9999:
            # Pause request: store 0 so the pause code is forwarded later.
            self.__pressure = 0
            self.sendDataStateLabel["text"] = '[一時停止]'
        elif int(value) > 1000:
            self.__pressure = -1
            self.sendDataStateLabel["text"] += '[設定圧力は1000pa以下]'
        elif int(value) < 0:
            self.__pressure = -1
            self.sendDataStateLabel["text"] += '[設定圧力は正値]'
        else:
            self.__pressure = value

    @property
    def InputDeltaTime(self) -> int:
        """Last validated valve-open duration [ms]; -1 flags invalid input."""
        return int(self.__deltatime)

    @InputDeltaTime.setter
    def InputDeltaTime(self, value):
        """Validate *value* (string from the entry box) and store it."""
        if value == '':
            self.__deltatime = -1
            self.sendDataStateLabel["text"] += '[設定時間に無効な入力]'
        elif int(value) == 9999:
            # BUGFIX: previous code assigned self.__pressure here.
            self.__deltatime = 0
            self.sendDataStateLabel["text"] = '[一時停止]'
        elif int(value) > 5000:
            self.__deltatime = -1
            self.sendDataStateLabel["text"] += '[設定時間は5000ms以下]'
        elif int(value) < 0:
            self.__deltatime = -1
            self.sendDataStateLabel["text"] += '[設定時間は正値]'
        else:
            self.__deltatime = value

    @property
    def IsMeasuring(self) -> bool:
        """True while a measurement session is running."""
        return self.__isMeasuring

    @IsMeasuring.setter
    def IsMeasuring(self, value: bool = False):
        self.__isMeasuring = value

    def __init__(self, root, mySerial, myCSV, myVision):
        """Build the widget tree.

        :param root: the Tk root window
        :param mySerial: serial-communication helper (SendInitializeData etc.)
        :param myCSV: CSV-logging helper (MakeFile / CloseFile)
        :param myVision: computer-vision helper (flags + DestroyAllWindow)
        """
        super().__init__(root)
        self.__myVision = myVision
        self.__myCSV = myCSV
        self.__root = root
        self.__mySerial = mySerial
        self.__pressure: int = 0
        self.__deltatime: int = 0
        self.__isMeasuring: bool = False
        root.option_add('*font', 'MSゴシック 22')
        root.title("装置との通信用窓")
        # Disable the window close button.
        # BUGFIX: old code passed (lambda: 'pass')() — i.e. the string
        # 'pass' — as the handler; register a real no-op callable instead.
        root.protocol('WM_DELETE_WINDOW', lambda: None)

        # --- Frame 1: transfer settings ---------------------------------
        self.tf1 = tk.Frame(root, relief='groove', bd=5)
        self.tf1.grid(column=0, row=0, padx=self.FRAME_PADX,
                      pady=self.FRAME_PADY, sticky=tk.E + tk.W)
        # send-data button
        self.sendDataButton = tk.Button(
            self.tf1, text='データ送信', bg=self.SENDDATA_BUTTON_BG_COLOR, bd=4, width=15)
        self.sendDataButton.grid(
            padx=10, pady=10, column=0, row=0, sticky='w')
        self.sendDataButton.bind(
            "<Button-1>", self.SendDataButtonEventHandler)
        # transfer status label (shows validation errors in red)
        self.sendDataStateLabel = tk.Label(
            self.tf1, text='', fg="red")
        self.sendDataStateLabel.grid(
            padx=10, pady=10, column=1, columnspan=2, row=0, sticky='w')
        # target pressure entry
        self.pLabel = tk.Label(self.tf1, text='設定圧力[pa]')
        self.pLabel.grid(padx=10, pady=10, column=0, row=1, sticky='e')
        self.pEntry = tk.StringVar()
        self.pEntry.set('')
        self.pEntryBox = tk.Entry(
            self.tf1, textvariable=self.pEntry, bd=5, relief='groove')
        self.pEntryBox.grid(padx=10, column=1, row=1, sticky='w')
        # valve-open duration entry
        self.dtLabel = tk.Label(
            self.tf1, text='設定時間[ms]')
        self.dtLabel.grid(padx=10, pady=10, column=0, row=2, sticky='e')
        self.dtEntry = tk.StringVar()
        self.dtEntry.set('')
        self.dtEntryBox = tk.Entry(
            self.tf1, textvariable=self.dtEntry, bd=5, relief='groove')
        self.dtEntryBox.grid(padx=10, column=1, row=2, sticky='w')

        # --- Frame 2: measurement control -------------------------------
        self.tf2 = tk.Frame(root, relief='groove', bd=5)
        self.tf2.grid(column=0, row=1, padx=self.FRAME_PADX,
                      pady=self.FRAME_PADY, sticky=tk.E + tk.W)
        # start-measurement button (enabled only after valid data is sent)
        self.startButton = tk.Button(
            self.tf2, text='計測開始', bg=self.BUTTON_DISABLED_BG_COLOR, bd=4, width=15)
        self.startButton.grid(padx=10, pady=10, column=0, row=0, sticky='w')
        self.startButton.bind("<Button-1>", self.StartButtonEventHandler)
        self.startButton["state"] = "disabled"
        # file-name caption label
        self.fileNameLabel = tk.Label(
            self.tf2, text='ファイル名')
        self.fileNameLabel.grid(padx=10, pady=10, column=0, row=1, sticky='e')
        # measurement status label (shows the CSV file name)
        self.measuringStateLabel = tk.Label(
            self.tf2, text='', fg="black")
        self.measuringStateLabel.grid(
            padx=10, pady=10, column=1, columnspan=2, row=1, sticky='w')
        # stop-measurement button
        self.stopButton = tk.Button(
            self.tf2, text='計測終了', bg=self.BUTTON_DISABLED_BG_COLOR, bd=4, width=15)
        self.stopButton.grid(padx=10, pady=10, column=0, row=5, sticky='w')
        self.stopButton.bind("<Button-1>", self.StopButtonEventHandler)
        self.stopButton["state"] = "disabled"

        # --- Frame 3: air injection -------------------------------------
        self.tf3 = tk.Frame(root, relief='groove', bd=5)
        self.tf3.grid(column=0, row=2, padx=self.FRAME_PADX,
                      pady=self.FRAME_PADY, sticky=tk.E + tk.W)
        self.injectAirButton = tk.Button(
            self.tf3, text='空気発射', bg=self.BUTTON_DISABLED_BG_COLOR, bd=4, width=15)
        self.injectAirButton.grid(
            padx=10, pady=10, column=0, row=0, sticky='w')
        self.injectAirButton.bind("<Button-1>", self.InjectAirEventHandler)
        # BUGFIX: was "disable"; use the canonical value so the
        # `== "disabled"` guard in InjectAirEventHandler matches.
        self.injectAirButton["state"] = "disabled"

        # --- Frame 4: mask + debug-window toggles -----------------------
        self.tf4 = tk.Frame(root, relief='groove', bd=5)
        self.tf4.grid(column=0, row=3, padx=self.FRAME_PADX,
                      pady=self.FRAME_PADY, sticky=tk.E + tk.W)
        # freeze-mask button
        self.setMaskButton = tk.Button(
            self.tf4, text='マスク固定', bg=self.SET_BUTTON_BG_COLOR, bd=4, width=15)
        self.setMaskButton.grid(
            padx=10, pady=10, column=0, row=0, sticky='w')
        self.setMaskButton.bind("<Button-1>", self.SetMaskButtonEventHandler)
        self.setMaskButton["state"] = "normal"
        # release-mask button
        self.resetMaskButton = tk.Button(
            self.tf4, text='マスク解除', bg=self.BUTTON_DISABLED_BG_COLOR, bd=4, width=15)
        self.resetMaskButton.grid(
            padx=10, pady=10, column=0, row=1, sticky='w')
        self.resetMaskButton.bind(
            "<Button-1>", self.ResetMaskButtonEventHandler)
        self.resetMaskButton["state"] = "disabled"
        # vision debug-window toggle button
        self.windowOnOffButton = tk.Button(
            self.tf4, text='窓On/Off', bg=self.START_BUTTON_BG_COLOR, bd=4, width=15)
        self.windowOnOffButton.grid(
            padx=10, pady=10, column=0, row=2, sticky='w')
        self.windowOnOffButton.bind(
            "<Button-1>", self.SetWindoDisableButton)
        self.windowOnOffButton["state"] = "normal"

    def SetWindoDisableButton(self, event):
        """Toggle the vision helper's debug windows on/off."""
        self.__myVision.DestroyAllWindow()
        self.__myVision.WINDOW_DISABLE_FLAG = \
            not self.__myVision.WINDOW_DISABLE_FLAG

    def SetMaskButtonEventHandler(self, event):
        """Freeze the vision mask and swap the set/reset button states."""
        self.resetMaskButton["state"] = "normal"
        self.resetMaskButton["bg"] = self.RESET_BUTTON_BG_COLOR
        self.setMaskButton["state"] = "disabled"
        self.setMaskButton["bg"] = self.BUTTON_DISABLED_BG_COLOR
        self.__myVision.MASK_FIXED_FLAG = True

    def ResetMaskButtonEventHandler(self, event):
        """Release the vision mask and swap the set/reset button states."""
        self.setMaskButton["state"] = "normal"
        self.setMaskButton["bg"] = self.SET_BUTTON_BG_COLOR
        self.resetMaskButton["state"] = "disabled"
        self.resetMaskButton["bg"] = self.BUTTON_DISABLED_BG_COLOR
        self.__myVision.MASK_FIXED_FLAG = False

    def SendDataButtonEventHandler(self, event):
        """Validate both entries and forward them to the device.

        On success the start / inject-air buttons are enabled; on invalid
        input everything downstream is disabled.
        """
        if self.sendDataButton["state"] == "disabled":
            return None  # BUGFIX: was `return NULL`
        self.sendDataStateLabel["text"] = ''
        self.InputPressure = self.pEntry.get()
        self.InputDeltaTime = self.dtEntry.get()
        if (int(self.InputPressure) == -1) or (int(self.InputDeltaTime) == -1):
            # Invalid input: lock every downstream action.
            self.stopButton["state"] = "disabled"
            self.stopButton["bg"] = self.BUTTON_DISABLED_BG_COLOR
            self.startButton["state"] = "disabled"
            self.startButton["bg"] = self.BUTTON_DISABLED_BG_COLOR
            self.injectAirButton["state"] = "disabled"
            self.injectAirButton["bg"] = self.BUTTON_DISABLED_BG_COLOR
            return None
        if (int(self.InputPressure) == 0) or (int(self.InputDeltaTime) == 0):
            # 0 means "pause" was requested: forward the 9999 pause code
            # first (NOTE(review): original indentation was ambiguous here;
            # confirm the device expects the value pair afterwards too).
            self.__mySerial.SendInitializeData(
                int(9999), int(9999))
        self.__mySerial.SendInitializeData(
            int(self.InputPressure), int(self.InputDeltaTime))
        self.startButton["state"] = "normal"
        self.startButton["bg"] = self.START_BUTTON_BG_COLOR
        self.injectAirButton["state"] = "normal"
        self.injectAirButton["bg"] = self.INJECTEAIR_BUTTON_BG_COLOR

    def StartButtonEventHandler(self, event):
        """Begin a measurement session: open the CSV log and signal the rig."""
        if self.startButton["state"] == "disabled":
            return None
        # Vision tracking must have located the target before starting.
        if (self.__myVision.DANGOMUSI_X == 0) or (self.__myVision.DANGOMUSI_Y == 0) or (self.__myVision.NOZLE_DANGOMUSI_DISTANCE == 0):
            return None
        print("[I'm measuring now...]")
        self.IsMeasuring = True
        filename = self.__myCSV.MakeFile(
            self.InputPressure, self.InputDeltaTime)
        self.measuringStateLabel["text"] = filename
        self.__mySerial.SendStartMeasuringSignal()
        self.__myVision.SEND_INJECT_AIR_SIGNAL_FLAG = True
        self.sendDataButton["state"] = "disabled"
        self.sendDataButton["bg"] = self.BUTTON_DISABLED_BG_COLOR
        self.startButton["state"] = "disabled"
        self.startButton["bg"] = self.BUTTON_DISABLED_BG_COLOR
        self.stopButton["state"] = "normal"
        self.stopButton["bg"] = self.STOP_BUTTON_BG_COLOR

    def StopButtonEventHandler(self, event):
        """End the measurement session and close the CSV log."""
        if self.stopButton["state"] == "disabled":
            return None
        print("[Stop measuring]")
        self.IsMeasuring = False
        self.sendDataButton["state"] = "normal"
        self.sendDataButton["bg"] = self.SENDDATA_BUTTON_BG_COLOR
        self.__mySerial.SendStopMeasuringSignal()
        self.__myVision.SEND_INJECT_AIR_SIGNAL_FLAG = False
        if self.__myCSV.IsFileOpened():
            self.__myCSV.CloseFile()
        self.stopButton["state"] = "disabled"
        self.stopButton["bg"] = self.BUTTON_DISABLED_BG_COLOR

    def InjectAirEventHandler(self, event):
        """Fire a single air pulse (manual trigger from the window)."""
        if self.injectAirButton["state"] == "disabled":
            return None
        elif self.injectAirButton["state"] == "normal":
            self.injectAirButton["bg"] = self.INJECTEAIR_BUTTON_BG_COLOR
        self.__mySerial.SendInjectAirSignalFromWindow()
        print("[Inject Air!]")

    def __del__(self):
        # NOTE(review): destroying the root from __del__ is fragile (runs at
        # an unpredictable time, and the root may already be gone) — confirm
        # whether an explicit shutdown method would be safer.
        self.__root.destroy()
if __name__ == '__main__':
    root = tk.Tk()
    # NOTE(review): Window.__init__ requires (root, mySerial, myCSV,
    # myVision); this call passes only root, so running the module directly
    # raises TypeError. Presumably the real entry point constructs the
    # helper objects elsewhere — confirm against the rest of the project.
    myapp = Window(root)
    myapp.mainloop()
| izumi0x01/psych-antam-advanced-experiment | myWindow.py | myWindow.py | py | 12,304 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.Frame",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "tkinter.E",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "tkinter.W",
"line_... |
36173603713 | import random
import re
from collections import Counter, namedtuple
# Patterns for western smileys (':-)'), reversed smileys ('(-:') and
# asian-style smileys ('^_^'), plus a few literal extras.
re_smiley = re.compile(r'[8;:=%][-oc*^]?[)(D\/\\]')
re_smiley_reversed = re.compile(r'[)(D\/\\][-oc*^]?[8;:=%]')
re_smiley_asian = re.compile(r'\^[o_.]?\^')
extra_smileys = ['<3', '\o', '\o/', 'o/']
re_smileys = [re_smiley, re_smiley_reversed, re_smiley_asian]
re_url = re.compile(r'(?:(?:https?|ftp):\/\/.*)')

Word = namedtuple('Word', ['word', 'score'])


def is_smiley(word):
    '''
    check if ``word`` is a smiley

    :param word: word to check
    :type word: str
    :return: result of check
    :rtype: bool
    '''
    if word in extra_smileys:
        return True
    return any(pattern.match(word) for pattern in re_smileys)
def prepare_line(line):
    '''
    split ``line`` into words: lower-case everything except smileys and
    URLs, and append a ``'\\n'`` end-of-line marker

    :param line: line to prepare
    :type line: str
    :return: prepared line
    :rtype: list
    '''
    tokens = []
    for raw in line.split():
        # smileys and URLs keep their original casing
        keep_case = is_smiley(raw) or re_url.match(raw)
        token = raw if keep_case else raw.lower()
        if token:
            tokens.append(token)
    # terminator so the chain can learn where sentences end
    tokens.append('\n')
    return tokens
class MarkovPy:
    """First-order Markov chain bot backed by a pluggable store.

    The store must support ``in``, ``len()``, ``relation_count(word)``,
    ``insert(word, next_word)`` and ``next_words(word)`` (returning
    ``(word, weight)`` pairs).
    """

    def __init__(self, store):
        '''
        :param store: a compatible markov-store
        :type store: class
        '''
        self.store = store

    def _best_known_word(self, words):
        '''
        Find the best known word out of `words`

        :param words: list of words
        :type words: list
        :returns: best known word (None if no word is known)
        :rtype: str

        Bug fix: ties for the highest relation-count are now collected
        from the full scored list. The old code iterated the *unsorted*
        list and broke on the first non-maximal score (missing tied
        words) and also re-appended the top word, doubling its weight
        in the random choice.
        '''
        # score every known word by its number of relations
        scored = [(word, self.store.relation_count(word))
                  for word in words if word in self.store]
        if not scored:
            return None
        if len(scored) == 1:
            return scored[0][0]
        highest = max(score for _, score in scored)
        candidates = [word for word, score in scored if score == highest]
        if len(candidates) == 1:
            return candidates[0]
        # several words share the top score: pick one at random
        return random.choice(candidates)

    def learn(self, line, prepared=False):
        '''
        learn from ``line``

        :param line: line to add
        :param prepared: line was already split to words
        :type line: str
        :type prepared: bool
        '''
        words = line if prepared else prepare_line(line)
        # record every consecutive word pair
        for current_word, next_word in zip(words, words[1:]):
            self.store.insert(current_word, next_word)

    def reply(self, start, min_length=5, max_length=10, prepared=False):
        '''
        generate a reply to ``start``

        :param min_length: minimal length of reply
        :param max_length: max length of reply
        :param prepared: line was already split to words
        :type min_length: int
        :type max_length: int
        :type prepared: bool
        :return: response (None if no reply could be generated)
        :rtype: str
        '''
        start_words = start if prepared else prepare_line(start)
        start_word = self._best_known_word(start_words)
        if not start_word:
            return None
        if min_length > max_length:
            max_length = min_length + 1
        answer = [start_word]
        while len(answer) < max_length:
            # key doesn't exist => no possible next words
            if answer[-1] not in self.store:
                break
            possible_words = self.store.next_words(answer[-1])
            if len(possible_words) == 1:
                word = possible_words[0][0]
            else:
                # weighted random choice over the (word, weight) pairs
                word = random.choice(
                    list(Counter(dict(possible_words)).elements())
                )
            # chosen word == line-end => stop extending
            if word == '\n':
                break
            answer.append(word)
        if len(answer) < min_length:
            # don't return too short answers
            return None
        return ' '.join(answer)

    def process(self, line, learn=True, reply=True):
        '''
        process ``line``

        :param line: line to process
        :param learn: learn from line
        :param reply: reply to line (and return answer)
        :type line: str
        :type learn: bool
        :type reply: bool
        :return: answer if ``reply``
        :rtype: str
        '''
        prepared_line = prepare_line(line)
        if learn:
            self.learn(prepared_line, prepared=True)
        if reply:
            return self.reply(prepared_line, prepared=True)

    def __len__(self):
        return len(self.store)
| Thor77/MarkovPy | markov/markov.py | markov.py | py | 5,811 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 10,
... |
72694771304 | from typing import List
class Solution:
    def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
        """Count quadruples (i, j, k, l) with A[i]+B[j]+C[k]+D[l] == 0.

        Meet-in-the-middle: tally all pairwise sums of A+B and C+D, then
        match each A+B sum against its negation in C+D. O(n^2) time.
        """
        if not A:
            return 0

        def tally(values: List[int]) -> dict:
            # frequency map of the values
            counts: dict = {}
            for v in values:
                counts[v] = counts.get(v, 0) + 1
            return counts

        def pair_sums(left: dict, right: dict) -> dict:
            # frequency map of every sum a+b, weighted by both counts
            sums: dict = {}
            for a, ca in left.items():
                for b, cb in right.items():
                    sums[a + b] = sums.get(a + b, 0) + ca * cb
            return sums

        ab = pair_sums(tally(A), tally(B))
        cd = pair_sums(tally(C), tally(D))
        # a quadruple sums to zero exactly when the C+D sum is -(A+B sum)
        return sum(count * cd.get(-s, 0) for s, count in ab.items())
if __name__ == '__main__':
    # Smoke test: for these inputs 6 quadruples sum to zero.
    print(Solution().fourSumCount([-1,-1],[-1,1], [-1,1],[1,-1]))
| githubli97/leetcode-python | 202011/20201127/q454.py | q454.py | py | 1,337 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
}
] |
28867458807 | #!/usr/bin/env python3
import zipfile
import wget
import re
# CoVoST2 translations file
# CoVoST2 translations archive (downloaded below with wget)
covost2_url = 'https://dl.fbaipublicfiles.com/covost/covost2.zip'
# File with CommonVoice English sentences translated to Catalan by humans
covost2_source_file = 'validated.en_ca.en'
# Output file for accepted (valid, deduplicated) sentences
covost2_target_file = 'covost2-ca.txt'
# Discarded sentences, kept for manual review
discarted_file = 'covost2-ca-discarted.txt'
# Translation table mapping typographic punctuation (curly quotes,
# guillemets, acute accent) to their plain ASCII equivalents.
source_chars = '´‘’“”«»'
target_chars = '\'\'\'""""'
translation = str.maketrans(source_chars, target_chars)


def normalize_line(line):
    """Return *line* with typographic quote characters replaced by ASCII ones."""
    return line.translate(translation)
def validate_line(line):
    """Return True if *line* (newline-terminated) contains only characters
    allowed in a Catalan sentence: Latin letters with Catalan diacritics,
    spacing and common punctuation."""
    allowed = re.compile(
        "^[a-zA-ZçÇáàèéíïòóóúüÁÀÈÉÍÏÒÓÚÜ·¡¿!\? ,;:.\"'\.\(\)/–-]+\n$")
    return bool(allowed.match(line))
# Download the CoVoST2 translations archive into the working directory.
wget.download(covost2_url)
# Extract only the English->Catalan sentence file from the archive.
with zipfile.ZipFile('covost2.zip','r') as zip_ref:
    zip_ref.extract('covost2/' + covost2_source_file)
# Parse translations: keep only the first occurrence of each normalized
# line; valid sentences go to the output file, the rest to the discard file.
# Store lines already seen (deduplication)
lines_seen = set()
with open(covost2_target_file, 'w') as output_file, open(discarted_file, 'w') as removed_file:
    # NOTE(review): this source-file handle is never closed explicitly —
    # consider adding it to the `with` statement.
    for line in open('covost2/' + covost2_source_file, 'r'):
        # Normalize typographic punctuation to ASCII
        line = normalize_line(line)
        # Check if line is duplicated
        if line not in lines_seen:
            # Store line as seen
            lines_seen.add(line)
            # If it's a valid sentence, write it; otherwise record it
            # for manual review.
            if validate_line(line):
                output_file.write(line)
            else:
                removed_file.write(line)
| jmontane/covost2-ca | get_covost2.py | get_covost2.py | py | 1,632 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "wget.download",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 40,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.