seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
37407108595 | from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.linear_model import LinearRegression
# Build a synthetic binary-classification dataset (1000 samples, 10 features,
# 5 informative). shuffle=False keeps samples grouped by class order.
X, y = make_classification(n_samples=1000, n_features=10,
                           n_informative=5, n_redundant=0,
                           random_state=0, shuffle=False)
# splitting dataset into training and testing data
# NOTE(review): no random_state here, so the split (and all MSE numbers
# below) change on every run — add random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# models
# NOTE(review): the labels are binary classes, yet two of the three models
# are regressors; averaging their continuous outputs with the classifier's
# hard 0/1 predictions is a soft-voting teaching example — confirm intended.
linear = LinearRegression()
xgb_ = xgb.XGBRegressor()
forest = RandomForestClassifier()
# training models on training data
linear.fit(X_train, y_train)
xgb_.fit(X_train, y_train)
forest.fit(X_train, y_train)
# predictions for each model
pred_1 = linear.predict(X_test)
pred_2 = xgb_.predict(X_test)
pred_3 = forest.predict(X_test)
# see what we are working with
print("this is pred_1: ", pred_1)
print("this is the length of pred_1: ", len(pred_1))
# MSE for individual models
print("MSE pred_1:", mean_squared_error(y_test, pred_1))
print("MSE pred_2:", mean_squared_error(y_test, pred_2))
print("MSE pred_3:", mean_squared_error(y_test, pred_3))
# averaging model predictions (simple unweighted ensemble)
final = (pred_1 + pred_2 + pred_3)/3
# MSE for ensemble model
print("Final MSE:", mean_squared_error(y_test, final))
| HyperionDevBootcamps/C4_DS_lecture_examples | Lecture code/Machine Learning/Decision Trees/Ensemble.py | Ensemble.py | py | 1,443 | python | en | code | 37 | github-code | 36 |
4419668118 | #服务器
# UDP lobby server: wait for 4 players to join, then run a short game loop.
import socket
import time

SERVER_IP = "127.0.0.1"
SERVER_PORT = 8000

server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP socket
server.bind((SERVER_IP, SERVER_PORT))
print("Waiting...")

players = []
# Collect join requests until four distinct player addresses are known.
while len(players) < 4:
    message, address = server.recvfrom(1024)
    message = message.decode()
    if message == "loading game":
        print("players [%s%s]加入游戏" % (address[0], address[1]))
        if address not in players:
            players.append(address)

print("game is starting!")
for player in players:
    # BUG FIX: was `server.sento(...)` — a typo raising AttributeError.
    server.sendto('starting'.encode(), player)

# Broadcast "playing" once per second for five seconds.
for i in range(5):
    for player in players:
        server.sendto("playing".encode(), player)
    time.sleep(1)

for player in players:
    # BUG FIX: was `server.sendto(..., player())` — addresses are tuples,
    # not callables; calling one raised TypeError.
    server.sendto("ending".encode(), player)
server.close() | Julia1976/python-project | Network/3.13网络爬虫/3.27work/server play.py | server play.py | py | 818 | python | en | code | 0 | github-code | 36 |
5762295604 | import os
import sys
from datetime import datetime, timedelta
from airflow.models import DAG
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
PARENT_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
sys.path.append(PARENT_DIR)
from codes.test import add, sub
# Default arguments applied to every task in the DAG.
default_args = {
    "owner": "airflow",
}
with DAG(
    dag_id="aflow_dag",
    description="Datalake to datawarehouse creation",
    default_args=default_args,
    # schedule_interval="@daily",
    schedule_interval=None,  # manual triggering only
    start_date=days_ago(2),
    tags=["aflow", "dwh", "datalake", "etl/elt"],
) as dag:
    # NOTE(review): the task variables below shadow the callables `add` and
    # `sub` imported from codes.test. It works because each python_callable
    # is evaluated before its own assignment, but renaming the variables
    # (e.g. add_task / sub_task) would be safer.
    add = PythonOperator(
        task_id="add", python_callable=add,
    )
    sub = PythonOperator(
        task_id="sub", python_callable=sub,
    )
add >> sub | bhuiyanmobasshir94/Apache-Airflow-Starter | airflow/dags/aflow_dag.py | aflow_dag.py | py | 823 | python | en | code | 0 | github-code | 36 |
30722724901 | #programmers_단어 변환
#=== import module ===#
from collections import deque
#=== variable declare ===#
#=== Function define ===#
def solution(begin, target, words):
    """Return the minimum number of single-letter transformations needed to
    turn `begin` into `target`, using only intermediate words from `words`.

    Breadth-first search over the word graph; each queue entry carries the
    current word and a bitmask of word indices already used on that path.
    Returns 0 when the target is unreachable.
    """
    if target not in words:
        return 0  # target can never be produced
    queue = deque()
    queue.append((begin, 0))  # (current word, visited bitmask)
    level = 0
    while queue:
        level += 1
        # Process one BFS level at a time so `level` counts transformations.
        # FIX: the original reused loop variable `i` for both the level loop
        # and the character comparison, shadowing it.
        for _ in range(len(queue)):
            current, visited = queue.popleft()
            for idx, next_word in enumerate(words):
                if visited & (1 << idx):
                    continue  # this word was already used on this path
                # Count differing characters; exactly one means a legal step.
                diff = sum(1 for a, b in zip(current, next_word) if a != b)
                if diff != 1:
                    continue
                if next_word == target:
                    # First time BFS reaches the target is the optimum;
                    # return immediately instead of draining the level.
                    return level
                queue.append((next_word, visited | (1 << idx)))
    return 0
#=== main function ===#
print(solution("hit","cog",["hot", "dot", "dog", "lot", "log"]));
| Hoony0321/Algorithm | 2022_02/26/programmers_단어 변환.py | programmers_단어 변환.py | py | 1,080 | python | en | code | 0 | github-code | 36 |
4035544255 | #User function Template for python3
class Solution:
    def maxDiamonds(self, A, N, K):
        """Return the maximum diamonds collectable in K picks.

        Each pick takes the largest remaining pile; that pile is then
        replaced by its integer half. Simulated with a max-heap of negated
        values. O(N + K log N).

        Args:
            A: list of pile sizes.
            N: number of piles (unused; kept for the caller's signature).
            K: number of picks.
        """
        import heapq
        # FIX: build the heap in O(N) with heapify instead of N pushes
        # (O(N log N)); also bound the loop with range(K) so K <= 0 cannot
        # loop forever (original `while K != 0` hung on negative K).
        heap = [-x for x in A]
        heapq.heapify(heap)
        total = 0
        for _ in range(K):
            if not heap:  # defensive: nothing left to pick
                break
            largest = -heapq.heappop(heap)
            total += largest
            heapq.heappush(heap, -(largest // 2))
        return total
#{
# Driver Code Starts
#Initial Template for Python 3
if __name__ == '__main__':
t = int (input ())
for _ in range (t):
N,K=map(int,input().split())
A=list(map(int,input().split()))
ob = Solution()
print(ob.maxDiamonds(A,N,K))
# } Driver Code Ends | 20A31A0563/LeetCode | Maximum Diamonds - GFG/maximum-diamonds.py | maximum-diamonds.py | py | 739 | python | en | code | 0 | github-code | 36 |
71960799785 | import gluonbook as gb
from mxnet.gluon import data as gdata
import sys
import time
import matplotlib.pyplot as plt
mnist_train = gdata.vision.FashionMNIST(train=True)
mnist_test = gdata.vision.FashionMNIST(train=False)
# 训练集和测试集中每个类别的图像分别为6000, 1000, 因此len(mnist_train)=60000, len(mnist_test) = 10000
print(len(mnist_train), len(mnist_test))
# feature 对应高和宽均为28像素的图像, 每个像素的数值为0-255之间的8位无符号整数(unit8). 使用三维NDArray存储
feature, label = mnist_train[0]
print(feature.shape, feature.dtype)
print(label, type(label), label.dtype)
# 将数值标签转成相应的文本标签
def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class ids (0-9) to their text labels."""
    names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
             'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    result = []
    for label in labels:
        result.append(names[int(label)])
    return result
# 定义可以在一行里画出多个图像和对应标签的函数
def show_fashion_mnist(images, labels):
    """Plot one row of Fashion-MNIST images with their text labels.

    `images` are 28x28 mxnet NDArrays (hence the .asnumpy() call below);
    `labels` are the matching text labels — presumably produced by
    get_fashion_mnist_labels.
    """
    #gb.use_svg_display()
    # `_` receives the Figure object, which we ignore (unused variable).
    _, figs = plt.subplots(1, len(images), figsize=(12, 12))
    # zip() pairs each subplot axis with its image and label; iteration
    # stops at the shortest of the three sequences.
    for f, img, lbl in zip(figs, images, labels):
        f.imshow(img.reshape((28, 28)).asnumpy())
        f.set_title(lbl)
        # Hide the tick axes — they carry no meaning for image plots.
        f.axes.get_xaxis().set_visible(False)
        f.axes.get_yaxis().set_visible(False)
    plt.show()
# 显示训练集中0-11号图像
X, y = mnist_train[0:12]
show_fashion_mnist(X, get_fashion_mnist_labels(y))
batch_size = 256
# Vision Transforms: Transforms can be used to augment input data during training. You can compose multiple transforms sequentially
# ToTensor: Converts an image NDArray to a tensor NDArray.
# 通过ToTensor类将图像数据从 uint8 格式变换成 32 位浮点数格式,并除以 255 使得所有像素的数值均在 0 到 1 之间。
# ToTensor类还将图像通道从最后一维移到最前一维来方便之后介绍的卷积神经网络计算。
transformer = gdata.vision.transforms.ToTensor()
# Gluon的DataLoader允许使用多进程来加速数据读取(暂不支持 Windows 操作系统)
# 通过参数num_workers来设置4个进程读取数据。
if sys.platform.startswith('win'):
num_workers = 0
else:
num_workers = 4
# transform_first(fn, lazy=True): Returns a new dataset with the first element of each sample transformed by the transformer function fn.
# 通过数据集的transform_first函数,我们将ToTensor的变换应用在每个数据样本(图像和标签)的第一个元素,即图像之上。
# class mxnet.gluon.data.DataLoader(dataset, batch_size=None, shuffle=False, sampler=None, last_batch=None, batch_sampler=None,
# batchify_fn=None, num_workers=0, pin_memory=False, prefetch=None)
train_iter = gdata.DataLoader(mnist_train.transform_first(transformer),
batch_size, shuffle=True,
num_workers=num_workers)
# print(train_iter)
test_iter = gdata.DataLoader(mnist_test.transform_first(transformer),
batch_size, shuffle=False,
num_workers=num_workers)
# print(test_iter)
start = time.time()
for X, y in train_iter:
continue
print('%.2f sec' % (time.time() - start)) | fulinli/DeepLearning_MXNet | Fashion-MNIST.py | Fashion-MNIST.py | py | 3,590 | python | zh | code | 0 | github-code | 36 |
13536005652 | from lib import setter, getter, io_tools
import argparse
# CLI: rewrite the SiteWhitelist of one pileup sample across every campaign
# that uses it, then write the updated config out.
parser = argparse.ArgumentParser()
parser.add_argument("--config", type = str, help = "path to campaigns config json file")
parser.add_argument("--PU", type = str, help = "name of the pileup sample to set sitewhitelist for")
parser.add_argument("--sites", type = str, nargs = "*", help = "site whitelist for the pileup")
args = parser.parse_args()
config_dict = io_tools.import_jsonfile_as_OrderedDict(args.config)
# All campaigns that reference this pileup sample as a secondary dataset.
campaigns = getter.get_campaigns_given_PU(config_dict, args.PU)
for campaign in campaigns:
    config_dict[campaign]['secondaries'][args.PU]['SiteWhitelist'] = args.sites
# NOTE(review): output path is hard-coded to ./campaigns.json, not
# args.config — the input file is never modified in place; confirm intended.
io_tools.export_dict_to_jsonfile(config_dict, 'campaigns.json')
| tyjyang/CampaignManager | scripts/set-sitewhitelist-for-PU.py | set-sitewhitelist-for-PU.py | py | 712 | python | en | code | 0 | github-code | 36 |
4013502332 | # 문제 출처 : https://programmers.co.kr/learn/courses/30/lessons/12982
def solution(d, budget):
    """Return how many departments can be fully funded within `budget`.

    Greedy: fund the cheapest requests first; stop at the first request
    that no longer fits.

    Args:
        d: list of requested amounts, one per department.
        budget: total budget available.
    """
    count = 0
    # Iterate a sorted copy; the caller's list is left untouched
    # (the original rebound the parameter with `d = sorted(d)`).
    for cost in sorted(d):
        if budget < cost:
            break  # cheapest remaining request already exceeds the budget
        budget -= cost
        count += 1
    # FIX: removed the leftover debug `print(answer)` before returning.
    return count
| ThreeFive85/Algorithm | Programmers/level1/budget/budget.py | budget.py | py | 298 | python | en | code | 1 | github-code | 36 |
951612582 | pkgname = "less"
pkgver = "643"
pkgrel = 0
build_style = "gnu_configure"
configure_args = ["--with-regex=posix"]
make_cmd = "gmake"
hostmakedepends = ["gmake"]
makedepends = ["ncurses-devel"]
checkdepends = ["perl"]
pkgdesc = "Pager program similar to more(1)"
maintainer = "q66 <q66@chimera-linux.org>"
license = "custom:less OR GPL-3.0-or-later"
url = "http://www.greenwoodsoftware.com/less"
source = f"http://www.greenwoodsoftware.com/less/less-{pkgver}.tar.gz"
sha256 = "2911b5432c836fa084c8a2e68f6cd6312372c026a58faaa98862731c8b6052e8"
hardening = ["vis", "cfi"]
# less -> perl -> less cycle
options = ["!check"]
def post_install(self):
    # Ship the license text (less is dual-licensed, see `license` above).
    self.install_license("LICENSE")
    # Install helper scripts shipped in this template's files/ directory.
    self.install_file(self.files_path / "lesspipe.sh", "usr/bin", mode=0o755)
    self.install_file(
        self.files_path / "zless.sh", "usr/bin", mode=0o755, name="zless"
    )
    # Provide more(1) as a symlink to less, man page included.
    self.install_link("less", "usr/bin/more")
    self.install_link("less.1", "usr/share/man/man1/more.1")
    # The zless wrapper handles all common compressors; expose the
    # conventional per-format command names as links to it.
    self.install_link("zless", "usr/bin/bzless")
    self.install_link("zless", "usr/bin/xzless")
    self.install_link("zless", "usr/bin/lzless")
    self.install_link("zless", "usr/bin/zstdless")
configure_gen = []
| chimera-linux/cports | main/less/template.py | template.py | py | 1,187 | python | en | code | 119 | github-code | 36 |
70110044584 | from django.contrib.auth import get_user_model
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from library_test_project.users.models import ScoreAbs
User = get_user_model()
class Author(models.Model):
    """A book author, identified only by name."""
    name = models.CharField(_("Name of author"), max_length=255)
class Genre(models.Model):
    """A literary genre a book can belong to."""
    name = models.CharField(_("Name of genre"), max_length=255)
class Book(ScoreAbs, models.Model):
    """A book with one author and one genre.

    ScoreAbs presumably contributes aggregate-score fields — confirm in
    library_test_project.users.models. User ratings live on the
    BookScoredUsers through model.
    """
    author = models.ForeignKey(Author, on_delete=models.CASCADE, related_name="books", verbose_name=_("author"))
    # NOTE(review): related_name="genre" makes the reverse accessor
    # Genre.genre — likely meant "books"; confirm before changing (callers
    # may already depend on it).
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE, related_name="genre", verbose_name=_("genre"))
    name = models.CharField(_("Name of book"), max_length=255)
    description = models.TextField(_("Description"))
    # Set once when the row is created (auto_now_add), never updated.
    published_date = models.DateTimeField(_("Published date"), auto_now_add=True)
    scored_users = models.ManyToManyField(User, through="BookScoredUsers")
class Comment(models.Model):
    """A user's free-text comment on a book."""
    owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name="comments", verbose_name=_("Owner"))
    book = models.ForeignKey(Book, on_delete=models.CASCADE, related_name="comments", verbose_name=_("Book"))
    text = models.TextField(_("Text"))
    created_at = models.DateTimeField(_("Date of creation"), auto_now_add=True)
class UserFavoriteBooks(models.Model):
    """Join table marking a book as a user's favorite (at most once)."""
    book = models.ForeignKey(Book, on_delete=models.CASCADE, related_name="favorited_users")
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="favorites")
    class Meta:
        # A user can favorite a given book only once.
        unique_together = ["book", "user"]
class BookScoredUsers(models.Model):
    """Through model for Book.scored_users: one score (1-10) per user per book."""
    book = models.ForeignKey(Book, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Validators enforce the 1-10 range at form/serializer validation time.
    score = models.FloatField(validators=[MinValueValidator(1), MaxValueValidator(10)])
    class Meta:
        # A user may rate a given book only once.
        unique_together = ["book", "user"]
| Bakdolot/library_test_project | library_test_project/library/models.py | models.py | py | 1,968 | python | en | code | 0 | github-code | 36 |
8139333027 | '''
Æfingarverkefni 7 recursion
Hrólfur Gylfason
31/10/2018
'''
def finnaHeildarsummu(tala, summa = 0):
    """Recursively compute 1 + 2 + ... + tala.

    `summa` is the accumulator threaded through the recursion; callers
    normally omit it.
    """
    if tala <= 0:
        return summa
    return finnaHeildarsummu(tala - 1, summa + tala)
def finnaHeildarsummuOdda(tala, summa = 0):
    """Recursively compute the sum of all odd numbers in 1..tala.

    `summa` is the accumulator threaded through the recursion; callers
    normally omit it.
    """
    if tala <= 0:
        return summa
    if tala % 2 == 1:
        summa += tala
    return finnaHeildarsummuOdda(tala - 1, summa)
valmynd = ""
while valmynd != "3":
for tel in range(50):#Þessi for lúppa gerir línu sem er auðvelt að stjórna stærðinni á
print("-",end="")
print("\n")#Þetta er til þess að gera tvö enter
print("Ýttu á 1 til þess að fá dæmi 1")
print("Ýttu á 2 til þess að fá dæmi 2")
print("Ýttu á 3 til þess að hætta")
valmynd = input("-------------------->>> ")#Hérna velur notandinn hvaða lið hann ætlar að fara í
print()#Þetta er til þess að gera enter
for tel in range(50):#Þessi for lúppa gerir línu sem er auðvelt að stjórna stærðinni á
print("-",end="")
print()#Þetta er til þess að gera enter
if valmynd == "1":#Liður 1
print("Úr return:",finnaHeildarsummu(7))
elif valmynd == "2":#Liður 2
tala = 24
heildarsumma = finnaHeildarsummuOdda(tala)
print("Heildarsumma oddatalna niður og með "+str(tala)+":",heildarsumma)
elif valmynd == "3":#Þetta er til þess að það komi ekki "ERROR Sláðu inn tölu á milli 1 og 3" þegar maður er að hætta í forritinu
pass
else:
print("ERROR\tSláðu inn tölu á milli 1 og 3")
| hrolfurgylfa/Forritun | Python/FORR2HF05CU/Æfingarverkefni/14. Æfingarverkefni 7 recursion/Æfingarverkefni_7.py | Æfingarverkefni_7.py | py | 1,693 | python | is | code | 0 | github-code | 36 |
21694318257 | import os
import numpy as np
import matplotlib.pyplot as plt
import re
from io import StringIO
from skimage.external.tifffile import imsave
from scipy.interpolate import griddata
from scipy.signal import medfilt
def GetChunkFromTextFile(FileName, StartStr, StopStr, skip_header=0, skip_footer=0, LastHit=True, DataType='array'):
    """Extract the text between StartStr and StopStr in a file.

    StartStr/StopStr are used as regex fragments (not escaped), matching
    the original behavior. DataType selects the return form:
      'raw'   -> matched text, after skip_header/skip_footer line trimming
      'float' -> matched text converted to a single float
      'array' -> a numpy array parsed with np.genfromtxt
    LastHit=True returns the last match in the file, otherwise the first.
    Returns None when the file cannot be read, no match exists, or
    DataType is unrecognized (the original raised NameError in that case).
    """
    DataType = DataType.lower()
    # Read the whole file up front; the regex search needs the full text.
    try:
        with open(FileName, 'r') as myfile:
            data = myfile.read()
    except OSError:
        # FIX: narrowed from a bare `except:` so programming errors are
        # not silently swallowed; only I/O failures are expected here.
        print('Failed to open ' + FileName + '. Skipping.')
        return None
    # Non-greedy match of everything between the start and stop strings;
    # re.S lets '.' cross newlines.
    reout = re.compile('%s(.*?)%s' % (StartStr, StopStr), re.S)
    if LastHit:
        hits = reout.findall(data)
        if not hits:
            return None  # requested section not present in the file
        SectionStr = hits[-1]
    else:
        match = reout.search(data)
        if match is None:
            return None
        SectionStr = match.group(1)
    if DataType == 'raw':
        # Apply skip_header / skip_footer trimming to the raw text.
        SectionData = ''.join(SectionStr.splitlines(True)[skip_header:])
        if skip_footer > 0:
            SectionData = ''.join(SectionData.splitlines(True)[:-skip_footer])
        return SectionData
    if DataType == 'float':
        # FIX: `np.float` was removed in NumPy 1.24; use the builtin.
        return float(SectionStr)
    if DataType == 'array':
        # Convert the chunk into a numpy array.
        return np.genfromtxt(StringIO(SectionStr), skip_header=skip_header, skip_footer=skip_footer, dtype=None)
    return None
def ReadXSFVolume(FileName, verbose=True, WFOffset=(0,0,0), Cutoff=0.0):
    """Read the 3D datagrid from an XCrySDen .xsf file and resample it
    onto a regular cartesian grid with cubic voxels.

    Parameters:
        FileName: path to the .xsf file.
        verbose:  print grid dimensions, origin and metric while parsing.
        WFOffset: (x, y, z) shift in angstroms added to the grid origin
                  (e.g. to recenter a Wannier function).
        Cutoff:   after normalization, values in (-Cutoff, Cutoff) are
                  replaced by 1e-9.

    Returns:
        (X, Y, Z, V): coordinate meshes and the resampled float32 volume.
        V is normalized to unit total sum before the cutoff is applied.
    """
    print(FileName)
    Datagrid = GetChunkFromTextFile(FileName,'BEGIN_DATAGRID_3D_UNKNOWN','END_DATAGRID_3D', DataType='raw')
    lines = Datagrid.splitlines()
    # Line 0 is the 'BEGIN_DATAGRID_3D_UNKNOWN' header.
    # Line 1 is the x, y, z dimensions of the cube in pixels.
    xPixels, yPixels, zPixels = map(int, lines[1].split())
    if verbose==True:
        print(f'Dimension of data cube is ({xPixels}, {yPixels}, {zPixels}) pixels.')
    # Line 2 is the origin.
    xOrigin, yOrigin, zOrigin = map(float, lines[2].split())
    if verbose==True:
        print(f'Origin of data cube is ({xOrigin}, {yOrigin}, {zOrigin}) angstroms.')
    # Lines 3-5 are the metric (or identity matrix if this is a cube with sides of length 1).
    Mstr = ' '.join(lines[3:6])
    M = np.array(list(map(float, Mstr.split()))).reshape(3,3).T
    if verbose==True:
        print('Metric is:')
        print(M)
    # All the rest of the lines are the volume values.
    vstr = ' '.join(lines[6:])
    v = np.array(list(map(float, vstr.split()))).reshape(xPixels, yPixels, zPixels)
    # Next we need a datacube which encompases the entire volume.
    # Make a cartesian grid of width 1 but same number of pixels as the xsf datacube.
    yp,xp,zp = np.meshgrid(np.linspace(0,1,xPixels), np.linspace(0,1,yPixels), np.linspace(0,1,zPixels))
    # Transform those coordinates to the same coordinate system as the xsf datacube.
    C = np.stack([xp,yp,zp], axis=0)
    x,y,z = np.einsum('ij,jklm->iklm', M,C)
    # Shift the origin to zero.
    x += xOrigin + WFOffset[0]
    y += yOrigin + WFOffset[1]
    z += zOrigin + WFOffset[2]
    # The cube x,y,z now represents the coordinates of the actual space that the orbital exists in.
    # we want to resample now using a new larger cube that includes the Wannier function.
    # Find the bounds of the cube.
    xmin = np.min(x); xmax = np.max(x);
    ymin = np.min(y); ymax = np.max(y);
    zmin = np.min(z); zmax = np.max(z);
    # Calculate the pixel sizes from the previous coordinate system.
    dx = np.linalg.norm(M.T[:,0])/xPixels
    dy = np.linalg.norm(M.T[:,1])/yPixels
    dz = np.linalg.norm(M.T[:,2])/zPixels
    # We want our new pixels to be square, so choose the smallest dx,dy,dz.
    dx = dy = dz = np.min([dx,dy,dz])
    # Calculate how many pixels that now is in our new cube.
    nx = np.ceil((xmax-xmin)/dx).astype(int)
    ny = np.ceil((ymax-ymin)/dy).astype(int)
    nz = np.ceil((zmax-zmin)/dz).astype(int)
    Y,X,Z = np.meshgrid(np.linspace(xmin,xmax,nx), np.linspace(ymin,ymax,ny), np.linspace(zmin,zmax,nz))
    # We are going to interpolate using griddata.
    # It expects an (n,D) array of points, whereas we have (x,y,z,D)
    # So collapse the first three dimensions (kind of, ravel all but the last dimension).
    xyz = np.stack([x,y,z],axis=3).reshape(-1,3)
    xyz.shape  # NOTE(review): no-op expression — notebook leftover, safe to delete.
    XYZ = np.stack([X,Y,Z],axis=3).reshape(-1,3)
    XYZ.shape  # NOTE(review): no-op expression — notebook leftover, safe to delete.
    # And interpolate/extrapolate v->V from xyz->XYZ.
    V = griddata(xyz, v.ravel(), XYZ, method='nearest')
    # Now that we are interpolated, reshape back to (x,y,z,D).
    V = V.reshape(X.shape)
    # Since we use nearest interpolation it comes out a bit noisy. Fix it.
    V = medfilt(V)
    # # Now eliminate values close to zero.
    # # Vnew = np.zeros(V.shape)
    # # Vnew[V>Cutoff] = V
    # print(Cutoff)
    # Vind1 = V<Cutoff
    # Vind2 = V>(-Cutoff)
    # Vind = Vind1&Vind2
    # print(Vind)
    # V[Vind] = 1e-25
    # Our pixel sizes are different, and medfilt can also change the amplitudes a little.
    # Renormalize so that the total intensity in our new cube is the same as outside the cube.
    # NOTE(review): only the division is active (the `V *= np.sum(v)` step is
    # commented out), so V is normalized to unit total, NOT to the original
    # total intensity as the comment above suggests — confirm intended.
    V /= np.sum(V)
    # V *= np.sum(v)
    # Note this will fail if the edge of the cube doesn't have zeros or close because the extrapolation
    # will extend that edge value out...
    # Now eliminate values close to zero.
    # Vnew = np.zeros(V.shape)
    # Vnew[V>Cutoff] = V
    print(Cutoff)  # NOTE(review): debug print — consider removing or gating on `verbose`.
    Vind1 = V<Cutoff
    Vind2 = V>(-Cutoff)
    Vind = Vind1&Vind2
    # Replace near-zero voxels with a tiny positive floor value.
    V[Vind] = 1e-9
    return(X, Y, Z, V.astype('float32'))
if __name__ == '__main__':
X,Y,Z,V = ReadXSFVolume('NiO_00001.xsf', verbose=False) #, Cutoff=0.001) #, WFOffset=(0,0,3.5945353))
imsave('NiO_00001.tif', V)
print('Done.')
| ZGainsforth/QEScripts | Wannier/ReadXSFVolume.py | ReadXSFVolume.py | py | 6,099 | python | en | code | 4 | github-code | 36 |
23591319702 | from imdb import IMDb
import pickle
import os
DIR = 'movies/'
movie_files = os.listdir('movies')
actors_list = list()
# for file in movie_files:
# with open(DIR + file, 'rb') as file:
# movie = pickle.loads(file.read())
# with open(DIR + movie.movieID + "_actors.txt", "w", encoding='utf-8') as file:
# try:
# for actor in movie['cast']:
# actors_list.append(actor)
# except Exception as e:
# pass
# finally:
# pass
for file in movie_files:
if file.endswith('.txt'):
with open(DIR + file, 'r', encoding='utf-8') as file:
for line in file:
actors_list.append(line)
with open('all_actors.txt', 'w', encoding='utf-8') as file:
for actor in list(sorted(set(actors_list))):
file.write(actor) | 7tg/networkx | actors.py | actors.py | py | 908 | python | en | code | 1 | github-code | 36 |
36950841559 | import sys
sys.path.append("/mnt/data0/ravi/work/wiredtiger/bench/workgen/runner")
from runner import *
from wiredtiger import *
from workgen import *
''' The original wtperf input file follows:
# This workload uses several tens of thousands of tables and the workload is evenly distributed
# among them. The workload creates, opens and drop tables, and it generates warning if the time
# taken is more than the configured max_idle_table_cycle.
conn_config="cache_size=10G,eviction=(threads_min=4,threads_max=4),file_manager=(close_idle_time=30),session_max=1000"
table_config="type=file"
table_count=15000
#max_idle_table_cycle=2
# Uncomment to fail instead of generating a warning
# max_idle_table_cycle_fatal=true
random_range=1500000000
pareto=10
range_partition=true
report_interval=5
checkpoint_threads=1
checkpoint_interval=30
populate_threads=1
#pre_load_data=true
# Uncomment to skip the populate phase, and use a database from a previous run as the baseline.
# create=false
icount=15000000
run_time=900
threads=((count=10,inserts=1,throttle=1000),(count=10,reads=1))
max_latency=1000
sample_interval=5
sample_rate=1
'''
context = Context()
conn_config = ""
conn_config += ",cache_size=10G,eviction=(threads_min=4,threads_max=4),file_manager=(close_idle_time=30),session_max=1000,statistics=[all,clear],statistics_log=(wait=1,json=false,on_close=true)" # explicitly added
conn = context.wiredtiger_open("create," + conn_config)
s = conn.open_session("")
wtperf_table_config = "key_format=S,value_format=S," +\
"exclusive=true,allocation_size=4kb," +\
"internal_page_max=64kb,leaf_page_max=4kb,split_pct=100,"
compress_table_config = ""
table_config = "type=file"
tables = []
table_count = 15000
for i in range(0, table_count):
tname = "table:test" + str(i)
table = Table(tname)
s.create(tname, wtperf_table_config +\
compress_table_config + table_config)
table.options.key_size = 20
table.options.value_size = 100
table.options.range = 101000
tables.append(table)
populate_threads = 1
icount = 15000000
random_range = 1500000000
pop_ops = Operation(Operation.OP_INSERT, tables[0])
pop_ops = op_populate_with_range(pop_ops, tables, icount, random_range, populate_threads)
pop_thread = Thread(pop_ops)
pop_workload = Workload(context, populate_threads * pop_thread)
ret = pop_workload.run(conn)
assert ret == 0, ret
ops = Operation(Operation.OP_INSERT, tables[0], Key(Key.KEYGEN_PARETO, 0, ParetoOptions(10)))
# Updated the range_partition to False, because workgen has some issues with range_partition true.
# Revert it back after WT-7332.
ops = op_multi_table(ops, tables, False)
thread0 = Thread(ops)
thread0.options.throttle=1000
thread0.options.throttle_burst=1.0
ops = Operation(Operation.OP_SEARCH, tables[0], Key(Key.KEYGEN_PARETO, 0, ParetoOptions(10)))
ops = op_multi_table(ops, tables, False)
thread1 = Thread(ops)
ops = Operation(Operation.OP_SLEEP, "30") + \
Operation(Operation.OP_CHECKPOINT, "")
checkpoint_thread = Thread(ops)
workload = Workload(context, 10 * thread0 + 10 * thread1 + checkpoint_thread)
workload.options.report_interval=5
workload.options.run_time=900
workload.options.max_latency=60
workload.options.sample_rate=1
workload.options.sample_interval_ms = 5000
# Uncomment to fail instead of generating a warning
# workload.options.max_idle_table_cycle_fatal = True
workload.options.max_idle_table_cycle = 2
ret = workload.run(conn)
assert ret == 0, ret
latency_filename = context.args.home + "/latency.out"
latency.workload_latency(workload, latency_filename)
conn.close()
| mongodb/mongo | src/third_party/wiredtiger/bench/workgen/runner/many-dhandle-stress.py | many-dhandle-stress.py | py | 3,579 | python | en | code | 24,670 | github-code | 36 |
34866939002 | import math
from src.getTickers import *
from src.importData import *
from backtrader.indicators import ema
import datetime
GOINGDOWN_DAYS = 60
def hasNotIncreaseTooMuch(datahigh, datalow):
    """Return True if, over the last 5 bars, the highest high is below
    130% of the lowest low — i.e. the price has not spiked recently.

    `datahigh`/`datalow` are indexed with negative offsets (backtrader
    line convention: -1 is the previous bar).
    """
    highest = 0
    lowest = 10000
    for i in range(-5, 0):
        highest = max(highest, datahigh[i])
        lowest = min(lowest, datalow[i])
    # BUG FIX: the original returned `datahigh < datalow*1.3`, comparing
    # the raw series objects and ignoring the extremes computed above.
    return highest < lowest * 1.3
def todayIsLowest(dataclose):
    """Return True when today's close (index 0) is at or below every close
    in the lookback window (offsets -GOINGDOWN_DAYS .. -2), capped at 10000.

    NOTE: offset -1 (yesterday) is excluded by range(-GOINGDOWN_DAYS, -1),
    matching the original behavior.
    """
    floor = 10000
    for offset in range(-GOINGDOWN_DAYS, -1):
        if dataclose[offset] < floor:
            floor = dataclose[offset]
    return dataclose[0] <= floor
def todayIsLowestClose(datalastclose, datalow):
    """Return True when `datalastclose` is at or below every low in the
    lookback window (offsets -GOINGDOWN_DAYS .. -2), capped at 10000.
    """
    window = [datalow[back] for back in range(-GOINGDOWN_DAYS, -1)]
    # The trailing 10000 reproduces the original's initial cap value.
    return datalastclose <= min(window + [10000])
def findHighest(dataHighest):
    """Return the maximum of dataHighest at offsets -1 .. -(len-1),
    floored at 0 (index 0 itself is excluded, as in the original)."""
    peak = 0
    for back in range(1, len(dataHighest)):
        peak = max(peak, dataHighest[-back])
    return peak
class zhaoMaoPiao(bt.Strategy):
    """Backtrader screener: on one hard-coded date, flag tickers whose
    close is far below the historical high, is a multi-week low, and is
    cheap in absolute terms.

    Relies on the module globals CURRENT_TICKER / SELECTED_TICKERS set by
    the driver loop at the bottom of this file.
    """
    def log(self, txt, dt=None):
        # Logging helper: prefix `txt` with a date (defaults to the
        # current bar's date).
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))
    def __init__(self):
        # NOTE(review): both EMAs are computed but never consulted in
        # next() — candidates for removal.
        self.ema18 = bt.ind.EMA(self.data, period=18)
        self.ema60 = bt.ind.EMA(self.data, period=60)
        self.dataClose = self.datas[0].close
        self.dataHigh = self.datas[0].high
        self.dataLow = self.datas[0].low
    def next(self):
        # Require at least GOINGDOWN_DAYS bars of history before screening.
        isGoingDownLongEnough = len(self) > GOINGDOWN_DAYS
        # NOTE(review): the screening date is hard-coded; parameterize it
        # to reuse the strategy on other dates.
        today = datetime.date(2021, 6, 11)
        curdate = self.datetime.date(ago=0) # 0 is the default
        if(isGoingDownLongEnough and curdate==today):
            # Highest high over the loaded history (index 0 excluded by
            # findHighest).
            compareData = findHighest(self.dataHigh)
            print(curdate)
            # Select when price dropped >33% from the peak, today is the
            # lowest close of the lookback window, and the close is < 20.
            if(self.dataClose[0] < compareData/1.5 and
                todayIsLowest(self.dataClose) and
                self.dataClose[0] < 20):
                if CURRENT_TICKER not in SELECTED_TICKERS:
                    print(CURRENT_TICKER)
                    print(curdate)
                    print(self.dataClose[0])
                    print(compareData)
                    SELECTED_TICKERS.append(CURRENT_TICKER)
#print('date %s, current price %.2f, previous price %.2f' % (self.datas[0].datetime.datetime(), self.sampleData.close[0], self.sampleData.close[-1]))
tickers = getAllTickers()
for ticker in tickers:
data0 = getDataFromYahooFinance(ticker)
cerebro = bt.Cerebro()
cerebro.addstrategy(zhaoMaoPiao)
cerebro.adddata(data0)
# print('----------------------------')
print('Checking ticker: %s' % ticker)
# print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
CURRENT_TICKER = ticker
SELECTED_FLAG = False
cerebro.run()
print(SELECTED_TICKERS)
| lumeng3/luluquant | src/strategy/goingDown.py | goingDown.py | py | 2,672 | python | en | code | 1 | github-code | 36 |
30820901838 | #Extracts second-column values from .dat files and prints them out, comma-separated, so they can be used as a colormap in VARNA
#It'll do this for all .dat files you have in your directory. If you don't want this feature just comment out everything with read_files in it
#and unindent as needed.
#I also plot out the values for just A/C reads.
#I'm also printing out Yeo-Johnson or arcsinh-transformed reads--this is useful if there's a wide range of values [0 included] and you don't want a high-read nt to affect your colormap visualization dramatically.
#I also plot reads for a given sequence transformed both ways for the sake of comparison.
#If you're curious about Yeo-Johnson--its main benefit is that it can transform exponentially distributed data into normally-distributed data, with the additional perk of being able to deal with negative/zero values [unlike a Boxcox transform]
#https://machinelearningmastery.com/how-to-transform-data-to-fit-the-normal-distribution/ does a nice job explaining what the Yeo-Johnson is/what it does.
import re
import numpy as np
import glob
import matplotlib.pyplot as plt
from sklearn.preprocessing import PowerTransformer
read_files = glob.glob("*.dat")
sequences = open("21_cleavage_windows_final.txt", "r")
all_sequences = {}
for line in sequences:
if ">" in line:
seqname = line[1:-1]
else:
all_sequences[seqname]=line[:-1]
sequences.close()
j = 1
for datfile in read_files:
infile = open(datfile, "r")
#comment out this regex stuff if your .dat file isn't named "gene.dat"--with my naming convention this extracts the gene name for me
regex = r"^[a-zA-Z]+"
matches = re.findall(regex, datfile) #say the filename is atpi.dat. This extracts "atpi"
name = matches[0]
values = [] #array of all second-column values, i.e. the values of interest for the colormap
for line in infile:
reads = line.split("\t")[1] #Each line is tab-separated. We want the value in the second column.
reads = reads[:-1] #There's a \n at the end of the "reads" value, which counts as a single character.
values.append(reads)
values = np.array(values[:]).astype(float)
ac_values = []
sequence = all_sequences[name]
for i in range(len(sequence)):
if sequence[i]=="A" or sequence[i]=="C":
ac_values.append(values[i]) #only add dms reads corresponding to A/C nts to ac_values
#########plotting reads for all nts###########
'''
plt.figure(j)
plt.hist(values, color="lemonchiffon", bins=np.arange(0, max(values)+2,1.0), edgecolor="darkgoldenrod",align="mid")
plt.xticks(np.arange(min(values), max(values)+2, 1.0),rotation="vertical")
plt.autoscale()
plt.xlabel("Read count")
plt.ylabel("Frequency")
plt.title(name+" DMS untransformed reads")
j += 1
plt.draw()
'''
values_to_transform = values[:] #The dms values were strings earlier--we need to convert to floats to manipulate
#log transform
for i in range(len(values_to_transform)):
value = values_to_transform[i]
if value == 0:
values_to_transform[i] = 1e-7 #add as a pseudocount
transformed_vals = np.log(values_to_transform)
#This gets a bit convoluted. Basically I find the second-smallest value in transformedvals [so, the smallest nonzero value], add that value to all values in
#transformedvals and then set any negative values to 0
findmin = transformed_vals[:]
minval = min(findmin)
findmin = findmin[findmin!=minval] #from https://stackoverflow.com/questions/53541156/how-to-remove-all-occurrences-of-an-element-from-numpy-array
smallestnonzero = min(findmin)
offset = 1 #set the second-lowest values to 1
transformed_vals = [i+np.abs(smallestnonzero)+offset for i in transformed_vals]
for i in range(len(transformed_vals)):
value = transformed_vals[i]
if value < offset: #if it's <offset it's smaller than smallestnonzero
transformed_vals[i] = 0
#arcsinh transform
#transformed_vals = np.arcsinh(values_to_transform)
#implementing Yeo-Johnson as per https://stackoverflow.com/questions/53624804/how-to-normalize-a-non-normal-distribution
#values_to_transform = values_to_transform.reshape(-1,1) #convert to a 2d array
#pt = PowerTransformer(method='yeo-johnson')
#calculate the right parameters to fit the data [this is lambda from the transform]
#pt.fit(values_to_transform)
#transformed_vals = pt.transform(values_to_transform)
plt.figure(j)
plt.hist(transformed_vals, color="tomato", bins=np.arange(0, max(transformed_vals)+2,1.0), edgecolor="white",align="mid")
plt.xticks(np.arange(min(transformed_vals), max(transformed_vals)+2, 1.0),rotation="vertical")
plt.autoscale()
plt.xlabel("Read count")
plt.ylabel("Frequency")
plt.title(name+" DMS log-transformed reads")
j += 1
plt.draw()
#######plotting reads for a/c only########
'''
plt.figure(j)
plt.hist(ac_values, color="goldenrod", bins=np.arange(0, max(ac_values)+2,1.0), edgecolor="white",align="mid")
plt.xticks(np.arange(min(ac_values), max(ac_values)+2, 1.0),rotation="vertical")
plt.autoscale()
plt.xlabel("Read count")
plt.ylabel("Frequency")
plt.title(name+" DMS untransformed A/C reads")
j += 1
plt.draw()
'''
ac_values_to_transform = ac_values[:] #The dms values were strings earlier--we need to convert to floats to manipulate
#log transform
for i in range(len(ac_values_to_transform)):
value = ac_values_to_transform[i]
if value == 0:
ac_values_to_transform[i] = 1e-7
ac_transformed_vals = np.log(ac_values_to_transform)
#This gets a bit convoluted. Basically I find the second-smallest value in transformedvals [so, the smallest nonzero value], add that value to all values in
#transformedvals and then set any negative values to 0
findminac = ac_transformed_vals[:]
minac = min(findminac)
findminac = findminac[findminac!=minac] #findminac with all instances of the smallest value removed
smallestnonzeroac = min(findminac)
offset = 1 #the difference you want between the smallest [0] value and the second-smallest value
ac_transformed_vals = [i+np.abs(smallestnonzeroac)+offset for i in ac_transformed_vals]
for i in range(len(ac_transformed_vals)):
value = ac_transformed_vals[i]
if value < offset:
ac_transformed_vals[i] = 0
#arcsinh transform
#ac_transformed_vals = np.arcsinh(ac_values_to_transform)
'''
#implementing Yeo-Johnson as per https://stackoverflow.com/questions/53624804/how-to-normalize-a-non-normal-distribution
ac_values_to_transform = np.array(ac_values_to_transform).astype(float).reshape(-1,1) #convert to a 2d array
pt = PowerTransformer(method='yeo-johnson')
#calculate the right parameters to fit the data [this is lambda from the transform]
pt.fit(ac_values_to_transform)
ac_transformed_vals = pt.transform(ac_values_to_transform)
'''
plt.figure(j)
plt.hist(ac_transformed_vals, color="skyblue", bins=np.arange(0, max(ac_transformed_vals)+2,1.0), edgecolor="white",align="mid")
plt.xticks(np.arange(min(ac_transformed_vals), max(ac_transformed_vals)+2, 1.0),rotation="vertical")
plt.autoscale()
plt.xlabel("Read count")
plt.ylabel("Frequency")
plt.title(name+" DMS log-transformed A/C reads")
j += 1
plt.draw()
#print name+" reads:\n" + ",".join(values.astype(str))+"\n" #i.e. print "atpI reads: \n" followed by the reads
#print "Arcsinh-transformed "+name+" reads:\n" + ",".join(transformed_vals.astype(str))+"\n" #i.e. print "arcsinh-transformed atpI reads: \n" followed by the transformed reads
infile.close()
# Display every figure created in the loop above; blocks until the windows are closed.
plt.show()
| gwlilabmit/Ram_Y_complex | paired_prob/plot_dat.py | plot_dat.py | py | 7,427 | python | en | code | 0 | github-code | 36 |
22782858968 | #
# @lc app=leetcode id=240 lang=python3
#
# [240] Search a 2D Matrix II
#
# https://leetcode.com/problems/search-a-2d-matrix-ii/description/
#
# algorithms
# Medium (41.66%)
# Likes: 1941
# Dislikes: 57
# Total Accepted: 218.3K
# Total Submissions: 523.9K
# Testcase Example: '[[1,4,7,11,15],[2,5,8,12,19],[3,6,9,16,22],[10,13,14,17,24],[18,21,23,26,30]]\n5'
#
# Write an efficient algorithm that searches for a value in an m x n matrix.
# This matrix has the following properties:
#
#
# Integers in each row are sorted in ascending from left to right.
# Integers in each column are sorted in ascending from top to bottom.
#
#
# Example:
#
# Consider the following matrix:
#
#
# [
# [1, 4, 7, 11, 15],
# [2, 5, 8, 12, 19],
# [3, 6, 9, 16, 22],
# [10, 13, 14, 17, 24],
# [18, 21, 23, 26, 30]
# ]
#
#
# Given target = 5, return true.
#
# Given target = 20, return false.
#
#
# @lc code=start
class Solution:
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool

        Staircase search from the top-right corner: every comparison discards
        either a whole row (current value too small) or a whole column
        (current value too large), so the scan runs in O(m + n) time and O(1)
        space instead of the previous O(m log n) row-by-row binary search.
        """
        if not matrix or len(matrix) == 0 or len(matrix[0]) == 0:
            return False
        row, col = 0, len(matrix[0]) - 1
        while row < len(matrix) and col >= 0:
            cur = matrix[row][col]
            if cur == target:
                return True
            if cur > target:
                col -= 1  # everything below cur in this column is even larger
            else:
                row += 1  # everything left of cur in this row is even smaller
        return False

    def binarySearch(self, arr, target):
        """Return True iff target occurs in the sorted list arr.

        Kept for backward compatibility with existing callers; also fixed to
        return False on an empty list instead of raising IndexError at the
        final arr[lo]/arr[hi] probe.
        """
        if not arr:
            return False
        lo, hi = 0, len(arr) - 1
        while lo + 1 < hi:
            mid = lo + (hi - lo) // 2
            if target == arr[mid]:
                return True
            elif target > arr[mid]:
                lo = mid
            else:
                hi = mid
        return arr[lo] == target or arr[hi] == target
# @lc code=end
| Zhenye-Na/leetcode | python/240.search-a-2-d-matrix-ii.py | 240.search-a-2-d-matrix-ii.py | py | 1,876 | python | en | code | 17 | github-code | 36 |
33167135913 | from collections import Counter
from contextlib import contextmanager, asynccontextmanager
import logging
import time
logger = logging.getLogger(__name__)
class TimingStats(Counter):
    """Accumulate wall-clock seconds per named key.

    Being a Counter, per-key totals are readable through normal mapping
    access (``stats["parse"]``) and Counter arithmetic.

    Fixes over the original:
    * the per-call ``verbose`` argument of scope()/async_scope() was accepted
      but never consulted (only ``self.verbose`` was); it is now honored.
    * timing is recorded in a ``finally`` block, so a scope whose body raises
      still contributes its elapsed time before the exception propagates.
    """

    def __init__(self, verbose: bool = False):
        super().__init__()
        # When True, every completed scope is logged at DEBUG level.
        self.verbose = verbose

    @contextmanager
    def scope(self, key, *, verbose=False):
        """Time the body of a ``with`` block and add the seconds under ``key``."""
        t1 = time.monotonic()
        try:
            yield
        finally:
            sec = time.monotonic() - t1
            self[key] += sec
            if self.verbose or verbose:
                logger.debug(f"{key} took {sec:.3f} seconds")

    @asynccontextmanager
    async def async_scope(self, key, *, verbose=False):
        """Async counterpart of scope() for ``async with`` blocks."""
        t1 = time.monotonic()
        try:
            yield
        finally:
            sec = time.monotonic() - t1
            self[key] += sec
            if self.verbose or verbose:
                logger.debug(f"{key} took {sec:.3f} seconds")

    def report_strings(self):
        """Return one human-readable "key: N.N sec" line per timed key."""
        return [f"{key}: {sec:.1f} sec" for key, sec in self.items()]
| andrew-landers-by/luman-1584-blob-timeout | luman_1584/timing.py | timing.py | py | 915 | python | en | code | 0 | github-code | 36 |
14722446132 | from pycorenlp import StanfordCoreNLP
import os, json, sys

# NOTE: a Stanford CoreNLP server must already be listening on localhost:9000,
# e.g.  java -mx5g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer
nlp = StanfordCoreNLP('http://localhost:9000')
annotators = "ssplit,ner,depparse"

# NER labels worth keeping in the per-account profile.
ner_keys = ["PERSON", "LOCATION", "ORGANIZATION", "NUMBER", "DATE", "EMAIL",
            "URL", "CITY", "STATE_OR_PROVINCE", "COUNTRY", "NATIONALITY",
            "RELIGION", "TITLE", "IDEOLOGY"]
# Dependency-parse variants scanned for words attached to titles/organizations.
reference_keys = ["basicDependencies", "enhancedDependencies", "enhancedPlusPlusDependencies"]

dataset_path = "C:/Users/Mark/Marco/Magistrale/Anno I/Secondo semestre/DS & ML/Progetto/Social-Mapper-Extended/social_mapper2/dataset/"


def _bucket_for(groups, key):
    """Return the single-key dict for `key` in `groups`, creating it if absent.

    Replaces the original duplicated find/append/find-again loops.
    """
    for entry in groups:
        if key in entry:
            return entry
    entry = {key: []}
    groups.append(entry)
    return entry


for account in os.listdir(dataset_path):
    if account == "log.txt":
        continue
    print(account)

    # Context manager: the original leaked this file handle (never closed).
    with open(dataset_path + account + "/bio.json") as js:
        sentence = json.load(js)
    print(sentence)

    res = nlp.annotate(sentence,
                       properties={
                           'annotators': annotators,
                           'outputFormat': 'json',
                           'timeout': 1000,
                       })
    # On server errors CoreNLP returns a plain error string instead of JSON.
    if isinstance(res, str):
        continue

    nlp_res = {"entities": [], "references": []}
    for sent in res["sentences"]:
        # Title/organization mentions in this sentence; their dependency
        # children are collected as "references" below.
        check_references = []
        for m in sent["entitymentions"]:
            mention = m['text']
            ner = m["ner"]
            if ner not in ner_keys:
                continue
            # (The original also computed a nerConfidences value here but
            # never used it; that dead code has been removed.)
            _bucket_for(nlp_res["entities"], ner)[ner].append(mention)
            if ner in ("TITLE", "ORGANIZATION"):
                check_references.append(mention)

        for k in reference_keys:
            for dependency in sent[k]:
                governor = dependency["governorGloss"]
                if governor not in check_references:
                    continue
                dependents = _bucket_for(nlp_res["references"], governor)[governor]
                dependent = dependency["dependentGloss"]
                if dependent not in dependents:
                    dependents.append(dependent)

    with open(dataset_path + account + "/nlp.json", "w") as js:
        json.dump(nlp_res, js)
json.dump(nlp_res, js) | gaelix98/progetto-fdsml | codici aggiunti/bio_nlp.py | bio_nlp.py | py | 4,109 | python | en | code | 1 | github-code | 36 |
30998043719 | import copy
import utils
from Handler import Handler
# Longest answer (in characters) delivered in one piece — presumably a chat
# message size limit; longer answers are continued via /cont. TODO confirm
# against the paging logic elsewhere in this module.
MAX_LEN = 4000
# Prompt appended when an answer exceeds MAX_LEN.
THE_ANSWER_IS_LONG = "The answer is long, type /cont to continue"
class DefaultValueHandler(Handler):
    """Handler wrapper that substitutes a default query when none is given."""

    def __init__(self, base_handler, default_query):
        self.base_handler = base_handler
        self.default_query = default_query

    def handle(self, query, state):
        """Delegate to the wrapped handler, falling back to the default query."""
        effective_query = query or self.default_query
        answer = self.base_handler.handle(effective_query, state)
        return self.format_answer(**answer)
| petr-kalinin/progrobot | DefaultValueHandler.py | DefaultValueHandler.py | py | 516 | python | en | code | 14 | github-code | 36 |
22772365443 | from tkinter import*
from tkinter import ttk, messagebox
import datetime as dt
import openpyxl
import pandas as pd
import os
import csv
class dataEntry:
    """Tkinter form for recording Business Review audit results.

    Builds a two-frame window (header + input fields), validates the entry,
    appends it to a shared tab-separated .txt log, and mirrors that log into
    an .xlsx workbook on a network share.
    """
    def __init__(self,root):
        """Lay out the window, header frame, and all input widgets on `root`."""
        self.root = root
        self.root.title("Quality tracker")
        self.root.geometry("1000x800+0+0")
        self.root.pack_propagate(False) # tells the root to not let the widgets inside it determine its size.
        self.root.resizable(0, 0)
        # Windows login of the person running the tool; used as the auditor id.
        self.user = os.getlogin()
        #self.bg=ImageTk.PhotoImage(file=r'C:\Users\mutta\Desktop\test1\wallpaper_tk1.jpg')
        #bg=Label(self.root,image=self.bg).place(relwidth = 1, relheight = 1)
        # frame1: dark header band; frame2: white body holding the form fields.
        frame1 = Frame(self.root, bg= "DarkCyan")
        frame1.place(x=0.5, y=0.5, width =2000, height = 80)
        frame2 = Frame(self.root, bg= "White")
        frame2.place(x=0.5, y=80.5, width =2000, height = 1000)
        title = Label(frame1, text= "Business Reviews Audit Entry", font=("times new roman", 20, "bold"), bg = "DarkCyan", fg = 'white').place(x=30,y=30)
        date= dt.datetime.now()
        date = Label(frame2, text=f"{date:%A, %B %d, %Y}", font="Calibri, 10", bg='white', fg='black')
        date.place(x=600, y=2)
        # Auditor login is displayed read-only (a Label), not typed by the user.
        Auditor_login = Label(frame2, text= "Auditor Login:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=30)
        self.txt_Auditor_login = Label(frame2, text= self.user, font = ("calibri", 15, "bold"), bg= "white", fg="black")
        self.txt_Auditor_login.place(x=250, y= 30, width =100)
        File_name = Label(frame2, text= "File Name:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=70)
        self.txt_File_name = Entry(frame2, font = ("times new roman", 10), bg= "lightgray")
        self.txt_File_name.place(x=250, y= 75, width =250)
        Marketplace = Label(frame2, text= "Marketplace:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=110)
        self.cmb_Marketplace = ttk.Combobox(frame2, font = ("times new roman", 12), state= "readonly", justify=CENTER)
        self.cmb_Marketplace['values']=("Select","EN","DE","FR","IT","JP","ES","UK","CA","IN","None")
        self.cmb_Marketplace.place(x=250, y= 115, width =100)
        self.cmb_Marketplace.current(0)
        Audit_sample = Label(frame2, text= "Audit Sample:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=150)
        self.txt_Audit_sample = Entry(frame2, font = ("times new roman", 15), bg= "lightgray")
        self.txt_Audit_sample.place(x=250, y= 155, width =100)
        Error_count = Label(frame2, text= "Error Count:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=190)
        self.txt_Error_count =Entry(frame2, font = ("times new roman", 15), bg= "lightgray")
        self.txt_Error_count.place(x=250, y= 195, width =100)
        Classifier_login = Label(frame2, text= "Classifier login:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=230)
        self.txt_Classifier_login = Entry(frame2, font = ("times new roman", 15), bg= "lightgray")
        self.txt_Classifier_login.place(x=250, y= 235, width =100)
        button = Button(text = 'Submit', font = ("times new roman", 15),bg='DarkCyan', fg='white', cursor="hand2", command = self.auditDetails).place(x=500, y= 450, width = 100)
    def clear(self):
        """Reset every input widget to its empty/default state after a submit."""
        self.txt_File_name.delete(0,END)
        self.cmb_Marketplace.current(0)
        self.txt_Audit_sample.delete(0,END)
        self.txt_Error_count.delete(0,END)
        self.txt_Classifier_login.delete(0,END)
    def auditDetails(self):
        """Validate the form, compute accuracy, and persist the audit entry.

        Appends one tab-separated row to audit_details.txt, regenerates the
        companion .xlsx from it, then shows an accuracy message box.
        """
        # NOTE(review): txt_Auditor_login is a Label widget, so the == ""
        # comparison below is always False; the intended check may have been
        # on self.user. Left untouched here — confirm before changing.
        if self.txt_Auditor_login=="" or self.txt_File_name.get()=="" or self.cmb_Marketplace.get()=="" or self.txt_Audit_sample.get()=="" or self.txt_Error_count.get()=="" or self.txt_Classifier_login.get()=="":
            messagebox.showerror("Oops, Error!","All fields are mandatory", parent=self.root)
        elif str(self.user)==str(self.txt_Classifier_login.get()):
            messagebox.showerror("Oops, Error!","Auditor ID can't be same as Classifier ID", parent=self.root)
        else:
            try:
                al = self.user
                fn = self.txt_File_name.get()
                mp = self.cmb_Marketplace.get()
                asc =self.txt_Audit_sample.get()
                ec =self.txt_Error_count.get()
                cl = self.txt_Classifier_login.get()
                dtn = dt.datetime.now()
                dtns = dtn.strftime("%d-%m-%Y")
                # NOTE(review): raises ZeroDivisionError when Audit Sample is 0
                # and ValueError for non-numeric input; both fall through to
                # the broad except below and surface as an error dialog.
                accuracy = int((int(asc)-int(ec))*100/int(asc))
                '''
                df1 = pd.DataFrame({"Auditor login": [al],"File Name":[fn], "Marketplace":[mp],"Audit Sample":[asc],"Error Count":[ec],"Classifier login":[cl],"Date":[dtns]})
                df2 = pd.read_excel(r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx", index_col=[0])
                print(df1)
                print(df2)
                df3 = df2.append(df1, ignore_index=True)
                df3.drop(df3.filter(regex="Unname"),axis=1, inplace=True)
                df3.to_excel((r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx"), index=False)
                #df.to_excel(writer,index=False,header=False,startrow=len(reader)+1)
                '''
                # use incase if .txt output is needed
                audit_fields=["Auditor login","File Name","Marketplace","Audit Sample","Error Count","Classifier login","Date"]
                audit_values=[self.user,self.txt_File_name.get(),self.cmb_Marketplace.get(),self.txt_Audit_sample.get(),self.txt_Error_count.get(),self.txt_Classifier_login.get(),dt.datetime.now()]
                # Append one tab-separated row to the shared text log.
                s= '\n'+al+'\t'+fn+'\t'+mp+'\t'+asc+'\t'+ec+'\t'+cl+'\t'+dtns+'\t'+str(accuracy)
                f= open((r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.txt"),'a')
                f.write(s)
                f.close()
                # converting to excel
                tf_df_new = pd.read_csv(r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.txt", sep = '\t')
                tf_df_new.to_excel(r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx", index=False)
                # deleting unnamed cols
                file = r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx"
                excel_file = openpyxl.load_workbook(file)
                excel_sheet = excel_file['Sheet1']
                # delete column
                excel_sheet.delete_cols(idx=9 , amount=1)
                excel_file.save(file)
                # use incase if .csv output is needed
                '''
                with open(r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx", "a") as fs:
                    w = csv.writer(fs,dialect = 'excel-tab')
                    w.writerow([al,fn,mp,asc,ec,cl,dtns])
                    fs.close()
                '''
                # Below-target accuracy triggers a reassignment alert.
                if accuracy < 98:
                    messagebox.showinfo("Alert!",f"Reassign the file as Classification accuracy: {accuracy}%, is below the 98% target. \n\n Entry Success!", parent=self.root)
                else:
                    messagebox.showinfo("Success!",f"Classification accuracy: {accuracy}%\n\n Entry Success!", parent=self.root)
                self.clear()
            except Exception as es:
                messagebox.showerror("Error",f"Error due to:{str(es)}", parent = self.root)
# Application bootstrap: build the Tk root window and hand it to the form.
# NOTE(review): runs at import time; an `if __name__ == "__main__":` guard
# would make the module importable without opening a window.
root=Tk()
obj=dataEntry(root)
root.mainloop() | muttas/my-projects | BusinessReviews_audit_form.py | BusinessReviews_audit_form.py | py | 8,340 | python | en | code | 0 | github-code | 36 |
9744073954 | import sys
import os
import logging
import urllib
from datetime import datetime, timedelta
from google.appengine.ext import ndb
from google.appengine.api import users
from google.appengine.ext import blobstore
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from common.arguments import *
from common.errors import *
from users.users import build_user_key
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import lib.parsedatetime
# Module-wide logger for asset create/update/check-in/check-out operations.
log = logging.getLogger("assets")
def key(asset_id):
    """Build the ndb Key addressing the Asset entity with the given integer id."""
    return ndb.Key(Asset, int(asset_id))
def create(actor, asset_id=None, asset_key=None, **kwargs):
    """Create a new Asset entity owned by `actor` and persist it.

    The entity key comes from `asset_key` or is derived from `asset_id`;
    field values in `kwargs` are applied (and the entity saved) by update().
    Returns the stored Asset.
    """
    asset = Asset(key=asset_key or key(asset_id))
    asset.created_by = build_user_key(actor)
    asset.condition = "new"
    return update(actor, asset=asset, **kwargs)
def update(actor, asset_id=None, asset_key=None, asset=None, name=undefined,
           description=undefined, serial=undefined, condition=undefined,
           cost=undefined, value=undefined, url=undefined, **ignored):
    """Apply the supplied field values to an asset and persist it.

    Parameters left as the `undefined` sentinel are not touched; an empty
    name is backfilled with the entity's numeric id. Returns the saved Asset.
    """
    asset = asset or (asset_key or key(asset_id)).get()
    # Plain string fields: copy over whatever the caller actually supplied.
    for attr, supplied in (("name", name), ("url", url),
                           ("description", description), ("serial", serial),
                           ("condition", condition)):
        if is_defined(supplied):
            setattr(asset, attr, supplied)
    # Monetary fields are stored as floats; empty/zero values are skipped.
    for attr, supplied in (("cost", cost), ("value", value)):
        if is_defined(supplied) and supplied:
            setattr(asset, attr, float(supplied))
    # Guarantee a displayable name.
    if not asset.name:
        asset.name = str(asset.key.id())
    asset.put()
    return asset
def delete(actor, asset_id=None, asset_key=None, asset=None):
    """Delete an asset entity from the datastore.

    Bug fix: ndb.Model instances have no delete() method — deletion is
    performed through the entity's key. The original `asset.delete()`
    raised AttributeError at runtime.
    """
    asset = asset or get(actor, asset_id, asset_key)
    asset.key.delete()
def get(actor, asset_id=None, asset_key=None, silent=False):
    """Fetch an Asset by key or id.

    Returns the entity when found; when missing, returns None if `silent`
    is set, otherwise raises NotFoundError.
    """
    found = (asset_key or key(asset_id)).get()
    if found:
        return found
    if silent:
        return None
    raise NotFoundError()
def list(actor):
    """Return a (lazily evaluated) query over all Asset entities.

    NOTE: intentionally shadows the builtin ``list``; renaming would break
    external callers of this module-level API.
    """
    return Asset.query()
def search(**ignored):
    """Placeholder: asset search is not implemented yet; always returns None."""
    pass
def check_out(actor, asset=None, asset_key=None, asset_id=None, checked_out_to=undefined,
              project=undefined, expected=undefined, timezoneoffset=None, **ignored):
    """Check an asset out, recording who took it and when it is due back.

    `expected` is a free-form date string parsed with parsedatetime; an empty
    string means "no expected return date". `timezoneoffset` (minutes) shifts
    parsing to the client's local time. Raises IllegalError if the asset is
    already checked out. Returns the stored AssetCheckout.
    """
    asset = asset or get(actor, asset_key=asset_key, asset_id=asset_id)
    if asset.checkout:
        raise IllegalError("Asset is already checked out")

    checkout = AssetCheckout(parent=asset.key)
    checkout.checked_out_by = build_user_key(actor)
    checkout.checked_out_to = build_user_key(actor)  # default; may be overridden below
    checkout.condition_out = asset.condition

    # Parse the free-form "expected return" date, if one was supplied.
    if is_defined(expected) and expected != "":
        if timezoneoffset:
            offset = timedelta(minutes=int(timezoneoffset))
            client_time = datetime.utcnow() - offset
            parsed_time = lib.parsedatetime.Calendar().parse(expected, client_time)
        else:
            # Bug fix: `timedelta` is imported directly from the datetime
            # module; the original `datetime.timedelta(0)` looked the name up
            # on the datetime *class* and raised AttributeError on this branch.
            offset = timedelta(0)
            parsed_time = lib.parsedatetime.Calendar().parse(expected)
        if parsed_time[1] == 1:
            # Date-only parse result: keep just year/month/day.
            checkout.expected = datetime(*parsed_time[0][:3]) + offset
        else:
            checkout.expected = datetime(*parsed_time[0][:6]) + offset

    if is_defined(checked_out_to) and checked_out_to:
        checkout.checked_out_to = build_user_key(checked_out_to)
    if is_defined(project) and project:
        checkout.project = project

    checkout.put()
    asset.checkout = checkout.key
    asset.put()
    return checkout
def check_in(actor, asset=None, asset_key=None, asset_id=None, condition=undefined, **ignored):
    """Check an asset back in, recording who returned it and its condition.

    Raises IllegalError if the asset is not currently checked out.
    Returns the completed AssetCheckout record.
    """
    asset = asset or get(actor, asset_key=asset_key, asset_id=asset_id)
    if not asset.checkout:
        raise IllegalError("Asset is not checked out")
    checkout = asset.checkout.get()
    checkout.checked_in = datetime.now()
    checkout.checked_in_by = build_user_key(actor)
    # Default to the asset's current condition unless the caller supplied one.
    checkout.condition_in = condition if is_defined(condition) else asset.condition
    checkout.put()
    # The asset becomes available again and adopts the checked-in condition.
    asset.checkout = None
    asset.condition = checkout.condition_in
    asset.put()
    return checkout
# Allowed asset condition states, ordered roughly from best to worst.
valid_conditions = ["new", "excellent", "good", "poor", "unusable", "gone"]
class Asset(ndb.Model):
    """Datastore model for a physical asset that can be checked in and out."""
    name = ndb.StringProperty(required=True)
    url = ndb.StringProperty()
    description = ndb.StringProperty()
    serial = ndb.StringProperty()
    # Current physical state; kept in sync with the latest check-in.
    condition = ndb.StringProperty(required=True, default="new", choices=valid_conditions)
    # Purchase cost vs. current estimated value.
    cost = ndb.FloatProperty()
    value = ndb.FloatProperty()
    # Key of the open AssetCheckout record, or None when the asset is available.
    checkout = ndb.KeyProperty(kind='AssetCheckout')
    created_by = ndb.KeyProperty(kind='User')
    created = ndb.DateTimeProperty(auto_now_add=True)
class AssetCheckout(ndb.Model):
    """One borrow/return cycle of an Asset (stored as a child of the asset)."""
    # Check out fields
    checked_out_to = ndb.KeyProperty(kind='User', required=True)
    project = ndb.KeyProperty(kind='Project')
    checked_out = ndb.DateTimeProperty(auto_now_add=True)
    checked_out_by = ndb.KeyProperty(kind='User', required=True)
    condition_out = ndb.StringProperty(required=True, choices=valid_conditions)
    # Optional expected return date parsed from free-form text.
    expected = ndb.DateTimeProperty()
    # Check in fields (unset while the checkout is still open)
    checked_in = ndb.DateTimeProperty()
    checked_in_by = ndb.KeyProperty(kind='User')
    condition_in = ndb.StringProperty(choices=valid_conditions)
| AegisTools/aegis-appengine | modules/assets/assets_private.py | assets_private.py | py | 5,339 | python | en | code | 0 | github-code | 36 |
4728646967 | import time
from io import BytesIO
from typing import List
import pandas as pd
from matplotlib import pyplot as plt
from pandas import DataFrame
from svglib.svglib import svg2rlg
from evaluate.EvaluateCore import PartAngle
import seaborn as sns
plt.rcParams['font.sans-serif'] = ['SimHei']  # Chinese font setting (SimHei) so CJK titles/labels render
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box under CJK fonts
plt.ioff()  # non-interactive mode: figures are serialized, never shown on screen
def get_local_format_time(timestamp):
local_time = time.localtime()
format_time = time.strftime("%Y%m%d%H%M%S", local_time)
return format_time
def generateROMPart(df_angles: pd.DataFrame, parts: list):
    """Build range-of-motion (ROM) report tables for the requested body parts.

    For each PartAngle in `parts`, the min/max of the corresponding joint-angle
    columns in `df_angles` are turned into a bilingual table (header row +
    one row per measurement + a totals row). Returns a list of dicts with
    "title" and "list" (table rows) keys.

    Assumes `df_angles` holds per-frame joint angles in degrees under columns
    such as "LKnee_angle" / "TorsoLFemur_angle" — TODO confirm against the
    upstream evaluation pipeline.
    """
    romPart = []
    for part in parts:
        # Knee: flexion/extension from the knee-angle extrema.
        if part == PartAngle.Knee:
            romPart.append({
                "title": "膝关节活动度",
                "list": [
                    ["参数Parameters", "数值Data", "单位Unit", "参考值Reference"],
                    ["左膝关节伸展\nL.KNEE Extension", str(df_angles["LKnee_angle"].min().round(2)), "°", "0-60"],
                    ["左膝关节屈曲\nL.KNEE Flexion", str(df_angles["LKnee_angle"].max().round(2)), "°", "0-140"],
                    ["右膝关节伸展\nR.KNEE Extension", str(df_angles["RKnee_angle"].min().round(2)), "°", "0-60"],
                    ["右膝关节屈曲\nR.KNEE Flexion", str(df_angles["RKnee_angle"].max().round(2)), "°", "0-140"],
                    ["检测项共计", "", "", "4 项"]
                ]
            })
        # Hip: flexion/extension, ab-/adduction, and internal/external rotation.
        elif part == PartAngle.Hip:
            romPart.append({
                "title": "髋关节活动度",
                "list": [
                    ["参数Parameters", "数值Data", "单位Unit", "参考值Reference"],
                    ["左髋关节伸展\nL.Hip Extension", str(df_angles["TorsoLFemur_angle"].min().round(2)), "°", "0-30"],
                    ["左髋关节屈曲\nL.Hip Flexion", str(df_angles["TorsoLFemur_angle"].max().round(2)), "°", "0-40"],
                    ["右髋关节伸展\nR.Hip Extension", str(df_angles["TorsoRFemur_angle"].min().round(2)), "°", "0-30"],
                    ["右髋关节屈曲\nR.Hip Flexion", str(df_angles["TorsoRFemur_angle"].max().round(2)), "°", "0-40"],
                    ["左髋关节外展\nL.Hip Abduction", str((180 - df_angles["LHip_angle"].max() - 90).round(2)), "°",
                     "-"],
                    ["左髋关节内收\nL.Hip Adduction", str((90 - (180 - df_angles["LHip_angle"].min())).round(2)), "°",
                     "-"],
                    ["右髋关节外展\nR.Hip Abduction", str((180 - df_angles["RHip_angle"].max() - 90).round(2)), "°",
                     "-"],
                    ["右髋关节内收\nR.Hip Adduction", str((90 - (180 - df_angles["RHip_angle"].min())).round(2)), "°",
                     "-"],
                    ["左髋关节外旋\nL.Hip Internal Rotation",
                     str((180 - df_angles["LTibiaSelf_vector"].max()).round(2)),
                     "°", "-"],
                    ["左髋关节内旋\nL.Hip External Rotation", str((df_angles["LTibiaSelf_vector"].min()).round(2)), "°",
                     "-"],
                    ["右髋关节外旋\nR.Hip Internal Rotation",
                     str((180 - df_angles["RTibiaSelf_vector"].max()).round(2)),
                     "°", "-"],
                    ["右髋关节内旋\nR.Hip External Rotation", str((df_angles["RTibiaSelf_vector"].min()).round(2)), "°",
                     "-"],
                    ["检测项共计", "", "", "12 项"]
                ]
            })
        # Pelvis: obliquity and rotation derived from the torso/hip angle.
        elif part == PartAngle.Pelvis:
            romPart.append({
                "title": "骨盆活动度",
                "list": [
                    ["参数Parameters", "数值Data", "单位Unit", "参考值Reference"],
                    ["骨盆侧倾\nPelvis Obliquity", str((90 - df_angles["TorsoLHip_angle"].max()).round(2)), "°",
                     "0-10"],
                    ["骨盆旋转\nPelvis Rotation", str((90 - df_angles["TorsoLHip_angle"].min()).round(2)), "°", "0-10"],
                    ["检测项共计", "", "", "2 项"]
                ]
            })
        # Ankle: plantar/dorsiflexion measured; pronation/supination not captured ("-").
        elif part == PartAngle.Ankle:
            romPart.append({
                "title": "踝关节活动度",
                "list": [
                    ["参数Parameters", "数值Data", "单位Unit", "参考值Reference"],
                    ["左踝关节跖屈\nL.Ankle Plantar flexion", str(df_angles["LAnkle_angle"].max().round(2)), "°", "20"],
                    ["左踝关节背屈\nL.Ankle Dorsiflexion", str(df_angles["LAnkle_angle"].min().round(2)), "°", "30"],
                    ["右踝关节跖屈\nR.Ankle Plantar flexion", str(df_angles["RAnkle_angle"].max().round(2)), "°", "20"],
                    ["右踝关节背屈\nR.Ankle Dorsiflexion", str(df_angles["RAnkle_angle"].min().round(2)), "°", "30"],
                    ["左踝关节外翻\nL.Ankle Pronation", "-", "°", "15"],
                    ["左踝关节内翻\nL.Ankle Supination", "-", "°", "35"],
                    ["右踝关节外翻\nR.Ankle Pronation", "-", "°", "15"],
                    ["右踝关节内翻\nR.Ankle Supination", "-", "°", "35"],
                    ["检测项共计", "", "", "8 项"]
                ]
            })
    return romPart
def polt_angle_plots(df: DataFrame) -> List[BytesIO]:
    """Render one two-panel (left/right) line plot per joint-angle pair and
    return the plots as svglib drawing objects ready for ReportLab embedding.

    Notes:
    * The function name keeps its historical typo ("polt") because external
      callers reference it.
    * The ``-> List[BytesIO]`` annotation is historical: the returned items
      are actually the objects produced by ``svg2rlg``.

    Fix: each figure is now closed after serialization. Previously every call
    leaked all six figures, so repeated report generation accumulated open
    matplotlib figures (triggering the "more than 20 figures" warning and
    growing memory).
    """
    # Per-plot configuration: title, shared y-limits, and the two
    # (x-column, y-column, x-label, y-label) axis specs.
    metadatas = [
        {
            "title": "膝关节角度变化周期",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "LKnee_angle", "时间(秒)", "L 膝关节角度 (°)"],
                ["Time_in_sec", "RKnee_angle", "时间(秒)", "R 膝关节角度 (°)"]
            ]
        },
        {
            "title": "髋关节角度变化周期(内收外展)",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "LHip_angle", "时间(秒)", "L 髋关节角度 (°)"],
                ["Time_in_sec", "RHip_angle", "时间(秒)", "R 髋关节角度 (°)"]
            ]
        },
        {
            "title": "髋关节角度变化周期(屈曲伸展)",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "TorsoLFemur_angle", "时间(秒)", "L 髋关节角度 (°)"],
                ["Time_in_sec", "TorsoRFemur_angle", "时间(秒)", "R 髋关节角度 (°)"]
            ]
        },
        {
            "title": "髋关节角度变化周期(外旋内旋)",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "LTibiaSelf_vector", "时间(秒)", "L 髋关节角度 (°)"],
                ["Time_in_sec", "RTibiaSelf_vector", "时间(秒)", "R 髋关节角度 (°)"]
            ]
        },
        {
            "title": "躯干髋关节角度变化周期",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "TorsoLHip_angle", "时间(秒)", "躯干 L 髋关节角度 (°)"],
                ["Time_in_sec", "TorsoRHip_angle", "时间(秒)", "躯干 R 髋关节角度 (°)"]
            ]
        },
        {
            "title": "踝关节角度变化周期",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "LAnkle_angle", "时间(秒)", "L 踝关节角度 (°)"],
                ["Time_in_sec", "RAnkle_angle", "时间(秒)", "R 踝关节角度 (°)"]
            ]
        }
    ]
    images = []
    rc = {'font.sans-serif': 'SimHei',
          'axes.unicode_minus': False}
    sns.set_style(style='darkgrid', rc=rc)
    for metadata in metadatas:
        fig, axes = plt.subplots(2, 1, figsize=(5.5, 7))
        fig.suptitle(metadata["title"])
        axes[0].set(ylim=metadata["ylim"])
        axes[1].set(ylim=metadata["ylim"])
        sns.lineplot(ax=axes[0], data=df, x=metadata["axis"][0][0], y=metadata["axis"][0][1]).set(
            xlabel=metadata["axis"][0][2],
            ylabel=metadata["axis"][0][3])
        sns.lineplot(ax=axes[1], data=df, x=metadata["axis"][1][0], y=metadata["axis"][1][1]).set(
            xlabel=metadata["axis"][1][2],
            ylabel=metadata["axis"][1][3])
        image = BytesIO()
        fig.tight_layout()
        fig.savefig(image, format='svg')
        image.seek(0)
        images.append(svg2rlg(image))
        plt.close(fig)  # release the figure; only the serialized SVG is needed
    return images
| spianmo/GaitStudio | evaluate/ReportModuleBuilder.py | ReportModuleBuilder.py | py | 8,394 | python | en | code | 8 | github-code | 36 |
8754880255 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import UserError
class OfDatastoreCrmAllocateWizard(models.TransientModel):
    """Wizard to assign a network-member partner to a single CRM lead.

    On confirmation, the lead is either pushed to the partner's child
    database (when a datastore connector exists) or emailed to the partner.
    """
    _name = 'of.datastore.crm.sender.allocate.wizard'
    _description = u"Wizard d'affectation de partenaire"
    lead_id = fields.Many2one('crm.lead', u"Opportunité", required=True, readonly=True)
    partner_id = fields.Many2one(
        'res.partner', u"Partenaire", domain="[('of_network_member', '=', True)]", required=True)
    def action_done(self):
        """Allocate the lead to the chosen partner and forward it."""
        if self.partner_id:
            self.lead_id.of_allocated = self.partner_id
            network_members = self.env['of.datastore.crm.network.member'].search(
                [('partner_id', '=', self.partner_id.id)])
            # Check whether a datastore connector exists for this partner
            # (either a mono-company connector on the partner itself, or a
            # multi-company connector listing it among its children).
            connecteur_ids = self.env['of.datastore.crm.sender'].search(
                ['|', '&', ('partner_id', '=', self.partner_id.id), ('is_multicompany', '=', False),
                 '&', ('child_ids', 'in', network_members.ids), ('is_multicompany', '=', True)])
            # If a connector to a child database exists for this network
            # member, create the lead directly on the child database.
            if connecteur_ids:
                self.lead_id.datastore_send_lead()
            # Otherwise notify the network member by email.
            else:
                template = self.env.ref('of_datastore_crm_sender.of_datastore_crm_sender_email_template')
                template.send_mail(self.lead_id.id)
            self.lead_id.of_datastore_sent = True
class OfDatastoreCrmAutoAllocateWizard(models.TransientModel):
    """Wizard that auto-assigns CRM leads to network members by sales sector.

    Candidate partners are matched on the lead's commercial sector and the
    least-loaded one (fewest ongoing leads) is proposed; action_done then
    forwards each lead like the single-lead wizard does.
    """
    _name = 'of.datastore.crm.sender.auto.allocate.wizard'
    _description = u"Wizard d'affectation automatique de partenaire"
    lead_ids = fields.Many2many('crm.lead', string=u"Opportunités")
    wizard_line_ids = fields.One2many(
        'of.datastore.crm.sender.auto.allocate.wizard.line', 'wizard_id', string=u"Lignes du wizard")
    @api.onchange('lead_ids')
    def onchange_lead_ids(self):
        """Build one proposal line per untreated lead, picking a partner."""
        of_secteur_obj = self.env['of.secteur']
        res_partner_obj = self.env['res.partner']
        wizard_line_obj = self.env['of.datastore.crm.sender.auto.allocate.wizard.line']
        # Backfill the commercial sector of network members from their ZIP
        # code when it is missing.
        membre_reseau_ids = res_partner_obj.search([('of_network_member', '=', True)])
        for membre_reseau in membre_reseau_ids:
            if not membre_reseau.of_secteur_com_id and membre_reseau.zip:
                secteur_id = of_secteur_obj.get_secteur_from_cp(membre_reseau.zip)
                membre_reseau.write({'of_secteur_com_id': secteur_id.id or False})
        # Skip leads already handled by the CRM connector.
        for lead in self.lead_ids.filtered(lambda l: not l.of_datastore_sent):
            # Backfill the sector of the lead's partner as well.
            if not lead.partner_id.of_secteur_com_id:
                # NOTE(review): `zip` shadows the builtin inside this loop.
                zip = lead.partner_id.zip or lead.zip or False
                of_secteur_com_id = of_secteur_obj.get_secteur_from_cp(zip)
                lead.partner_id.write({'of_secteur_com_id': of_secteur_com_id.id})
            partner_id = False
            if lead.partner_id.of_secteur_com_id:
                # Collect the partners covering this sector, sorted so the
                # one with the fewest ongoing leads comes first.
                partner_ids = membre_reseau_ids\
                    .filtered(lambda m: m.of_secteur_com_id == lead.partner_id.of_secteur_com_id)\
                    .sorted('of_ongoing_lead_count')
                # Take the first candidate when one exists.
                if partner_ids:
                    partner_id = partner_ids[0]
            # NOTE(review): `partner_id` is a res.partner record here, not an
            # id — confirm that new() resolves it as intended.
            wizard_line_obj.new({
                'wizard_id': self.id,
                'lead_id': lead.id,
                'partner_id': partner_id,
                'secteur_id': lead.partner_id.of_secteur_com_id.id,
            })
    def action_done(self):
        """Allocate and forward every lead that received a partner proposal."""
        for line in self.wizard_line_ids:
            if line.partner_id:
                line.lead_id.of_allocated = line.partner_id
                network_members = self.env['of.datastore.crm.network.member'].search(
                    [('partner_id', '=', line.partner_id.id)])
                # Check whether a datastore connector exists for this partner.
                connecteur_ids = self.env['of.datastore.crm.sender'].search(
                    ['|', '&', ('partner_id', '=', line.partner_id.id), ('is_multicompany', '=', False),
                     '&', ('child_ids', 'in', network_members.ids), ('is_multicompany', '=', True)])
                # If a connector to a child database exists for this network
                # member, create the lead on the child database.
                if connecteur_ids:
                    try:
                        line.lead_id.datastore_send_lead()
                    # NOTE(review): errors are deliberately swallowed so one
                    # failing lead does not abort the batch — consider logging.
                    except Exception:
                        pass
                # Otherwise notify the network member by email.
                else:
                    template = self.env.ref('of_datastore_crm_sender.of_datastore_crm_sender_email_template')
                    template.send_mail(line.lead_id.id)
                line.lead_id.of_datastore_sent = True
class OfDatastoreCrmAutoAllocateWizardLine(models.TransientModel):
    """One lead/partner proposal row inside the auto-allocation wizard."""
    _name = 'of.datastore.crm.sender.auto.allocate.wizard.line'
    _description = u"Ligne de wizard d'affectation automatique de partenaire"
    wizard_id = fields.Many2one('of.datastore.crm.sender.auto.allocate.wizard', u"Wizard")
    lead_id = fields.Many2one('crm.lead', u"Opportunité", readonly=True)
    partner_id = fields.Many2one('res.partner', u"Partenaire", domain="[('of_network_member', '=', True)]")
    secteur_id = fields.Many2one('of.secteur', u"Secteur commercial", readonly=True)
| odof/openfire | of_datastore_crm_sender/wizards/of_datastore_crm_sender_allocate_wizard.py | of_datastore_crm_sender_allocate_wizard.py | py | 5,884 | python | fr | code | 3 | github-code | 36 |
30380624251 | import os
from datetime import timedelta
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): SECRET_KEY becomes None if the env var is unset — Django
# fails at startup in that case; consider failing fast with a clear message.
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): int(None) raises TypeError when DEBUG is unset; same for
# ALLOWED_HOSTS below (None.split). Deployments must define both variables.
DEBUG = int(os.environ.get("DEBUG"))
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS").split(" ")
# Application definition
INSTALLED_APPS = [
    # django default apps
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # third-party apps
    "djoser",
    "corsheaders",
    "rest_framework",
    "rest_framework.authtoken",
    # custom app
    "authentify.apps.AuthentifyConfig",
    "quiz.apps.QuizConfig",
]
# CorsMiddleware must come before any middleware that can generate responses.
MIDDLEWARE = [
    "corsheaders.middleware.CorsMiddleware",
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "backend.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "backend.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.1/ref/settings/#databases
# PostgreSQL connection, fully driven by environment variables.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": os.environ.get("POSTGRES_DB"),
        "USER": os.environ.get("POSTGRES_USER"),
        "PASSWORD": os.environ.get("POSTGRES_PASSWORD"),
        "HOST": os.environ.get("POSTGRES_HOST"),
        "PORT": os.environ.get("POSTGRES_PORT"),
    }
}
# Custom user model (see the authentify app).
AUTH_USER_MODEL = "authentify.User"
# Password validation
# https://docs.djangoproject.com/en/4.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    }
]
# NOTE(review): console backend only prints emails — fine for development,
# must be replaced with a real backend in production.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Internationalization
# https://docs.djangoproject.com/en/4.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.1/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.1/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Application-specific limit on questions per quiz.
MAX_QUESTION_PER_QUIZ: int = 10
# JWT cookie configuration used by the auth endpoints.
REST_USE_JWT = True
JWT_AUTH_COOKIE = "quiz-auth"
JWT_AUTH_REFRESH_COOKIE = "quiz-refresh-token"
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "rest_framework_simplejwt.authentication.JWTAuthentication",
    ),
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
    "PAGE_SIZE": 10,
}
SIMPLE_JWT = {
    "ACCESS_TOKEN_LIFETIME": timedelta(days=1),
    "BLACKLIST_AFTER_ROTATION": False,
    # JWTs identify users by their uuid field instead of the numeric pk.
    "USER_ID_FIELD": "uuid",
}
DJOSER = {
    "LOGIN_FIELD": "email",
    "PASSWORD_RESET_CONFIRM_URL": "password/reset/confirm/{uid}/{token}",
}
# NOTE(review): allows any origin — acceptable in development only; restrict
# with CORS_ALLOWED_ORIGINS before production.
CORS_ALLOW_ALL_ORIGINS = True
REDIS_HOST = os.environ.get("REDIS_HOST")
REDIS_PORT = os.environ.get("REDIS_PORT") | Lord-sarcastic/quiz | backend/settings.py | settings.py | py | 4,013 | python | en | code | 0 | github-code | 36 |
6241769210 | """A simple simulation of wave packet.
Refer the details to the journal paper: PRA 45, 4734 (1992).
"""
from importlib.resources import path
import numpy as np
import pandas as pd
import xarray as xr
from . import rsc
from .electricfield import ElectricField
__all__ = ["predefined_target", "WavePacket"]
def predefined_target(name: str) -> pd.DataFrame:
    """Load the bundled ``<name>.xlsx`` resource and return its "Levels" sheet."""
    resource_name = "{}.xlsx".format(name)
    with path(rsc, resource_name) as resource_path:
        levels = pd.read_excel(resource_path, "Levels")
    return levels
class WavePacket:
    """Wave-packet model built from a target level table and a laser field.

    Follows the simulation described in PRA 45, 4734 (1992): each level
    contributes a coefficient sqrt(strength) * E(level), and calling the
    object evaluates the pairwise density-matrix-like products over time.
    """
    def __init__(self, field: ElectricField, target: (str, pd.DataFrame)):
        # A string target is resolved through the predefined Excel resources.
        if isinstance(target, str):
            target = predefined_target(target)
        if "config" in target:
            if not target["config"].is_unique:
                raise ValueError(
                    "Values in target['config'] should be unique.")
            idx = target["config"]
        else:
            # No explicit configuration labels: fall back to positional ids.
            idx = range(len(target))
        # Per-level frequency and complex amplitude, indexed by config label.
        self.__status = pd.DataFrame({
            "config": idx,
            "freq": target["level"],
            "coeff": target["strength"]**0.5 * field.at_k(target["level"]),
        }).set_index("config")
    @property
    def status(self) -> pd.DataFrame:
        """Read-only access to the per-level (freq, coeff) table."""
        return self.__status
    def __call__(self, t: np.ndarray) -> xr.DataArray:
        """Evaluate Re[a_n(t) a_m(t)*] on the time grid *t* as an (t, n, n') array."""
        n = self.__status.index  # dims: [n]
        k = self.__status["freq"]  # dims: [n]
        c = self.__status["coeff"]  # dims: [n]
        # Time-evolved amplitude for each level; broadcasting gives dims [t, n].
        a = -1j * np.exp(-1j * k[None, :] * t[:, None]) * c[None, :].conj()
        # dims: [t, n]
        return xr.DataArray(
            (a[:, :, None] * a[:, None, :].conj()).real,
            coords=[t, n, n],
            dims=["t", "n", "n'"],
        )
| DaehyunPY/FERMI_20149100 | Packages/simul2/wavepacket.py | wavepacket.py | py | 1,648 | python | en | code | 0 | github-code | 36 |
44395034513 |
class Solution:
    def minimumSwap(self, s1: str, s2: str) -> int:
        """Minimum swaps to make the 'x'/'y' strings equal, or -1 if impossible."""
        xy_count = 0  # positions where s1 has 'x' and s2 has 'y'
        yx_count = 0  # positions where s1 has 'y' and s2 has 'x'
        for a, b in zip(s1, s2):
            if a == b:
                continue
            if a == "x":
                xy_count += 1
            else:
                yx_count += 1
        # An odd number of mismatches can never be paired away.
        if (xy_count + yx_count) % 2 == 1:
            return -1
        # Two mismatches of the same kind cost one swap ("xx"<=>"yy");
        # a leftover mixed pair ("xy" with "yx") costs two swaps.
        return xy_count // 2 + yx_count // 2 + 2 * (xy_count % 2)
| Liuys614/LeetCode | 1247_Minimum Swaps to Make Strings Equal_ref.py | 1247_Minimum Swaps to Make Strings Equal_ref.py | py | 721 | python | en | code | 0 | github-code | 36 |
3272420780 | import json
import re
import requests
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.db import IntegrityError
from django.http import HttpResponse
from django.shortcuts import render, redirect
from . import models
OW_API_KEY = "3f59299cb03f1d4beb6bd960a3f546fd"
def _refresh_locations(locations):
    """Refresh every Location in *locations* from the OpenWeatherMap API.

    Returns a (result, appStatus) pair: ("", "") when every lookup succeeded,
    or ("Fail", <message>) for the first location whose lookup did not return
    HTTP 200 (remaining locations are left untouched, matching the original
    break-on-first-failure loop).
    """
    for location in locations:
        url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid={}'.format(
            location.name, OW_API_KEY)
        locationWeather = requests.get(url).json()
        if locationWeather['cod'] != 200:
            return "Fail", ("Refresh operation for {} failed. This could be an issue related with OpenWeatherMap, "
                            "please contact with the administrator.".format(location.name))
        location.temperature = locationWeather['main']['temp']
        location.description = locationWeather['weather'][0]['description']
        location.icon = locationWeather['weather'][0]['icon']
        location.save()
    return "", ""


def _ordered_locations(request, locations):
    """Return *locations* ordered by the owner's saved comma-separated
    orderList; when no order has been saved the queryset is returned as-is."""
    orderList = models.Owner.objects.filter(username=request.user).values('orderList')[0]['orderList']
    if orderList == "":
        return locations
    return [locations.get(name=name) for name in orderList.split(',')]


@login_required
def index(request):
    """Home page view.

    GET renders the user's Locations (refreshed from OpenWeatherMap) as HTML;
    POST dispatches on the ``submit`` field (Create / Delete / LocationSort /
    Refresh / Delete All) and answers with the JSON payload produced by
    :func:`responseLocations`.  ``result`` stays "Success" unless a branch set
    it to "Fail", in which case ``appStatus`` carries the user-facing message.
    """
    result = ""
    appStatus = ""
    # NOTE(review): assumes exactly one Owner row exists for the logged-in user.
    owner = models.Owner.objects.filter(username=request.user)[0]
    if request.method == "GET":
        locations = models.Location.objects.filter(owner=owner)
        result, appStatus = _refresh_locations(locations)
        if result != "Fail":
            return render(request, "index.html",
                          {"locations": _ordered_locations(request, locations)})
        # On failure fall through and answer with the JSON error payload,
        # matching the original control flow.
    elif request.POST["submit"] == "Create":
        locationName = request.POST['locationName']
        if locationName == "":
            appStatus = "Please choose a valid location name"
            result = "Fail"
        else:
            url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid={}'.format(
                locationName, OW_API_KEY)
            locationWeather = requests.get(url).json()
            if locationWeather['cod'] == 200:
                try:
                    # locID is assigned manually as max existing id + 1
                    # (global across owners, preserved from the original).
                    if models.Location.objects.count() == 0:
                        newLocId = 0
                    else:
                        newLocId = models.Location.objects.latest('locID').locID + 1
                    models.Location.objects.create(locID=newLocId, name=locationWeather['name'],
                                                   temperature=locationWeather['main']['temp'],
                                                   description=locationWeather['weather'][0]['description'],
                                                   icon=locationWeather['weather'][0]['icon'], owner=owner)
                    oldOrderList = models.Owner.objects.filter(username=request.user).values('orderList')[0]['orderList']
                    # NOTE(review): when the order list is empty the new location
                    # is not appended to it -- preserved from the original.
                    if oldOrderList != "":
                        newOrderList = oldOrderList + ',' + locationWeather['name']
                        models.Owner.objects.filter(username=request.user).update(orderList=newOrderList)
                except IntegrityError:
                    appStatus = ("Please choose a location name which does not exists in your current set of "
                                 "locations.")
                    result = "Fail"
            elif locationWeather['cod'] == '404' and locationWeather['message'] == 'city not found':
                appStatus = "Location could not be found, please make sure that you enter a valid location name."
                result = "Fail"
            else:
                appStatus = ("Create operation failed. This could be an issue related with OpenWeatherMap, "
                             "please contact with the administrator.")
                result = "Fail"
    elif request.POST["submit"] == "Delete":
        locationName = request.POST['locationName']
        if locationName == "":
            appStatus = "Please choose a valid location name"
            result = "Fail"
        else:
            try:
                models.Location.objects.filter(owner=owner).get(name=locationName).delete()
                oldOrderList = models.Owner.objects.filter(username=request.user).values('orderList')[0]['orderList']
                # Drop "name," (leading/interior entry) or ",name" (trailing
                # entry) from the saved order.  NOTE(review): the name is fed
                # to re.sub unescaped; regex metacharacters in a location name
                # would misbehave.
                newOrderList = re.sub(locationName + ',', "", oldOrderList)
                if len(oldOrderList) == len(newOrderList):
                    newOrderList = re.sub(',' + locationName, "", oldOrderList)
                models.Owner.objects.filter(username=request.user).update(orderList=newOrderList)
            except models.Location.DoesNotExist:
                appStatus = ("Delete operation failed. Please make sure that location name "
                             "exists in current set of Locations")
                result = "Fail"
    elif request.POST["submit"] == "LocationSort":
        orderList = request.POST['orderList']
        try:
            orderList = json.loads(orderList)
            models.Owner.objects.filter(username=request.user).update(orderList=orderList)
        except models.Owner.DoesNotExist:
            appStatus = ("Sorting operation failed. Please make sure that owner "
                         "exists in WeatherApp system")
            result = "Fail"
    elif request.POST["submit"] == "Refresh":
        try:
            locations = models.Location.objects.filter(owner=owner)
            result, appStatus = _refresh_locations(locations)
        except models.Location.DoesNotExist:
            # Bug fix: the original message concatenated to "user existsexists".
            appStatus = "Refreshing operation failed. Please make sure that user exists in current set of Locations"
            result = "Fail"
    elif request.POST["submit"] == "Delete All":
        try:
            models.Location.objects.filter(owner=owner).delete()
            models.Owner.objects.filter(username=request.user).update(orderList="")
        except models.Location.DoesNotExist:
            appStatus = "Deleting all operation failed, no locations seems to exist."
            result = "Fail"
    if result == "":
        result = "Success"
    locations = models.Location.objects.filter(owner=owner)
    return responseLocations(result, appStatus, _ordered_locations(request, locations))
def signup(request):
    """SignUp page view: create a new Owner from the posted form, log them in
    and redirect to the home page; duplicate usernames re-render the form with
    an error message.  GET simply renders the empty form."""
    if request.method == 'POST':
        username = request.POST['username']
        email = request.POST['email']
        password = request.POST['password']
        try:
            # create_user hashes the password; IntegrityError means the
            # username (unique column) is already taken.
            user = models.Owner.objects.create_user(username, email, password)
            login(request, user)
            return redirect('index')
        except IntegrityError:
            appStatus = "Oops! It seems like this username is taken, please choose another username."
            return render(request, 'signup.html', {'status': appStatus})
    else:
        return render(request, 'signup.html')
def responseLocations(result, statusMsg, locations):
    """Serialize *locations* and wrap an app-request outcome as a JSON HttpResponse."""
    payload = {
        'result': result,
        'appStatus': statusMsg,
        'locations': serializers.serialize("json", locations),
    }
    return HttpResponse(json.dumps(payload), 'text/json')
| ysyesilyurt/WeatherApp | WeatherApp/views.py | views.py | py | 9,163 | python | en | code | 1 | github-code | 36 |
24680745592 | import base64
def e5(m):  # base64
    """Decode a base64-encoded string and return the resulting text."""
    decoded_bytes = base64.b64decode(m)
    return decoded_bytes.decode()
def e4(m, k=13):  # Caesar shift cipher
    """Decrypt a Caesar cipher by shifting lowercase letters back by *k*.

    Generalization: non-alphabetic characters (spaces, digits, punctuation)
    are passed through unchanged instead of being mapped to garbage letters.
    For all-letter inputs -- the only inputs used by this script -- the
    output is identical to the original implementation.
    """
    m = m.lower()
    decoded = []
    for ch in m:
        if 'a' <= ch <= 'z':
            decoded.append(chr((ord(ch) - k - 97) % 26 + 97))
        else:
            decoded.append(ch)
    return "".join(decoded)
def e2(m, k):  # Vigenere cipher
    """Decrypt a Vigenere cipher: subtract the repeating key letter from each
    message letter modulo 26 (lowercase alphabet)."""
    m = m.lower()
    k = k.lower()
    decoded = []
    for i, ch in enumerate(m):
        key_char = k[i % len(k)]
        decoded.append(chr((ord(ch) - ord(key_char)) % 26 + 97))
    return "".join(decoded)
def key_square(k):
    """Build the 5x5 Playfair key square for key *k* as five 5-char rows.

    The square is the de-duplicated key followed by the remaining letters of
    the 25-letter alphabet (no 'j').

    Bug fix: the original second loop iterated over the *key* instead of the
    alphabet (``for j in k: if j not in alphabet``), so the square was never
    padded with the unused letters and only produced a valid square when the
    key was already a full 25-letter permutation.  For such keys (the only
    ones used in this script) the output is unchanged.
    """
    k = k.lower()
    s = ""
    alphabet = "abcdefghiklmnopqrstuvwxyz"
    # Keep the first occurrence of each key letter.
    for i in k:
        if i not in s:
            s += i
    # Pad with the rest of the alphabet, skipping letters already used.
    # (Assumes key letters come from the 25-letter alphabet above.)
    for j in alphabet:
        if j not in s:
            s += j
    # Break the 25 letters into 5 rows of 5.
    return [s[row * 5:row * 5 + 5] for row in range(5)]
def cipher_to_digraphs(cipher):
    """Split *cipher* into two-character digraphs; a trailing odd character is dropped."""
    even_length = len(cipher) // 2 * 2
    return [cipher[pos:pos + 2] for pos in range(0, even_length, 2)]
def find_position(key_sq, letter):
    """Return (row, column) of *letter* in the key square, or None if absent."""
    for row_index, row in enumerate(key_sq):
        col_index = row.find(letter)
        if col_index != -1:
            return row_index, col_index
    return None
def e1(m, k):  # Playfair cipher (decryption)
    """Decrypt Playfair ciphertext *m* with key *k*.

    Digraphs in the same row/column move one cell left/up; otherwise the
    rectangle rule swaps the columns.  Wraparound is handled implicitly
    through Python's negative indexing (index -1 is the last cell).
    """
    cipher = cipher_to_digraphs(m)
    key_matrix = key_square(k)
    plaintext = ""
    for e in cipher:
        # Locate both letters of the digraph in the 5x5 square.
        p1, q1 = find_position(key_matrix, e[0])
        p2, q2 = find_position(key_matrix, e[1])
        if p1 == p2:
            # Same row: shift each letter one column to the left
            # (4 -> -1 so that -1 - 1 = -2 still indexes column 3).
            if q1 == 4:
                q1 = -1
            if q2 == 4:
                q2 = -1
            plaintext += key_matrix[p1][q1 - 1]
            plaintext += key_matrix[p1][q2 - 1]
        elif q1 == q2:
            # Same column: shift each letter one row up.
            if p1 == 4:
                p1 = -1
            if p2 == 4:
                p2 = -1
            plaintext += key_matrix[p1 - 1][q1]
            plaintext += key_matrix[p2 - 1][q2]
        else:
            # Rectangle rule: keep rows, swap columns.
            plaintext += key_matrix[p1][q2]
            plaintext += key_matrix[p2][q1]
    return plaintext
# Undo the nested encodings in reverse order:
# base64 -> Caesar(13) -> Caesar(20) -> Vigenere('cryptography') -> Playfair.
m = "d3ZucXN0b2tib2xlamp5ZW5zdnlicGpsa3VhcGx2"
m5 = e5(m)
m4 = e4(m5, 13)
m3 = e4(m4, 20)  # Both are Caesar shift ciphers, so the same function is reused
m2 = e2(m3, 'cryptography')
m1 = e1(m2, 'natdszgrqhebvpmxilfywcuko')
print(m1)
| SudeshGowda/Systems-recruitment-task | Decoder.py | Decoder.py | py | 2,373 | python | en | code | 0 | github-code | 36 |
25049652193 | import numpy as np
import torch
from skimage.metrics import peak_signal_noise_ratio,structural_similarity
import natsort
import cv2
import os
from tqdm import tqdm
def tensor2im(input_image, imtype=np.uint8):
    """Convert the first image of a (B, C, H, W) tensor with values in [0, 1]
    into an (H, W, C) numpy array scaled to [0, 255] and cast to *imtype*.
    Non-tensor inputs are returned unchanged."""
    if not isinstance(input_image, torch.Tensor):
        return input_image
    array = input_image.data[0].cpu().float().numpy()
    if array.shape[0] == 1:
        # Grayscale: replicate the single channel to three channels.
        array = np.tile(array, (3, 1, 1))
    array = np.transpose(array, (1, 2, 0))
    array = np.clip(array, 0, 1) * 255.0
    return array.astype(imtype)
def pil2tensor(im):  # in: [PIL Image with 3 channels]. out: [B=1, C=3, H, W] (0, 1)
    """Convert an H x W x 3 image (PIL or array-like, 0-255) into a float
    tensor of shape (1, 3, H, W) with values scaled to [0, 1]."""
    scaled = np.float32(im) / 255
    chw = scaled.transpose(2, 0, 1)
    return torch.Tensor(chw).unsqueeze(0)
def PSNR_SSIM(GT_path, Pred_Path):
GT_list = natsort.natsorted(os.listdir(GT_path))
Pred_list = natsort.natsorted(os.listdir(Pred_Path))
psnr, ssim = [], []
for GT, Pred in tqdm(zip(GT_list,Pred_list),total=len(GT_list)):
GT = cv2.imread(os.path.join(GT_path,GT))
Pred =cv2.imread(os.path.join(Pred_Path,Pred))
psnr.append(peak_signal_noise_ratio(GT,Pred))
ssim.append(structural_similarity(GT,Pred, channel_axis=2))
print("PSNR : {} SSIM: {}".format(np.average(psnr),np.average(ssim))) | Jintopia/Hint-based-Colorization | utils.py | utils.py | py | 1,302 | python | en | code | 1 | github-code | 36 |
28524161009 | import socket
import pickle
SERVER_ADDR = "192.168.1.100"
PORT = 6000
ADDR = (SERVER_ADDR, PORT)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
replys = []
def send(reply):
    """Pickle *reply*, send it to the server and return the unpickled response.

    SECURITY NOTE(review): pickle.loads on socket data executes arbitrary
    code during unpickling -- only acceptable with a fully trusted server.
    On socket errors the exception is printed and None is returned.
    """
    try:
        client.send(pickle.dumps(reply))
        return pickle.loads(client.recv(4096))
    except socket.error as e:
        print(e)
def main():
    """Client game loop: connect, receive a player number, then poll the
    server for quiz state and show the final scores (UI strings are Turkish).

    NOTE(review): ``connection`` and ``index`` are assigned but never used;
    the bare ``except`` silently ends the loop on ANY error; and ``run =
    False`` at the end of the try body means the while loop performs at most
    one successful iteration -- confirm the intended indentation upstream.
    """
    try:
        client.connect(ADDR)
        connection = True
    except socket.error as e:
        print(e)
    # Server replies with this client's player index (0 or 1).
    player = int(client.recv(2048).decode())
    if player == 0:
        print("Rakip Bekleniyor...\nOyuncu Numaran: ", player)
    else:
        print("Oyuncu Numaran:", player)
    run = True
    index = 0
    while run:
        try:
            game = send("get")
            if not game.ready:
                pass
            elif game.finish() == 0:
                # Quiz in progress: answer every question, then submit.
                for q in game.question_list:
                    print(q.get_question())
                    q.print_options()
                    reply = input("Cevap: ")
                    if player == 0:
                        replys.append(reply)
                    else:
                        replys.append(reply)
                game = send(replys)
                print("Rakip Bekleniyor...")
            else:
                if game.finish() == 2:
                    # Game over: show own/opponent scores and replies.
                    if player == 0:
                        print(f"Senin Puanın: {game.players_scores[0]}\t\t", end = '')
                        print(f"Rakibinin Puanı: {game.players_scores[1]}")
                        print("Rakibin Cevapları".center(50))
                        for i in range(len(game.player2_replys)):
                            print(i + 1, game.player2_replys[i], end = ' ')
                    else:
                        print(f"Senin Puanın: {game.players_scores[1]}")
                        print(f"Rakibinin Puanı: {game.players_scores[0]}")
                        print("Rakibin Cevapları".center(50))
                        for i in range(len(game.player1_replys)):
                            print(f"({i + 1}), {game.player1_replys[i]}", end = ' ')
            run = False
        except:
            run = False
main()
| TSC-MSTF/QuizApp | client.py | client.py | py | 2,308 | python | en | code | 0 | github-code | 36 |
7148043819 | a = list(["test.email+alex@leetcode.com","test.e.mail+bob.cathy@leetcode.com","testemail+david@lee.tcode.com"])
res = []
# De-duplicate the addresses in `a` per LeetCode 929 rules: dots in the
# local part are ignored and anything after a '+' is dropped.
for temp in a:
    temp1 = temp.split("@")[0]
    temp2 = temp.split("@")[1]
    temp1 = "".join(temp1.split("."))
    # NOTE(review): rfind returns -1 when there is no '+', so the slice
    # would drop the local part's last character -- confirm inputs always
    # contain '+'.  (rfind also finds the LAST '+', not the first.)
    temp1 = temp1[0:temp1.rfind('+',1)]
    if temp1+'@'+temp2 not in res:
        res.append(temp1+'@'+temp2)
print(len(res))
| ljdongysu/LeetCode | 929/Unique_Email_Addresses.py | Unique_Email_Addresses.py | py | 363 | python | en | code | 0 | github-code | 36 |
228322789 | """
练习2. 定义函数,在列表中找出所有数字
[43,"悟空",True,56,"八戒",87.5,98]
"""
# 适用性
# 函数有一个结果使用return
# 函数有多个结果使用yield
def get_number1(list_number):
    """Return all int/float elements of *list_number* as a list.

    Uses an exact type check, so bool values (a subclass of int) are
    deliberately excluded.
    """
    return [element for element in list_number if type(element) in (int, float)]
def get_number2(list_number):
    """Yield each int/float element of *list_number* lazily.

    Uses an exact type check, so bool values (a subclass of int) are
    deliberately excluded.
    """
    for element in list_number:
        if type(element) not in (int, float):
            continue
        yield element
list01 = [43, "悟空", True, 56, "八戒", 87.5, 98]
for item in get_number1(list01):
print(item)
for item in get_number2(list01):
print(item)
| testcg/python | code_all/day17/exercise02.py | exercise02.py | py | 635 | python | en | code | 0 | github-code | 36 |
39056231859 | from numpy import genfromtxt,where,zeros,nan,ones
from glob import glob
from obspy.core.util.geodetics import gps2DistAzimuth
from matplotlib import pyplot as plt
from obspy import read
from obspy.core import UTCDateTime
from datetime import timedelta
lonepi=-122.3174
latepi=38.2118
time_epi=UTCDateTime('2014-08-24T10:20:44')
tplot=timedelta(seconds=100)
mul=1.5
pgd=genfromtxt('/Users/dmelgar/Napa2014/PGD/napa_test_nolatency.txt')
path='/Users/dmelgar/Napa2014/GPS/sac/'
lonlat=genfromtxt(u'/Users/dmelgar/Napa2014/unr_coords.txt',usecols=[1,2])
lon=lonlat[:,0]
lat=lonlat[:,1]
stas=genfromtxt(u'/Users/dmelgar/Napa2014/unr_coords.txt',usecols=0,dtype='S')
#Get lsit of files
filesn=glob(path+'*LXN.sac')
filese=glob(path+'*LXE.sac')
#Initalize
d=zeros(len(filese)) #epicentral distances
#Loop and plot
dmin=[]
dmax=0
plt.figure()
f,axarr=plt.subplots(1,2)
axe=axarr[1]
axn=axarr[0]
for k in range(len(filese)):
current_sta=filese[k].split("/")[-1].split(".")[0].upper()
i=where(current_sta==stas)[0]
try:
d,az,baz=gps2DistAzimuth(latepi,lonepi,lat[i],lon[i])
d=d/1000
dmin=min([dmin,d])
dmax=max([dmax,d])
except:
d=nan
#Read data
stn=read(filesn[k])
ste=read(filese[k])
#Trim
stn.trim(starttime=time_epi,endtime=time_epi+tplot,pad=True,fill_value=0)
ste.trim(starttime=time_epi,endtime=time_epi+tplot,pad=True,fill_value=0)
#Self Normalize
stn[0].data=stn[0].data/max([stn[0].data.max(),-stn[0].data.min()])
ste[0].data=ste[0].data/max([ste[0].data.max(),-ste[0].data.min()])
dplot=ones(ste[0].times().shape)*d
#Plot
axn.plot(stn[0].times(),stn[0].data*mul+dplot,'k')
axe.plot(ste[0].times(),ste[0].data*mul+dplot,'k')
axn.set_title('North')
axe.set_title('East')
axn.set_ylim(dmin-5,75)
axe.set_ylim(dmin-5,75)
axn.grid()
axe.grid()
axn.set_xlabel('Seconds after OT')
axe.set_xlabel('Seconds after OT')
axn.set_ylabel('Epicentral distance (km)')
axe.yaxis.set_ticklabels([])
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.05, hspace=0)
fig, ax1 = plt.subplots()
ax1.scatter(pgd[:,1],pgd[:,2])
ax1.set_xlabel('Seconds after OT')
ax1.set_xlim(0,100)
ax1.set_ylabel('Mw', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
ax2.scatter(pgd[:,1], pgd[:,3],marker='+', c='r')
ax2.set_ylabel('No. stations', color='r')
ax2.set_ylim(0,50)
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax2.set_xlim(0,100)
plt.show()
| Ogweno/mylife | Napa_stuff/plot_PGD.py | plot_PGD.py | py | 2,500 | python | en | code | 0 | github-code | 36 |
15013232508 | ### JORDAN VICENTE-LACHAPELLE /-/ 10-26-23 /-/CTI-110 - P3HW2 - Salary ###
import os
os.system('cls')
# Get employee name from user
Name = input("Enter employee's name: \n ")
# Get number of hours from user
Hours = int(input("Enter number of hours worked: \n "))
# Get pay rate per hour from user
PayRate = float(input("Enter employee's pay rate: \n "))
# Determine if employee worked more than 40 hours
if Hours > 40:
# Calculate OT hours
OTHours = Hours - 40
# Calculate reg hours worked
RegHours = Hours - OTHours
# Calculate pay for reg hours
RegPay = RegHours * PayRate
# Calculate OT pay
OTPay = OTHours * (PayRate * 1.5)
## Calculate gross pay
GrossPay = RegPay + OTPay
# Display name, payrate, reg hours. OT hours, OT pay, gross pay
print("------------------------------------")
print(f"Employee Name: {Name}\n")
print("Hours Worked | Pay Rate | OverTime | OverTime Pay | RegHour Pay | Gross Pay")
print("-------------------------------------------------------------------------------------------")
print(f" {Hours} | {PayRate} | {OTHours} | ${OTPay} | ${RegPay} | ${GrossPay}") | JordanVL1234/CTI-110 | Python/P3HW2_JordanVicenteLachapelle.py | P3HW2_JordanVicenteLachapelle.py | py | 1,239 | python | en | code | 0 | github-code | 36 |
3349395198 |
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import time,os
import datetime
# SECURITY WARNING(review): this loop subscribes to a PUBLIC broker topic
# and pipes every received payload straight into os.popen -- i.e. anyone who
# can publish to "iotdevice/oscommand" gets a remote shell on this device.
# The command output is then published back on "iotdevice/oscommandout".
while True:
    try:
        # The callback for when the client receives a CONNACK response from the server.
        def on_connect(client, userdata, flags, rc):
            # Subscribing in on_connect() - if we lose the connection and
            # reconnect then subscriptions will be renewed.
            client.subscribe("iotdevice/oscommand")
        # The callback for when a PUBLISH message is received from the server.
        def on_message(client, userdata, msg):
            # Decode the payload and execute it as a shell command (see
            # security warning above).
            mydata=((msg.payload).decode('utf-8'))
            p=os.popen(str(mydata))
            pp=[]
            pp.append(p.read())
            print (pp)
            publish.single("iotdevice/oscommandout", str(pp[0]), hostname="iot.eclipse.org")
        client = mqtt.Client()
        client.on_connect = on_connect
        client.on_message = on_message
        client.connect("iot.eclipse.org", 1883, 60)
        client.loop_forever()
    except:
        # NOTE(review): bare except swallows every error (including
        # KeyboardInterrupt) and restarts the connection loop forever.
        pass
| PraveerT/RPI_MDX | Shutdown/shutdown.py | shutdown.py | py | 1,117 | python | en | code | 0 | github-code | 36 |
29656137310 | import time
import tweepy
auth = tweepy.OAuthHandler('KINHgXqoSTS5ReyTnjXSYAA6w', 'ehCnMc37yfAf6PPdmzQMJM7pkUb5HYsnPfZw0vf5m9rxPNEbVm')
auth.set_access_token('1488729367346040833-mQJ2oNZDK0Rj49uLojV9WAYL4oURe0', '8zzRNCJ9sGxcnxJxgVEQkfNC7kWL12Akgpd2gdUt6REo3')
api = tweepy.API(auth)
user = api.me()
# public_tweets = api.home_timeline()
# for tweet in public_tweets:
# print(tweet.text)
def limit_handle(cursor):
    """Generator wrapping a tweepy Cursor that sleeps ~17 min on rate limits.

    NOTE(review): after sleeping the generator simply returns instead of
    resuming iteration -- confirm whether a retry was intended.  Also, under
    PEP 479 (Python 3.7+) a StopIteration raised by ``cursor.next()`` inside
    this generator becomes a RuntimeError -- verify exhaustion handling.
    """
    try:
        while True:
            yield cursor.next()
    except tweepy.RateLimitError:
        time.sleep(1000)
# for follower in limit_handle(tweepy.Cursor(api.followers).items()):
# if follower.name == '':
# follower.follow()
# print(follower.name)
search_item = 'nasa'
numberOfTweets = 10
for tweet in tweepy.Cursor(api.search, search_item).items(numberOfTweets):
try:
tweet.favorite()
print('likey')
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break | giochoa/pythontest | twitterbot/tweety.py | tweety.py | py | 978 | python | en | code | 0 | github-code | 36 |
6554339298 | from __future__ import annotations
# IMPORTS
# =======>
# noinspection PyUnresolvedReferences
import typing
import pegen.parser as pegen
# EXPORTS
# =======>
__all__ = [
'memoize',
'memoize_left_rec',
]
# MAIN CONTENT
# ============>
if typing.TYPE_CHECKING:
from pegen.parser import Parser
F = typing.TypeVar("F", bound=typing.Callable[..., typing.Any])
P = typing.TypeVar("P", bound="Parser")
T = typing.TypeVar("T")
def memoize(method: F) -> F:
    """
    A wrapper around pegen.parser.memoize that converts plain ``list``
    results into ``memoize.List`` instances.

    NOTE(review): ``memoize.List`` is never assigned in this module; unless
    a ``List`` attribute is monkey-patched onto this function elsewhere,
    returning a list raises AttributeError -- confirm where it comes from.
    """
    method = pegen.memoize(method)
    def wrapper(self: pegen.Parser, *args: typing.Any, **kwargs: typing.Any) -> typing.Any:
        result = method(self, *args, **kwargs)
        if isinstance(result, list):
            return memoize.List(elements=result)  # type: ignore
        return result
    return typing.cast(F, wrapper)
def memoize_left_rec(method: typing.Callable[[P], typing.Optional[T]]) -> typing.Callable[[P], typing.Optional[T]]:
    """
    A wrapper around pegen.parser.memoize_left_rec that converts plain
    ``list`` results into ``memoize.List`` instances.

    NOTE(review): same concern as ``memoize`` above -- ``memoize.List`` is
    undefined in this module, so list results raise AttributeError.
    """
    method = pegen.memoize_left_rec(method)
    def wrapper(self: pegen.Parser, *args: typing.Any, **kwargs: typing.Any) -> typing.Any:
        result = method(self, *args, **kwargs)  # type: ignore
        if isinstance(result, list):
            return memoize.List(elements=result)  # type: ignore
        return result
    return typing.cast(F, wrapper)
| ButterSus/KiwiPreview | frontend/parser/memoizetools.py | memoizetools.py | py | 1,460 | python | en | code | 0 | github-code | 36 |
3738842637 | import pandas as pd
from bs4 import BeautifulSoup as bs
from splinter import Browser
def init_browser():
    """Create a splinter Chrome browser.

    NOTE(review): expects "chromedriver.exe" relative to the working
    directory (Windows naming) -- confirm for the deployment platform.
    """
    executable_path = {"executable_path": "chromedriver.exe"}
    return Browser("chrome", **executable_path)
mars_dict = {}
#NASA Mars News
def scrape_mars_news():
    """Scrape the latest NASA Mars news title/teaser into the shared mars_dict.

    NOTE(review): if init_browser() itself raises, ``browser`` is unbound and
    the finally clause raises NameError, masking the original error.
    """
    try:
        browser = init_browser()
        news_paragraph_url = "https://mars.nasa.gov/news/"
        browser.visit(news_paragraph_url)
        news_paragraph_html = browser.html
        news_paragraph_soup = bs(news_paragraph_html, "html.parser")
        # First article on the page: headline link text and teaser body.
        news_title = news_paragraph_soup.find("div", class_="content_title").find("a").text
        news_p = news_paragraph_soup.find("div", class_="article_teaser_body").text
        mars_dict["news_title"] = news_title
        mars_dict["news_p"] = news_p
        return mars_dict
    finally:
        browser.quit()
#JPL Mars Space Images
def scrape_mars_image():
    """Scrape the JPL featured Mars image URL into the shared mars_dict.

    NOTE(review): same finally-clause caveat as scrape_mars_news -- if
    init_browser() raises, ``browser`` is unbound in the cleanup.
    """
    try:
        browser = init_browser()
        space_images_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
        browser.visit(space_images_url)
        space_images_html = browser.html
        featured_image_soup = bs(space_images_html, "html.parser")
        # The image URL is embedded in the article's inline CSS background.
        featured_image_link = featured_image_soup.find("article")["style"].replace("background-image: url('", "").replace("');", "")
        web_link = "https://www.jpl.nasa.gov"
        featured_image_url = web_link + featured_image_link
        mars_dict["featured_image_url"] = featured_image_url
        return mars_dict
    finally:
        browser.quit()
#Mars Weather
def scrape_mars_weather():
    """Scrape the latest text-only Mars weather tweet into the shared mars_dict.

    NOTE(review): if every tweet contains "pic.twitter.com", ``mars_weather``
    is never bound and the assignment below raises NameError.
    """
    try:
        browser = init_browser()
        mars_weather_url = "https://twitter.com/marswxreport?lang=en"
        browser.visit(mars_weather_url)
        mars_weather_html = browser.html
        mars_weather_soup = bs(mars_weather_html, "html.parser")
        mars_weather_tweets = mars_weather_soup.find_all("div", class_="js-tweet-text-container")
        # Take the first tweet that is not just an image link.
        for each_tweet in mars_weather_tweets:
            tweet_text = each_tweet.find("p").text
            if "pic.twitter.com" not in tweet_text:
                mars_weather = each_tweet.find("p").text
                break
            else:
                pass
        mars_dict["mars_weather"] = mars_weather
        return mars_dict
    finally:
        browser.quit()
#Mars Facts
def scrape_mars_facts():
    """Scrape the Mars facts table into mars_dict["mars_facts"] as HTML.

    Returns the shared mars_dict on success; on failure prints "error" and
    returns None (preserved from the original contract).
    """
    try:
        mars_facts_url = "http://space-facts.com/mars/"
        # First table on the page: two columns of description/value pairs.
        mars_facts_df = pd.read_html(mars_facts_url)[0]
        mars_facts_df.columns = ["description", "value"]
        mars_facts_df.set_index("description", inplace=True)
        mars_facts_html = mars_facts_df.to_html()
        mars_dict["mars_facts"] = mars_facts_html
        return mars_dict
    except Exception:
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        print("error")
#Mars Hemispheres
def scrape_mars_hemispheres():
    """Scrape the four hemisphere titles and full-size image URLs into
    mars_dict["hemisphere_image_urls"] as a list of {title, img_url} dicts.

    NOTE(review): same finally-clause caveat as the other scrapers -- if
    init_browser() raises, ``browser`` is unbound in the cleanup.
    """
    try:
        browser = init_browser()
        mars_hemispheres_link = "https://astrogeology.usgs.gov"
        mars_hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
        browser.visit(mars_hemispheres_url)
        mars_hemispheres_html = browser.html
        mars_hemispheres_soup = bs(mars_hemispheres_html, "html.parser")
        hemisphere_image_urls = []
        mars_hemispheres_list = mars_hemispheres_soup.find_all("div", class_="item")
        # Each item links to a detail page holding the full-resolution image.
        for each_hemisphere in mars_hemispheres_list:
            title = each_hemisphere.find("h3").text
            mars_hemispheres_image_link = each_hemisphere.find("a", class_="itemLink product-item")["href"]
            mars_hemispheres_download_url = mars_hemispheres_link + mars_hemispheres_image_link
            browser.visit(mars_hemispheres_download_url)
            mars_hemispheres_download_html = browser.html
            mars_hemispheres_download_soup = bs(mars_hemispheres_download_html, "html.parser")
            mars_hemispheres_full_image_link = mars_hemispheres_download_soup.find("img", class_="wide-image")["src"]
            mars_hemispheres_image_url = mars_hemispheres_link + mars_hemispheres_full_image_link
            hemisphere_image_urls.append({"title" : title, "img_url" : mars_hemispheres_image_url})
        mars_dict["hemisphere_image_urls"] = hemisphere_image_urls
        return mars_dict
    finally:
        browser.quit()
#Scrape mars info
def scrape_mars_info():
try:
scrape_mars_news()
scrape_mars_image()
scrape_mars_weather()
scrape_mars_facts()
scrape_mars_hemispheres()
except:
print("error") | williamsit/Homework | Mission_To_Mars/scrape_mars.py | scrape_mars.py | py | 4,587 | python | en | code | 0 | github-code | 36 |
37489105113 | import struct
import utils
from random import randint
from binascii import hexlify
from abci import ABCIServer
from abci import BaseApplication
from abci import ResponseInfo
from abci import ResponseQuery
from abci import ResponseInitChain
from abci import ResponseCheckTx
from abci import ResponseDeliverTx
from abci import ResponseCommit
from abci import CodeTypeOk
from abci.types_pb2 import ResponseEndBlock
from abci.types_pb2 import ResponseBeginBlock
class SimpleCoin(BaseApplication):
    """
    Simple cryptocurrency implementation, based on the state model.
    Can do two things: sending coins and storing small pieces of data
    in the blockchain.
    """

    def info(self, req):
        """Called by ABCI when the app first starts; reports last block state."""
        self.conf = utils.read_conf()
        self.db = utils.DatabaseProvider(conf=self.conf)
        r = ResponseInfo()
        r.last_block_height = self.db.get_block_height()
        r.last_block_app_hash = self.db.get_block_app_hash().encode()
        return r

    def init_chain(self, v):
        """Set initial state on first run: credit the genesis balances."""
        for address, balance in self.conf['genesis']['lucky_bois'].items():
            self.db.update_state(
                address=address,
                genesis_balance=balance,
                genesis=True
            )
        self.db.set_block_height(0)
        self.db.set_block_app_hash('')
        return ResponseInitChain()

    def check_tx(self, raw_tx):
        """Validate the Tx before entry into the mempool."""
        try:  # Check txn syntax
            tx = utils.Transaction(raw_tx)
        except Exception:
            # Bug fix: the original returned ``Result.error(...)`` but no
            # ``Result`` name exists in this module, so a malformed tx raised
            # NameError instead of being rejected.  Mirror the rejection
            # style used everywhere else in this class.
            return ResponseCheckTx(log='txn syntax invalid', code=1)
        # Check "sender" account has enough coins
        if int(self.db.get_address_info(tx.sender)['balance']) < tx.amount:
            return ResponseCheckTx(log='insufficient funds', code=1)
        if tx.signature_invalid:  # Check txn signature
            return ResponseCheckTx(log='signature invalid', code=1)
        if tx.timestamp_invalid:  # Check timestamp for a big delay
            return ResponseCheckTx(log='lag time is more than 2 hours', code=1)
        # Hooray!
        return ResponseCheckTx(code=CodeTypeOk)

    def deliver_tx(self, raw_tx):
        """Mutate state if valid Tx."""
        try:  # Handle invalid txn
            tx = utils.Transaction(raw_tx)
        except Exception:
            return ResponseDeliverTx(log='txn syntax invalid', code=1)
        self.new_block_txs.append(tx)
        self.db.update_state(tx=tx)
        return ResponseDeliverTx(code=CodeTypeOk)

    def query(self, reqQuery):
        """Answer 'balance' queries for an address (other paths return None)."""
        if reqQuery.path == 'balance':
            address = reqQuery.data.decode('utf-8')
            address_balance = self.db.get_address_info(address)['balance']
            rq = ResponseQuery(
                code=CodeTypeOk,
                key=b'balance',
                value=utils.encode_number(int(address_balance))
            )
            return rq

    def begin_block(self, reqBeginBlock):
        """Called to start processing a block; resets the per-block tx buffer."""
        self.new_block_txs = []
        return ResponseBeginBlock()

    def end_block(self, height):
        """Called at the end of block processing; records the new height and,
        when the block carried transactions, the new app hash."""
        self.db.set_block_height(increment=True)
        if self.new_block_txs:  # Change app hash only if there are any new txns
            self.db.set_block_app_hash(utils.get_merkle_root(self.new_block_txs))
        return ResponseEndBlock()

    def commit(self):
        """Return the current encoded state hash to tendermint."""
        h = self.db.get_block_app_hash().encode()
        return ResponseCommit(data=h)
if __name__ == '__main__':
app = ABCIServer(app=SimpleCoin(), port=26658)
app.run()
| SoftblocksCo/Simple_coin | application.py | application.py | py | 3,914 | python | en | code | 9 | github-code | 36 |
14059339607 | from utils import WordEmbeddingUtil, TextUtil
from config import Config
import numpy as np
import torch
word2vec_util = None
text_cnn_model = torch.load('../pretrained/text_cnn_static.h5')
def static_text_cnn_word2vec_predict(sentence):
    """Score *sentence* with the pre-trained static TextCNN model.

    The sentence is normalized, lemmatized, and stripped of punctuation and
    stop words; the surviving words are embedded into a zero-padded
    (SENTENCE_MAX_LEN, EMBEDDING_SIZE) matrix and fed to the model.

    NOTE(review): there is no truncation -- a sentence with more surviving
    words than Config.SENTENCE_MAX_LEN raises IndexError in the fill loop.
    The word2vec utility is cached lazily in a module-level global.
    """
    global word2vec_util, text_cnn_model
    if word2vec_util is None:
        word2vec_util = WordEmbeddingUtil()
    text_util = TextUtil()
    row = text_util.text_normalization(sentence)
    words = text_util.lemmatize_sentence(row)
    words = text_util.filter_punctuation(words)
    words = text_util.filter_stop_word(words)
    words = text_util.get_words_with_len(words)
    # Zero-padded embedding matrix; unused rows stay all-zero.
    words_matrix = np.zeros([Config.SENTENCE_MAX_LEN, Config.EMBEDDING_SIZE], dtype=np.float32)
    for idx, word in enumerate(words):
        words_matrix[idx] = word2vec_util.get_word2vec_vec(word)
    text_cnn_model.eval()
    # Add a batch dimension: (1, SENTENCE_MAX_LEN, EMBEDDING_SIZE).
    words_matrix_tensor = torch.Tensor(words_matrix)
    words_matrix_tensor = torch.unsqueeze(words_matrix_tensor, 0)
    predict = text_cnn_model(words_matrix_tensor)
    result = predict.item()
    return result
if __name__ == '__main__':
print(static_text_cnn_word2vec_predict("hello world"))
| miyazawatomoka/QIQC | script/predict.py | predict.py | py | 1,148 | python | en | code | 0 | github-code | 36 |
7822082403 | # 풀이 중도 포기 (2/1 이어서 시도)
from collections import deque
from sys import stdin
input = stdin.readline
def dfs(h, w):
queue = deque([h, w])
visited[h, w] = True
for i, j in li[h]:
if not visited[j]:
pass
h, w = map(int, input().split())
li = []
res = 0
max = 0
# 육지 바다 정보 입력
for _ in range(h):
li.append(list(map(str, input().split())))
for i in range(h):
for j in range(w):
if li[i][j] == 'L': #육지라면 bfs 탐색 돌림
visited = [[False]*w]*h
res = bfs(i, j)
if res > max:
max = res
print(res)
| Drizzle03/baekjoon_coding | 20230131/2589_Backtracking.py | 2589_Backtracking.py | py | 646 | python | ko | code | 0 | github-code | 36 |
22123090899 | from conf import * # Это для моего пользованяи можете удалить
import os
TOKEN = TOKEN # Токен бота
WEBHOOK_HOST = WEBHOOK_HOST #Хостинг для вебхуков
WEBHOOK_PATH = f'/webhook/{TOKEN}'
WEBHOOK_URL = f'{WEBHOOK_HOST}{WEBHOOK_PATH}'
WEBAPP_HOST = '0.0.0.0'
WEBAPP_PORT = 5000
pat_home = os.getcwd()
| Colobok2002/Profkom-bot | CONFIG.py | CONFIG.py | py | 366 | python | ru | code | 0 | github-code | 36 |
18694607794 | # -*- coding: utf-8 -*-
"""
Functions to interact with the realsense recordings for HPPD project
"""
#%% imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cv2
import pyrealsense2 as rs
import mediapipe
import sys
import keyboard
import os
import csv
import datetime
import time
import tqdm
import logging
from . import utils
#%% functions
def getInfoTopicTable(fileCompleteName):
    '''
    Returns the frequency and the number of frames in a test by means of the
    functions of bagpy, consequently creates a folder in same directory of the
    bag file analyzed
    Counts the number of frames in the test loading the bagfile, accessing to
    the topics of image data and getting the value of Message Count
    Gets the frequency of execution loading the bagfile, accessing to the topics
    of image data and getting the value of Frequency
    Parameters
    ----------
    fileCompleteName : .bag file
        from realsense recording
    Returns
    -------
    frequency : int
    NB: the returned value is an int, the frequencies of acquisition of the two
    channels may differ and are slightly lower than the nominal value
    numberOfFrames : int
    NB: the returned value is an estimation of the number of paired frames
    Since the two streams are not paired (the pairing is done with rs.playback)
    the number of frames for the color and depth images can be different and not
    equal to the number of paired frames that are obtained executing a playback.
    '''
    # NOTE(review): 'bagpy' is not among the imports visible at the top of this
    # module — confirm it is imported elsewhere, otherwise this raises NameError.
    # reads the bag file (bagpy also writes a folder next to the .bag file)
    b = bagpy.bagreader(fileCompleteName)
    # extracts the topic table
    topicTable = b.topic_table
    # from the topic_table creates a new pandas dataframe with the two topics
    # (the depth and the color image data streams of device 0)
    interestingTopics = topicTable.loc[ \
        (topicTable['Topics'] == '/device_0/sensor_0/Depth_0/image/data') | \
        (topicTable['Topics'] == '/device_0/sensor_1/Color_0/image/data') ]
    # from the new dataframe, extracts the value:
    # frequency = ceiling of the mean of the two per-stream frequencies,
    # numberOfFrames = the larger of the two per-stream message counts
    frequency = np.ceil(interestingTopics.loc[:,"Frequency"].mean())
    numberOfFrames = interestingTopics.loc[:,"Message Count"].max()
    return frequency, numberOfFrames
def getDataFromIndex(fileCompleteName, index):
    '''
    Given a bag file and the index, returns:
        - time stamp
        - rgb image
        - depth image
    at the given index (0-based: index = 0 is the first paired frame)
    To do so, a playback of the file is executed. Consequently, the highest the
    index, the slowest is the function
    Parameters
    ----------
    fileCompleteName : bag file from realsense recording
        contains the data of rgb and depth images
    index : int
        index of the data that are required
    Returns
    -------
    timestamp_s : float
        timestamp corresponding to the recording of the file
        to print the corresponding date:
        >>> print(datetime.datetime.fromtimestamp(timestamp_s).strftime('%Y-%m-%d %H:%M:%S.%f'))
    color_image_rgb : matrix w*h*3
        Contains the rgb channel values of every pixel
    depth_image : matrix w*h*1
        Contains the depth value of every pixel
    Raises
    ------
    IndexError
        if the playback ends before the requested index is reached
    '''
    # make sure the file has the .bag extension
    if not fileCompleteName[-4:] == '.bag':
        fileCompleteName = fileCompleteName + '.bag'
    # =============================================================================
    # START THE STREAM OF THE PIPELINE
    # =============================================================================
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, fileCompleteName, repeat_playback = False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    # real time disabled so that no frame is dropped during the playback
    playback.set_real_time(False)
    colorizer = rs.colorizer()
    colorizer.set_option(rs.option.color_scheme, 1) # jet
    aligned_stream = rs.align(rs.stream.color) # alignment depth -> color
    # =============================================================================
    # INITIALIZATION
    # =============================================================================
    # so at the first execution becomes 0
    frameCounter = -1
    # sentinels: stay None if the playback ends before reaching the index
    timestamp_s = None
    color_image_rgb = None
    depth_image = None
    try:
        # BUGFIX: the condition was 'frameCounter <= index', which processed one
        # extra frame and therefore returned the data of frame (index + 1);
        # with '<' the last processed frame is exactly frame 'index'
        while frameCounter < index:
            try:
                frame = pipeline.wait_for_frames()
            except:
                # the playback is over: stop looping, the index was not reached
                break
            frameCounter = frameCounter + 1
            # =============================================================================
            # GET THE REQUIRED DATA FROM THE BAG FILE
            # =============================================================================
            # alignement of the frames: the obtained resolution is the one of the rgb image
            frame = aligned_stream.process(frame)
            # get the depth and color frames
            depth_frame = frame.get_depth_frame()
            color_frame = frame.get_color_frame()
            # get the timestamp in seconds
            timestamp_s = frame.get_timestamp()/1000
            # from frames to images
            # the image saved in the bag file is in rgb format,
            # the one required from mediapipe as well
            color_image_rgb = np.asanyarray(color_frame.get_data())
            depth_image = np.asanyarray(depth_frame.get_data())
    finally:
        # =============================================================================
        # OTHER OPERATIONS
        # =============================================================================
        # stop the pipeline
        pipeline.stop()
        # close all the windows
        cv2.destroyAllWindows()
    # explicit error instead of returning undefined/stale data
    if frameCounter < index:
        raise IndexError('the playback ended at frame {} before reaching index {}'.format(frameCounter, index))
    return timestamp_s, color_image_rgb, depth_image
def loadTopic(bagreaderElement, topicName, printLoadingTime):
    """
    Extracts one topic from a bag file through the bagpy API.
    bagpy materializes the topic as a csv file on disk; that file is then
    loaded into a pandas dataframe and returned.
    Parameters
    ----------
    bagreaderElement : return of the bagreader function
        example: b = bagreader(bagFileCompletePath)
    topicName : String
        The name of the topic that wants to be loaded
    printLoadingTime : Boolean
        If True, the elapsed time to load the topic is logged (once after the
        csv extraction, once after the csv load)
    Returns
    -------
    A pandas dataframe corresponding to the topic
    """
    start_time = time.time() if printLoadingTime else None
    # bagpy writes (or reuses) a csv file for the topic and returns its path
    csv_path = bagreaderElement.message_by_topic(topic = topicName)
    if printLoadingTime:
        logging.info('Time elapsed: {:.2f} [s]'.format(time.time() - start_time))
    # load the csv file previously generated
    topic_df = pd.read_csv(csv_path)
    if printLoadingTime:
        logging.info('Time elapsed: {:.2f} [s]'.format(time.time() - start_time))
    return topic_df
def createTimesDataFrame(metaDataframe, freq, rgb_depth):
    """
    Builds the per-frame timing dataframe out of a raw metadata table.
    The metadata table stacks a fixed number of rows per acquired frame
    (24 for the depth channel, 21 for the rgb channel); each timing quantity
    sits at a fixed row offset inside that per-frame group.
    Parameters
    ----------
    metaDataframe : pandas dataframe of metadata
        Can come from depth or rgb channel
    freq : int
        Frequency of acquisition of the frames
    rgb_depth : string
        'rgb' for the color channel; 'depth'/'stereo'/'3d' for the depth channel
    Returns
    -------
    time_df : pandas dataframe with 5 columns, all in ms and relative to start:
        'index time', 'system time', 'arrival time', 'backend time',
        'nominal time' (the theoretical timeline at the given frequency)
    global_system_time : pandas dataframe with the absolute system time
    Returns None for an unrecognized rgb_depth value.
    """
    # per-channel layout: rows per frame and the row offsets (inside each
    # per-frame group) of system time / time of arrival / backend timestamp
    if rgb_depth == 'rgb':
        rows_per_frame = 21
        sys_offset, arrival_offset, backend_offset = 0, 6, 7
    elif rgb_depth in ('depth', 'stereo', '3d'):
        rows_per_frame = 24
        sys_offset, arrival_offset, backend_offset = 0, 8, 9
    else:
        logging.error('not recognized dataframe')
        return None
    n_rows = metaDataframe.shape[0]
    # pick one value per frame: column 0 holds the index time, column 2 the values
    idx_series = metaDataframe.iloc[np.arange(0, n_rows, rows_per_frame), 0]
    global_sys = metaDataframe.iloc[np.arange(sys_offset, n_rows, rows_per_frame), 2].astype(float)
    arrival = metaDataframe.iloc[np.arange(arrival_offset, n_rows, rows_per_frame), 2].astype(float)
    backend = metaDataframe.iloc[np.arange(backend_offset, n_rows, rows_per_frame), 2].astype(float)
    # the clocks are absolute: make them relative to their first sample (already ms)
    sys_rel = (global_sys - global_sys.iloc[0]).to_numpy()
    arrival_rel = (arrival - arrival.iloc[0]).to_numpy()
    backend_rel = (backend - backend.iloc[0]).to_numpy()
    # index time is expressed in s: convert to ms
    idx_arr = idx_series.to_numpy() * 1000
    n_frames = len(idx_arr)
    # theoretical acquisition instants; np.arange may overshoot by one element
    # because of float precision, hence the explicit trim, then s -> ms
    nominal = np.arange(0, n_frames * 1/freq, 1/freq)[0:n_frames] * 1000
    time_df = pd.DataFrame(data = {'index time': idx_arr, \
                                   'system time': sys_rel, \
                                   'arrival time': arrival_rel, \
                                   'backend time': backend_rel, \
                                   'nominal time': nominal})
    global_system_time = pd.DataFrame(data = {'global system time': global_sys.to_numpy()})
    return time_df, global_system_time
def plotTiming(timeDataframe, freq, title, essentialPlots):
    """
    Creates 4 subplots displaying timing information
    Upper left: time elapsed at the acquisition of every frame with respect to
    the start of the acquisition
    Upper right: time elapsed between each couple of frames
    Lower left: drift with respect to the nominal time (the final value is the
    delay with respect to the theorically perfect recording); when freq == 0
    it shows instead the pairwise differences between all time columns
    Lower Right: Histogram of the time elapsed between each couple of frames
    Parameters
    ----------
    timeDataframe : pandas dataframe containing the timing information
        use the one returned from "createTimesDataFrame"
    freq : int
        Frequency of acquisition of the frames (0 disables the nominal
        reference in the histogram and switches the drift plot to pairwise
        differences)
    title : string
        Title of the whole figure
    essentialPlots : bool
        If True, only 'system time' is plotted
    Returns
    -------
    None.
    """
    fig, axes = plt.subplots(nrows=2, ncols=2)
    fig.suptitle(title, fontsize=16)
    # renaming for shorter handling
    if essentialPlots: # only system time is considered
        df = timeDataframe[['system time', 'nominal time']]
    else:
        df = timeDataframe
    # obtaining the shape of the dataframe
    (rows, columns) = df.shape
    # elapsed time
    this_ax = axes[0,0]
    df.plot(ax = this_ax, style = '.-')
    this_ax.grid()
    this_ax.set_xlabel("frame number")
    this_ax.set_ylabel("[ms]")
    this_ax.set_title("elapsed time to acquire each frame")
    # time difference (first differences of each time column)
    this_ax = axes[0,1]
    df.diff().plot(ax = this_ax, style = '.-')
    this_ax.grid()
    this_ax.set_xlabel("frame number")
    this_ax.set_ylabel("[ms]")
    this_ax.set_title("dt between each frame and previous one")
    # distribution of time difference (gaussian hopefully)
    this_ax = axes[1,1]
    # solution 1: doesn't plot nominal time and resizes automatically
    df.diff().loc[:,df.diff().columns != 'nominal time'].plot.hist(bins = 30, ax = this_ax, alpha = 0.5)
    # solution 2: plots also nominal time but doesn't resize automatically
    # plot = df.diff().plot(kind = 'density', ax = this_ax)
    # this_ax.set_ylim(-0.1, 1.5)
    # to give a reference with the nominal time
    if freq != 0:
        this_ax.axvline(1/freq*1000, label = 'nominal', color = 'C4')
    this_ax.grid()
    this_ax.set_xlabel("[ms]")
    this_ax.set_ylabel("frequency")
    # if freq != 0:
    #     this_ax.set_xlim(1/freq*0.7*1000, 1/freq*1.3*1000)
    this_ax.set_title("time distribution")
    this_ax.legend()
    if freq != 0:
        # new dataframe containing the difference with the nominal time
        # creating an empty data frame
        tmp_df = pd.DataFrame()
        # getting the names of the columns from the previous database
        columnNames = df.columns.values.tolist()
        for column in range(0,columns):
            # computing the difference, storing it in tmp
            tmp = df.iloc[:,column] - df['nominal time']
            # adding the tmp column to the dataframe
            tmp_df[columnNames[column]] = tmp
    else:
        # new dataframe containing the difference between each couple
        # creating an empty data frame
        tmp_df = pd.DataFrame()
        # getting the names of the columns from the previous database
        columnNames = df.columns.values.tolist()
        for i in range(columns): # for every column
            for j in range(i, columns): # from i to the max number to avoid rep
                if i != j: # to avoid the difference between two same array
                    tmp = df.iloc[:,i] - df.iloc[:,j]
                    tmp_df[str(columnNames[i] + ' - ' + columnNames[j])] = tmp
    # from here on, df holds the drift (or pairwise-difference) columns
    df = tmp_df
    this_ax = axes[1,0]
    df.plot(ax = this_ax, style = '.-')
    this_ax.grid()
    this_ax.set_xlabel("frame number")
    this_ax.set_ylabel("[ms]")
    this_ax.set_title("drift with respect to nominal time")
    # plt.show(block=False)
    # plt.pause(0.1)
def infoTiming(timeDataFrame, columnName, freq):
"""
Given a time dataframe containing a column called as specified in
columnName, for this application, the most reliable is "system time",
returns a dictionary containing information regarding the timing execution:
- 'freq th',
- 'mean freq real',
- 'std dev freq real',
- 'time stamp th [ms]',
- 'mean time stamp real [ms]',
- 'std dev time stamp real [ms]',
- 'elapsed time real [ms]',
- 'number of samples real',
- 'elapsed time th [ms]', (to acquire a number of samples equal to
number_of_samples_real, the theorical required time should be)
- 'number of samples th' {in the elapsed_time_real should have been acquired
a number of samples equal to:}
Parameters
----------
timeDataFrame : pandas dataframe
Usually system time is the most reliable one
columnName : string
Name of the column that wants to be analyzed, usually system time
freq : int
Theorical frequency of acquisition
Returns
-------
d : dictionary
Contains all timing parameters characterizing the test
"""
# renaming the dataframe for a better handling
df = timeDataFrame
(rows, columns) = df.shape
# comparison of frequencies
freq_th = float(freq)
# the number of time stamps is equal to the number of elements - 1
mean_freq_real = float((rows-1)/df[columnName].iloc[-1]*1000) #freq in Hz
std_freq_real = float(np.nanstd(1/df[columnName].diff()) * 1000) #freq in Hz
# comparison of time stamps
time_stamp_theorical = 1/freq * 1000 # from s to ms
mean_time_stamp_real = float(np.nanmean(df[columnName].diff()))
std_time_stamp_real = float(np.nanstd(df[columnName].diff()))
# comparison of elapsed time and number of samples
elapsed_time_real = float(df[columnName].iloc[-1])
number_of_samples_real = float(rows)
# to acquire a number of samples equal to number_of_samples_real,
# the theorical required time should be:
elapsed_time_theorical = number_of_samples_real / freq * 1000 # from s to ms
# in the elapsed_time_real should have been acquired a number of samples equal to:
number_of_samples_theorical = float(np.floor(elapsed_time_real/1000 * freq))
# creating the dictionary
d = {'freq th': freq_th, \
'mean freq real': mean_freq_real, \
'std dev freq real' : std_freq_real, \
'time stamp th [ms]': time_stamp_theorical, \
'mean time stamp real [ms]': mean_time_stamp_real, \
'std dev time stamp real [ms]' : std_time_stamp_real, \
'elapsed time real [ms]': elapsed_time_real, \
'number of samples real': number_of_samples_real, \
'elapsed time th [ms]': elapsed_time_theorical, \
'number of samples th' : number_of_samples_theorical}
return d
# def compareTiming(arrayOfTimes,arrayNames, *title):
# # creating the dataframe with the given arrays
# df = pd.DataFrame(arrayOfTimes).T
# # for the tile title
# if title:
# pass
# else:
# title = "comparison"
# # for the labels
# if arrayNames:
# df.columns = arrayNames
# # calling the plotTiming function with frequency = 0
# freq = 0
# plotTiming(df, freq, title, essentialPlots = False)
def logBagFile(bagFileCompletePath, depth_freq, rgb_freq, printLoadingTime, \
               showPlots, essentialPlots, showTimingTable):
    """
    Given a bag file, loads the metadata files regarding the rgb and the depth
    channel and plots figures to show the timing execution
    Parameters
    ----------
    bagFileCompletePath : String
        path to the bag file
    depth_freq : Int
        Frequency of acquisition of the depth channel
    rgb_freq : Int
        Frequency of acquisition of the rgb channel
    printLoadingTime : Bool
        If True, the elapsed time to load the topic is printed
        It's passed to the function loadTopic
    showPlots : Bool
        If True, shows the plots regarding the timing execution.
        It's a flag in this function
    essentialPlots : Bool
        If True, only system time is plotted,
        It's passed to the function plotTiming
    showTimingTable : Bool
        If True, from the two dictionaries containing the timing information
        (the one that are also returned), creates a pandas dataframe and prints it
    Returns
    -------
    dictDEP : dictionary
        Contains all parameters characterizing the test of the depth channel
    dictRGB : dictionary
        Contains all parameters characterizing the test of the rgb channel
    df_depth_time: timing dataframe of the depth channel (createTimesDataFrame)
    df_rgb_time: timing dataframe of the rgb channel (createTimesDataFrame)
    global_depth_time: absolute system time of the depth channel
    global_rgb_time: absolute system time of the rgb channel
    """
    # to get the name of the file
    path, fileName = os.path.split(bagFileCompletePath)
    logging.info('Loading information on the file: ' + fileName)
    # NOTE(review): 'bagpy' is not among the imports visible at the top of this
    # module — confirm it is imported elsewhere, otherwise this raises NameError.
    # creates the bagreader element
    b = bagpy.bagreader(bagFileCompletePath)
    # loading the metadata topics (the data topics are too heavy)
    df_depth_meta = loadTopic(b, '/device_0/sensor_0/Depth_0/image/metadata', printLoadingTime)
    df_rgb_meta = loadTopic(b, '/device_0/sensor_1/Color_0/image/metadata', printLoadingTime)
    # convert raw metadata tables into per-frame timing dataframes
    df_depth_time, global_depth_time = createTimesDataFrame(df_depth_meta, depth_freq, 'depth')
    df_rgb_time, global_rgb_time = createTimesDataFrame(df_rgb_meta, rgb_freq, 'rgb')
    if showPlots:
        plotTiming(df_depth_time, depth_freq, (fileName + ' - DEPTH'), essentialPlots)
        plotTiming(df_rgb_time, rgb_freq, (fileName + ' - RGB'), essentialPlots)
    # summary statistics on the most reliable clock ('system time')
    dictDEP = infoTiming(df_depth_time, 'system time', depth_freq)
    dictRGB = infoTiming(df_rgb_time, 'system time', rgb_freq)
    if showTimingTable:
        results = pd.DataFrame({'depth':pd.Series(dictDEP),'rgb':pd.Series(dictRGB)})
        print(results)
    return dictDEP, dictRGB, df_depth_time, df_rgb_time, global_depth_time, global_rgb_time
def getTimeStampArray(bagFileCompleteName, printInfo = False):
    """
    Plays back the whole recording and collects the timestamp of every
    paired frame.
    Parameters
    ----------
    bagFileCompleteName : String
        directory to the bag file
    printInfo : bool, optional
        Set true if you want to print the timeframe stored at each iteration.
        The default is False.
    Returns
    -------
    time_stamp_array : float64 array
        array containing the corresponding ms of acquisition of each frame
    """
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, bagFileCompleteName, repeat_playback = False)
    playback = pipeline.start(config).get_device().as_playback()
    # real time disabled so that no frame is dropped during the playback
    playback.set_real_time(False)
    time_stamp_array = []
    try:
        while True:
            try:
                frames = pipeline.wait_for_frames()
            except:
                # no more frames available: the playback is over
                break
            stamp_ms = frames.get_timestamp()
            if printInfo:
                print(datetime.datetime.fromtimestamp(stamp_ms/1000).strftime('%Y-%m-%d %H:%M:%S.%f'))
            time_stamp_array = np.append(time_stamp_array, stamp_ms)
    finally:
        pipeline.stop()
    if printInfo:
        print('all the frames were analyzed')
    return time_stamp_array
def extractAviVideosFromBag(fileCompleteName, outputDir, frequency = 60, numberOfFrames = 20000, color = True, depth_splitted = True, depth_colorized = True, textOnImage = True):
    '''
    Saves in the specified folder a folder with the name of the test.
    The subfolder contains a csv file with the timestamp of each paired frame
    and up to three avi videos:
    - COL: plain extraction of the rgb channel;
    - DEP splitted: the 16 bit depth value of every pixel is packed into the
      3 8-bit channels of the avi frame (channel 0 = zeros, channel 1 = most
      significant byte, channel 2 = least significant byte);
    - DEP colorized: rendering of the depth info through a jet colormap.
    When using this function, keep in mind that the avi video is a compression
    of the information that each frame has
    Parameters
    ----------
    fileCompleteName : .bag file
        .bag file containing the rgb/bgr frames, the depth frames and the time array
    outputDir : string
        directory where the files will be saved
    frequency : int, optional
        nominal frequency of recording, frequency for the video saved in .avi
        The default is 60.
    numberOfFrames : int, optional
        attended number of frames in the recording. The extractor will do
        numberOfFrames iterations, or, if the extraction is complete,
        will stop earlier. Better put a larger number than the actual one.
        Useful to print the loading bar.
        The default is 20000.
    color : bool, optional
        if True, the rgb .avi video is extracted. The default is True.
    depth_splitted : bool, optional
        if True, the 16-bit-split depth .avi video is extracted. The default is True.
    depth_colorized : bool, optional
        if True, the colorized depth .avi video is extracted. The default is True.
    textOnImage : bool, optional
        set true if you want to add the timing information on the images.
        The default is True.
    Returns
    -------
    time_exec_array: array
        contains information about the execution of the extraction
    '''
    if textOnImage:
        # =============================================================================
        # WRITE ON THE IMAGE PARAMS
        # =============================================================================
        font = cv2.FONT_HERSHEY_SIMPLEX
        origin = (20, 20)
        fontScale = 0.8
        # BUGFIX: this tuple was previously assigned to 'color', silently
        # overwriting the boolean parameter of the same name (so the rgb video
        # was always produced, even with color=False)
        text_color = (255, 255, 255)
        thickness = 1
    # check extension of the file
    fileCompleteName = utils.checkExtension(fileCompleteName, '.bag')
    # get only the file name excluding ".bag"
    fileName = os.path.split(fileCompleteName)[1][:-4]
    # in order to give a unique name to the execution
    thisExecutionDate = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y%m%d%H%M%S')
    # create folder for the given execution of the given file
    outputFileDir = os.path.join(outputDir, fileName + '-' + thisExecutionDate)
    os.makedirs(outputFileDir, exist_ok=True)
    # create the complete directory to the 3 different outputs
    if color:
        videoRGBCompleteName = os.path.join(outputFileDir, fileName + '-color.avi')
    if depth_splitted:
        videoDEPCompleteName = os.path.join(outputFileDir, fileName + '-depth splitted.avi')
    if depth_colorized:
        videoDEPcolorizedCompleteName = os.path.join(outputFileDir, fileName + '-depth colorized.avi')
    timeCSVCompleteName = os.path.join(outputFileDir, fileName + '-timestamp.csv')
    logging.info('working on: ' + fileName)
    # =============================================================================
    # START THE STREAM OF THE PIPELINE
    # =============================================================================
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, fileCompleteName, repeat_playback = False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    # real time disabled so that no frame is dropped during the playback
    playback.set_real_time(False)
    colorizer = rs.colorizer()
    colorizer.set_option(rs.option.color_scheme, 1) # jet
    aligned_stream = rs.align(rs.stream.color) # alignment depth -> color
    # =============================================================================
    # INITIALIZATION
    # =============================================================================
    # so at the first execution becomes 0
    frameCounter = -1
    # preallocated; trimmed to the actual number of frames in the finally block
    time_exec_array = [0] * numberOfFrames
    timestamp_array = [0] * numberOfFrames
    startTime = time.time()
    # writers are created lazily at the first frame (frame size known only then)
    videoOutCol = None
    videoOutDep = None
    videoOutDepCol = None
    try:
        for _ in tqdm.tqdm(range(numberOfFrames)):
            try:
                frame = pipeline.wait_for_frames()
            except:
                # the playback is over
                break
            frameCounter = frameCounter + 1
            # time frame on the execution of the loop (debug)
            time_exec_array[frameCounter] = time.time() - startTime
            # =============================================================================
            # GET THE REQUIRED DATA FROM THE BAG FILE
            # =============================================================================
            # alignement of the frames: the obtained resolution is the one of the rgb image
            frame = aligned_stream.process(frame)
            depth_frame = frame.get_depth_frame()
            color_frame = frame.get_color_frame()
            # get the timestamp in seconds
            timestamp_s = frame.get_timestamp()/1000
            # each image is computed only if the corresponding output is requested
            if color:
                # the image saved in the bag file is in rgb format;
                # cv2 writes images in bgr
                color_image_rgb = np.asanyarray(color_frame.get_data())
                color_image_bgr = cv2.cvtColor(color_image_rgb, cv2.COLOR_BGR2RGB)
            if depth_splitted:
                # CREATE DEPTH IMAGE through conversion: pack the 16 bit depth
                # into two 8 bit channels (lossless, recoverable as msb*256+lsb)
                depth_image = np.asanyarray(depth_frame.get_data())
                dep_image_height, dep_image_width = depth_image.shape
                zerosbit = np.zeros([dep_image_height, dep_image_width], dtype = np.uint8)
                # less significant bits are the rest of the division for 256
                lsb = (depth_image % 256).astype(np.uint8)
                # most significant bits are the division for 256 without rest
                msb = (depth_image / 256).astype(np.uint8)
                depth_image_3ch = cv2.merge([zerosbit, msb, lsb])
            if depth_colorized:
                # CREATE DEPTH IMAGE COLORIZED through colorizer
                # (previously computed twice per frame)
                depth_image_colorized = np.asanyarray(colorizer.colorize(depth_frame).get_data())
            if textOnImage:
                stringForImage = 'frame: {:05d} - '.format(frameCounter) + \
                    datetime.datetime.fromtimestamp(timestamp_s).strftime('%Y-%m-%d %H:%M:%S.%f')
                # puts text on the requested images
                if color:
                    color_image_bgr = cv2.putText(color_image_bgr, stringForImage, origin, font, fontScale, text_color, thickness, cv2.LINE_AA)
                if depth_splitted:
                    depth_image_3ch = cv2.putText(depth_image_3ch, stringForImage, origin, font, fontScale, text_color, thickness, cv2.LINE_AA)
                if depth_colorized:
                    depth_image_colorized = cv2.putText(depth_image_colorized, stringForImage, origin, font, fontScale, text_color, thickness, cv2.LINE_AA)
            if frameCounter == 0:
                # BUGFIX: previously this referenced videoRGBCompleteName even
                # when color=False (NameError); the output folder already exists
                if color:
                    image_height, image_width, _unused = color_image_bgr.shape
                    videoOutCol = cv2.VideoWriter(videoRGBCompleteName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frequency, (image_width, image_height))
                if depth_splitted:
                    image_height, image_width, _unused = depth_image_3ch.shape
                    videoOutDep = cv2.VideoWriter(videoDEPCompleteName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frequency, (image_width, image_height))
                if depth_colorized:
                    image_height, image_width, _unused = depth_image_colorized.shape
                    videoOutDepCol = cv2.VideoWriter(videoDEPcolorizedCompleteName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frequency, (image_width, image_height))
            if color:
                videoOutCol.write(color_image_bgr)
            if depth_splitted:
                videoOutDep.write(depth_image_3ch)
            if depth_colorized:
                videoOutDepCol.write(depth_image_colorized)
            timestamp_array[frameCounter] = timestamp_s
    finally:
        # BUGFIX: slicing with [:frameCounter] dropped the last processed frame;
        # frameCounter is the index of the last frame, so keep frameCounter + 1
        timestamp_array = timestamp_array[:frameCounter + 1]
        time_exec_array = time_exec_array[:frameCounter + 1]
        # create the folder if it doesn't exist
        os.makedirs(os.path.split(timeCSVCompleteName)[0], exist_ok=True)
        # save the timestamps (empty csv if no frame was extracted)
        if timestamp_array:
            df = pd.DataFrame(np.vstack(timestamp_array), columns=['timestamp'])
        else:
            df = pd.DataFrame(columns=['timestamp'])
        df.to_csv(timeCSVCompleteName, index = False)
        # BUGFIX: release the writers so the avi files are properly finalized
        for writer in (videoOutCol, videoOutDep, videoOutDepCol):
            if writer is not None:
                writer.release()
        # =============================================================================
        # OTHER OPERATIONS
        # =============================================================================
        # stop the pipeline
        pipeline.stop()
        # close all the windows
        cv2.destroyAllWindows()
    # gives few information to the user
    elapsedTime = time.time()-startTime
    framesAnalyzed = frameCounter + 1
    freqOfExecution = framesAnalyzed/elapsedTime
    logging.info("{:d} frames were analyzed in {:.2f} seconds ({:.2f} frames per second)"\
                 .format(framesAnalyzed, elapsedTime, freqOfExecution))
    return time_exec_array
def extractPngFramesFromBag(fileCompleteName, outputDir, frequency = 60, numberOfFrames = 20000, textOnImage = True):
    '''
    Extract per-frame PNGs (color + depth) and a timestamp CSV from a .bag file.

    Saves in the specified folder a subfolder named after the test. The
    subfolder contains a csv file with the timestamp of each paired frame and
    two other subfolders: COL and DEP.
    COL holds the rgb frames as w*h*3 uint8 images (0->255).
    DEP holds the depth frames as w*h*1 uint16 images (0->65535).

    Parameters
    ----------
    fileCompleteName : str
        .bag file containing the rgb/bgr frames, the depth frames and the time array
    outputDir : str
        directory where the files will be saved
    frequency : int, optional
        nominal frequency of recording. The default is 60.
    numberOfFrames : int, optional
        attended number of frames in the recording. The extractor will do
        numberOfFrames iterations, or, if the extraction is complete,
        will stop earlier. Better put a larger number than the actual one.
        Useful to print the loading bar.
        The default is 20000.
    textOnImage : bool, optional
        set true if you want to add the timing information on the images.
        The default is True.

    Returns
    -------
    time_exec_array : list
        elapsed wall-clock time at the start of each extraction loop (debug info)
    '''
    if textOnImage:
        # =============================================================================
        # WRITE ON THE IMAGE PARAMS (cv2.putText overlay settings)
        # =============================================================================
        font = cv2.FONT_HERSHEY_SIMPLEX
        origin = (20, 20)
        fontScale = 0.8
        color = (255, 255, 255)
        thickness = 1
    # check extension of the file
    fileCompleteName = utils.checkExtension(fileCompleteName, '.bag')
    # get only the file name excluding ".bag"
    fileName = os.path.split(fileCompleteName)[1][:-4]
    # in order to give a unique name to the execution
    thisExecutionDate = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y%m%d%H%M%S')
    # create folder for the given execution of the given file
    outputFileDir = os.path.join(outputDir, fileName + '-' + thisExecutionDate)
    # create directory of folders for saving col and dep
    outputCOLDir = os.path.join(outputFileDir, 'col')
    outputDEPDir = os.path.join(outputFileDir, 'dep')
    # create the folders if they don't exist
    os.makedirs(outputFileDir, exist_ok=True)
    os.makedirs(outputCOLDir, exist_ok = True)
    os.makedirs(outputDEPDir, exist_ok = True)
    # create the complete directory
    timeCSVCompleteName = os.path.join(outputFileDir, 'timestamp.csv')
    logging.info('working on: ' + fileName)
    # =============================================================================
    # # sometimes the function to load the bag file gets stuck, better avoid this
    # # get the number of frames
    # frequency, numberOfFrames = getInfoTopicTable(fileCompleteName)
    # # since the method getInfoTopicTable gives an estimation of the number
    # # of frames, it's better to increase this value. Executing the for loop and
    # # catching the exception won't give any problem
    # numberOfFrames = int(numberOfFrames * 1.2)
    # =============================================================================
    # =============================================================================
    # START THE STREAM OF THE PIPELINE
    # =============================================================================
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, fileCompleteName, repeat_playback = False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    # non-real-time playback lets us read every frame without drops
    playback.set_real_time(False)
    aligned_stream = rs.align(rs.stream.color) # alignment depth -> color
    # =============================================================================
    # INITIALIZATION
    # =============================================================================
    # so at the first executuion becomes 0
    frameCounter = -1
    # to save the timing execution of each loop (debug)
    time_exec_array = [0] * numberOfFrames
    # to save the starting of the execution
    startTime = time.time()
    # at each iteration add a new row containing landMarkArray and timestamp_s
    timestamp_array = [0] * numberOfFrames
    try:
        for i in tqdm.tqdm(range(numberOfFrames)):
            try:
                frame = pipeline.wait_for_frames()
            except:  # NOTE(review): bare except — assumed to signal end of the .bag stream; confirm narrowing to RuntimeError is safe
                break
            if i == 322:
                debugFlag = True  # NOTE(review): leftover debugging hook, has no effect — candidate for removal
            # =============================================================================
            # DEBUGGING
            # =============================================================================
            frameCounter = frameCounter + 1
            # time frame on the execution of the loop
            now = time.time()
            # time_exec_array = np.append(time_exec_array, now-startTime)
            time_exec_array[frameCounter] = now-startTime
            # =============================================================================
            # GET THE REQUIRED DATA FROM THE BAG FILE
            # =============================================================================
            # alignement of the frames: the obtained resolution is the one of the rgb image
            frame = aligned_stream.process(frame)
            # get the depth and color frames
            depth_frame = frame.get_depth_frame()
            color_frame = frame.get_color_frame()
            # get the timestamp in seconds
            timestamp_s = frame.get_timestamp()/1000
            # print(datetime.datetime.fromtimestamp(timestamp_s).strftime('%Y-%m-%d %H:%M:%S.%f'))
            # from frames to images
            # the image saved in the bag file is in rgb format,
            # the one required from mediapipe as well,
            # the one for cv2 should be in bgr
            color_image_rgb = np.asanyarray(color_frame.get_data())
            # BGR2RGB swaps the same two channels as RGB2BGR, so this yields BGR
            color_image_bgr = cv2.cvtColor(color_image_rgb, cv2.COLOR_BGR2RGB)
            depth_image = np.asanyarray(depth_frame.get_data())
            if textOnImage:
                stringForImage = 'frame: {:05d} - '.format(frameCounter) + \
                    datetime.datetime.fromtimestamp(timestamp_s).strftime('%Y-%m-%d %H:%M:%S.%f')
                # puts text on the image
                color_image_bgr = cv2.putText(color_image_bgr, stringForImage, origin, font, fontScale, color, thickness, cv2.LINE_AA)
                # makes no sense write on the image since it's saved in 16 bit format
                # depth_image = cv2.putText(depth_image, stringForImage, origin, font, fontScale, color, thickness, cv2.LINE_AA)
            frameName = '{:05d}'.format(frameCounter)
            cv2.imwrite(os.path.join(outputCOLDir,frameName+'.png'), color_image_bgr)
            cv2.imwrite(os.path.join(outputDEPDir,frameName+'.png'), depth_image)
            timestamp_array[frameCounter] = timestamp_s
    finally:
        # cut the files preallocated with
        timestamp_array = timestamp_array[:frameCounter]
        time_exec_array = time_exec_array[:frameCounter]
        # create the folder if it doesn't exist
        os.makedirs(os.path.split(timeCSVCompleteName)[0], exist_ok=True)
        # create the pandas dataframe
        df = pd.DataFrame(np.vstack(timestamp_array), columns=['timestamp'])
        # saves the pandas dataframe in a csv file
        df.to_csv(timeCSVCompleteName, index = False)
        # =============================================================================
        # OTHER OPERATIONS
        # =============================================================================
        # stop the pipeline
        pipeline.stop()
        # close all the windows
        cv2.destroyAllWindows()
        # gives few information to the user
        elapsedTime = time.time()-startTime
        freqOfExecution = frameCounter/elapsedTime
        logging.info("{:d} frames were analyzed in {:.2f} seconds ({:.2f} frames per second)"\
                     .format(frameCounter, elapsedTime, freqOfExecution))
    return time_exec_array
| mmtlab/wheelchair_contact_detection | hppdWC/bagRS.py | bagRS.py | py | 43,231 | python | en | code | 0 | github-code | 36 |
32523088106 | import os
from flask import Flask, jsonify, request, send_from_directory, Blueprint
from flask_restful import Api
from werkzeug.utils import secure_filename
from resources.invoice import InvoicesResource, InvoiceResource, MarkDigitizedInvoice
# from config import UPLOAD_FOLDER
UPLOAD_FOLDER = "./uploads/"
ALLOWED_EXTENSIONS = {'pdf'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
api = Api(app)
@app.route("/hello")
def index():
    """Health-check endpoint: respond with a static greeting as JSON."""
    greeting = {'message': 'hello world'}
    return jsonify(greeting)
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/uploads/<path:filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file from UPLOAD_FOLDER as a download attachment."""
    return send_from_directory(app.config['UPLOAD_FOLDER'],
                               filename, as_attachment=True)
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    """Upload endpoint: GET serves a minimal HTML form, POST stores a PDF.

    POST responds 400 when the file part is missing or has an empty
    filename, 415 for a disallowed extension, 200 on success and 500 when
    the saved file cannot be found on disk afterwards.
    """
    if request.method == 'POST':
        # the form below posts the file under the field name 'file'
        if 'file' not in request.files:
            return "Error! No file selected", 400
        file = request.files['file']
        # browsers may submit an empty part without a filename
        if file.filename == '':
            return "No file selected", 400
        if file and allowed_file(file.filename):
            # secure_filename strips directory components / unsafe characters
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # return redirect(url_for('uploaded_file',
            #                        filename=filename))
            # verify the file actually landed on disk before reporting success
            if os.path.isfile(os.path.join(app.config['UPLOAD_FOLDER'], filename)):
                return 'File uploaded successfully', 200
            else:
                return 'Server Error in uploading file', 500
        else:
            return "Invalid file type: {}".format(file.mimetype), 415
    return '''
    <!doctype html>
    <title>Upload new File</title>
    <h2>Upload new File</h2>
    <form method=post enctype=multipart/form-data>
      <input type=file name=file>
      <input type=submit value=Upload>
    </form>
    '''
# register APIs
api.add_resource(InvoicesResource, "/invoices")
api.add_resource(InvoiceResource, "/invoices/<id>")
api.add_resource(MarkDigitizedInvoice, "/invoices/<id>/digitize")
if __name__ == "__main__":
from db import db
db.init_app(app)
# db.create_all()
app.run(port=5000, debug=True)
| KetanSingh11/Python_Assignment_-_Plate_IQ | plateiq_app/app.py | app.py | py | 2,447 | python | en | code | 0 | github-code | 36 |
16103607796 | import imp
from multiprocessing.spawn import import_main_path
from django.shortcuts import render
from student.models.students import Student
def index(request):
    """Render the student list page; on POST, create a new Student first.

    POST fields: ``name`` and ``adm`` (admission number).  Creation
    failures are swallowed so the list still renders, matching the
    original best-effort behaviour.
    """
    if request.method == "POST":
        name = request.POST.get("name")
        adm = request.POST.get("adm")
        try:
            student = Student(name=name, adm=adm)
            student.save()
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; DB/validation errors still fall through
            # to rendering the list.  (Debug prints of submitted form data
            # were removed — they leaked user input to stdout.)
            print("Fail")
    # newest students first
    student = Student.objects.all().order_by('-id')
    data = {
        "students": student
    }
    return render(request, 'index.html', data)
class Solution:
    def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
        """Partition person indices into groups of their requested sizes.

        ``groupSizes[i]`` is the size of the group person ``i`` must join.
        People requesting the same size are buffered together; whenever a
        buffer reaches its target size it is flushed into the result.

        This replaces the original three-branch condition (plus a special
        case for size 1) with a single uniform buffer-and-flush step that
        produces identical output.  Time O(n), space O(n).
        """
        pending = collections.defaultdict(list)  # size -> indices not yet grouped
        res = []
        for i, size in enumerate(groupSizes):
            pending[size].append(i)
            if len(pending[size]) == size:
                res.append(pending[size])
                pending[size] = []
        return res
| ZhengLiangliang1996/Leetcode_ML_Daily | contest/weekcontest166/groupPeople.py | groupPeople.py | py | 521 | python | en | code | 1 | github-code | 36 |
42243134200 | import sys, time, itertools
import dill as pickle
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp
import scipy.stats as stats
import scipy.optimize as opti
import bead_util as bu
import calib_util as cal
import transfer_func_util as tf
import configuration as config
import warnings
warnings.filterwarnings("ignore")
##################################################################
######################## Script Params ###########################
# Analysis configuration: which cantilever positions to use and geometry (um)
only_closest = False #True
minsep = 15       # um, minimum bead-attractor separation
maxthrow = 80     # um, full cantilever travel
beadheight = 10   # um, nominal bead height

# Input data directory (previous runs kept commented for reference)
#data_dir = '/data/20180314/bead1/grav_data/ydrive_1sep_1height_extdrive_nofield_long'
#data_dir = '/data/20180314/bead1/grav_data/ydrive_1sep_1height_nofield_shieldin'
#data_dir = '/data/20180314/bead1/grav_data/ydrive_1sep_1height_1V-1300Hz_shieldin_0mV-cant'
#data_dir = '/data/20180314/bead1/grav_data/ydrive_1sep_1height_2V-2200Hz_shield_0mV-cant'
data_dir = '/data/20180314/bead1/grav_data/ydrive_6sep_1height_shield-2Vac-2200Hz_cant-0mV'

#savepath = '/sensitivities/20180314_grav_shield-2200Hz_cant-m100mV_allharm.npy'
savepath = '/sensitivities/20180314_grav_shieldin-2V-2200Hz_cant-0V_allharm.npy'
save = False   # save the computed (lambda, alpha) arrays to savepath
load = False   # load previously computed arrays instead of recomputing
file_inds = (0, 10)   # slice of the found data files to analyze

theory_data_dir = '/data/grav_sim_data/2um_spacing_data/'

tfdate = '' #'20180215'   # transfer-function calibration date ('' = default)
diag = False              # also run the diagonalized-data analysis

confidence_level = 0.95          # CL for the alpha exclusion
lamb_range = (1.7e-6, 1e-4)      # lambda range (m) actually fit

#user_lims = [(65e-6, 80e-6), (-240e-6, 240e-6), (-5e-6, 5e-6)]
user_lims = [(5e-6, 80e-6), (-240e-6, 240e-6), (-5e-6, 5e-6)]
#user_lims = []

tophatf = 300   # Hz, doesn't reconstruct data above this frequency
nharmonics = 10
harms = [1,3,5,7]

plot_just_current = False   # only plot published limits, skip the analysis
figtitle = ''

# which response axes to exclude from the chi^2
ignoreX = False
ignoreY = False
ignoreZ = False

compute_min_alpha = False

##################################################################
################# Constraints to plot against ####################

# axis limits for the final alpha-lambda exclusion plot
alpha_plot_lims = (1000, 10**13)
lambda_plot_lims = (10**(-7), 10**(-4))

# Published limit curves (CSV: lambda, alpha) for comparison
#limitdata_path = '/home/charles/opt_lev_analysis/gravity_sim/gravity_sim_v1/data/' + \
#                 'decca2_limit.txt'
limitdata_path = '/sensitivities/decca1_limits.txt'
limitdata = np.loadtxt(limitdata_path, delimiter=',')
limitlab = 'No Decca 2'

#limitdata_path2 = '/home/charles/opt_lev_analysis/gravity_sim/gravity_sim_v1/data/' + \
#                  'no_decca2_limit.txt'
limitdata_path2 = '/sensitivities/decca2_limits.txt'
limitdata2 = np.loadtxt(limitdata_path2, delimiter=',')
limitlab2 = 'With Decca 2'
##################################################################
##################################################################
##################################################################
# Various fitting functions
def parabola(x, a, b, c):
    """Quadratic model a*x^2 + b*x + c; used to fit the chi^2 vs. alpha curves."""
    return a * x**2 + b * x + c
def line(x, a, b):
    """Linear model a*x + b."""
    return a * x + b
def const(x, a):
    """Constant model: returns *a* regardless of *x*."""
    return a
def flicker(x, a):
    """1/f (flicker-noise) model a/x."""
    return a * (1. / x)
def build_mod_grav_funcs(theory_data_dir):
    '''Load simulated gravity curves and wrap them in 3D interpolators.

    Reads the output of /data/grav_sim_data/process_data.py (which processes
    the raw farmshare simulation output).

    INPUTS:  theory_data_dir, path to the directory containing the data

    OUTPUTS: gfuncs, list of 3 interpolating functions for regular
                     gravity, one per response axis [fx, fy, fz]
             yukfuncs, 3 x Nlambda nested list of interpolating functions
                       for yukawa-modified gravity, indexed
                       [[y0_fx, y1_fx, ...], [y0_fy, ...], [y0_fz, ...]]
             lambdas, np.array of all lambdas from the simulation,
                      sorted descending
             lims, list of (min, max) tuples for the x, y, z coordinate
                   limits of the interpolation grid
    '''
    # Load the simulated force curves and grid coordinates
    Gdata = np.load(theory_data_dir + 'Gravdata.npy')
    yukdata = np.load(theory_data_dir + 'yukdata.npy')
    lambdas = np.load(theory_data_dir + 'lambdas.npy')
    xpos = np.load(theory_data_dir + 'xpos.npy')
    ypos = np.load(theory_data_dir + 'ypos.npy')
    zpos = np.load(theory_data_dir + 'zpos.npy')
    # Normalize ordering: lambdas descending, with the force data flipped
    # along its lambda axis to stay in sync
    if lambdas[-1] > lambdas[0]:
        lambdas = lambdas[::-1]
        yukdata = np.flip(yukdata, 0)
    # Coordinate limits guard against out-of-range interpolation queries
    lims = [(np.min(coord), np.max(coord)) for coord in (xpos, ypos, zpos)]
    grid = (xpos, ypos, zpos)
    # One interpolator per response axis for Newtonian gravity...
    gfuncs = [interp.RegularGridInterpolator(grid, Gdata[:, :, :, resp])
              for resp in range(3)]
    # ...and one per (axis, lambda) pair for the Yukawa modification
    yukfuncs = [[interp.RegularGridInterpolator(grid, yukdata[li, :, :, :, resp])
                 for li in range(len(lambdas))]
                for resp in range(3)]
    return gfuncs, yukfuncs, lambdas, lims
def get_data_at_harms(files, gfuncs, yukfuncs, lambdas, lims, \
                      minsep=20, maxthrow=80, beadheight=5,\
                      cantind=0, ax1='x', ax2='z', diag=True, plottf=False, \
                      width=0, nharmonics=10, harms=[], \
                      ext_cant_drive=False, ext_cant_ind=1, \
                      ignoreX=False, ignoreY=False, ignoreZ=False, noiseband=10):
    '''Extract data and force templates at the cantilever-drive harmonics.

    Loads each file, diagonalizes it, and computes the FFT of the measured
    data at the drive harmonics together with the matching FFTs of the
    Newtonian and Yukawa force templates (evaluated along the measured
    cantilever trajectory).  Results are stored in a nested dict and the
    raw data is released from memory.

    NOTE(review): ``harms=[]`` is a mutable default argument; it is only
    read (passed through to get_boolean_cantfilt) so it is harmless here,
    but ``harms=None`` would be safer.

    INPUTS:  files, list of file names to extract data from
             gfuncs/yukfuncs/lambdas/lims, output of build_mod_grav_funcs
             minsep/maxthrow/beadheight, geometry (um) used to map stage
                 positions to bead-attractor coordinates
             cantind, cantilever electrode index
             ax1, axis with different DC positions
             ax2, 2nd axis with different DC positions
             ignoreX/Y/Z, zero the template for that response axis

    OUTPUTS: fildat, nested dict keyed [cantbias][ax1pos][ax2pos] -> list
             (one entry per file) of per-lambda tuples
             (yuklambda, datffts, diagdatffts, daterrs, diagdaterrs,
              gfft, yukfft)
    '''
    #parts = data_dir.split('/')
    #prefix = parts[-1]
    #savepath = '/processed_data/grav_data/' + prefix + '_fildat.p'
    #try:
    #    fildat = pickle.load(open(savepath, 'rb'))
    #    return fildat
    #except:
    #    print 'Loading data from: ', data_dir
    fildat = {}
    # cache of template FFTs per (ax1pos, ax2pos): [gfft, per-lambda yukffts]
    temp_gdat = {}
    for fil_ind, fil in enumerate(files):
        bu.progress_bar(fil_ind, len(files), suffix=' Sorting Files, Extracting Data')
        ### Load data
        df = bu.DataFile()
        df.load(fil)
        df.calibrate_stage_position()
        cantbias = df.electrode_settings['dc_settings'][0]
        ax1pos = df.stage_settings[ax1 + ' DC']
        ax2pos = df.stage_settings[ax2 + ' DC']
        # build the nested dict levels on first encounter of each key
        if cantbias not in list(fildat.keys()):
            fildat[cantbias] = {}
        if ax1pos not in list(fildat[cantbias].keys()):
            fildat[cantbias][ax1pos] = {}
        if ax2pos not in list(fildat[cantbias][ax1pos].keys()):
            fildat[cantbias][ax1pos][ax2pos] = []
        if ax1pos not in list(temp_gdat.keys()):
            temp_gdat[ax1pos] = {}
        if ax2pos not in list(temp_gdat[ax1pos].keys()):
            temp_gdat[ax1pos][ax2pos] = [[], []]
            # [[]] * n aliases one list, but entries are only replaced
            # (never mutated), so the aliasing is harmless here
            temp_gdat[ax1pos][ax2pos][1] = [[]] * len(lambdas)
        cfind = len(fildat[cantbias][ax1pos][ax2pos])
        fildat[cantbias][ax1pos][ax2pos].append([])
        if fil_ind == 0 and plottf:
            df.diagonalize(date=tfdate, maxfreq=tophatf, plot=True)
        else:
            df.diagonalize(date=tfdate, maxfreq=tophatf)
        # NOTE(review): the harmonic filter (ginds etc.) is computed from the
        # first file only and reused for all later files — confirm all files
        # share the same drive frequency
        if fil_ind == 0:
            ginds, fund_ind, drive_freq, drive_ind = \
                df.get_boolean_cantfilt(ext_cant_drive=ext_cant_drive, ext_cant_ind=ext_cant_ind, \
                                        nharmonics=nharmonics, harms=harms, width=width)
        datffts, diagdatffts, daterrs, diagdaterrs = \
            df.get_datffts_and_errs(ginds, drive_freq, noiseband=noiseband, plot=False, \
                                    diag=diag)
        # evaluate templates along the measured cantilever drive trajectory
        drivevec = df.cant_data[drive_ind]
        mindrive = np.min(drivevec)
        maxdrive = np.max(drivevec)
        posvec = np.linspace(mindrive, maxdrive, 500)
        ones = np.ones_like(posvec)
        start = time.time()
        for lambind, yuklambda in enumerate(lambdas):
            # map stage positions -> (separation, height) in attractor frame
            if ax1 == 'x' and ax2 == 'z':
                newxpos = minsep + (maxthrow - ax1pos)
                newheight = ax2pos - beadheight
            elif ax1 =='z' and ax2 == 'x':
                newxpos = minsep + (maxthrow - ax2pos)
                newheight = ax1pos - beadheight
            else:
                print("Coordinate axes don't make sense for gravity data...")
                print("Proceeding anyway, but results might be hard to interpret")
                newxpos = ax1pos
                newheight = ax2pos
            # skip positions outside the simulated interpolation grid
            if (newxpos < lims[0][0]*1e6) or (newxpos > lims[0][1]*1e6):
                #print 'skipped x'
                continue
            if (newheight < lims[2][0]*1e6) or (newheight > lims[2][1]*1e6):
                #print 'skipped z'
                continue
            pts = np.stack((newxpos*ones, posvec, newheight*ones), axis=-1)
            gfft = [[], [], []]
            yukfft = [[], [], []]
            for resp in [0,1,2]:
                if (ignoreX and resp == 0) or (ignoreY and resp == 1) or (ignoreZ and resp == 2):
                    gfft[resp] = np.zeros(np.sum(ginds))
                    yukfft[resp] = np.zeros(np.sum(ginds))
                    continue
                # reuse cached template FFTs when this position was seen before
                if len(temp_gdat[ax1pos][ax2pos][0]):
                    gfft[resp] = temp_gdat[ax1pos][ax2pos][0][resp]
                else:
                    gforcevec = gfuncs[resp](pts*1e-6)
                    gforcefunc = interp.interp1d(posvec, gforcevec)
                    gforcet = gforcefunc(drivevec)
                    gfft[resp] = np.fft.rfft(gforcet)[ginds]
                if len(temp_gdat[ax1pos][ax2pos][1][lambind]):
                    yukfft[resp] = temp_gdat[ax1pos][ax2pos][1][lambind][resp]
                else:
                    yukforcevec = yukfuncs[resp][lambind](pts*1e-6)
                    yukforcefunc = interp.interp1d(posvec, yukforcevec)
                    yukforcet = yukforcefunc(drivevec)
                    yukfft[resp] = np.fft.rfft(yukforcet)[ginds]
            gfft = np.array(gfft)
            yukfft = np.array(yukfft)
            temp_gdat[ax1pos][ax2pos][0] = gfft
            temp_gdat[ax1pos][ax2pos][1][lambind] = yukfft
            outdat = (yuklambda, datffts, diagdatffts, daterrs, diagdaterrs, gfft, yukfft)
            fildat[cantbias][ax1pos][ax2pos][cfind].append(outdat)
        stop = time.time()
        #print 'func eval time: ', stop-start
    return fildat
def get_alpha_lambda(fildat, diag=True, ignoreX=False, ignoreY=False, ignoreZ=False, \
                     plot=True, save=False, savepath='', confidence_level=0.95, \
                     only_closest=False, ax1='x', ax2='z', lamb_range=(1e-9, 1e-2)):
    '''Fit a confidence-level alpha exclusion for every lambda.

    For each lambda, a grid of trial alphas is scanned: the reduced chi^2
    between the measured harmonic FFTs (from get_data_at_harms) and the
    model (Newtonian + alpha * Yukawa template) is computed over all
    biases, positions and files.  A parabola is fit to chi^2(alpha) and
    the alpha where chi^2 rises by the Wilks confidence increment above
    its minimum is taken as the excluded alpha.

    INPUTS:  fildat, nested dict from get_data_at_harms
             lamb_range, (min, max) lambdas to actually process
             only_closest, restrict to the closest separation/height

    OUTPUTS: (lambdas, alphas, diagalphas); entries outside lamb_range
             remain zero.
    '''
    # For the confidence interval, compute the inverse CDF of a
    # chi^2 distribution at given confidence level and compare to
    # liklihood ratio via a goodness of fit parameter.
    # Refer to scipy.stats documentation to understand chi2
    chi2dist = stats.chi2(1)
    # factor of 0.5 from Wilks's theorem: -2 log (Liklihood) ~ chi^2(1)
    con_val = 0.5 * chi2dist.ppf(confidence_level)
    colors = bu.get_color_map(len(lambdas))
    alphas = np.zeros_like(lambdas)
    diagalphas = np.zeros_like(lambdas)
    testalphas = np.linspace(-10**10, 10**10, 11)
    # NOTE(review): minalphas is never used in this function
    minalphas = [[]] * len(lambdas)
    biasvec = list(fildat.keys())
    biasvec.sort()
    ax1posvec = list(fildat[biasvec[0]].keys())
    ax1posvec.sort()
    ax2posvec = list(fildat[biasvec[0]][ax1posvec[0]].keys())
    ax2posvec.sort()
    if only_closest:
        if ax1 == 'x' and ax2 == 'z':
            seps = minsep + (maxthrow - np.array(ax1posvec))
            heights = np.array(ax2posvec) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[sind]]
            ax2posvec = [ax2posvec[hind]]
        elif ax1 =='z' and ax2 == 'x':
            seps = minsep + (maxthrow - np.array(ax2posvec))
            # NOTE(review): ax1pos (module/global) is referenced here instead
            # of ax1posvec — likely a bug in this rarely-exercised branch
            heights = np.array(ax1pos) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[hind]]
            ax2posvec = [ax2posvec[sind]]
    newlamb = lambdas[(lambdas > lamb_range[0]) * (lambdas < lamb_range[-1])]
    tot_iterations = len(biasvec) * len(ax1posvec) * len(ax2posvec) * \
                     len(newlamb) * len(testalphas) + 1
    i = -1
    # To test chi2 fit against "fake" data, uncomment these lines
    rands = np.random.randn(*fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][0][1].shape)
    rands2 = np.random.randn(*fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][0][1].shape)
    for lambind, yuklambda in enumerate(lambdas):
        #if lambind != 48:
        #    continue
        if (yuklambda < lamb_range[0]) or (yuklambda > lamb_range[1]):
            continue
        # scale the trial-alpha grid from the data/template magnitude ratio
        test = fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][lambind]
        test_yukdat = test[-1]
        test_dat = test[1]
        newalpha = 1e-4 * np.sqrt(np.mean(np.abs(test_dat) / np.abs(test_yukdat)))
        testalphas = np.linspace(-1.0*newalpha, newalpha, 21)
        chi_sqs = np.zeros(len(testalphas))
        diagchi_sqs = np.zeros(len(testalphas))
        for alphaind, testalpha in enumerate(testalphas):
            N = 0
            chi_sq = 0
            diagchi_sq = 0
            for bias, ax1pos, ax2pos in itertools.product(biasvec, ax1posvec, ax2posvec):
                i += 1
                bu.progress_bar(i, tot_iterations, suffix=' Fitting the Data for Chi^2')
                for fil_ind in range(len(fildat[bias][ax1pos][ax2pos])):
                    dat = fildat[bias][ax1pos][ax2pos][fil_ind][lambind]
                    assert dat[0] == yuklambda
                    _, datfft, diagdatfft, daterr, diagdaterr, gfft, yukfft = dat
                    # To test chi2 fit against "fake" data, uncomment these lines
                    #datfft = yukfft * -0.5e9
                    #datfft += (1.0 / np.sqrt(2)) * daterr * rands + \
                    #          (1.0 / np.sqrt(2)) * daterr * rands2 * 1.0j
                    #gfft = np.zeros_like(datfft)
                    for resp in [0,1,2]:
                        if (ignoreX and resp == 0) or \
                           (ignoreY and resp == 1) or \
                           (ignoreZ and resp == 2):
                            print(ignoreX, ignoreY, ignoreZ, resp)
                            continue
                        # residuals of data vs (gravity + alpha*yukawa) model
                        re_diff = datfft[resp].real - \
                                  (gfft[resp].real + testalpha * yukfft[resp].real )
                        im_diff = datfft[resp].imag - \
                                  (gfft[resp].imag + testalpha * yukfft[resp].imag )
                        if diag:
                            diag_re_diff = diagdatfft[resp].real - \
                                           (gfft[resp].real + testalpha * yukfft[resp].real )
                            diag_im_diff = diagdatfft[resp].imag - \
                                           (gfft[resp].imag + testalpha * yukfft[resp].imag )
                        #plt.plot(np.abs(re_diff))
                        #plt.plot(daterr[resp])
                        #plt.show()
                        chi_sq += ( np.sum( np.abs(re_diff)**2 / (0.5*daterr[resp]**2) ) + \
                                    np.sum( np.abs(im_diff)**2 / (0.5*daterr[resp]**2) ) )
                        if diag:
                            diagchi_sq += ( np.sum( np.abs(diag_re_diff)**2 / \
                                                    (0.5*diagdaterr[resp]**2) ) + \
                                            np.sum( np.abs(diag_im_diff)**2 / \
                                                    (0.5*diagdaterr[resp]**2) ) )
                        N += len(re_diff) + len(im_diff)
            chi_sqs[alphaind] = chi_sq / (N - 1)
            if diag:
                diagchi_sqs[alphaind] = diagchi_sq / (N - 1)
        max_chi = np.max(chi_sqs)
        if diag:
            max_diagchi = np.max(diagchi_sqs)
        max_alpha = np.max(testalphas)
        # initial parabola guess: curvature from the scan extremes
        p0 = [max_chi/max_alpha**2, 0, 1]
        if diag:
            diag_p0 = [max_diagchi/max_alpha**2, 0, 1]
        #if lambind == 0:
        #    p0 = [0.15e9, 0, 5]
        #else:
        #    p0 = p0_old
        if plot:
            plt.figure(1)
            plt.plot(testalphas, chi_sqs, color = colors[lambind])
            if diag:
                plt.figure(2)
                plt.plot(testalphas, diagchi_sqs, color = colors[lambind])
        try:
            popt, pcov = opti.curve_fit(parabola, testalphas, chi_sqs, \
                                        p0=p0, maxfev=100000)
            if diag:
                diagpopt, diagpcov = opti.curve_fit(parabola, testalphas, diagchi_sqs, \
                                                    p0=diag_p0, maxfev=1000000)
        except:
            # fallback: flat parabola at the mean chi^2 so the solver below
            # still runs (NOTE(review): bare except also masks typos here)
            print("Couldn't fit")
            popt = [0,0,0]
            popt[2] = np.mean(chi_sqs)
        regular_con_val = con_val + np.min(chi_sqs)
        if diag:
            diag_con_val = con_val + np.min(diagchi_sqs)
        # Select the positive root for the non-diagonalized data
        # (quadratic formula for chi^2(alpha) == confidence threshold)
        soln1 = ( -1.0 * popt[1] + np.sqrt( popt[1]**2 - \
                        4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
        soln2 = ( -1.0 * popt[1] - np.sqrt( popt[1]**2 - \
                        4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
        if diag:
            diagsoln1 = ( -1.0 * diagpopt[1] + np.sqrt( diagpopt[1]**2 - \
                        4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
            diagsoln2 = ( -1.0 * diagpopt[1] - np.sqrt( diagpopt[1]**2 - \
                        4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
        if soln1 > soln2:
            alpha_con = soln1
        else:
            alpha_con = soln2
        if diag:
            if diagsoln1 > diagsoln2:
                diagalpha_con = diagsoln1
            else:
                diagalpha_con = diagsoln2
        alphas[lambind] = alpha_con
        if diag:
            # NOTE(review): this stores alpha_con, not diagalpha_con —
            # diagalpha_con is computed above and never used; likely a bug
            diagalphas[lambind] = alpha_con
    if plot:
        plt.figure(1)
        plt.title('Goodness of Fit for Various Lambda', fontsize=16)
        plt.xlabel('Alpha Parameter [arb]', fontsize=14)
        plt.ylabel('$\chi^2$', fontsize=18)
        if diag:
            plt.figure(2)
            plt.title('Goodness of Fit for Various Lambda - DIAG', fontsize=16)
            plt.xlabel('Alpha Parameter [arb]', fontsize=14)
            plt.ylabel('$\chi^2$', fontsize=18)
        plt.show()
    if not diag:
        diagalphas = np.zeros_like(alphas)
    if save:
        if savepath == '':
            print('No save path given, type full path here')
            savepath = input('path: ')
        np.save(savepath, [lambdas, alphas, diagalphas])
    return lambdas, alphas, diagalphas
def get_alpha_vs_file(fildat, diag=True, ignoreX=False, ignoreY=False, ignoreZ=False, \
                      plot=True, save=False, savepath='', confidence_level=0.95, \
                      only_closest=False, ax1='x', ax2='z', lamb_range=(1e-9, 1e-2)):
    '''Compute the per-file confidence-level alpha (diagnostic).

    Same chi^2 machinery as get_alpha_lambda, but the parabola fit and
    confidence-threshold solve are done per individual file, producing an
    alpha time series (and histogram) to check stability across files.

    NOTE(review): the loop body is hard-gated by ``if lambind != 48`` —
    only a single lambda is ever processed; confirm this debug filter is
    intentional.  The return value is the minalphas list from the last
    (bias, ax1pos, ax2pos) combination processed.

    INPUTS:  fildat, nested dict from get_data_at_harms

    OUTPUTS: minalphas, list of per-file confidence-level alphas
    '''
    # For the confidence interval, compute the inverse CDF of a
    # chi^2 distribution at given confidence level and compare to
    # liklihood ratio via a goodness of fit parameter.
    # Refer to scipy.stats documentation to understand chi2
    chi2dist = stats.chi2(1)
    # factor of 0.5 from Wilks's theorem: -2 log (Liklihood) ~ chi^2(1)
    con_val = 0.5 * chi2dist.ppf(confidence_level)
    colors = bu.get_color_map(len(lambdas))
    alphas = np.zeros_like(lambdas)
    diagalphas = np.zeros_like(lambdas)
    testalphas = np.linspace(-10**10, 10**10, 11)
    biasvec = list(fildat.keys())
    biasvec.sort()
    ax1posvec = list(fildat[biasvec[0]].keys())
    ax1posvec.sort()
    ax2posvec = list(fildat[biasvec[0]][ax1posvec[0]].keys())
    ax2posvec.sort()
    if only_closest:
        if ax1 == 'x' and ax2 == 'z':
            seps = minsep + (maxthrow - np.array(ax1posvec))
            heights = np.array(ax2posvec) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[sind]]
            ax2posvec = [ax2posvec[hind]]
        elif ax1 =='z' and ax2 == 'x':
            seps = minsep + (maxthrow - np.array(ax2posvec))
            # NOTE(review): references global ax1pos instead of ax1posvec —
            # same suspect branch as in get_alpha_lambda
            heights = np.array(ax1pos) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[hind]]
            ax2posvec = [ax2posvec[sind]]
    newlamb = lambdas[(lambdas > lamb_range[0]) * (lambdas < lamb_range[-1])]
    tot_iterations = len(biasvec) * len(ax1posvec) * len(ax2posvec) * len(newlamb) * len(testalphas)
    i = -1
    for lambind, yuklambda in enumerate(lambdas):
        # NOTE(review): debug filter — only lambda index 48 is processed
        if lambind != 48:
            continue
        if (yuklambda < lamb_range[0]) or (yuklambda > lamb_range[1]):
            continue
        # scale the trial-alpha grid from the data/template magnitude ratio
        test = fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][lambind]
        test_yukdat = test[-1]
        test_dat = test[1]
        newalpha = 1e-4 * np.sqrt(np.mean(np.abs(test_dat) / np.abs(test_yukdat)))
        testalphas = np.linspace(-1.0*newalpha, newalpha, 11)
        for bias, ax1pos, ax2pos in itertools.product(biasvec, ax1posvec, ax2posvec):
            i += 1
            bu.progress_bar(i, tot_iterations)
            minalphas = [0] * len(fildat[bias][ax1pos][ax2pos])
            # NOTE(review): diag_minalphas is filled but never used/plotted
            diag_minalphas = [0] * len(fildat[bias][ax1pos][ax2pos])
            for fil_ind in range(len(fildat[bias][ax1pos][ax2pos])):
                dat = fildat[bias][ax1pos][ax2pos][fil_ind][lambind]
                assert dat[0] == yuklambda
                _, datfft, diagdatfft, daterr, diagdaterr, gfft, yukfft = dat
                chi_sqs = np.zeros(len(testalphas))
                diagchi_sqs = np.zeros(len(testalphas))
                for alphaind, testalpha in enumerate(testalphas):
                    chi_sq = 0
                    diagchi_sq = 0
                    N = 0
                    for resp in [0,1,2]:
                        if (ignoreX and resp == 0) or \
                           (ignoreY and resp == 1) or \
                           (ignoreZ and resp == 2):
                            continue
                        # residuals of data vs (gravity + alpha*yukawa) model
                        re_diff = datfft[resp].real - \
                                  (gfft[resp].real + testalpha * yukfft[resp].real )
                        im_diff = datfft[resp].imag - \
                                  (gfft[resp].imag + testalpha * yukfft[resp].imag )
                        if diag:
                            diag_re_diff = diagdatfft[resp].real - \
                                           (gfft[resp].real + testalpha * yukfft[resp].real )
                            diag_im_diff = diagdatfft[resp].imag - \
                                           (gfft[resp].imag + testalpha * yukfft[resp].imag )
                        #plt.plot(np.abs(re_diff))
                        #plt.plot(daterr[resp])
                        #plt.show()
                        chi_sq += ( np.sum( np.abs(re_diff)**2 / (0.5*(daterr[resp]**2)) ) + \
                                    np.sum( np.abs(im_diff)**2 / (0.5*(daterr[resp]**2)) ) )
                        if diag:
                            diagchi_sq += ( np.sum( np.abs(diag_re_diff)**2 / \
                                                    (0.5*(diagdaterr[resp]**2)) ) + \
                                            np.sum( np.abs(diag_im_diff)**2 / \
                                                    (0.5*(diagdaterr[resp]**2)) ) )
                        N += len(re_diff) + len(im_diff)
                    chi_sqs[alphaind] = chi_sq / (N - 1)
                    if diag:
                        diagchi_sqs[alphaind] = diagchi_sq / (N - 1)
                max_chi = np.max(chi_sqs)
                if diag:
                    max_diagchi = np.max(diagchi_sqs)
                max_alpha = np.max(testalphas)
                # initial parabola guess: curvature from the scan extremes
                p0 = [max_chi/max_alpha**2, 0, 1]
                if diag:
                    diag_p0 = [max_diagchi/max_alpha**2, 0, 1]
                try:
                    popt, pcov = opti.curve_fit(parabola, testalphas, chi_sqs, \
                                                p0=p0, maxfev=100000)
                    if diag:
                        diagpopt, diagpcov = opti.curve_fit(parabola, testalphas, diagchi_sqs, \
                                                            p0=diag_p0, maxfev=1000000)
                except:
                    # fallback: flat parabola at mean chi^2 so solve still runs
                    print("Couldn't fit")
                    popt = [0,0,0]
                    popt[2] = np.mean(chi_sqs)
                regular_con_val = con_val + np.min(chi_sqs)
                if diag:
                    diag_con_val = con_val + np.min(diagchi_sqs)
                # Select the positive root for the non-diagonalized data
                soln1 = ( -1.0 * popt[1] + np.sqrt( popt[1]**2 - \
                                4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
                soln2 = ( -1.0 * popt[1] - np.sqrt( popt[1]**2 - \
                                4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
                if diag:
                    diagsoln1 = ( -1.0 * diagpopt[1] + np.sqrt( diagpopt[1]**2 - \
                                4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
                    diagsoln2 = ( -1.0 * diagpopt[1] - np.sqrt( diagpopt[1]**2 - \
                                4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
                if soln1 > soln2:
                    alpha_con = soln1
                else:
                    alpha_con = soln2
                if diag:
                    if diagsoln1 > diagsoln2:
                        diagalpha_con = diagsoln1
                    else:
                        diagalpha_con = diagsoln2
                minalphas[fil_ind] = alpha_con
                if diag:
                    diag_minalphas[fil_ind] = diagalpha_con
            if plot:
                minfig, minaxarr = plt.subplots(1,2,figsize=(10,5),dpi=150)
                minaxarr[0].plot(minalphas)
                minaxarr[0].set_title('Min $\\alpha$ vs. Time', fontsize=18)
                minaxarr[0].set_xlabel('File Num', fontsize=16)
                minaxarr[0].set_ylabel('$\\alpha$ [arb]', fontsize=16)
                minaxarr[1].hist(minalphas, bins=20)
                minaxarr[1].set_xlabel('$\\alpha$ [arb]', fontsize=16)
                plt.tight_layout()
                plt.show()
    return minalphas
# ---- Run the analysis (unless only plotting published limits) ----
if not plot_just_current:
    gfuncs, yukfuncs, lambdas, lims = build_mod_grav_funcs(theory_data_dir)
    datafiles = bu.find_all_fnames(data_dir, ext=config.extensions['data'])
    datafiles = datafiles[file_inds[0]:file_inds[1]]
    if len(datafiles) == 0:
        print("Found no files in: ", data_dir)
        quit()
    # extract data + force templates at the drive harmonics
    fildat = get_data_at_harms(datafiles, gfuncs, yukfuncs, lambdas, lims, \
                               minsep=minsep, maxthrow=maxthrow, beadheight=beadheight, \
                               cantind=0, ax1='x', ax2='z', diag=diag, plottf=False, \
                               nharmonics=nharmonics, harms=harms, \
                               ext_cant_drive=True, ext_cant_ind=1, \
                               ignoreX=ignoreX, ignoreY=ignoreY, ignoreZ=ignoreZ)
    # optional per-file alpha diagnostic
    if compute_min_alpha:
        _ = get_alpha_vs_file(fildat, only_closest=only_closest, \
                              ignoreX=ignoreX, ignoreY=ignoreY, ignoreZ=ignoreZ, \
                              lamb_range=lamb_range, diag=diag, plot=True)
    # main alpha-lambda exclusion fit
    newlambdas, alphas, diagalphas = \
            get_alpha_lambda(fildat, only_closest=only_closest, \
                             ignoreX=ignoreX, ignoreY=ignoreY, ignoreZ=ignoreZ, \
                             lamb_range=lamb_range, diag=diag)
    outdat = [newlambdas, alphas, diagalphas]
    if save:
        np.save(savepath, outdat)
# ---- Optionally reload a previously saved result ----
if load:
    dat = np.load(savepath)
    newlambdas = dat[0]
    alphas = dat[1]
    diagalphas = dat[2]
# ---- Plot the exclusion curve against published limits ----
fig, ax = plt.subplots(1,1,sharex='all',sharey='all',figsize=(5,5),dpi=150)
if diag:
    fig2, ax2 = plt.subplots(1,1,sharex='all',sharey='all',figsize=(5,5),dpi=150)
if not plot_just_current:
    ax.loglog(newlambdas, alphas, linewidth=2, label='95% CL')
    if diag:
        ax2.loglog(newlambdas, diagalphas, linewidth=2, label='95% CL')
ax.loglog(limitdata[:,0], limitdata[:,1], '--', label=limitlab, linewidth=3, color='r')
ax.loglog(limitdata2[:,0], limitdata2[:,1], '--', label=limitlab2, linewidth=3, color='k')
ax.grid()
ax.set_xlim(lambda_plot_lims[0], lambda_plot_lims[1])
ax.set_ylim(alpha_plot_lims[0], alpha_plot_lims[1])
ax.set_xlabel('$\lambda$ [m]')
ax.set_ylabel('$\\alpha$')
ax.legend(numpoints=1, fontsize=9)
ax.set_title(figtitle)
plt.tight_layout()
if diag:
    ax2.loglog(limitdata[:,0], limitdata[:,1], '--', label=limitlab, linewidth=3, color='r')
    ax2.loglog(limitdata2[:,0], limitdata2[:,1], '--', label=limitlab2, linewidth=3, color='k')
    ax2.grid()
    ax2.set_xlim(lambda_plot_lims[0], lambda_plot_lims[1])
    ax2.set_ylim(alpha_plot_lims[0], alpha_plot_lims[1])
    ax2.set_xlabel('$\lambda$ [m]')
    ax2.set_ylabel('$\\alpha$')
    ax2.legend(numpoints=1, fontsize=9)
    ax2.set_title(figtitle)
    plt.tight_layout()
plt.show()
def czy_wszystkie(napis):
    """Return True if *napis* contains every letter of the English alphabet.

    Only lowercase a-z are considered; the input is not case-folded, so an
    uppercase letter does not count as its lowercase counterpart (matching
    the original behaviour).
    """
    alfabet = "abcdefghijklmnopqrstuvwxyz"  # fixed transposed 'wv' from the original
    # Set containment checks all 26 letters in one pass; the original
    # flag-based loop kept scanning after the first missing letter.
    return set(alfabet) <= set(napis)
# Read a word and report (in Polish) whether it is a pangram: TAK = yes, NIE = no
napis = input("Podaj slowo do sprawdzenia:")
if czy_wszystkie(napis):
    print("TAK")
else:
    print("NIE")
| GracjanKoscinski/Programowanie | Petle for/funkcje/zadanie 6.py | zadanie 6.py | py | 353 | python | pl | code | 0 | github-code | 36 |
4435033563 |
import requests
from currency_codes import CURRENCIES
API_KEY = '82e68121413a404dc85fd537'
def get_rate(currency):
    """Fetch the current *currency* -> UZS conversion rate.

    Returns the rate on success, or ``False`` when the HTTP request fails or
    the response carries no ``conversion_rate`` field.
    """
    url = f"https://v6.exchangerate-api.com/v6/{API_KEY}/pair/{currency}/UZS"
    try:
        response = requests.get(url)
        rate = response.json()['conversion_rate']
    # Narrowed from a bare `except:`, which would also swallow SystemExit
    # and KeyboardInterrupt.
    except Exception:
        rate = False
    return rate
def get_currency_codes():
    """Build a newline-separated listing of all supported currency codes."""
    return "".join(f"/{code[0]} - {code[1]}\n" for code in CURRENCIES)
def is_currency_code(currency):
    """Return True if *currency* is one of the supported currency codes."""
    # Test membership over the codes directly instead of materialising a
    # whole dict on every call just to probe its keys.
    return any(code == currency for code, _ in CURRENCIES)
def get_ordered_rate_list(sort_in_desc=False):
    """Return a text listing of every fetchable rate against UZS.

    Codes whose rate could not be fetched (get_rate returned False) are
    skipped; the rest are ordered by rate, descending when *sort_in_desc*.
    """
    rates = {}
    for code, _name in CURRENCIES:
        rate = get_rate(code)
        if rate is not False:
            rates[code] = rate
    ordered_codes = sorted(rates, key=rates.get, reverse=sort_in_desc)
    return "".join(f"1 {code} = {rates[code]} UZS\n" for code in ordered_codes)
| otabek-usmonov/uzs-exchangerate-bot | currency_rate_info.py | currency_rate_info.py | py | 921 | python | en | code | 0 | github-code | 36 |
1478139833 | import sys
import os
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLineEdit, QLabel, QPushButton, QListView
from PyQt5.QtWidgets import QSizePolicy, QScrollArea, QCompleter, QHBoxLayout, QDialog
from PyQt5.QtCore import Qt, pyqtSlot, QModelIndex
from PyQt5.QtCore import QStandardPaths
import requests, zipfile, io
from nighandu import Nighandu
import asyncio
OLAM_DATASET_URL = "https://olam.in/open/enml/olam-enml.csv.zip"
HOME_PATH = QStandardPaths.writableLocation(QStandardPaths.HomeLocation)
FILES_DIR = os.path.join(HOME_PATH, ".Nighandu")
class NighanduGui(QWidget):
    """Main window of Nighandu, an English-Malayalam dictionary.

    On first launch the Olam CSV dataset is downloaded into ~/.Nighandu;
    the UI offers an auto-completing search box, a suggestion list and a
    zoomable HTML result viewer.
    """

    def __init__(self, parent=None):
        super(NighanduGui, self).__init__(parent)
        self.window().setWindowTitle("Nighandu")
        self.initApp()
        self.initUI()

    async def downloadOlamDataset(self, url, saveLocation):
        # Fetch the zipped CSV dataset and unpack it into saveLocation.
        # NOTE(review): requests.get() blocks, so the coroutine wrapper adds
        # no real concurrency here.
        r = requests.get(url)
        z = zipfile.ZipFile(io.BytesIO(r.content))
        z.extractall(saveLocation)

    def initApp(self):
        """Ensure the dataset exists locally and build the lookup backend."""
        if not os.path.exists(FILES_DIR):
            os.mkdir(FILES_DIR)
        csvFile = os.path.join(FILES_DIR, "olam-enml.csv")
        if not os.path.exists(csvFile):
            loop = asyncio.get_event_loop()
            loop.run_until_complete(self.downloadOlamDataset(OLAM_DATASET_URL, FILES_DIR))
        self.nighandu = Nighandu(csvFile)

    def initUI(self):
        """Build the widget tree: search bar, suggestion list, result pane."""
        # widget properties
        self.setMinimumSize(895, 680)
        mainLayout = QHBoxLayout()
        # inputs Widgets
        inputLayout = QHBoxLayout()
        self.searchButton = QPushButton("&Search", self)
        self.searchButton.setFixedSize(80, 30)
        self.searchButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.searchButton.clicked.connect(self.searchButtonClicked)
        wordList = self.nighandu.word_list()
        self.wordInput = QLineEdit(self)
        self.wordInput.setFixedHeight(30)
        # Case-insensitive auto-completion over the whole headword list.
        completer = QCompleter(wordList, self)
        completer.setCaseSensitivity(Qt.CaseInsensitive)
        self.wordInput.setCompleter(completer)
        self.wordInput.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        self.wordInput.returnPressed.connect(self.searchButtonClicked)
        inputLayout.addWidget(self.wordInput)
        inputLayout.addWidget(self.searchButton)
        leftControlsLayout = QVBoxLayout()
        leftControlsLayout.addLayout(inputLayout)
        # Read-only list of completion suggestions; clicking one triggers a lookup.
        suggesionsList = QListView(self)
        suggesionsList.setEditTriggers(QListView.NoEditTriggers)
        suggesionsList.setModel(completer.completionModel())
        suggesionsList.clicked.connect(self.suggesionsListClicked)
        leftControlsLayout.addWidget(suggesionsList)
        mainLayout.addLayout(leftControlsLayout)
        # Scrollable label that renders the HTML produced by formatResults().
        self.wordViewerLabel = QLabel(self)
        self.wordViewerScrollArea = QScrollArea(self)
        self.wordViewerScrollArea.setWidgetResizable(True)
        self.wordViewerScrollArea.setWidget(self.wordViewerLabel)
        self.wordViewerScrollArea.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.wordViewerLabel.setMargin(20)
        self.wordViewerLabel.setAlignment(Qt.AlignTop)
        # initial font size
        font = self.wordViewerLabel.font()
        font.setPixelSize(15)
        self.wordViewerLabel.setFont(font)
        self.wordViewerLabel.setText("<center> <h1> Nighandu </h1></center>")
        self.zoomInButton = QPushButton("ZoomIn (+)", self)
        self.zoomInButton.clicked.connect(self.zoomIn)
        self.zoomOutButton = QPushButton("ZoomOut (-)", self)
        self.zoomOutButton.clicked.connect(self.zoomOut)
        self.aboutButton = QPushButton("About", self)
        self.aboutButton.clicked.connect(self.about)
        zoomButtonLayout = QHBoxLayout()
        zoomButtonLayout.addWidget(self.aboutButton)
        zoomButtonLayout.addStretch()
        zoomButtonLayout.addWidget(self.zoomInButton)
        zoomButtonLayout.addWidget(self.zoomOutButton)
        rightConrolsLayout = QVBoxLayout()
        rightConrolsLayout.addWidget(self.wordViewerScrollArea)
        rightConrolsLayout.addLayout(zoomButtonLayout)
        mainLayout.addLayout(rightConrolsLayout)
        self.setLayout(mainLayout)

    @pyqtSlot()
    def searchButtonClicked(self):
        """Look up the typed word and render its meanings."""
        # change case: capitalise the first letter to match dataset headwords.
        # NOTE(review): word[0] raises IndexError when the input is empty —
        # confirm whether empty input should be guarded here.
        word = self.wordInput.text().lower()
        word = word.replace(word[0], word[0].upper(), 1)
        results = self.searchMeaning(word)
        if results == None:
            txt = "Sorry No results Found"
        else:
            txt = self.formatResults(results)
        self.wordViewerLabel.setText(txt)

    @pyqtSlot(QModelIndex)
    def suggesionsListClicked(self, index):
        """Look up the clicked suggestion and render its meanings."""
        results = self.searchMeaning(index.data())
        if results == None:
            txt = "Sorry No results Found"
        else:
            txt = self.formatResults(results)
        self.wordViewerLabel.setText(txt)

    def formatResults(self, results):
        """Bucket result rows by part of speech and build the result HTML.

        Each row is expected to carry 'part_of_speech' and
        'malayalam_definition' keys.  String literal bodies are kept
        flush-left so their content is unchanged.
        """
        verbs = []
        nouns = []
        adjectives = []
        adverbs = []
        pronouns = []
        properNouns = []
        phrasalVerbs = []
        conjunctions = []
        interjections = []
        prepositions = []
        prefixs = []
        suffixs = []
        idioms = []
        abbreviations = []
        auxiliaryVerbs = []
        meanings = []
        # Group every definition under its part-of-speech tag; unknown tags
        # fall through to the generic "meanings" bucket.
        for result in results:
            if result['part_of_speech'] == "n":
                nouns.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "v":
                verbs.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "a":
                adjectives.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "adv":
                adverbs.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "pron":
                pronouns.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "propn":
                properNouns.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "phrv":
                phrasalVerbs.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "conj":
                conjunctions.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "interj":
                interjections.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "prep":
                prepositions.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "pfx":
                prefixs.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "sfx":
                suffixs.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "abbr":
                abbreviations.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "auxv":
                auxiliaryVerbs.append(result['malayalam_definition'])
            elif result['part_of_speech'] == "idm":
                idioms.append(result['malayalam_definition'])
            else:
                meanings.append(result['malayalam_definition'])
        # One HTML section per part of speech: a heading followed by one
        # list item per definition.
        meaningHtmlContent = "" if len(meanings) == 0 else '''<hr/>
<h3>അര്ത്ഥം <span> :Meaning</span></h3>
<hr/>'''
        for meaning in meanings:
            meaningHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(meaning)
        nounHtmlContent = "" if len(nouns) == 0 else '''<hr/>
<h3>നാമം <span>:Noun</span></h3>
<hr/>'''
        for noun in nouns:
            nounHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(noun)
        verbHtmlContent = "" if len(verbs) == 0 else '''
<hr/>
<h3>ക്രിയ <span> :Verb</span></h3>
<hr/>
'''
        for verb in verbs:
            verbHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(verb)
        adjectivesHtmlContent = "" if len(adjectives) == 0 else '''<hr/>
<h3>വിശേഷണം<span>:Adjective</span></h3>
<hr/>'''
        for adjective in adjectives:
            adjectivesHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(adjective)
        adverbHtmlContent = "" if len(adverbs) == 0 else '''
<hr/>
<h3>ക്രിയാവിശേഷണം<span> :Adverb</span></h3>
<hr/>
'''
        for adverb in adverbs:
            adverbHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(adverb)
        pronounHtmlContent = "" if len(pronouns) == 0 else '''
<hr/>
<h3>സര്വ്വനാമം<span> :Pronoun</span></h3>
<hr/>
'''
        for pronoun in pronouns:
            pronounHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(pronoun)
        propernounHtmlContent = "" if len(properNouns) == 0 else '''
<hr/>
<h3>സംജ്ഞാനാമം<span> :Proper noun</span></h3>
<hr/>
'''
        for propnoun in properNouns:
            propernounHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(propnoun)
        phrasalVerbHtmlContent = "" if len(phrasalVerbs) == 0 else '''
<hr/>
<h3>ഉപവാക്യ ക്രിയ<span> :Phrasal verb</span></h3>
<hr/>
'''
        for phrasalVerb in phrasalVerbs:
            phrasalVerbHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(phrasalVerb)
        conjunctionHtmlContent = "" if len(conjunctions) == 0 else '''
<hr/>
<h3>അവ്യയം<span>:Conjunction</span></h3>
<hr/>
'''
        for conjunction in conjunctions:
            conjunctionHtmlContent += '''
<li><h4>{0}</h4></li>
'''.format(conjunction)
        interjectionHtmlContent = "" if len(interjections) == 0 else '''
<hr/>
<h3>വ്യാക്ഷേപകം<span> :interjection</span></h3>
<hr/>
'''
        for interjection in interjections:
            interjectionHtmlContent += '''
<li>{0}</li>
'''.format(interjection)
        prepositionHtmlContent = "" if len(prepositions) == 0 else '''
<hr/>
<h3>വ്യാക്ഷേപകം<span> :preposition</span></h3>
<hr/>
'''
        for preposition in prepositions:
            prepositionHtmlContent += '''
<li>{0}</li>
'''.format(preposition)
        prefixHtmlContent = "" if len(prefixs) == 0 else '''
<hr/>
<h3>പൂർവ്വപ്രത്യയം<span> :Prefix</span></h3>
<hr/>
'''
        for prefix in prefixs:
            prefixHtmlContent += '''
<li>{0}</li>
'''.format(prefix)
        suffixHtmlContent = "" if len(suffixs) == 0 else '''
<hr/>
<h3>പ്രത്യയം<span> :Suffix</span></h3>
<hr/>
'''
        for suffix in suffixs:
            suffixHtmlContent += '''
<li>{0}</li>
'''.format(suffix)
        # NOTE(review): the abbreviation section reuses the ":Suffix" heading
        # text — looks like a copy-paste slip; confirm intended heading.
        abbrHtmlContent = "" if len(abbreviations) == 0 else '''
<hr/>
<h3>പ്രത്യയം<span> :Suffix</span></h3>
<hr/>
'''
        for abbr in abbreviations:
            abbrHtmlContent += '''
<li>{0}</li>
'''.format(abbr)
        auxiliaryVerbHtmlContent = "" if len(auxiliaryVerbs) == 0 else '''
<hr/>
<h3>പൂരകകൃതി <span> :Auxiliary verb</span></h3>
<hr/>
'''
        for auxv in auxiliaryVerbs:
            auxiliaryVerbHtmlContent += '''
<li>{0}</li>
'''.format(auxv)
        # NOTE(review): the idioms heading reuses the Malayalam label of the
        # auxiliary-verb section — likely another copy-paste slip.
        idiomsHtmlContent = "" if len(idioms) == 0 else '''
<hr/>
<h3>പൂരകകൃതി <span> :Idioms</span></h3>
<hr/>
'''
        for idiom in idioms:
            idiomsHtmlContent += '''
<li>{0}</li>
'''.format(idiom)
        # Stitch all sections together under the looked-up word.
        htmlContent = '''
<h3>Word : {0} </h3>
<ul>
{1}
{2}
{3}
{4}
{5}
{6}
{7}
{8}
{9}
{10}
{11}
{12}
{13}
{14}
{15}
{16}
</ul>
'''.format(self.wordInput.text().strip(), meaningHtmlContent, nounHtmlContent, verbHtmlContent, adjectivesHtmlContent,
           adverbHtmlContent, pronounHtmlContent, propernounHtmlContent, phrasalVerbHtmlContent, conjunctionHtmlContent,
           interjectionHtmlContent, prepositionHtmlContent, prefixHtmlContent, suffixHtmlContent, abbrHtmlContent, auxiliaryVerbHtmlContent,
           idiomsHtmlContent)
        return htmlContent

    def searchMeaning(self, word):
        """Return the lookup rows for *word* (callers treat None as 'not found')."""
        results = self.nighandu.search_word(word)
        return results

    @pyqtSlot()
    def zoomIn(self):
        # Grow the result font by 3px per click.
        font = self.wordViewerLabel.font()
        fontSize = font.pixelSize()
        font.setPixelSize(fontSize+3)
        self.wordViewerLabel.setFont(font)

    @pyqtSlot()
    def zoomOut(self):
        # Shrink the result font by 3px per click.
        font = self.wordViewerLabel.font()
        fontSize = font.pixelSize()
        font.setPixelSize(fontSize-3)
        self.wordViewerLabel.setFont(font)

    @pyqtSlot()
    def about(self):
        """Show a modal About dialog."""
        content = """
<center>
<h2> Nighandu </h2>
<p>
Nighandu is an free opensoure english malayalam dictionary software. <br/>
This is based on <a href="https://olam.in/open/enml/">Olam English-Malayalam dictionary dataset</a>
<br/>
<br/>
<br/>
Project: https://github.com/Vivx701/Nighandu
<br/>
Developer: Vivek.P (https://github.com/Vivx701)
<br/>
</p>
</center>
"""
        contentLayout = QHBoxLayout()
        contentLabel = QLabel(self)
        contentLabel.setText(content)
        contentLayout.addWidget(contentLabel)
        contentLayout.addStretch()
        dialog = QDialog(self)
        dialog.window().setWindowTitle("About")
        dialog.setLayout(contentLayout)
        dialog.exec()
if __name__ == "__main__":
app = QApplication(sys.argv)
nighanduGui = NighanduGui()
nighanduGui.show()
sys.exit(app.exec_()) | Vivx701/Nighandu | nighandu_gui.py | nighandu_gui.py | py | 15,836 | python | en | code | 1 | github-code | 36 |
21271931699 | from pulp import *
def solve_sudoku(input_form):
    """Solve a 9x9 sudoku with an integer program built in PuLP.

    *input_form* maps cell names ``cell_<row><col>`` (row/col in "1".."9")
    to a digit string, or "" for an empty cell.  Returns a 9x9 list of lists
    of digit strings.  Side effects: writes ``sudoku_model.lp`` and
    ``sudokuout.txt`` and prints the solver status plus an HTML rendering.
    """
    # A list for indexing
    indices_seq = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
    values = indices_seq
    rows = indices_seq
    columns = indices_seq
    # The nine 3x3 boxes, each as a list of (row, column) pairs.
    squares_list = []
    for i in range(3):
        for j in range(3):
            squares_list += [[(rows[3*i+k],columns[3*j+l]) for k in range(3) for l in range(3)]]
    # Feasibility problem (the objective is irrelevant); one 0/1 variable
    # per (value, row, column) triple.
    model = LpProblem("Sudoku Problem",LpMinimize)
    choices = LpVariable.dicts("Choice",(values,rows,columns),0,1,LpInteger)
    # The constraints are created here
    # Exactly one value per cell ...
    for r in rows:
        for c in columns:
            model += lpSum([choices[v][r][c] for v in values]) == 1, ""
    # ... and each value exactly once per row, column and 3x3 box.
    for v in values:
        for r in rows:
            model += lpSum([choices[v][r][c] for c in columns]) == 1,""
        for c in columns:
            model += lpSum([choices[v][r][c] for r in rows]) == 1,""
        for b in squares_list:
            model += lpSum([choices[v][r][c] for (r,c) in b]) == 1,""
    # Pin the pre-filled cells coming from the input form.
    for i in rows:
        for j in columns:
            cell = "cell_" + i + j
            if input_form[cell] != "":
                val = input_form[cell]
                model += choices[val][i][j] == 1,""
    # just for analysis, we write out the model into an .lp-file
    model.writeLP("sudoku_model.lp")
    # we are going to write the result to sudokuout.txt and into a html-string (res) for the web app
    sudokuout = open('sudokuout.txt','w')
    # note: we terminate after the first feasible solution is found!
    # (Both branches below break, so the loop runs exactly once.)
    while True:
        model.solve()
        res = ""
        res_array = [["" for i in range(9)] for j in range(9)]
        # The status of the solution is printed to the screen
        print("Status:", LpStatus[model.status])
        if LpStatus[model.status] == "Optimal":
            for r in rows:
                if r == "1" or r == "4" or r == "7":
                    sudokuout.write("+-------+-------+-------+\n")
                    res += "<b>+-------+-------+-------+</b><br>"
                for c in columns:
                    for v in values:
                        if value(choices[v][r][c]) == 1:
                            res_array[int(r)-1][int(c)-1] = v
                            if c == "1" or c == "4" or c =="7":
                                # NOTE(review): HTML markup is also written to
                                # the plain-text file — confirm that is intended.
                                sudokuout.write("<b>| </b>")
                                res += "<b>| </b>"
                            sudokuout.write(v + " ")
                            res += v + " "
                            if c == "9":
                                sudokuout.write("|\n")
                                res += "<b>|</b><br>"
            sudokuout.write("+-------+-------+-------+\n\n")
            res += "<b>+-------+-------+-------+</b><br><br>"
            # The constraint is added that the same solution cannot be returned again
            model += lpSum([choices[v][r][c] for v in values
                            for r in rows
                            for c in columns
                            if value(choices[v][r][c])==1]) <= 80
            break
        else:
            break
    sudokuout.close()
    print(res)
    return res_array
25161970451 | import json
import logging
import requests
from dacite import from_dict
from typing import Any
from adyen_gift_card.api.adyen_notifications.request import NotificationRequestItem
from adyen_gift_card.infrastructure.newstore_client.client_response import NewStoreError
from newstore_common.json.multi_encoder import MultiToValueEncoder
LOGGER = logging.getLogger()
class NewStoreClient:
    """Thin HTTP client for posting payment-provider notifications to NewStore."""

    def __init__(self, tenant: str, stage: str, provider_name: str):
        self.tenant = tenant
        self.stage = stage
        self.provider_name = provider_name

    def send_notification(self, action: str, notification_item: NotificationRequestItem, json_data: Any) -> NewStoreError:
        """POST a notification and return the NewStore error, or None on HTTP 200."""
        idempotency_key = notification_item.merchant_reference
        instrument_id = notification_item.original_reference
        url = (
            f'https://{self.tenant}.{self.stage}.newstore.net/v0/d/payment_providers/{action}/'
            f'{self.provider_name}/{idempotency_key}/{instrument_id}'
        )
        # Round-trip through the custom encoder so the payload is plain JSON types.
        payload = json.loads(json.dumps(json_data, cls=MultiToValueEncoder))
        LOGGER.info(f'POST: {url} -- {payload}')
        response = requests.post(url=url, json=payload)
        LOGGER.info(f'http response: {response.text}')
        if response.status_code == 200:
            return None
        return from_dict(data_class=NewStoreError, data=response.json())
| NewStore/int-cinori | integrations/adyen_gift_card/adyen_gift_card/infrastructure/newstore_client/client.py | client.py | py | 1,368 | python | en | code | 0 | github-code | 36 |
27115300498 | from django.shortcuts import render, redirect
from application.models import *
# Create your views here.
def index(request):
    """Render the landing page with every user currently in the database."""
    return render(request, 'index.html', {'Users': User.objects.all()})
def submit_user(request):
    """Create a User from the submitted form fields, then return to the index."""
    form = request.POST
    User.objects.create(
        first_name=form['fname'],
        last_name=form['lname'],
        age=form['age'],
        email=form['email'],
    )
    return redirect('/')
74114165863 | import frappe
import os
import json
import sys
# bench execute mfi_customization.mfi.patch.migrate_patch.get_custom_role_permission
def get_custom_role_permission(site=None):
    """Export bench fixtures, optionally for a specific site.

    The *site* argument (previously accepted but ignored) takes precedence;
    otherwise the site name is picked up from a ``--site <name>`` pair on
    the command line.  Falls back to the default bench site when neither is
    given — this also fixes the IndexError the old code raised when fewer
    than three command-line arguments were present.
    """
    if site is None and len(sys.argv) > 3 and sys.argv[2] == '--site':
        site = sys.argv[3]
    if site:
        os.system("bench --site {0} export-fixtures".format(site))
    else:
        os.system("bench export-fixtures")
# bench execute mfi_customization.mfi.patch.migrate_patch.set_custom_role_permission
def set_custom_role_permission():
    """Import Custom DocPerm fixtures, skipping parent/role pairs that already exist."""
    fixture_path = frappe.get_app_path("mfi_customization", "fixtures", "custom_docperm.json")
    with open(fixture_path) as fixture_file:
        records = json.load(fixture_file)
    for record in records:
        existing = frappe.get_all('Custom DocPerm', {'parent': record.get('parent'), 'role': record.get('role')})
        if existing:
            continue
        perm = frappe.new_doc('Custom DocPerm')
        for field in record.keys():
            perm.set(field, record.get(field))
        perm.save()
| Bizmap-Technologies-Pvt-Ltd/mfi_customization- | mfi_customization/mfi/patch/migrate_patch.py | migrate_patch.py | py | 848 | python | en | code | 0 | github-code | 36 |
36384166089 | """
Author: Kevin Owens
Date: 12 May 2014
Class: LongCalc
Problem description summary (from TopCoder Tournament Inv 2001 Semi C+D 1000): Do big-int math with two integer
operands and a an operator identifier for add, subtract, multiply, and integer divide. Operands are given as strings;
operator is given as a numeric id 1:+, 2:-, 3:*, 4://.
Python makes this trivial. Perhaps this 1000-point problem is geared toward other languages that don't natively
support arbitrarily large numbers?
"""
class LongCalc:
    """Arbitrary-precision integer arithmetic on operands given as strings."""

    def process(self, a_str, b_str, op):
        """Apply operator *op* to the two operands and return the result.

        *op* is a numeric id: 1 add, 2 subtract, 3 multiply, 4 integer
        (floor) divide.  Operands are decimal strings; the result is a
        decimal string, or '#ERROR' for an unrecognised operator id.
        """
        left = int(a_str)
        right = int(b_str)
        # Lazy dispatch table: the division lambda is only evaluated when
        # op == 4, matching the original elif chain.
        operations = {
            1: lambda: left + right,
            2: lambda: left - right,
            3: lambda: left * right,
            4: lambda: left // right,
        }
        if op not in operations:
            return '#ERROR'
        return str(operations[op]())
if __name__ == '__main__':
    calc = LongCalc()
    # Demo cases; expected outputs: 150, 5e32, -1, 13485, 7.
    for a, b, op in (
        ("100", "50", 1),
        ("100000000000000000000000000000000", "400000000000000000000000000000000", 1),
        ("3", "4", 2),
        ("29", "465", 3),
        ("15", "2", 4),
    ):
        print(calc.process(a, b, op))
| knaught/TopCoder | LongCalc.py | LongCalc.py | py | 1,318 | python | en | code | 0 | github-code | 36 |
417596476 | from socket import*
import socket
import sys
# Create a TCP/IPv4 socket; exit with a message if the OS refuses.
try:
    sock=socket.socket(family=AF_INET,type=SOCK_STREAM)
except socket.error as err:
    print("Failed to create a socket")
    print("Reason: %s" %str(err))
    sys.exit()
print("Socekt created")  # NOTE(review): "Socekt" typo is user-visible output, left as-is
target_host=input("Enter the target_host name to connect: ")
target_port=input("Enter the target port: ")
# Attempt the TCP handshake; on success shut the connection down in both
# directions (2 == SHUT_RDWR).  NOTE(review): int(target_port) raises an
# uncaught ValueError on non-numeric input, and the success/failure messages
# concatenate host and port with no separator — confirm if intended.
try:
    sock.connect((target_host,int(target_port)))
    print("socket connected to: %s" %(target_host+target_port))
    sock.shutdown(2)
except socket.error as err:
    print("Failed to connect: %s" %(target_host+target_port))
    print("Reason %s"%str(err))
    sys.exit()
| Rakibuz/Robotics_HCI | Python Socket Programming/Pro_Knw_tcpsockets.py | Pro_Knw_tcpsockets.py | py | 633 | python | en | code | 0 | github-code | 36 |
73857321062 | import numpy as np
from munch import DefaultMunch
from sklearn.model_selection import train_test_split
from tests import config_params, compas_dataset_class, compas_without_sensitive_attrs_dataset_class
from virny.utils.common_helpers import validate_config, confusion_matrix_metrics
def test_validate_config_true1(config_params):
    """The fixture-provided config passes validation."""
    assert validate_config(config_params) == True
def test_validate_config_true2():
    """A hand-built COMPAS config with valid fields passes validation."""
    config = DefaultMunch.fromDict({
        "dataset_name": 'COMPAS',
        "bootstrap_fraction": 0.8,
        "n_estimators": 100,
        "sensitive_attributes_dct": {'sex': 0, 'race': 'Caucasian'},
    })
    assert validate_config(config) == True
def test_validate_config_false1():
    """An intersectional attribute with a missing component fails validation."""
    config = DefaultMunch.fromDict({
        "dataset_name": 'COMPAS',
        "bootstrap_fraction": 0.8,
        "n_estimators": 100,
        "sensitive_attributes_dct": {'sex': 0, 'race': 'Caucasian', 'sex&race&age': None},
    })
    # validate_config signals rejection by raising ValueError.
    try:
        outcome = validate_config(config)
    except ValueError:
        outcome = False
    assert outcome == False
def test_validate_config_false2():
    """A bootstrap_fraction above 1.0 fails validation."""
    config = DefaultMunch.fromDict({
        "dataset_name": 'COMPAS',
        "bootstrap_fraction": 1.8,
        "n_estimators": 100,
        "sensitive_attributes_dct": {'sex': 0, 'race': 'Caucasian'},
    })
    # validate_config signals rejection by raising ValueError.
    try:
        outcome = validate_config(config)
    except ValueError:
        outcome = False
    assert outcome == False
def test_validate_config_false3():
    """An intersectional attribute whose components are absent fails validation."""
    config = DefaultMunch.fromDict({
        "dataset_name": 'COMPAS',
        "bootstrap_fraction": 1.8,
        "n_estimators": 100,
        "sensitive_attributes_dct": {'sex': 0, 'sex&race': None},
    })
    # validate_config signals rejection by raising ValueError.
    try:
        outcome = validate_config(config)
    except ValueError:
        outcome = False
    assert outcome == False
def test_confusion_matrix_metrics():
    """Perfect predictions still populate every expected metric key."""
    labels = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])
    metrics = confusion_matrix_metrics(labels, labels.copy())
    for key in ('TPR', 'TNR', 'PPV', 'FNR', 'FPR', 'Accuracy', 'F1', 'Selection-Rate', 'Positive-Rate'):
        assert key in metrics.keys()
| DataResponsibly/Virny | tests/utils/test_common_helpers.py | test_common_helpers.py | py | 2,369 | python | en | code | 7 | github-code | 36 |
27698172299 | # -*- coding: utf-8 -*-#
'''
# Name: NormalizePredicateData
# Description: 将测试数据也进行归一化操作
# Author: super
# Date: 2020/5/13
'''
import numpy as np
from HelperClass.NeuralNet_1_1 import *
file_name = "../data/ch05.npz"
if __name__ == '__main__':
    # Load the sample data and normalize the features before training.
    reader = DataReader_1_1(file_name)
    reader.ReadData()
    reader.NormalizeX()
    # Two inputs, one output; plain SGD linear regression.
    hp = HyperParameters_1_0(2, 1, eta=0.01, max_epoch=100, batch_size=10, eps=1e-5)
    net = NeuralNet_1_1(hp)
    net.train(reader, checkpoint=0.1)
    # Inference: the raw sample must be normalized with the SAME statistics
    # as the training data before being fed to the network.
    sample = np.array([15, 93]).reshape(1, 2)
    normalized = reader.NormalizePredicateData(sample)
    z = net.inference(normalized)
    print("Z=", z)
100754923 | from linkedin import (LinkedInAuthentication, LinkedInApplication,
PERMISSIONS)
if __name__ == '__main__':
    # WARNING(review): real-looking API credentials are committed here; they
    # should be revoked and loaded from environment variables instead.
    API_KEY = '77se22zag9iejz'
    API_SECRET = 'kBpqQgsjTrWXu4wB'
    RETURN_URL = 'http://68.183.125.29:5000'
    # Build the OAuth flow requesting every available permission scope.
    auth = LinkedInAuthentication(
        API_KEY, API_SECRET, RETURN_URL, PERMISSIONS.enums.values()
    )
    print(auth.authorization_url)
    application = LinkedInApplication(auth)
35398028388 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from contextlib import contextmanager
import os
import pytest
from textwrap import dedent
from pants.base.address import SyntheticAddress, BuildFileAddress
from pants.base.address_lookup_error import AddressLookupError
from pants.base.build_configuration import BuildConfiguration
from pants.base.build_file import BuildFile
from pants.base.build_file_parser import BuildFileParser
from pants.base.build_graph import BuildGraph
from pants.base.build_root import BuildRoot
from pants.base.target import Target
from pants.util.contextutil import pushd, temporary_dir
from pants.util.dirutil import touch
from pants_test.base_test import BaseTest
# TODO(Eric Ayers) There are many untested methods in BuildGraph left to be tested.
class BuildGraphTest(BaseTest):
@contextmanager
def workspace(self, *buildfiles):
with temporary_dir() as root_dir:
with BuildRoot().temporary(root_dir):
with pushd(root_dir):
for buildfile in buildfiles:
touch(os.path.join(root_dir, buildfile))
yield os.path.realpath(root_dir)
# TODO(Eric Ayers) This test broke during a refactoring and should be moved, removed or updated
@pytest.mark.xfail
def test_transitive_closure_spec(self):
with self.workspace('./BUILD', 'a/BUILD', 'a/b/BUILD') as root_dir:
with open(os.path.join(root_dir, './BUILD'), 'w') as build:
build.write(dedent('''
fake(name="foo",
dependencies=[
'a',
])
'''))
with open(os.path.join(root_dir, 'a/BUILD'), 'w') as build:
build.write(dedent('''
fake(name="a",
dependencies=[
'a/b:bat',
])
'''))
with open(os.path.join(root_dir, 'a/b/BUILD'), 'w') as build:
build.write(dedent('''
fake(name="bat")
'''))
build_configuration = BuildConfiguration()
build_configuration.register_target_alias('fake', Target)
parser = BuildFileParser(build_configuration, root_dir=root_dir)
build_graph = BuildGraph(self.address_mapper)
parser.inject_spec_closure_into_build_graph(':foo', build_graph)
self.assertEqual(len(build_graph.dependencies_of(SyntheticAddress.parse(':foo'))), 1)
# TODO(Eric Ayers) This test broke during a refactoring and should be moved, removed or updated
@pytest.mark.xfail
def test_target_invalid(self):
self.add_to_build_file('a/BUILD', 'target(name="a")')
with pytest.raises(BuildFileParser.InvalidTargetException):
self.build_graph.inject_spec_closure('a:nope')
self.add_to_build_file('b/BUILD', 'target(name="a")')
with pytest.raises(BuildFileParser.InvalidTargetException):
self.build_graph.inject_spec_closure('b')
with pytest.raises(BuildFileParser.InvalidTargetException):
self.build_graph.inject_spec_closure('b:b')
with pytest.raises(BuildFileParser.InvalidTargetException):
self.build_graph.inject_spec_closure('b:')
# TODO(Eric Ayers) This test broke during a refactoring and should be moved removed or updated
@pytest.mark.xfail
def test_transitive_closure_address(self):
with self.workspace('./BUILD', 'a/BUILD', 'a/b/BUILD') as root_dir:
with open(os.path.join(root_dir, './BUILD'), 'w') as build:
build.write(dedent('''
fake(name="foo",
dependencies=[
'a',
])
'''))
with open(os.path.join(root_dir, 'a/BUILD'), 'w') as build:
build.write(dedent('''
fake(name="a",
dependencies=[
'a/b:bat',
])
'''))
with open(os.path.join(root_dir, 'a/b/BUILD'), 'w') as build:
build.write(dedent('''
fake(name="bat")
'''))
def fake_target(*args, **kwargs):
assert False, "This fake target should never be called in this test!"
alias_map = {'target_aliases': {'fake': fake_target}}
self.build_file_parser.register_alias_groups(alias_map=alias_map)
bf_address = BuildFileAddress(BuildFile(root_dir, 'BUILD'), 'foo')
self.build_file_parser._populate_target_proxy_transitive_closure_for_address(bf_address)
self.assertEqual(len(self.build_file_parser._target_proxy_by_address), 3)
# TODO(Eric Ayers) This test broke during a refactoring and should be moved, removed or updated
@pytest.mark.xfail
def test_no_targets(self):
self.add_to_build_file('empty/BUILD', 'pass')
with pytest.raises(BuildFileParser.EmptyBuildFileException):
self.build_file_parser.inject_spec_closure_into_build_graph('empty', self.build_graph)
with pytest.raises(BuildFileParser.EmptyBuildFileException):
self.build_file_parser.inject_spec_closure_into_build_graph('empty:foo', self.build_graph)
def test_contains_address(self):
a = SyntheticAddress.parse('a')
self.assertFalse(self.build_graph.contains_address(a))
target = Target(name='a',
address=a,
build_graph=self.build_graph)
self.build_graph.inject_target(target)
self.assertTrue(self.build_graph.contains_address(a))
def test_get_target_from_spec(self):
a = self.make_target('foo:a')
result = self.build_graph.get_target_from_spec('foo:a')
self.assertEquals(a, result)
b = self.make_target('foo:b')
result = self.build_graph.get_target_from_spec(':b', relative_to='foo')
self.assertEquals(b, result)
def test_walk_graph(self):
"""
Make sure that BuildGraph.walk_transitive_dependency_graph() and
BuildGraph.walk_transitive_dependee_graph() return DFS preorder (or postorder) traversal.
"""
def assertDependencyWalk(target, results, postorder=False):
targets = []
self.build_graph.walk_transitive_dependency_graph([target.address],
lambda x: targets.append(x),
postorder=postorder)
self.assertEquals(results, targets)
def assertDependeeWalk(target, results, postorder=False):
targets = []
self.build_graph.walk_transitive_dependee_graph([target.address],
lambda x: targets.append(x),
postorder=postorder)
self.assertEquals(results, targets)
a = self.make_target('a')
b = self.make_target('b', dependencies=[a])
c = self.make_target('c', dependencies=[b])
d = self.make_target('d', dependencies=[c, a])
e = self.make_target('e', dependencies=[d])
assertDependencyWalk(a, [a])
assertDependencyWalk(b, [b, a])
assertDependencyWalk(c, [c, b, a])
assertDependencyWalk(d, [d, c, b, a])
assertDependencyWalk(e, [e, d, c, b, a])
assertDependeeWalk(a, [a, b, c, d, e])
assertDependeeWalk(b, [b, c, d, e])
assertDependeeWalk(c, [c, d, e])
assertDependeeWalk(d, [d, e])
assertDependeeWalk(e, [e])
assertDependencyWalk(a, [a], postorder=True)
assertDependencyWalk(b, [a, b], postorder=True)
assertDependencyWalk(c, [a, b, c], postorder=True)
assertDependencyWalk(d, [a, b, c, d], postorder=True)
assertDependencyWalk(e, [a, b, c, d, e], postorder=True)
assertDependeeWalk(a, [e, d, c, b, a], postorder=True)
assertDependeeWalk(b, [e, d, c, b], postorder=True)
assertDependeeWalk(c, [e, d, c], postorder=True)
assertDependeeWalk(d, [e, d], postorder=True)
assertDependeeWalk(e, [e], postorder=True)
#Try a case where postorder traversal is not identical to reversed preorder traversal
c = self.make_target('c1', dependencies=[])
d = self.make_target('d1', dependencies=[c])
b = self.make_target('b1', dependencies=[c, d])
e = self.make_target('e1', dependencies=[b])
a = self.make_target('a1', dependencies=[b, e])
assertDependencyWalk(a, [a, b, c, d, e])
assertDependencyWalk(a, [c, d, b, e, a], postorder=True)
def test_target_closure(self):
a = self.make_target('a')
self.assertEquals([a], a.closure())
b = self.make_target('b', dependencies=[a])
self.assertEquals([b, a], b.closure())
c = self.make_target('c', dependencies=[b])
self.assertEquals([c, b, a], c.closure())
d = self.make_target('d', dependencies=[a, c])
self.assertEquals([d, a, c, b], d.closure())
def test_target_walk(self):
def assertWalk(expected, target):
results = []
target.walk(lambda x: results.append(x))
self.assertEquals(expected, results)
a = self.make_target('a')
assertWalk([a], a)
b = self.make_target('b', dependencies=[a])
assertWalk([b, a], b)
c = self.make_target('c', dependencies=[b])
assertWalk([c, b, a], c)
d = self.make_target('d', dependencies=[a, c])
assertWalk([d, a, c, b], d)
def test_lookup_exception(self):
"""
There is code that depends on the fact that TransitiveLookupError is a subclass
of AddressLookupError
"""
self.assertIsInstance(BuildGraph.TransitiveLookupError(), AddressLookupError)
def test_invalid_address(self):
with self.assertRaisesRegexp(AddressLookupError,
'^BUILD file does not exist at:.*/BUILD'):
self.build_graph.inject_spec_closure('//:a')
self.add_to_build_file('BUILD',
'target(name="a", '
' dependencies=["non-existent-path:b"],'
')')
with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
'^BUILD file does not exist at:.*/non-existent-path/BUILD'
'\s+when translating spec non-existent-path:b'
'\s+referenced from :a$'):
self.build_graph.inject_spec_closure('//:a')
def test_invalid_address_two_hops(self):
self.add_to_build_file('BUILD',
'target(name="a", '
' dependencies=["goodpath:b"],'
')')
self.add_to_build_file('goodpath/BUILD',
'target(name="b", '
' dependencies=["non-existent-path:c"],'
')')
with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
'^BUILD file does not exist at: .*/non-existent-path/BUILD'
'\s+when translating spec non-existent-path:c'
'\s+referenced from goodpath:b'
'\s+referenced from :a$'):
self.build_graph.inject_spec_closure('//:a')
  def test_invalid_address_two_hops_same_file(self):
    """Reference chains through two targets in the same BUILD file are reported fully."""
    self.add_to_build_file('BUILD',
                           'target(name="a", '
                           '  dependencies=["goodpath:b"],'
                           ')')
    # b and c live in the same BUILD file; c points at a missing path.
    self.add_to_build_file('goodpath/BUILD',
                           'target(name="b", '
                           '  dependencies=[":c"],'
                           ')\n'
                           'target(name="c", '
                           '  dependencies=["non-existent-path:d"],'
                           ')')
    with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
                                 '^BUILD file does not exist at:.*/non-existent-path/BUILD'
                                 '\s+when translating spec non-existent-path:d'
                                 '\s+referenced from goodpath:c'
                                 '\s+referenced from goodpath:b'
                                 '\s+referenced from :a$'):
      self.build_graph.inject_spec_closure('//:a')
  def test_raise_on_duplicate_dependencies(self):
    """Duplicate dependency addresses are rejected, even when spelled differently."""
    self.add_to_build_file('BUILD',
                           'target(name="a", '
                           '  dependencies=['
                           '    "other:b",'
                           '    "//other:b",'  # we should perform the test on normalized addresses
                           '])')
    self.add_to_build_file('other/BUILD',
                           'target(name="b")')
    with self.assertRaisesRegexp(
        BuildGraph.TransitiveLookupError,
        '^Addresses in dependencies must be unique. \'other:b\' is referenced more than once.'
        '\s+referenced from :a$'):
      self.build_graph.inject_spec_closure('//:a')
  def test_inject_then_inject_closure(self):
    """inject_address followed by inject_address_closure still links dependencies.

    The first call only registers the address; the closure call must pull in
    the dependency target and wire it into the graph.
    """
    self.add_to_build_file('BUILD',
                           'target(name="a", '
                           '  dependencies=['
                           '    "other:b",'
                           '])')
    self.add_to_build_file('other/BUILD',
                           'target(name="b")')
    self.build_graph.inject_address(SyntheticAddress.parse('//:a'))
    self.build_graph.inject_address_closure(SyntheticAddress.parse('//:a'))
    a = self.build_graph.get_target_from_spec('//:a')
    b = self.build_graph.get_target_from_spec('//other:b')
    self.assertIn(b, a.dependencies)
| fakeNetflix/square-repo-pants | tests/python/pants_test/graph/test_build_graph.py | test_build_graph.py | py | 13,188 | python | en | code | 0 | github-code | 36 |
19056751666 | from model.Player import Player
from model.PropertySquare import PropertySquare
from model.Square import Square
class SquareView:
    """Console renderer for board squares."""

    def __init__(self):
        # Stateless view; nothing to initialize.
        pass

    def render(self, square: Square):
        """Print a one-line description of *square*.

        Property squares additionally show name, owner, price and rents;
        all other squares show only their token and label.
        """
        if type(square) is PropertySquare:
            owner_obj: Player = square.get_owner()
            # Unowned properties have no owner object; show the literal 'None'.
            # (Use identity check: `owner_obj == None` would invoke __eq__.)
            owner_name = 'None' if owner_obj is None else owner_obj.get_name()
            result = '{} {}) name: {}, owner: {}, price: {}, rents: {}'.format(
                square.get_token(),
                square.to_string(),
                square.get_name(),
                owner_name,
                square.get_price(),
                square.get_rents()
            )
        else:
            result = '{} {})'.format(
                square.get_token(),
                square.to_string(),
            )
        print(result)

    def render_you_are_here(self, square: PropertySquare):
        """Print *square* with a marker indicating the player's current position."""
        result = '{}. {}) <---- You are here'.format(
            square.get_token(),
            square.to_string()
        )
        print(result)
25719962431 | import nmap
import main
import xlsxwriter
nmScan = nmap.PortScanner()
def scan_ip(host):
    """Report nmap scan results for *host*, to stdout or to an .xlsx workbook.

    The output mode comes from main.checkoutput(): the literal string 'print'
    dumps the results to stdout; a name ending in '.xlsx' writes one worksheet
    per scanned protocol with host/protocol/port/state columns (Spanish headers).

    Assumes nmScan has already scanned *host* — TODO confirm caller contract.
    """
    nombre = main.checkoutput()
    if nombre == "print":
        print('Host : %s (%s)' % (host, nmScan[host].hostname()))
        print('State : %s' % nmScan[host].state())
        for proto in nmScan[host].all_protocols():
            print('----------')
            print('Protocol : %s' % proto)
            # dict views have no .sort() in Python 3; sorted() works everywhere.
            lport = sorted(nmScan[host][proto].keys())
            for port in lport:
                print('port : %s\tstate : %s' % (port, nmScan[host][proto][port]['state']))
    elif nombre.endswith(".xlsx"):
        workbook = xlsxwriter.Workbook(nombre)
        for proto in nmScan[host].all_protocols():
            fila = 2
            worksheet = workbook.add_worksheet(proto)
            worksheet.write(1, 1, "Anfitrion")
            worksheet.write(1, 2, "Protocolo")
            worksheet.write(1, 3, "Puerto")
            worksheet.write(1, 4, "Estado")
            worksheet.write(2, 1, nmScan[host].hostname())
            worksheet.write(2, 2, proto)
            lport = sorted(nmScan[host][proto].keys())
            for port in lport:
                worksheet.write(fila, 3, port)
                worksheet.write(fila, 4, nmScan[host][proto][port]['state'])
                fila += 1
        # xlsxwriter only flushes data to disk on close(); without this the
        # workbook file is never actually written.
        workbook.close()
30326229759 | import pandas as pd
import numpy as np
from statsmodels.stats.outliers_influence import variance_inflation_factor
def forward_delete_corr(data):
    """Return columns whose absolute correlation with an earlier column exceeds 0.7.

    Only the upper triangle of the correlation matrix is inspected, so of each
    highly-correlated pair the later column is the one flagged for deletion.
    """
    abs_corr = data.corr().abs()
    # Upper triangle only (k=1 excludes the diagonal); the rest becomes NaN.
    triu_mask = np.triu(np.ones(abs_corr.shape), k=1).astype(bool)
    upper_tri = abs_corr.where(triu_mask)
    to_delete = [col for col in upper_tri.columns if (upper_tri[col] > 0.7).any()]
    print("相关性删除列: ", to_delete)
    return to_delete
def get_low_vif_cols(data, save_path):
    """Iteratively drop the column with the largest VIF until all VIFs are <= 10.

    Each iteration's VIF table is written to *save_path*, overwriting the
    previous one, so the file ends up holding the final (all <= 10) table.
    Returns the list of dropped column names, in drop order.
    """
    to_delete = []
    while True:
        vif_table = pd.DataFrame()
        vif_table["variables"] = data.columns
        vif_table["VIF"] = [variance_inflation_factor(data.values, col_idx)
                            for col_idx in range(data.shape[1])]
        vif_table.to_csv(save_path)
        if vif_table["VIF"].max() <= 10:
            break
        # Drop the single worst offender, then recompute on the next pass.
        worst_col = vif_table.loc[vif_table["VIF"].idxmax(), "variables"]
        to_delete.append(worst_col)
        data = data.drop(worst_col, axis=1)
    print("多重共线性删除列: ", to_delete)
    return to_delete
def get_low_var_cols(data):
    """Return the names of columns whose sample variance is below 1."""
    variances = data.var()
    to_delete = list(variances.index[variances < 1])
    print("方差删除列: ", to_delete)
    return to_delete
def get_single_enum_cols(data):
    """Return near-constant columns: more than one distinct value, but one value
    covering at least 90% of the (non-null) rows.

    Columns with exactly one distinct value are intentionally skipped.
    """
    to_delete = []
    for column in data.columns:
        counts = data[column].value_counts()
        if len(counts) > 1 and (counts / counts.sum() >= 0.9).any():
            to_delete.append(column)
    print("枚举值删除列: ", to_delete)
    return to_delete
| Whale-lyi/simple-predict | filter.py | filter.py | py | 1,753 | python | en | code | 0 | github-code | 36 |
5940263315 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
import torch.nn.functional as F
from model.layers import *
from model.losses import *
class GraphRecommender(nn.Module):
    """Session-based recommender over a global item graph.

    Item embeddings are propagated over the precomputed adjacency ``adj`` by a
    global graph convolution, gathered per session, and pooled into a session
    embedding via reverse-position soft attention. Scores are the scaled inner
    product of the L2-normalized session embedding with L2-normalized item
    embeddings. An optional self-supervised contrastive loss (``SSLTask``) can
    be computed per batch.
    """
    def __init__(self, opt, num_node, adj, len_session, n_train_sessions):
        # opt carries hyperparameters: batch_size, dim, layers, w_k, dropout, ...
        super(GraphRecommender, self).__init__()
        self.opt = opt
        self.batch_size = opt.batch_size
        self.num_node = num_node
        self.len_session = len_session
        self.dim = opt.dim
        # Index 0 is the padding item, hence num_node + 1 embedding rows.
        self.item_embedding = nn.Embedding(num_node + 1, self.dim,
                                           padding_idx=0)
        self.pos_embedding = nn.Embedding(self.len_session, self.dim)
        self.ssl_task = SSLTask(opt)
        self.item_conv = GlobalItemConv(layers=opt.layers)
        self.w_k = opt.w_k
        self.adj = adj
        self.dropout = opt.dropout
        self.n_sessions = n_train_sessions
        # NOTE(review): memory_bank is allocated but never read or written in
        # this class — confirm whether it is consumed elsewhere or is dead state.
        self.memory_bank = torch.empty((n_train_sessions, self.dim))
        # Parameters for reverse-position attention pooling.
        self.w_1 = nn.Parameter(torch.Tensor(2 * self.dim, self.dim))
        self.w_2 = nn.Parameter(torch.Tensor(self.dim, 1))
        self.glu1 = nn.Linear(self.dim, self.dim)
        self.glu2 = nn.Linear(self.dim, self.dim, bias=False)
        self.reset_parameters()
    def reset_parameters(self):
        """Uniformly initialize all parameters in [-1/sqrt(dim), 1/sqrt(dim)]."""
        stdv = 1.0 / math.sqrt(self.dim)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
    def compute_sess_emb(self, item_seq, hidden, rev_pos=True, attn=True):
        """Pool per-item hidden states into one embedding per session.

        item_seq: padded item-id sequences (0 = padding); hidden: the matching
        item representations. With rev_pos, reversed positional embeddings are
        mixed in; with attn, a learned soft-attention weights each position.
        """
        batch_size = hidden.shape[0]
        # NOTE: `len` shadows the builtin here; it is the (padded) sequence length.
        len = hidden.shape[1]
        # mask is 1 for real items, 0 for padding positions.
        mask = torch.unsqueeze((item_seq != 0), -1)
        # hs: mean of the non-padding item embeddings per session.
        hs = torch.sum(hidden * mask, -2) / torch.sum(mask, 1)
        hs = hs.unsqueeze(-2).repeat(1, len, 1)
        nh = hidden
        if rev_pos:
            pos_emb = self.pos_embedding.weight[:len]
            pos_emb = torch.flip(pos_emb, [0]) # reverse order
            pos_emb = pos_emb.unsqueeze(0).repeat(batch_size, 1, 1)
            nh = torch.matmul(torch.cat([pos_emb, hidden], -1), self.w_1)
            nh = torch.tanh(nh)
        # Gated combination of position-aware states with the session mean.
        nh = torch.sigmoid(self.glu1(nh) + self.glu2(hs))
        if attn:
            beta = torch.matmul(nh, self.w_2)
            beta = beta * mask
            sess_emb = torch.sum(beta * hidden, 1)
        else:
            sess_emb = torch.sum(nh * hidden, 1)
        return sess_emb
    def compute_con_loss(self, batch, sess_emb, item_embs):
        """Contrastive (SSL) loss built from each session's last item and the
        sampled positive/negative last items and targets in *batch*."""
        mask = torch.unsqueeze((batch['inputs'] != 0), -1)
        # Position of the last non-padding item in each session.
        last_item_pos = torch.sum(mask, dim=1) - 1
        last_items = torch.gather(batch['inputs'], dim=1, index=last_item_pos).squeeze()
        last_items_emb = item_embs[last_items]
        pos_last_items_emb = item_embs[batch['pos_last_items']]
        neg_last_items_emb = item_embs[batch['neg_last_items']]
        pos_target_item_emb = item_embs[batch['targets']]
        neg_targets_item_emb = item_embs[batch['neg_targets']]
        con_loss = self.ssl_task(sess_emb, last_items_emb, pos_last_items_emb, neg_last_items_emb,
                                 pos_target_item_emb, neg_targets_item_emb)
        return con_loss
    def forward(self, batch, cl=False):
        """Return item scores for each session, plus the contrastive loss if cl=True.

        When cl is False the returned con_loss is an empty placeholder tensor.
        """
        items, inputs, alias_inputs = batch['items'], batch['inputs'], batch['alias_inputs']
        # Propagate item embeddings over the global graph, then gather per session.
        graph_item_embs = self.item_conv(self.item_embedding.weight, self.adj)
        hidden = graph_item_embs[items]
        # dropout
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        alias_inputs = alias_inputs.view(-1, alias_inputs.size(1), 1).expand(-1, -1, self.dim)
        seq_hidden = torch.gather(hidden, dim=1, index=alias_inputs)
        # reverse position attention
        sess_emb = self.compute_sess_emb(inputs, seq_hidden, rev_pos=True, attn=True)
        # weighted L2 normalization: NISER, DSAN, STAN, COTREC
        select = self.w_k * F.normalize(sess_emb, dim=-1, p=2)
        graph_item_embs_norm = F.normalize(graph_item_embs, dim=-1, p=2)
        scores = torch.matmul(select, graph_item_embs_norm.transpose(1, 0))
        # NOTE(review): torch.Tensor(0) is a size-0 uninitialized tensor, not a
        # zero scalar — confirm downstream code only uses con_loss when cl=True.
        con_loss = torch.Tensor(0)
        if cl:
            con_loss = self.compute_con_loss(batch, select, graph_item_embs_norm)
        return scores, con_loss
| dbis-uibk/SPARE | model/recommender.py | recommender.py | py | 4,257 | python | en | code | 3 | github-code | 36 |
29725189606 | import pandas as pd
import numpy as np
def iat_get_dscore_each_stim(df,subject,rt,block,condition,stimulus,cond1,cond2,stimulus_blocks_unused=None,blocks=None,weighted=True):
    '''
    Compute an IAT D score for each individual stimulus (i.e. word), per subject.

    With weighted=True, the Greenwald-style algorithm is used: a D score is
    computed within each block pair (condition mean difference divided by that
    pair's pooled SD) and the two pair scores are averaged. With weighted=False,
    the overall condition mean difference is divided by the overall SD.

    Returns a Series indexed by (subject, stimulus).

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
def iat_get_dscore_across_stim(df,subject,rt,block,condition,cond1,cond2,blocks,weighted):
    '''
    Compute the standard IAT D score across all stimuli (i.e. words), per subject.

    With weighted=True a D score is computed within each block pair (first pair =
    blocks[0]/blocks[2], second pair = blocks[1]/blocks[3] after sorting) and the
    two are averaged; the result is a DataFrame with columns
    ['dscore1','dscore2','dscore']. With weighted=False a single Series named
    'dscore' is returned (overall mean difference / overall SD).

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    # Keep only the two conditions being compared.
    df=df[(df[condition]==cond1)|(df[condition]==cond2)]
    if weighted==True:
        blocks=sorted(blocks)
        blcnd_rt=df.groupby([subject,condition,block])[rt].mean()
        #Get mean RT for each block of each condition
        cond1rt_bl1=blcnd_rt.loc[idx[:,cond1,[blocks[0],blocks[2]]]]
        cond1rt_bl2=blcnd_rt.loc[idx[:,cond1,[blocks[1],blocks[3]]]]
        cond2rt_bl1=blcnd_rt.loc[idx[:,cond2,[blocks[0],blocks[2]]]]
        cond2rt_bl2=blcnd_rt.loc[idx[:,cond2,[blocks[1],blocks[3]]]]
        #Drop block and condidition levels to subtract means
        for df_tmp in [cond1rt_bl1,cond1rt_bl2,cond2rt_bl1,cond2rt_bl2]:
            df_tmp.index=df_tmp.index.droplevel([1,2])
        #Get RT standard deviation separately for first and second blocks
        b1rt_std=df[(df[block]==blocks[0])|(df[block]==blocks[2])].groupby(subject)[rt].std()
        b2rt_std=df[(df[block]==blocks[1])|(df[block]==blocks[3])].groupby(subject)[rt].std()
        #Get D score (one per block pair, then their average)
        d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
        d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
        d=(d1+d2)/2
        d=pd.concat([d1,d2,d],axis=1)
        d.columns=['dscore1','dscore2','dscore']
        return(d)
    elif weighted==False:
        cnds = df.groupby([subject,condition])
        d = (cnds[rt].mean().unstack()[cond1]-cnds[rt].mean().unstack()[cond2])/df.groupby(subject)[rt].std()
        d.name='dscore'
        return(d)
def biat_get_dscore_each_stim(df,subject,rt,block,condition,stimulus,cond1,cond2,blocks,weighted):
    '''
    Compute a Brief IAT D score for each individual stimulus (i.e. word), per subject.

    With weighted=True, a D score is computed within each block pair (2, 4 or 6
    blocks supported) and the pair scores are averaged. With weighted=False the
    overall condition mean difference is divided by the overall SD, per
    (subject, stimulus).

    Fixed: the 6-block weighted case previously averaged three pair scores by
    dividing by 2; it now divides by 3, matching biat_get_dscore_across_stim.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    # Keep only the two conditions being compared.
    df=df[(df[condition]==cond1)|(df[condition]==cond2)]
    if weighted==True:
        blocks=sorted(blocks)
        blcnd_rt=df.groupby([subject,stimulus,condition,block])[rt].mean()
        #Get mean RT for each block pair of each condition
        cond1rt_bl1=blcnd_rt.loc[idx[:,:,cond1,[blocks[0],blocks[1]]]]
        cond2rt_bl1=blcnd_rt.loc[idx[:,:,cond2,[blocks[0],blocks[1]]]]
        #Drop block and condition levels to subtract means
        cond1rt_bl1.index=cond1rt_bl1.index.droplevel([2,3])
        cond2rt_bl1.index=cond2rt_bl1.index.droplevel([2,3])
        #RT standard deviation pooled over the first block pair
        b1rt_std=df[(df[block]==blocks[0])|(df[block]==blocks[1])].groupby([subject,stimulus])[rt].std()
        if len(blocks)>=4:
            cond1rt_bl2=blcnd_rt.loc[idx[:,:,cond1,[blocks[2],blocks[3]]]]
            cond2rt_bl2=blcnd_rt.loc[idx[:,:,cond2,[blocks[2],blocks[3]]]]
            #Drop block and condition levels to subtract means
            cond1rt_bl2.index=cond1rt_bl2.index.droplevel([2,3])
            cond2rt_bl2.index=cond2rt_bl2.index.droplevel([2,3])
            b2rt_std=df[(df[block]==blocks[2])|(df[block]==blocks[3])].groupby([subject,stimulus])[rt].std()
        if len(blocks)>=6:
            cond1rt_bl3=blcnd_rt.loc[idx[:,:,cond1,[blocks[4],blocks[5]]]]
            cond2rt_bl3=blcnd_rt.loc[idx[:,:,cond2,[blocks[4],blocks[5]]]]
            #Drop block and condition levels to subtract means
            cond1rt_bl3.index=cond1rt_bl3.index.droplevel([2,3])
            cond2rt_bl3.index=cond2rt_bl3.index.droplevel([2,3])
            b3rt_std=df[(df[block]==blocks[4])|(df[block]==blocks[5])].groupby([subject,stimulus])[rt].std()
        if len(blocks)==2:
            d=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
        elif len(blocks)==4:
            d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
            d=(d1+d2)/2
        elif len(blocks)==6:
            d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
            d3=(cond1rt_bl3-cond2rt_bl3)/b3rt_std
            # Average of three pair scores: divide by 3 (was erroneously /2).
            d=(d1+d2+d3)/3
    elif weighted==False:
        cnds = df.groupby([subject,stimulus,condition])
        d = (cnds[rt].mean().unstack()[cond1]-cnds[rt].mean().unstack()[cond2])/df.groupby([subject,stimulus])[rt].std()
    return(d)
def biat_get_dscore_across_stim(df,subject,rt,block,condition,cond1,cond2,blocks,weighted):
    '''
    Compute a Brief IAT D score across all stimuli, per subject.

    With weighted=True, a D score is computed within each block pair (2, 4 or 6
    blocks supported) and the pair scores are averaged; for 4 or 6 blocks a
    DataFrame with the per-pair and overall scores is returned. With
    weighted=False a single Series named 'dscore' is returned.

    Fixed: the unweighted branch previously grouped by an undefined name
    `stimulus` (NameError). Scoring across stimuli groups by subject and
    condition only, mirroring iat_get_dscore_across_stim.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    # Keep only the two conditions being compared.
    df=df[(df[condition]==cond1)|(df[condition]==cond2)]
    if weighted==True:
        blocks=sorted(blocks)
        blcnd_rt=df.groupby([subject,condition,block])[rt].mean()
        #Get mean RT for each block pair of each condition
        cond1rt_bl1=blcnd_rt.loc[idx[:,cond1,[blocks[0],blocks[1]]]]
        cond2rt_bl1=blcnd_rt.loc[idx[:,cond2,[blocks[0],blocks[1]]]]
        #Drop block and condition levels to subtract means
        cond1rt_bl1.index=cond1rt_bl1.index.droplevel([1,2])
        cond2rt_bl1.index=cond2rt_bl1.index.droplevel([1,2])
        #RT standard deviation pooled over the first block pair
        b1rt_std=df[(df[block]==blocks[0])|(df[block]==blocks[1])].groupby([subject])[rt].std()
        if len(blocks)>=4:
            cond1rt_bl2=blcnd_rt.loc[idx[:,cond1,[blocks[2],blocks[3]]]]
            cond2rt_bl2=blcnd_rt.loc[idx[:,cond2,[blocks[2],blocks[3]]]]
            #Drop block and condition levels to subtract means
            cond1rt_bl2.index=cond1rt_bl2.index.droplevel([1,2])
            cond2rt_bl2.index=cond2rt_bl2.index.droplevel([1,2])
            b2rt_std=df[(df[block]==blocks[2])|(df[block]==blocks[3])].groupby([subject])[rt].std()
        if len(blocks)>=6:
            cond1rt_bl3=blcnd_rt.loc[idx[:,cond1,[blocks[4],blocks[5]]]]
            cond2rt_bl3=blcnd_rt.loc[idx[:,cond2,[blocks[4],blocks[5]]]]
            #Drop block and condition levels to subtract means
            cond1rt_bl3.index=cond1rt_bl3.index.droplevel([1,2])
            cond2rt_bl3.index=cond2rt_bl3.index.droplevel([1,2])
            b3rt_std=df[(df[block]==blocks[4])|(df[block]==blocks[5])].groupby([subject])[rt].std()
        if len(blocks)==2:
            d=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d.name='dscore'
        elif len(blocks)==4:
            #Get D score
            d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
            d=(d1+d2)/2
            d=pd.concat([d1,d2,d],axis=1)
            d.columns=['dscore1','dscore2','dscore']
        elif len(blocks)==6:
            #Get D score
            d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
            d3=(cond1rt_bl3-cond2rt_bl3)/b3rt_std
            d=(d1+d2+d3)/3
            d=pd.concat([d1,d2,d3,d],axis=1)
            d.columns=['dscore1','dscore2','dscore3','dscore']
        return(d)
    elif weighted==False:
        # Across-stimulus scoring: group by subject and condition only
        # (previously grouped by an undefined `stimulus` name).
        cnds = df.groupby([subject,condition])
        d = (cnds[rt].mean().unstack()[cond1]-cnds[rt].mean().unstack()[cond2])/df.groupby(subject)[rt].std()
        d.name='dscore'
        return(d)
def iat_get_dscore(df,subject,rt,block,condition,cond1,cond2,blocks,weighted,biat,each_stim,stimulus):
    '''
    Dispatch to the appropriate D-score routine based on *biat* (Brief IAT vs.
    standard IAT) and *each_stim* (per-stimulus vs. across-stimulus scoring),
    then normalize the result shape: per-stimulus results are unstacked to one
    column per stimulus, and unweighted across-stimulus Series are converted to
    single-column DataFrames.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    #Get D scores
    if biat==False:
        if each_stim==False:
            d=iat_get_dscore_across_stim(df,subject,rt,block,condition,cond1,cond2,blocks,weighted)
            if weighted == False:
                # Weighted output is already a DataFrame; unweighted is a Series.
                d=d.to_frame()
        elif each_stim==True:
            d=iat_get_dscore_each_stim(df,subject,rt,block,condition,stimulus,cond1,cond2,blocks,weighted)
            d=d.unstack()
    elif biat==True:
        if each_stim==False:
            d=biat_get_dscore_across_stim(df,subject,rt,block,condition,cond1,cond2,blocks,weighted)
            if weighted == False:
                # Weighted output is already a DataFrame; unweighted is a Series.
                d=d.to_frame()
        elif each_stim==True:
            d=biat_get_dscore_each_stim(df,subject,rt,block,condition,stimulus,cond1,cond2,blocks,weighted)
            d=d.unstack()
    return(d)
def overall_fast_slow_stats(df,rt,fast_rt,slow_rt,subject,flags):
    '''
    Return counts and percentages of too-fast / too-slow trials, computed both
    across all subjects and across only subjects whose `iat_flag` in *flags*
    is 0 (i.e. not flagged for exclusion). *flags* is indexed by subject.

    Returns a single-column DataFrame ('fast_slow_rt') with eight labeled rows.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    #Count all fast and slow trials across all subjects
    all_fast_rt_count_all_subs=df[df[rt]<fast_rt][rt].count()
    all_slow_rt_count_all_subs=df[df[rt]>=slow_rt][rt].count()
    all_fast_rt_pct_all_subs=df[df[rt]<fast_rt][rt].count()/df[rt].count().astype(float)
    all_slow_rt_pct_all_subs=df[df[rt]>=slow_rt][rt].count()/df[rt].count().astype(float)
    #Now remove subjects with flags and recount
    df_no_flag=df[df[subject].isin(flags[flags.iat_flag==0].index)].copy(deep=True)
    all_fast_rt_count_incl_subs=df_no_flag[(df_no_flag[rt]<fast_rt)][rt].count()
    all_slow_rt_count_incl_subs=df_no_flag[(df_no_flag[rt]>=slow_rt)][rt].count()
    all_fast_rt_pct_incl_subs=df_no_flag[(df_no_flag[rt]<fast_rt)][rt].count()/df_no_flag[rt].count().astype(float)
    all_slow_rt_pct_incl_subs=df_no_flag[(df_no_flag[rt]>=slow_rt)][rt].count()/df_no_flag[rt].count().astype(float)
    all_fast_slow_rt=pd.DataFrame([all_fast_rt_count_all_subs,all_fast_rt_pct_all_subs,\
                                   all_slow_rt_count_all_subs,all_slow_rt_pct_all_subs,\
                                   all_fast_rt_count_incl_subs,all_fast_rt_pct_incl_subs,\
                                   all_slow_rt_count_incl_subs,all_slow_rt_pct_incl_subs],
                                  index=['fast_rt_count_all_subs','fast_rt_pct_all_subs',\
                                         'slow_rt_count_all_subs','slow_rt_pct_all_subs',\
                                         'fast_rt_count_included_subs','fast_rt_pct_included_subs',\
                                         'slow_rt_count_included_subs','slow_rt_pct_included_subs']\
                                  ,columns=['fast_slow_rt'])
    return(all_fast_slow_rt)
def blcnd_extract(df,var,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat='pct',include_blocks=True):
    '''
    Generic aggregation helper: summarize column *var* (mean for 'pct', 'sum',
    or 'count' per *flag_outformat*) per subject, per condition, and — when
    include_blocks is True — per condition-and-block pairing. For a standard
    IAT (biat=False) block pairs are blocks[0]/blocks[2] and blocks[1]/blocks[3];
    for a BIAT (biat=True) pairs are consecutive, and 2/4/6 blocks are handled.

    Returns a DataFrame with one column per aggregation, indexed by subject.
    NOTE: for the block-pair columns each subject contributes one row per
    block in the pair (duplicate subject index entries).

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    if flag_outformat=='pct':
        all_df=df.groupby(subject)[var].mean()
        ##By condition
        cond1_df=df[(df[condition]==cond1)].groupby(subject)[var].mean()
        cond2_df=df[(df[condition]==cond2)].groupby(subject)[var].mean()
        ##By condition and block
        if include_blocks == True:
            blcnd=df.groupby([subject,condition,block])[var].mean()
    elif flag_outformat=='sum':
        all_df=df.groupby(subject)[var].sum()
        ##By condition
        cond1_df=df[(df[condition]==cond1)].groupby(subject)[var].sum()
        cond2_df=df[(df[condition]==cond2)].groupby(subject)[var].sum()
        ##By condition and block
        if include_blocks == True:
            blcnd=df.groupby([subject,condition,block])[var].sum()
    elif flag_outformat=='count':
        all_df=df.groupby(subject)[var].count()
        ##By condition
        cond1_df=df[(df[condition]==cond1)].groupby(subject)[var].count()
        cond2_df=df[(df[condition]==cond2)].groupby(subject)[var].count()
        ##By condition and block
        if include_blocks == True:
            blcnd=df.groupby([subject,condition,block])[var].count()
    if (include_blocks == True) and (biat==False):
        # Standard IAT: pairs are interleaved (blocks 0&2, blocks 1&3 of the sorted list).
        cond1_bl1=blcnd.loc[idx[:,cond1,[blocks[0],blocks[2]]]]
        cond1_bl2=blcnd.loc[idx[:,cond1,[blocks[1],blocks[3]]]]
        cond2_bl1=blcnd.loc[idx[:,cond2,[blocks[0],blocks[2]]]]
        cond2_bl2=blcnd.loc[idx[:,cond2,[blocks[1],blocks[3]]]]
        #Drop block and condidition levels to subtract means
        for df_tmp in [cond1_bl1,cond1_bl2,cond2_bl1,cond2_bl2]:
            df_tmp.index=df_tmp.index.droplevel([1,2])
        out=pd.concat([all_df,cond1_df,cond2_df,cond1_bl1,cond1_bl2,cond2_bl1,cond2_bl2],axis=1)
    elif (include_blocks == True) and (biat==True):
        # BIAT: pairs are consecutive blocks; handle 2, 4 or 6 blocks incrementally.
        if len(blocks)>=2:
            cond1_bl1=blcnd.loc[idx[:,cond1,[blocks[0],blocks[1]]]]
            cond2_bl1=blcnd.loc[idx[:,cond2,[blocks[0],blocks[1]]]]
            for df_tmp in [cond1_bl1,cond2_bl1]:
                df_tmp.index=df_tmp.index.droplevel([1,2])
            out=pd.concat([all_df,cond1_df,cond2_df,cond1_bl1,cond2_bl1],axis=1)
        if len(blocks)>=4:
            cond1_bl2=blcnd.loc[idx[:,cond1,[blocks[2],blocks[3]]]]
            cond2_bl2=blcnd.loc[idx[:,cond2,[blocks[2],blocks[3]]]]
            for df_tmp in [cond1_bl2,cond2_bl2]:
                df_tmp.index=df_tmp.index.droplevel([1,2])
            out=pd.concat([out,cond1_bl2,cond2_bl2],axis=1)
        if len(blocks)==6:
            cond1_bl3=blcnd.loc[idx[:,cond1,[blocks[4],blocks[5]]]]
            cond2_bl3=blcnd.loc[idx[:,cond2,[blocks[4],blocks[5]]]]
            for df_tmp in [cond1_bl3,cond2_bl3]:
                df_tmp.index=df_tmp.index.droplevel([1,2])
            out=pd.concat([out,cond1_bl3,cond2_bl3],axis=1)
    elif include_blocks == False:
        out=pd.concat([all_df,cond1_df,cond2_df],axis=1)
    return(out)
def error_fastslow_column_names(cond1,cond2,fast_rt,slow_rt,blocks,weighted):
    '''
    Build output column names embedding the condition names and the fast/slow
    RT cutoffs (in ms). With weighted=True, per-block-pair columns and a final
    'num_blocks' column are included. Returns (col_names, flag_col_names),
    where each flag name is the rate name with a '_flag' suffix.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    if weighted == True:
        num_block_pairs = int(len(blocks) / 2)

        def per_block_names(template, *extra):
            # One column per condition per block pair, conditions interleaved.
            names = []
            for bl in range(1, num_block_pairs + 1):
                names.append(template % ((cond1, bl) + extra))
                names.append(template % ((cond2, bl) + extra))
            return names

        col_names = ['overall_error_rate', '%s_error_rate' % cond1, '%s_error_rate' % cond2]
        col_names += per_block_names('%s_bl%d_error_rate')
        col_names += ['overall_fast_rt_rate_%dms' % fast_rt,
                      '%s_fast_rt_rate_%dms' % (cond1, fast_rt),
                      '%s_fast_rt_rate_%dms' % (cond2, fast_rt)]
        col_names += per_block_names('%s_bl%d_fast_rt_rate_%dms', fast_rt)
        col_names += ['overall_slow_rt_rate_%dms' % slow_rt,
                      '%s_slow_rt_rate_%dms' % (cond1, slow_rt),
                      '%s_slow_rt_rate_%dms' % (cond2, slow_rt)]
        col_names += per_block_names('%s_bl%d_slow_rt_rate_%dms', slow_rt)
        col_names.append('num_blocks')
    elif weighted == False:
        col_names = ['overall_error_rate', '%s_error_rate' % cond1, '%s_error_rate' % cond2,
                     'overall_fast_rt_rate_%dms' % fast_rt,
                     '%s_fast_rt_rate_%dms' % (cond1, fast_rt), '%s_fast_rt_rate_%dms' % (cond2, fast_rt),
                     'overall_slow_rt_rate_%dms' % slow_rt,
                     '%s_slow_rt_rate_%dms' % (cond1, slow_rt), '%s_slow_rt_rate_%dms' % (cond2, slow_rt)]
    # Flag columns (1/0 flags for errors and fast/slow rates) mirror the rate columns.
    flag_col_names = ['%s_flag' % name for name in col_names]
    return (col_names, flag_col_names)
def num_trls_column_names(cond1,cond2,fast_rt,slow_rt,blocks,incl_excl_switch,weighted):
    '''Column names for trial counts overall, per condition and (when weighted)
    per block pair. *incl_excl_switch* ('incl'/'excl') marks whether counts are
    before or after excluding fast/slow trials. fast_rt and slow_rt are accepted
    for signature parity with error_fastslow_column_names but are unused.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    names = ['overall_num_trls_%s_fastslow_rt' % incl_excl_switch,
             '%s_num_trls_%s_fastslow_rt' % (cond1, incl_excl_switch),
             '%s_num_trls_%s_fastslow_rt' % (cond2, incl_excl_switch)]
    if weighted == True:
        for bl in range(1, int(len(blocks) / 2) + 1):
            for cond in (cond1, cond2):
                names.append('%s_bl%d_num_trls_%s_fastslow_rt' % (cond, bl, incl_excl_switch))
    return(names)
def get_error_fastslow_rates(df,correct,subject,condition,block,cond1,cond2,blocks,flag_outformat,include_blocks,\
                rt,fast_rt,slow_rt,error_or_correct,weighted,errors_after_fastslow_rmvd,df_fastslow_rts_rmvd,biat):
    '''
    Use blcnd_extract to compute error rates and the rates of too-fast and
    too-slow trials (overall / per condition / per block-pair). Error rates can
    be computed on all trials or on *df_fastslow_rts_rmvd* (fast/slow trials
    already removed), per *errors_after_fastslow_rmvd*.

    NOTE: mutates *df* in place by adding the 'fast_rt' and 'slow_rt' 0/1
    indicator columns. Returns a list of DataFrames (errors, fast, slow, plus
    number of blocks per subject when weighted).

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    ##Errors
    if errors_after_fastslow_rmvd == False:
        df_err=df
    elif errors_after_fastslow_rmvd == True:
        df_err=df_fastslow_rts_rmvd
    ###Can enter either column where errors are 1 and correct responses are 0 or vice versa
    if error_or_correct=='error':
        err_vars=blcnd_extract(df_err,correct,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat,include_blocks)
    elif error_or_correct=='correct':
        # Correct-coded column: error rate is the complement of the accuracy rate.
        err_vars=1-blcnd_extract(df_err,correct,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat,include_blocks)
    #Fast RT
    df['fast_rt']=(df[rt]<fast_rt)*1
    fast_rt_vars=blcnd_extract(df,'fast_rt',subject,condition,block,cond1,cond2,blocks,biat,flag_outformat,include_blocks)
    #Slow RT
    df['slow_rt']=(df[rt]>=slow_rt)*1
    slow_rt_vars=blcnd_extract(df,'slow_rt',subject,condition,block,cond1,cond2,blocks,biat,flag_outformat,include_blocks)
    if weighted == True:
        ## Number of blocks for each subject
        num_blocks=df.groupby([subject])[block].unique().apply(lambda x: len(x))
        outcms=[err_vars,\
                fast_rt_vars,\
                slow_rt_vars,\
                num_blocks]
    elif weighted == False:
        outcms=[err_vars,\
                fast_rt_vars,\
                slow_rt_vars]
    return(outcms)
def analyze_iat(df,subject,rt,correct,condition,cond1,cond2,block='block',blocks=[2,3,5,6],weighted=True,\
fast_rt=400,slow_rt=10000,\
overall_err_cut=.3,cond_err_cut=.4,block_err_cut=.4,\
overall_fastslowRT_cut=.10,cond_fastslowRT_cut=.25,block_fastslowRT_cut=.25,\
num_blocks_cutoff=4,\
fastslow_stats=False,biat=False,biat_rmv_xtrls=4,biat_trl_num=False,\
error_or_correct='correct',errors_after_fastslow_rmvd=False,flag_outformat='pct',print_to_excel=False,\
each_stim=False,stimulus=False):
"""Takes a dataframe containing raw IAT (or BIAT) data (all trials, all subjects) and returns
the number of blocks, percentage of errors, reaction times that are too fast and too slow,
flags to remove subjects and D scores for each subject.
Parameters
----------
df : pandas dataframe
Trial x trial IAT data for each subject
subject : str
Column name containing subject number
rt : str
Column name containing reaction time (in ms) for each trial
correct : str
Column name containing whether trial was correct (where correct = 1, error = 0)
(can also use if columns specifies errors; see 'error_or_correct' parameter)
condition : str
Column name containing condition (e.g. Black-Good\White-Bad vs. Black-Bad\White-Good)
cond1 : str
Name of first condition (e.g. 'Black-Good\White-Bad'): bias for this condition will result in negative D score
cond2 : str
Name of second condition (e.g. 'Black-Bad\White-Good'): bias for this condition will result in positive D score
block : str
Column that contains block information
blocks : list
A list containing the numbers corresponding to the relevant blocks, default : [2,3,5,6]
weighted : Boolean
If True return weighted D scores; if False return unweighted D scores, default : True
fast_rt : int
Reaction time (in ms) considered too fast, default: 400
slow_rt : int
Reaction time (in ms) considered too slow, default: 10000
overall_err_cut : float
Cutoff for subject exclusion: overall error rate (decimal), default : .3
cond_err_cut : float
Cutoff for subject exclusion: error rate (decimal) within each condition, default : .4
block_err_cut : float
Cutoff for subject exclusion: error rate (decimal) within a single block, default : .4
overall_fastslowRT_cut=.10
Cutoff for subject exclusion: overall rate of trials with too fast or too slow RT (decimal), default : .1
cond_fastslowRT_cut : float
Cutoff for subject exclusion: rate of trials with too fast or too slow RT (decimal) within each condition, default : .25
block_fastslowRT_cut : float
Cutoff for subject exclusion: rate of trials with too fast or too slow RT (decimal) within each block, default : .25
num_blocks_cutoff : int
Cutoff for subject exclusion: Minimum number of blocks required, default : 4
error_or_correct : str
Enter 'error' to enter a column for 'correct' where error = 1, correct = 0, default: 'correct'
errors_after_fastslow_rmvd : Boolean
If True calculates error rates after removing all fast\slow trials (similar to R package iat); if False error rates calculated with all trials, default : False
fastslow_stats : Boolean
Return a second dataframe containing the number and percentage of fast\slow trials across all subjects
and across subjects with usable data, default : False
biat : Boolean
Enter True if analyzing a Brief Implicit Assoc Test (BIAT), False if regular IAT, default : False
*** One open issue with BIAT flags in pyiat is that currently flags for fast and slow trials use the same cutoff pct.
Recommended scoring procedures (Nosek et al. 2014) recommend a flag for fast trials but not slow.
This is not currently possible in pyiat. However, you can see the pct of slow and fast trials
and create your own flags from this.***
biat_rmv_xtrls : int
Number of trials to remove from beginning of each block. BIAT recommendad scoring procedures (Nosek et al. 2014) remove first 4 trials of each block b/c
they are practice trials but not all BIAT have practice trials, default : 4
biat_trl_num : str
The name of the column that contains trial number, default : False
flag_outformat : str
Can enter 'count' to return number of errors and too fast\slow trials (if fastslow_stats set to True), default : 'pct'
print_to_excel : Boolean
Print an excel workbook that contains output, default : False
each_stim : Boolean
Return D scores for each individual stimulus (i.e. word), default : False
stimulus : Boolean
If each stim = True, then give name of column containing each stimulus (i.e. word), default : False
Returns
-------
pandas DataFrame with
-error rates (overall, each condition, each block (error rates *include* fast\slow trials)),
-rates of fast\slow trials (overall, each condition, each block)
-exclusion flags (overall flag indicating subject should be excluded and for each category informing why subject was flagged)
-D scores (overall and block 1 and block 2 if weighted)
if fastslow_stats = True:
pandas DataFrame with rates of fast\slow trials across all subjects and across only subjects NOT flagged for exclusion
(to report the overall number\pct of trials excluded from a study)
Examples
--------
>>> weighted_d,fastslow_stats_df=iat(it,subject='session_id',rt='latency',
... condition='cond',correct='correct',
... cond1='nosh_me',cond2='sh_me',block='block',
... blocks=[2,3,5,6],fastslow_stats=True,each_stim=False,
... stimulus='trial_name')
Copyright (C) 2017 Alexander Millner <alexmillner@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
idx=pd.IndexSlice
df=df[(df[condition]==cond1)|(df[condition]==cond2)].copy(deep=True)
if df[df[correct]>1].shape[0]!=0 or df[df[correct]<0].shape[0]!=0:
raise ValueError('The \'correct\' column can only contain the values 0 and 1')
#For weighted d scores, we return all block-related stats whereas
#for unweighted we are just comparing conditions and care less about blocks
include_blocks=weighted
#Make column names
col_names,flag_col_names=error_fastslow_column_names(cond1,cond2,fast_rt,slow_rt,blocks,weighted)
block_num_col_names_incl=num_trls_column_names(cond1,cond2,fast_rt,slow_rt,blocks,'incl',weighted)
block_num_col_names_excl=num_trls_column_names(cond1,cond2,fast_rt,slow_rt,blocks,'excl',weighted)
if biat == True:
df_orig=df.copy()
#This finds all unique trials numbers, sorts them and must be greater than the 4th item
df=df[df[biat_trl_num]>=sorted(df[biat_trl_num].unique())[biat_rmv_xtrls]]
df.loc[(df[rt]>2000)&(df[rt]<10000),rt]=2000
df.loc[df[rt]<400,rt]=400
#Make dfs where trials that are too fast or too slow are removed
df_fastslow_rts_rmvd=df[-(df[rt]>=slow_rt)]
if biat == False:
df_fastslow_rts_rmvd=df_fastslow_rts_rmvd[-(df_fastslow_rts_rmvd[rt]<fast_rt)]
#Get error and fast\slow trials
outcms=get_error_fastslow_rates(df,correct,subject,condition,block,cond1,cond2,blocks,flag_outformat,include_blocks,\
rt,fast_rt,slow_rt,error_or_correct,weighted,errors_after_fastslow_rmvd,df_fastslow_rts_rmvd,biat)
#Figure out number of trials after removing fast\slow rt trials
#in each block and total number of fast and slow trials (and remove them)
pre_trl_count_vars=blcnd_extract(df,rt,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat='count',include_blocks=include_blocks)
pre_trl_count_vars.columns=block_num_col_names_incl
post_trl_count_vars=blcnd_extract(df_fastslow_rts_rmvd,rt,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat='count',include_blocks=include_blocks)
post_trl_count_vars.columns=block_num_col_names_excl
if weighted == True:
##Cutoffs for the pct of errors or fast or slow trials that's considered excessive
cutoffs=[overall_err_cut,cond_err_cut,cond_err_cut]
cutoffs.extend(list(np.repeat(block_err_cut,len(blocks))))
cutoffs.extend([overall_fastslowRT_cut,cond_fastslowRT_cut,cond_fastslowRT_cut])
cutoffs.extend(list(np.repeat(block_fastslowRT_cut,len(blocks))))
cutoffs.extend([overall_fastslowRT_cut,cond_fastslowRT_cut,cond_fastslowRT_cut])
cutoffs.extend(list(np.repeat(block_fastslowRT_cut,len(blocks))))
cutoffs.append(num_blocks_cutoff)
elif weighted == False:
##Cutoffs for the pct of errors or fast or slow trials that's considered excessive
cutoffs=[overall_err_cut,cond_err_cut,cond_err_cut,\
overall_fastslowRT_cut,cond_fastslowRT_cut,cond_fastslowRT_cut,\
overall_fastslowRT_cut,cond_fastslowRT_cut,cond_fastslowRT_cut]
#Put together and put into rates - containing just the rates -
#and flags (i.e. whether the rate ) is over a threshold
flags=pd.DataFrame(columns=flag_col_names,index=(df.groupby([subject])[subject].apply(lambda x: x.unique()[0])).tolist())
rates=pd.concat(outcms,axis=1)
rates.columns=col_names
for col,fcol,cutoff in zip(col_names,flag_col_names,cutoffs):
if col!='num_blocks':
flags.loc[:,fcol]=((rates[col]>cutoff)*1)
elif col=='num_blocks':
flags.loc[:,fcol]=((rates[col]<cutoff)*1)
flags['iat_flag']=flags.sum(axis=1)
all_num_trl_per_block=pd.concat([pre_trl_count_vars,post_trl_count_vars],axis=1)
#Get D scores with df with removed fast\slow trials
d=iat_get_dscore(df_fastslow_rts_rmvd,subject,rt,block,condition,cond1,cond2,blocks,weighted,biat,each_stim,stimulus)
all_iat_out = pd.concat([all_num_trl_per_block,rates,flags,d],axis=1)
if each_stim==False:
all_iat_out.loc[all_iat_out.dscore.isnull(),'iat_flag']=all_iat_out.loc[all_iat_out.dscore.isnull(),'iat_flag']+1
#Print output to excel
if print_to_excel==True:
from datetime import datetime
dt=datetime.now()
dt=dt.strftime('%m_%d_%Y_%H_%M_%S')
iat_excel = pd.ExcelWriter('pyiat_output_%s.xlsx'%dt)
all_iat_out.to_excel(iat_excel,sheet_name='pyiat')
if fastslow_stats == True:
if biat == True:
df=df_orig
all_fast_slow_rt=overall_fast_slow_stats(df,rt,fast_rt,slow_rt,subject,flags)
if print_to_excel==True:
all_fast_slow_rt.to_excel(iat_excel,sheet_name='Num_Pct_Fast_Slow_RT_Trials')
iat_excel.save()
return(all_iat_out,all_fast_slow_rt)
elif fastslow_stats == False:
if print_to_excel==True:
iat_excel.save()
return(all_iat_out)
| amillner/pyiat | pyiat/pyiat.py | pyiat.py | py | 32,040 | python | en | code | 1 | github-code | 36 |
33147673762 | #!/usr/bin/env python3
from . base_instruction import BaseInstruction
from error_handler import ErrorHandler
class INS_Defvar(BaseInstruction):
def __init__(self, instruction, programMemory):
self.instruction = instruction
self.programMemory = programMemory
def eval(self):
if len(self.instruction['args']) != 1:
ErrorHandler.ERROR_XML_STRUCTURE()
self.validateVar(f"{self.instruction['args']['1']['type']}@{self.instruction['args']['1']['value']}")
varPath = self.instruction['args']['1']['value'].split('@')
self.checkFrameExistence(self.programMemory, varPath[0])
# NOW LETS DEFINE THE VARIABLE IN CERTAIN FRAME!
self.programMemory[varPath[0]][varPath[1]] = {
"type": "",
"value": ""
} | hondem/FIT | ipp_proj_1/instructions/ins_defvar.py | ins_defvar.py | py | 724 | python | en | code | 0 | github-code | 36 |
44682923693 | from flask import Flask, render_template, request, session, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
import wikipedia as wk
import random
import re
from retry import retry
from nltk.tokenize import sent_tokenize
import nltk
nltk.download('all')
#TODO - BETTER TEXT REPLACE HE/HER - WIKIPEDIA BETTER SEARCH (KNOWLEDGE TREE?) - CSS (PACKAGE?)
#------------
app = Flask(__name__)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
app.secret_key = "123"
@app.route('/', methods=['GET',"POST"])
def home():
    """Single-page quiz view: render the game and score button submissions.

    GET shows the start page; POST either starts a new round ('New Try')
    or compares the clicked name against the answer stored in the session,
    adjusting the per-session score.
    """
    def findfamous():
        # Pick a random name from the local list of famous people.
        with open("data/famouspeople.txt","r") as f:
            lines = f.readlines()
        person = random.choice(lines).strip()
        return person
    @retry(FileNotFoundError, delay=1, tries=5)
    def findfacts():
        # Fetch the Wikipedia summary for a random person; any lookup failure
        # is mapped to FileNotFoundError so @retry draws a new name (up to 5x).
        famousperson = findfamous()
        famousperson = famousperson.replace(" ","_")
        try:
            result = wk.summary(famousperson, auto_suggest=False) #sentences = 10
            # NOTE(review): this second replace is a no-op -- spaces were
            # already replaced above.
            famousperson = famousperson.replace(" ","_")
        except Exception as e:
            raise FileNotFoundError
        return(famousperson,result)
    def cleandata(tup):
        # Censor the full name and each of its parts from the summary so the
        # displayed fact does not give the answer away.
        name = tup[0].replace("_"," ")
        text = tup[1]
        prohibitedWords = []
        prohibitedWords.append(name)
        for i in name.split(" "):
            prohibitedWords.append(i)
        big_regex = re.compile('|'.join(map(re.escape, prohibitedWords)))
        result = big_regex.sub("XXXXXXX", text)
        # Crude pronoun neutralisation so he/she does not leak the gender.
        result = result.replace(" She "," They ").replace(" He "," They ").replace(" His "," Their ").replace(" Her "," Their ")
        #.replace("his","their").replace("her","their")
        # Sentence-split via NLTK and drop the first two sentences (these
        # usually contain the name/birth details).
        print("pre")
        randomlines = sent_tokenize(result)
        randomlines.pop(0)
        randomlines.pop(0)
        print("post")
        randomFact = random.choice(randomlines)
        # Slot (1-3) where the correct answer lands; after the [1:6] slice in
        # gameloop() this becomes index num-1 of the guesses list.
        num = random.randint(1,3)
        return (randomFact,name,num)
    def gameloop():
        # Build one round: a censored fact plus five candidate names, with
        # the correct one hidden among four random decoys.
        result,name,num = (cleandata(findfacts()))
        guesses = [0,0,0,0,0,0]
        guesses[num] = name
        guesses = guesses[1:6]
        for j,i in enumerate(guesses):
            if i == 0:
                guesses[j] = findfamous()
        return result,guesses,name,num
    correctornot="?"
    # Initialise the per-session score once.
    if session.get("points") is not None:
        pass
    else:
        session["points"] = 0
    if request.method == 'POST':
        if request.form['submit_button'] == 'New Try':
            result,guesses,name,num = gameloop()
            # NOTE(review): only the *first* name is stored, but below it is
            # compared against the full button value -- confirm the template
            # renders first names only.
            session['name'] = name.split(" ")[0]
            print("New Try")
            print(guesses)
            return render_template("home.html",result = result, guesses = guesses,correctornot=correctornot,points = session["points"])
        elif request.form['submit_button'] != 'New Try':
            # Any other button value is treated as the player's guess.
            submi = request.form['submit_button']
            print("player clicked button")
            print(submi)
            print(session['name'])
            if submi == session['name']:
                session["points"] = session["points"] + 1
                return render_template("home.html",correctornot=correctornot,result = "correct",points = session["points"])
            if submi != session['name']:
                session["points"] = session["points"] - 1
                return render_template("home.html",correctornot=correctornot,result = "wrong",points = session["points"])
    elif request.method == 'GET':
        print("No Post Back Call")
        return render_template('home.html', result = "Click play to get started!", guesses = [],points = session["points"])
if __name__ == '__main__':
    # Run Flask's built-in development server with default host/port.
    app.run()
36622911721 | #"""Build and train for the AI Models."""
#imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import os
from data_load import DataLoader
import numpy as np
import tensorflow as tf
model_name = ""
def reshape_function(data, label):
    """Reshape a flat feature tensor to (-1, 10, 1) for the CNN input; the label passes through."""
    return tf.reshape(data, [-1, 10, 1]), label
def calculate_model_size(model):
    """Print the model summary and the size of its trainable parameters in KB.

    The size is the sum over trainable variables of
    element_count * bytes_per_element (``v.dtype.size``).

    Args:
        model: a Keras model (anything exposing ``summary()`` and
            ``trainable_variables`` with ``shape``/``dtype.size``).
    """
    print(model.summary())
    var_sizes = [
        # np.product was removed in NumPy 2.0; np.prod is the supported name.
        np.prod(list(map(int, v.shape))) * v.dtype.size
        for v in model.trainable_variables
    ]
    print("Model size:", sum(var_sizes) / 1024, "KB")
def build_cnn(seq_length):
    """Builds a convolutional neural network in Keras.

    The architecture variant is selected via the global ``args.modelnumber``
    ("0" or "1"); the chosen variant's name is recorded in the global
    ``model_name`` so it ends up in the TensorBoard log directory.

    Args:
        seq_length: number of input frames per example; input shape is
            (seq_length, 10, 1).

    Returns:
        (model, model_path): the uncompiled Keras model and the directory
        where CNN models are stored (created if missing).

    Raises:
        ValueError: if ``args.modelnumber`` is neither "0" nor "1".
    """
    global model_name
    if args.modelnumber == "0":
        model_name = "-CNN_model-0"
        # Single conv block: Conv2D -> MaxPooling -> Flatten -> linear readout.
        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Conv2D(
            10, (20, 10),
            padding="same",
            activation="relu",
            input_shape=(seq_length, 10, 1)))
        model.add(tf.keras.layers.MaxPooling2D((3, 3)))
        model.add(tf.keras.layers.Flatten())
        model.add(tf.keras.layers.Dense(9, activation='linear'))
        model.summary()
    elif args.modelnumber == "1":
        model_name = "-CNN_model-1"
        # Deeper variant: two conv blocks with dropout and a dense head.
        model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(
                10, (20, 10),
                padding="same",
                activation="relu",
                input_shape=(seq_length, 10, 1)),
            tf.keras.layers.MaxPool2D((3, 3)),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Conv2D(16, (10, 1), padding="same",
                                   activation="relu"),
            tf.keras.layers.MaxPool2D((3, 1), padding="same"),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Dense(9, activation="relu")
        ])
    else:
        # Previously an unknown number fell through and crashed later with an
        # UnboundLocalError; fail fast with a clear message instead.
        raise ValueError(
            "Unknown CNN model number %r (expected '0' or '1')" % args.modelnumber)
    model_path = os.path.join("./netmodels", "CNN")
    print("Built CNN.")
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    return model, model_path
def build_lstm(seq_length):
    """Builds an LSTM in Keras.

    The architecture variant is selected via the global ``args.modelnumber``
    ("0".."5"); the chosen variant's name is recorded in the global
    ``model_name`` so it ends up in the TensorBoard log directory.

    Experiment notes (seq_length 20, batch 64): models 0/1 performed best so
    far -- Loss 0.94, RMSE 0.97 after 30 epochs.

    Args:
        seq_length: number of input frames per example; input shape is
            (seq_length, 10).

    Returns:
        (model, model_path): the uncompiled Keras model and the directory
        where LSTM models are stored (created if missing).

    Raises:
        ValueError: if ``args.modelnumber`` is not one of "0".."5".
    """
    global model_name
    if args.modelnumber == "0":
        model_name = "-LSTM_model-0"
        # Single 100-unit LSTM with a 9-unit linear readout.
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dense(units=9, activation="linear"),
        ])
        model.summary()
    elif args.modelnumber == "1":
        model_name = "-LSTM_model-1"
        # Same as model 0 plus a Flatten before the readout.
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(units=9, activation="linear"),
        ])
        model.summary()
    elif args.modelnumber == "2":
        model_name = "-LSTM_model-2"
        # 100-unit LSTM with 20% dropout before the linear readout.
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(units=9, activation="linear"),
        ])
        model.summary()
    elif args.modelnumber == "3":
        model_name = "-LSTM_model-3"
        # Like model 2 but with a softmax readout.
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(units=9, activation="softmax"),
        ])
        model.summary()
    elif args.modelnumber == "4":
        model_name = "-LSTM_model-4"
        # Two stacked 100-unit LSTMs and a linear readout.
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100, return_sequences = True),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dense(units=9, activation="linear"),
        ])
        model.summary()
    elif args.modelnumber == "5":
        model_name = "-LSTM_model-5"
        # Bidirectional first layer, dropout, second LSTM, linear readout.
        model = tf.keras.Sequential([
            tf.keras.layers.Bidirectional(
                tf.keras.layers.LSTM(100, return_sequences = True),
                input_shape=(seq_length, 10)),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dense(units=9, activation="linear")
        ])
    else:
        # Previously an unknown number fell through and crashed later with an
        # UnboundLocalError; fail fast with a clear message instead.
        raise ValueError(
            "Unknown LSTM model number %r (expected '0'..'5')" % args.modelnumber)
    model_path = os.path.join("./netmodels", "LSTM")
    print("Built LSTM.")
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    return model, model_path
def load_data(train_data_path, valid_data_path, test_data_path, seq_length):
    """Load the three data splits via DataLoader and return lengths plus datasets.

    Returns a 6-tuple: (train_len, train_data, valid_len, valid_data,
    test_len, test_data).
    """
    loader = DataLoader(
        train_data_path, valid_data_path, test_data_path, seq_length=seq_length)
    loader.format()
    return (loader.train_len, loader.train_data,
            loader.valid_len, loader.valid_data,
            loader.test_len, loader.test_data)
def build_net(args, seq_length):
    """Dispatch to the builder for the requested architecture.

    Args:
        args: parsed CLI namespace; ``args.model`` must be "CNN" or "LSTM".
        seq_length: number of input frames per example, forwarded to the builder.

    Returns:
        (model, model_path) from the selected builder.

    Raises:
        ValueError: for any other ``args.model`` (previously this printed a
        hint and then crashed with an UnboundLocalError on return).
    """
    if args.model == "CNN":
        model, model_path = build_cnn(seq_length)
    elif args.model == "LSTM":
        model, model_path = build_lstm(seq_length)
    else:
        raise ValueError("Please input correct model name.(CNN LSTM)")
    return model, model_path
def train_net(
    model,
    model_path,  # pylint: disable=unused-argument
    train_len,  # pylint: disable=unused-argument
    train_data,
    valid_len,
    valid_data,  # pylint: disable=unused-argument
    test_len,
    test_data,
    kind):
    """Trains the model, evaluates it on the test split and exports TFLite files.

    Args:
        model: uncompiled Keras model from build_net().
        model_path: model directory (currently unused here).
        train_len / valid_len / test_len: number of examples per split.
        train_data / valid_data / test_data: unbatched tf.data datasets of
            (features, label) pairs.
        kind: "CNN" reshapes inputs via reshape_function; anything else
            leaves them untouched.

    Side effects: prints labels and metrics, and writes ``model.tflite`` and
    ``model_quantized.tflite`` to the working directory.
    """
    calculate_model_size(model)
    # Hyperparameters from earlier experiments (batch sizes 10..128 tried;
    # see the archived notes at the bottom of this file).
    epochs = 30
    # How many examples are fed before each weight update.
    batch_size = 64
    rmse = tf.keras.metrics.RootMeanSquaredError()  # NOTE(review): unused; compile() below builds its own
    model.compile(
        optimizer='adam',
        loss='mse',
        metrics=[tf.keras.metrics.RootMeanSquaredError(), "accuracy"])
    if kind == "CNN":
        # The CNN expects a trailing channel dimension: (-1, 10, 1).
        train_data = train_data.map(reshape_function)
        test_data = test_data.map(reshape_function)
        valid_data = valid_data.map(reshape_function)
    # Collect the ground-truth test labels (also printed for inspection).
    test_labels = np.zeros(test_len)
    idx = 0
    for data, label in test_data:  # pylint: disable=unused-variable
        test_labels[idx] = label.numpy()
        print(str(label))
        idx += 1
    # Same for the training labels (debug output only; not used for training).
    print("--> trainTest_labels: ")
    trainTest_labels = np.zeros(train_len)
    idx = 0
    for data, label in train_data:  # pylint: disable=unused-variable
        trainTest_labels[idx] = label.numpy()
        print(str(label))
        idx += 1
    trainTest_data = train_data.batch(batch_size)
    train_data = train_data.batch(batch_size).repeat()
    valid_data = valid_data.batch(batch_size)
    test_data = test_data.batch(batch_size)
    # EarlyStopping would end training once val_loss stops improving for 2
    # epochs; it is currently NOT passed to fit() (see callbacks below).
    early_stop = tf.keras.callbacks.EarlyStopping(monitor = 'val_loss', patience = 2)
    model.fit(
        train_data,
        epochs=epochs,
        validation_data=valid_data,
        steps_per_epoch=1000,
        #validation_steps=int((valid_len - 1) / batch_size + 1),
        validation_steps=1,
        #callbacks=[tensorboard_callback, early_stop])
        callbacks=[tensorboard_callback])
    loss, rmse, acc= model.evaluate(test_data)
    # argmax over the 9 model outputs -> predicted class index per example.
    pred = np.argmax(model.predict(test_data), axis=1)
    print("\n\n\n TEST PREDICTION \n\n\n")
    print("\n Prediction should be:")
    print(test_labels)
    print("\n Prediction")
    print(pred)
    print("\n\n\n TEST PREDICTION END \n\n\n")
    # num_classes: the possible number of labels the classification task has.
    confusion = tf.math.confusion_matrix(
        labels=tf.constant(test_labels),
        predictions=tf.constant(pred),
        num_classes=9)
    print(confusion)
    print("Loss: {}, RMSE: {}, Accuracy: {}".format(loss, rmse, acc))
    # Convert the model to the TensorFlow Lite format without quantization
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
    converter._experimental_lower_tensor_list_ops = False
    tflite_model = converter.convert()
    # Save the model to disk
    open("model.tflite", "wb").write(tflite_model)
    # Convert the model to the TensorFlow Lite format with quantization
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
    converter._experimental_lower_tensor_list_ops = False
    tflite_model = converter.convert()
    # Save the model to disk
    open("model_quantized.tflite", "wb").write(tflite_model)
    # Report on-disk sizes so the quantization win is visible.
    basic_model_size = os.path.getsize("model.tflite")
    print("Basic model is %d bytes" % basic_model_size)
    quantized_model_size = os.path.getsize("model_quantized.tflite")
    print("Quantized model is %d bytes" % quantized_model_size)
    difference = basic_model_size - quantized_model_size
    print("Difference is %d bytes" % difference)
if __name__ == "__main__":
    #print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
    # CLI: --model CNN|LSTM selects the family, --modelnumber selects the
    # architecture variant inside build_cnn()/build_lstm().
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m")
    parser.add_argument("--modelnumber", "-mn")
    args = parser.parse_args()
    #args.model = "LSTM"
    #args.modelnumber = "0"
    # Data window size.  Experiment notes: 2988, 640, 128, 64, 10 tried;
    # seq 128 -> RMSE ~1.38 (early stop after 17 epochs), seq 20 -> RMSE ~2.3
    # (10 epochs).  Very small windows hurt because too little temporal
    # context is visible per example.
    seq_length = 20
    print("Start to load data...")
    train_len, train_data, valid_len, valid_data, test_len, test_data = \
        load_data("./Data/train/train.json", "./Data/valid/valid.json", "./Data/test/test.json", seq_length)
    print("Start to build net...")
    model, model_path = build_net(args, seq_length)
    # Per-run TensorBoard directory tagged with the architecture name set by
    # the builder (global model_name).
    logdir = "logs/scalars/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + model_name
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
    print("Start training...")
    train_net(model, model_path, train_len, train_data, valid_len, valid_data,
              test_len, test_data, args.model)
    print("Training finished!")
#LIST OF TESTED LSTM MODELS
"""
#Loss: 2.5077505111694336, RMSE: 1.583587884902954 -> 5 epochs
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(20),
input_shape=(seq_length, 10)), # output_shape=(batch, 44)
#tf.keras.layers.Dropout(0.2),
#tf.keras.layers.Flatten(),
tf.keras.layers.Dense(11, activation="sigmoid") # (batch, 4)
])
model.summary()
"""
"""
#good model 2 -> RMSE 1.4 ohne dropout layer 24epochs batch 64 seq 20-> mit dropout layer RMSE
#22.11.2023 - 14:34
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(100, return_sequences = True),
input_shape=(seq_length, 10)), # output_shape=(batch, 44)
tf.keras.layers.LSTM(100),
tf.keras.layers.Dropout(0.2),
#tf.keras.layers.Dense(11, activation="sigmoid") # (batch, 4)
tf.keras.layers.Dense(11)#, activation="relu") # (batch, 4)
#tf.keras.layers.Dense(11, activation="linear") # (batch, 4)
])
"""
"""
model = tf.keras.Sequential([
tf.keras.layers.InputLayer((seq_length,15)),
#tf.keras.layers.LSTM(100, return_sequences = True),
tf.keras.layers.LSTM(100),
#tf.keras.layers.LSTM(50),
#tf.keras.layers.Dense(8, activation = 'relu'),
#tf.keras.layers.Dense(30, activation = 'relu'),
tf.keras.layers.Dense(11, activation = 'linear')
#tf.keras.layers.Dense(11, activation = 'softmax')
])
"""
"""
model = tf.keras.Sequential([
tf.keras.layers.InputLayer((seq_length,15)),
#tf.keras.layers.LSTM(100, return_sequences = True),
tf.keras.layers.LSTM(15, return_sequences = True),
tf.keras.layers.LSTM(30),
tf.keras.layers.Dense(15),
#tf.keras.layers.LSTM(50),
#tf.keras.layers.Dense(8, activation = 'relu'),
#tf.keras.layers.Dense(30, activation = 'relu'),
##tf.keras.layers.Dropout(0.1),
##tf.keras.layers.Flatten(),
tf.keras.layers.Dense(11, activation = 'softmax')
#tf.keras.layers.Dense(11, activation = 'softmax')
])
"""
"""
n_features = 15
model = tf.keras.Sequential()
model.add(tf.keras.layers.InputLayer((seq_length,n_features)))
model.add(tf.keras.layers.LSTM(15, return_sequences = True))
model.add(tf.keras.layers.LSTM(100, return_sequences = True))
model.add(tf.keras.layers.LSTM(50))
#model.add(tf.keras.layers.Dense(8, activation = 'relu'))
model.add(tf.keras.layers.Dense(11, activation = 'linear'))
model.summary()
"""
"""
#seq 2000 batch 16 -> RMSE 1.41 after 6 epochs
n_features = 15
model = tf.keras.Sequential()
model.add(tf.keras.layers.InputLayer((seq_length,n_features)))
model.add(tf.keras.layers.LSTM(100))
#model.add(tf.keras.layers.LSTM(100, return_sequences = True))
#model.add(tf.keras.layers.LSTM(50))
#model.add(tf.keras.layers.Dense(8, activation = 'relu'))
model.add(tf.keras.layers.Dense(11, activation = 'linear'))
model.summary()
"""
"""
n_features = 15
model = tf.keras.Sequential()
model.add(tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(100),
input_shape=(seq_length, 15)))
##model.add(tf.keras.layers.InputLayer((seq_length,n_features)))
##model.add(tf.keras.layers.LSTM(100))
###model.add(tf.keras.layers.LSTM(100))
###model.add(tf.keras.layers.LSTM(100))
#model.add(tf.keras.layers.LSTM(100, return_sequences = True))
#model.add(tf.keras.layers.LSTM(50))
#model.add(tf.keras.layers.Dense(8, activation = 'relu'))
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(11, activation="linear"))
model.summary()
"""
"""
#WORKING 0.9 RMSE
model = tf.keras.Sequential([
tf.keras.layers.InputLayer((seq_length,15)),
tf.keras.layers.LSTM(100, return_sequences = True),
tf.keras.layers.LSTM(100, return_sequences = True),
tf.keras.layers.LSTM(50),
#tf.keras.layers.Dense(8, activation = 'relu'),
tf.keras.layers.Dense(30, activation = 'relu'),
tf.keras.layers.Dense(11, activation = 'linear')
#tf.keras.layers.Dense(11, activation = 'softmax')
])
"""
"""
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(100),
input_shape=(seq_length, 15)),
#tf.keras.layers.LSTM(100, return_sequences = True),
#tf.keras.layers.LSTM(100, return_sequences = True),
#tf.keras.layers.LSTM(50),
tf.keras.layers.Dense(8, activation = 'relu'),
tf.keras.layers.Dense(1, activation = 'linear')
])
"""
"""
model = tf.keras.Sequential
model.add(tf.keras.layers.InputLayer((seq_length,15)))
model.add(tf.keras.layers.LSTM(100, return_sequences = True))
model.add(tf.keras.layers.LSTM(100, return_sequences = True))
model.add(tf.keras.layers.LSTM(50))
model.add(tf.keras.layers.Dense(8, activation = 'relu'))
model.add(tf.keras.layers.Dense(1, activation = 'linear'))
"""
#LIST OF TESTED CNN MODELS
"""
model_0 = tf.keras.Sequential(
[
#tf.keras.layers.Input(shape=input_shape),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu', kernel_initializer='he_uniform'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dropout(0.5),
#tf.keras.layers.Dense(num_classes_0, activation='softmax')
]
)
"""
"""
#good model
n_features = 10
model = tf.keras.Sequential()
model.add(tf.keras.layers.InputLayer((seq_length,n_features)))
#model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LSTM(70, return_sequences = True))
#model.add(tf.keras.layers.BatchNormalization())
#model.add(tf.keras.layers.LSTM(100, return_sequences = True))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.LSTM(50))
#model.add(tf.keras.layers.Dense(8, activation = 'relu'))
##model.add(tf.keras.layers.Dense(11, activation = 'linear'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(11, activation = 'linear'))
model.summary()
""" | leahimJarun/SensoGripProjectAiModel | train.py | train.py | py | 18,429 | python | en | code | 0 | github-code | 36 |
17754409752 | import tornado.ioloop
import tornado.web
import tornado.httpserver
import io
import os
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy import inspect
from sqlalchemy import text
from sqlalchemy.orm import sessionmaker
import mercantile
import pyproj
import yaml
import sys
import itertools
import re
def GetTM2Source(file):
    """Parse a TM2Source YAML file and return its contents.

    Uses yaml.safe_load: plain yaml.load without an explicit Loader is
    deprecated and can instantiate arbitrary Python objects from the input.
    """
    with open(file, 'r') as stream:
        return yaml.safe_load(stream)
def GeneratePrepared():
    """Build (and print) the PREPARE statement used to render one 'water' tile.

    We have land polygons but want ocean/sea polygons, so the inner query
    diffs the land in the tile against the unbuffered tile bbox and falls
    back to the full bbox when the tile is pure ocean (diffing against a
    whole-hemisphere geometry segfaults Postgres).  Hardcoded to
    north_osm_land_polygons_gen7 for speed.

    Returns:
        The full ``PREPARE gettile(...)`` SQL string with positional
        parameters $1..$4.
    """
    tile_geom_query = (
        "SELECT ST_AsMVTGeom(geometry,!bbox_nobuffer!,4096,0,true) AS mvtgeometry FROM ("
        " SELECT COALESCE(ST_Difference(!bbox_nobuffer!, ST_Union(ST_Intersection(geometry, !bbox_nobuffer!))), !bbox_nobuffer!) AS geometry FROM north_osm_land_polygons_gen7 WHERE geometry && !bbox_nobuffer! "
        ") AS x WHERE geometry IS NOT NULL AND NOT ST_IsEmpty(geometry) AND ST_AsMVTGeom(geometry,!bbox_nobuffer!,4096,0,true) IS NOT NULL"
    )
    base_query = (
        "SELECT ST_ASMVT('water', 4096, 'mvtgeometry', tile) FROM ("
        + tile_geom_query
        + ") AS tile WHERE tile.mvtgeometry IS NOT NULL"
    )
    # Swap the mapnik-style tokens for numbered PREPARE parameters.  Only
    # !bbox_nobuffer! actually occurs in base_query; the others are kept as
    # no-ops to stay aligned with the token set used elsewhere.
    query = base_query
    for token, param in (("!bbox_nobuffer!", "$1"),
                         ("!scale_denominator!", "$2"),
                         ("!pixel_width!", "$3"),
                         ("!pixel_height!", "$4")):
        query = query.replace(token, param)
    print(base_query)
    prepared = "PREPARE gettile(geometry, numeric, numeric, numeric) AS " + query + ";"
    print(prepared)
    return prepared
# Module-level startup: build the PREPAREd tile query once and open a single
# shared DB session used by every request (the server runs one process; see m()).
print("Starting up")
prepared = GeneratePrepared()
# Connection parameters come from the environment with openmaptiles defaults.
connection_string = 'postgresql://'+os.getenv('POSTGRES_USER','openmaptiles')+':'+os.getenv('POSTGRES_PASSWORD','openmaptiles')+'@'+os.getenv('POSTGRES_HOST','postgres')+':'+os.getenv('POSTGRES_PORT','5432')+'/'+os.getenv('POSTGRES_DB','openmaptiles')
engine = create_engine(connection_string)
inspector = inspect(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
print("Running prepare statement")
session.execute(prepared)
def bounds(zoom, x, y, buff):
    """Return the EPSG:3575 bounding box {'w','s','e','n'} of tile (zoom, x, y).

    Tile indices are shifted so the origin sits at the map centre with y
    growing northwards; ``buff`` expands the box by that fraction of a tile
    on every side.
    """
    print('Tile', zoom, x, y, 'with buffer', buff)
    extent_m = 2 * 2 ** 0.5 * 6371007.2  # full map width in metres (arctic LAEA)
    tiles_per_axis = 2 ** zoom
    # Re-centre the indices: column grows eastwards, row northwards.
    col = x - 2 ** (zoom - 1)
    row = -(y - 2 ** (zoom - 1)) - 1
    tile_m = extent_m / tiles_per_axis  # tiles are square
    west, south = (col - buff) * tile_m, (row - buff) * tile_m
    east, north = (col + 1 + buff) * tile_m, (row + 1 + buff) * tile_m
    print("Zoom, buffer", zoom, buff)
    print("West: ", west)
    print("South: ", south)
    print("East: ", east)
    print("North: ", north)
    return {'w': west, 's': south, 'e': east, 'n': north}
def zoom_to_scale_denom(zoom):  # For !scale_denominator!
    """Return the map scale denominator for *zoom* as a string.

    Based on
    https://github.com/openstreetmap/mapnik-stylesheets/blob/master/zoom-to-scale.txt
    adapted to the arctic EPSG:3575 extent and 512px tiles (equivalent to
    asking one zoom level higher with 256px tiles).
    """
    extent_m = 2 * 2 ** 0.5 * 6371007.2  # full map width in metres (arctic)
    px_per_tile = 512.0
    metres_per_px = 0.00028  # OGC standardized rendering pixel size
    map_px = px_per_tile * (2.0 ** zoom)
    return str(extent_m / (map_px * metres_per_px))
def replace_tokens(query, tilebounds, scale_denom, z):
    """Substitute mapnik-style !tokens! in *query* with concrete tile values.

    ``!bbox!`` becomes an EPSG:3575 ST_MakeBox2D expression built from
    *tilebounds*; pixel width/height are fixed at 512.  ``z`` is unused but
    kept for call-site compatibility.
    """
    south = str(tilebounds['s'])
    west = str(tilebounds['w'])
    north = str(tilebounds['n'])
    east = str(tilebounds['e'])
    bbox_sql = ("ST_SetSRID(ST_MakeBox2D(ST_Point(" + west + ", " + south +
                "), ST_Point(" + east + ", " + north + ")), 3575)")
    out = query.replace("!bbox!", bbox_sql)
    out = out.replace("!scale_denominator!", scale_denom)
    out = out.replace("!pixel_width!", "512")
    out = out.replace("!pixel_height!", "512")
    return out
def get_mvt(zoom, x, y):
    """Render the MVT 'water' tile for tile coordinates (zoom, x, y).

    Returns the concatenated protobuf layer bytes on success, or the int 1
    when the inputs are not numeric (legacy behaviour kept for existing
    callers).
    """
    try:  # Sanitize the inputs
        sani_zoom, sani_x, sani_y = float(zoom), float(x), float(y)
        del zoom, x, y
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures mean a
        # suspicious request; anything else should surface normally.
        print('suspicious')
        return 1
    scale_denom = zoom_to_scale_denom(sani_zoom)
    tilebounds = bounds(sani_zoom, sani_x, sani_y, 0)
    final_query = "EXECUTE gettile(!bbox!, !scale_denominator!, !pixel_width!, !pixel_height!);"
    sent_query = replace_tokens(final_query, tilebounds, scale_denom, sani_zoom)
    print(sani_zoom, sani_x, sani_y, sent_query)
    response = list(session.execute(sent_query))
    # Each result row holds one (possibly NULL) layer blob; drop the NULLs.
    layers = filter(None, list(itertools.chain.from_iterable(response)))
    final_tile = b''
    for layer in layers:
        final_tile = final_tile + io.BytesIO(layer).getvalue()
    return final_tile
class GetTile(tornado.web.RequestHandler):
    """Tornado handler serving one vector tile per request."""

    def get(self, zoom, x, y):
        # Protobuf payload with permissive CORS so browser map clients can
        # fetch tiles cross-origin.
        self.set_header("Content-Type", "application/x-protobuf")
        self.set_header("Content-Disposition", "attachment")
        self.set_header("Access-Control-Allow-Origin", "*")
        tile_bytes = get_mvt(zoom, x, y)
        self.write(tile_bytes)
def m():
    """Configure and start the tornado HTTP server on port 8080 (blocking).

    NOTE(review): the __main__ guard lives *inside* this function while m()
    is called unconditionally at module level -- this works when run as a
    script, but the server never starts if the module is imported.
    """
    if __name__ == "__main__":
        # Tile endpoint plus a static-file fallback serving the demo viewer.
        application = tornado.web.Application([
            (r"/tiles/([0-9]+)[/_]([0-9]+)[/_]([0-9]+).pbf", GetTile),
            (r"/([^/]*)", tornado.web.StaticFileHandler, {"path": "./static", "default_filename": "index_3575.html"})
        ])
        server = tornado.httpserver.HTTPServer(application)
        server.bind(8080)
        server.start(1)  # single process; all requests share the module-level DB session
        print("Postserve started..")
        #application.listen(8080)
        tornado.ioloop.IOLoop.instance().start()
m()
| gbif/gbif-basemaps | polar-water-tiles/polar-water-preview/server_3575.py | server_3575.py | py | 6,778 | python | en | code | 1 | github-code | 36 |
11577553681 | # 10798
# Read five words, one per line, as lists of characters.
words = [list(input()) for _ in range(5)]
# Read the grid column by column (words are at most 15 characters long),
# skipping positions where a word is shorter than the current column.
word = ''
for col in range(15):
    for row in range(5):
        try:
            word += words[row][col]
        except IndexError:
            continue
print(word) | starcat37/Algorithm | BOJ/Bronze/10798.py | 10798.py | py | 212 | python | en | code | 0 | github-code | 36 |
73683828585 | from typing import Optional, Tuple
import numpy as np
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from src.datamodules.components.diarization_dataset import (
DiarizationDataset,
DiarizationDatasetforInfer,
)
def collate_fn(batch):
    """Pad a batch of variable-length (features, labels, length) examples.

    Each element of ``batch`` is a tuple ``(y, t, ilen)`` where ``y`` is a
    2-D feature array, ``t`` a 2-D label array and ``ilen`` the unpadded
    frame count (assumed equal to ``len(y)``/``len(t)``).  Features are
    padded with -1 and labels with +1 up to the longest example.

    Returns:
        ys:    float32 tensor of shape (batch, max_len, feat_dim)
        ts:    float32 tensor of shape (batch, max_len, n_speakers)
        ilens: int32 tensor of shape (batch,)
    """
    ys, ts, ilens = list(zip(*batch))
    ilens = np.array(ilens)
    # Hoist the max once instead of recomputing it per example.
    max_len = np.max(ilens)
    ys = np.array(
        [
            np.pad(y, [(0, max_len - len(y)), (0, 0)], "constant", constant_values=(-1,))
            for y in ys
        ]
    )
    ts = np.array(
        [
            np.pad(t, [(0, max_len - len(t)), (0, 0)], "constant", constant_values=(+1,))
            for t in ts
        ]
    )
    # ys/ts/ilens are already ndarrays; no need to re-wrap them in np.array
    # before handing them to torch.
    ys = torch.from_numpy(ys).to(torch.float32)
    ts = torch.from_numpy(ts).to(torch.float32)
    ilens = torch.from_numpy(ilens).to(torch.int32)
    return ys, ts, ilens
class DiarizationDataModule(LightningDataModule):
    """LightningDataModule bundling train/val/test speaker-diarization datasets.

    Train and validation splits use ``DiarizationDataset``; the test split
    uses ``DiarizationDatasetforInfer``.  All feature-extraction settings
    (frame size/shift, subsampling, transform, ...) are forwarded verbatim
    to the dataset classes.

    Args:
        data_dirs: (train_dir, val_dir, test_dir) paths, in that order.
        chunk_size: frames per training chunk (after subsampling — TODO confirm
            against DiarizationDataset).
        context_size: presumably frames of left/right context stacked per
            feature — verify in DiarizationDataset.
        frame_size: STFT window length in samples.
        frame_shift: STFT hop length in samples.
        subsampling: temporal subsampling factor.
        sample_rate: audio sample rate in Hz.
        input_transform: name of the feature transform (e.g. "logmel23_mn").
        n_speakers: fixed speaker count, or None (semantics decided by the
            dataset classes).
        batch_sizes: (train, val, test) batch sizes; the test loader uses no
            padding collation, so its batch size is typically 1.
        num_workers: DataLoader worker process count (shared by all loaders).
    """

    def __init__(
        self,
        data_dirs: Tuple[str, str, str],
        chunk_size: int = 2000,
        context_size: int = 7,
        frame_size: int = 1024,
        frame_shift: int = 256,
        subsampling: int = 10,
        sample_rate: int = 8000,
        input_transform: str = "logmel23_mn",
        n_speakers: Optional[int] = None,
        batch_sizes: Tuple[int, int, int] = (64, 64, 1),
        num_workers: int = 0,
    ):
        super().__init__()
        # this line allows to access init params with 'self.hparams' attribute
        self.save_hyperparameters(logger=False)
        # Populated lazily in setup(); None until then.
        self.data_train: Optional[Dataset] = None
        self.data_val: Optional[Dataset] = None
        self.data_test: Optional[Dataset] = None

    def prepare_data(self) -> None:
        # Nothing to download or pre-process; data_dirs must already exist.
        pass

    def setup(self, stage: Optional[str] = None) -> None:
        """Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.
        This method is called by lightning when doing `trainer.fit()` and `trainer.test()`,
        so be careful not to execute the random split twice! The `stage` can be used to
        differentiate whether it's called before trainer.fit()` or `trainer.test()`.
        """
        # Build the datasets only once, even if setup() runs for both stages.
        if not self.data_train and not self.data_val and not self.data_test:
            train_dir, val_dir, test_dir = self.hparams.data_dirs
            self.data_train = DiarizationDataset(
                data_dir=train_dir,
                chunk_size=self.hparams.chunk_size,
                context_size=self.hparams.context_size,
                frame_size=self.hparams.frame_size,
                frame_shift=self.hparams.frame_shift,
                subsampling=self.hparams.subsampling,
                sample_rate=self.hparams.sample_rate,
                input_transform=self.hparams.input_transform,
                n_speakers=self.hparams.n_speakers,
            )
            self.data_val = DiarizationDataset(
                data_dir=val_dir,
                chunk_size=self.hparams.chunk_size,
                context_size=self.hparams.context_size,
                frame_size=self.hparams.frame_size,
                frame_shift=self.hparams.frame_shift,
                subsampling=self.hparams.subsampling,
                sample_rate=self.hparams.sample_rate,
                input_transform=self.hparams.input_transform,
                n_speakers=self.hparams.n_speakers,
            )
            # Test split uses the inference dataset variant (no chunk padding).
            self.data_test = DiarizationDatasetforInfer(
                data_dir=test_dir,
                chunk_size=self.hparams.chunk_size,
                context_size=self.hparams.context_size,
                frame_size=self.hparams.frame_size,
                frame_shift=self.hparams.frame_shift,
                subsampling=self.hparams.subsampling,
                sample_rate=self.hparams.sample_rate,
                input_transform=self.hparams.input_transform,
                n_speakers=self.hparams.n_speakers,
            )

    def train_dataloader(self):
        """Shuffled training loader; pads variable-length chunks via collate_fn."""
        return DataLoader(
            dataset=self.data_train,
            batch_size=self.hparams.batch_sizes[0],
            num_workers=self.hparams.num_workers,
            shuffle=True,
            collate_fn=collate_fn,
        )

    def val_dataloader(self):
        """Deterministic validation loader with the same padding collation."""
        return DataLoader(
            dataset=self.data_val,
            batch_size=self.hparams.batch_sizes[1],
            num_workers=self.hparams.num_workers,
            shuffle=False,
            collate_fn=collate_fn,
        )

    def test_dataloader(self):
        # No collate_fn here: inference samples are not padded/batched the
        # same way, so batch_sizes[2] is expected to be 1.
        return DataLoader(
            dataset=self.data_test,
            batch_size=self.hparams.batch_sizes[2],
            num_workers=self.hparams.num_workers,
            shuffle=False,
        )
| DaseiNaN/Speech-Diarization | src/datamodules/diarization_datamodule.py | diarization_datamodule.py | py | 4,687 | python | en | code | 1 | github-code | 36 |
23495813882 | import datetime
import tkinter.messagebox as tm
from tkinter import *
import tkinter.ttk as ttk
import sqlite3
from PIL import ImageTk,Image
# Logo image shown at the top of the admission-form window.
path="logo1.png"
# Running total of the six Class-12 subject marks; the Numberonly*
# callbacks accumulate into it.  NOTE(review): shadows the builtin `sum`
# and is never reset between submissions — confirm that is intended.
sum=0
def myfunction(event):
    """Refresh the canvas scroll region whenever the inner frame resizes."""
    region = canvas.bbox("all")
    canvas.configure(scrollregion=region, width=1328, height=455)
def Numberonly1(event):
    """Add the first subject mark (English) to the running total."""
    global sum
    # m1.get() comes from a Tk widget that is set with string values
    # elsewhere ("0"), so convert before adding: `int + str` raises
    # TypeError and sum/6 in Numberonly16 needs a number.
    sum += int(m1.get())
def Numberonly2(event):
    """Add the second subject mark (Vernacular) to the running total."""
    global sum
    # Convert the widget's string value before adding (see Numberonly1).
    sum += int(m2.get())
def Numberonly3(event):
    """Add the third subject mark (Mathematics) to the running total."""
    global sum
    # Convert the widget's string value before adding (see Numberonly1).
    sum += int(m3.get())
def Numberonly4(event):
    """Add the fourth subject mark (Physics) to the running total."""
    global sum
    # Convert the widget's string value before adding (see Numberonly1).
    sum += int(m4.get())
def Numberonly5(event):
    """Add the fifth subject mark (Chemistry) to the running total."""
    global sum
    # Convert the widget's string value before adding (see Numberonly1).
    sum += int(m5.get())
def Numberonly6(event):
    """Add the sixth subject mark (Computer Science) to the running total."""
    global sum
    # Convert the widget's string value before adding (see Numberonly1).
    sum += int(m6.get())
def Numberonly16():
    # Publish the accumulated total and the average of the six subject
    # marks into the Tk variables (s = aggregate, answer = percentage).
    global sum
    s.set(sum)
    # Average over the six subjects, rounded to two decimals.
    avg = (sum / 6)
    # NOTE(review): `sum` is never reset here, so triggering this again
    # after re-entering marks keeps adding to the old total — confirm.
    answer.set(round(avg, 2))
def logged():
    """Pop an info dialog confirming the record was saved, with a timestamp."""
    timestamp = str(datetime.datetime.now())
    tm.showinfo("Log", "Entry created successfully at " + timestamp)
def Database():
    """Open (or create) Student.db and ensure the STUDENT table exists.

    Publishes the open connection and cursor through the module-level
    globals ``conn`` and ``cursor`` for the other callbacks to use.
    """
    global conn, cursor
    ddl = "CREATE TABLE IF NOT EXISTS STUDENT (SNO INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,FirstName TEXT, MiddleName TEXT, LastName TEXT, DateOfBirth INTEGER, MonthOfBirth TEXT, YearOfBirth INTEGER, Gender TEXT, EmailID TEXT, Contact1 TEXT, Contact2 TEXT, Hobbies TEXT, PermanentAddress TEXT, Pincode TEXT, Locality TEXT, City TEXT, PO TEXT, PS TEXT, Lifestyle TEXT, State TEXT, Country TEXT, ParentsName TEXT, ParentsAddress TEXT, ParentsOccupation TEXT, ParentsContact TEXT, ParentsEmail TEXT, GuardianName TEXT, GuardianAddress TEXT, GuardianOccupation TEXT, GuardianContact TEXT, GuardianEmail TEXT, Class12Stream TEXT, English INTEGER, Vernacular INTEGER, Mathematics INTEGER, Physics INTEGER, Chemistry INTEGER, ComputerScience INTEGER, Class12Percentage INTEGER, Class12Aggregate INTEGER)"
    conn = sqlite3.connect("Student.db")
    cursor = conn.cursor()
    cursor.execute(ddl)
    conn.commit()
def Errorcheck1(event):
    """Reject the first name if it contains any non-alphabetic character."""
    for ch in firstname.get():
        code = ord(ch)
        is_letter = (65 <= code <= 90) or (97 <= code <= 122)
        if not is_letter:
            tm.showerror("Error", "Invalid First Name")
            tm.showinfo("my message", "Re-enter your first name")
            firstname.set("")
            break
def Errorcheck2(event):
    """Reject the middle name if it contains any non-alphabetic character."""
    for ch in middlename.get():
        code = ord(ch)
        is_letter = (65 <= code <= 90) or (97 <= code <= 122)
        if not is_letter:
            tm.showerror("Error", "Invalid Middle Name")
            tm.showinfo("my message", "Re-enter your Middle name")
            middlename.set("")
            break
def Errorcheck3(event):
    """Reject the last name if it contains any non-alphabetic character.

    Bug fix: the re-enter prompt previously said "Middle name" (copied
    from Errorcheck2); it now correctly asks for the last name.
    """
    str1 = lastname.get()
    for i in range(len(str1)):
        p1 = str1[i]
        p2 = ord(p1)
        if ((p2 < 65) or ((p2 > 90) and (p2 < 97)) or (p2 > 122)):
            tm.showerror("Error", "Invalid Last Name")
            tm.showinfo("my message", "Re-enter your Last name")
            lastname.set("")
            break
def Errorcheck9(event):
    """Reject the parents' name if it contains anything but letters/spaces.

    Bug fix: the original condition included ``or (p2 != 32)``, which is
    true for every letter as well, so *any* non-empty input was rejected.
    The space test must be combined with ``and`` so letters still pass.
    """
    for ch in parent.get():
        code = ord(ch)
        allowed = code == 32 or (65 <= code <= 90) or (97 <= code <= 122)
        if not allowed:
            tm.showerror("Error", "Invalid Parents Name")
            tm.showinfo("my message", "Re-enter your Parents name")
            parent.set("")
            break
def Errorcheck10(event):
    """Reject the guardian's name if it contains anything but letters/spaces.

    Bug fix: the original condition included ``or (p2 != 32)``, which is
    true for every letter as well, so *any* non-empty input was rejected.
    The space test must be combined with ``and`` so letters still pass.
    """
    for ch in guardian.get():
        code = ord(ch)
        allowed = code == 32 or (65 <= code <= 90) or (97 <= code <= 122)
        if not allowed:
            tm.showerror("Error", "Invalid Guardian Name")
            tm.showinfo("my message", "Re-enter your Guardian name")
            guardian.set("")
            break
def Errorcheck4(event):
    """Validate the selected day/month/year combination.

    The original check only rejected 29+ February in non-leap years; it let
    through Feb 30/31 in leap years and day 31 in 30-day months.  This
    version checks the real length of every month.
    """
    days_in_month = {
        "January": 31, "February": 28, "March": 31, "April": 30,
        "May": 31, "June": 30, "July": 31, "August": 31,
        "September": 30, "October": 31, "November": 30, "December": 31,
    }
    try:
        # int() raises ValueError while the comboboxes still show the
        # "DATE"/"YEAR" placeholder text, matching the original behaviour.
        day = int(cl6a.get())
        month = cl6b.get()
        year = int(cl6c.get())
        if month not in days_in_month:
            return  # month not chosen yet; nothing to validate
        leap = (year % 400 == 0) or (year % 4 == 0 and year % 100 != 0)
        limit = days_in_month[month]
        if month == "February" and leap:
            limit = 29
        if day > limit:
            tm.showerror("Error", "Invalid Date Entered")
            tm.showinfo("my message", "Re-enter your Date Of Birth")
            cl6a.set("")
            cl6b.set("")
            cl6c.set("")
    except ValueError as ve:
        print(ve)
def Errorcheck5(event):
    """Clear contact number 1 when it exceeds ten characters."""
    if len(phone1.get()) > 10:
        tm.showerror("Error", "Invalid Contact Number Entered")
        tm.showinfo("my message", "Re-enter your Contact Number")
        phone1.set("")
def Errorcheck7(event):
    """Clear the parents' contact number when it exceeds ten characters."""
    if len(phone3.get()) > 10:
        tm.showerror("Error", "Invalid Contact Number Entered")
        tm.showinfo("my message", "Re-enter your Contact Number")
        phone3.set("")
def Errorcheck6(event):
    """Clear contact number 2 when it exceeds ten characters."""
    if len(phone2.get()) > 10:
        tm.showerror("Error", "Invalid Contact Number Entered")
        tm.showinfo("my message", "Re-enter your Contact Number")
        phone2.set("")
def Errorcheck8(event):
    """Clear the guardian's contact number when it exceeds ten characters."""
    if len(phone4.get()) > 10:
        tm.showerror("Error", "Invalid Contact Number Entered")
        tm.showinfo("my message", "Re-enter your Contact Number")
        phone4.set("")
def DatabaseAdd():
    """Insert one student record from every form field, then reset the form.

    NOTE(review): the value tuple must stay in exactly the same order as
    the 39-column list in the INSERT statement — keep them in sync.
    """
    Database()
    global conn, cursor
    # Every widget value is stringified; sqlite coerces the numeric
    # columns (marks, percentage, aggregate) on insert.
    cursor.execute(
        "INSERT INTO STUDENT(FirstName, MiddleName, LastName, DateOfBirth, MonthOfBirth, YearOfBirth, Gender, EmailID, Contact1, Contact2, Hobbies, PermanentAddress, Pincode, Locality, City, PO, PS, Lifestyle, State, Country, ParentsName, ParentsAddress, ParentsOccupation, ParentsContact, ParentsEmail, GuardianName, GuardianAddress, GuardianOccupation, GuardianContact, GuardianEmail, Class12Stream, English, Vernacular, Mathematics, Physics, Chemistry, ComputerScience, Class12Percentage, Class12Aggregate) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
        (str(firstname.get()), str(middlename.get()), str(lastname.get()), str(cl6a.get()), str(cl6b.get()),
         str(cl6c.get()), str(i1.get()), str(email1.get()), str(phone1.get()), str(phone2.get()), str(hobby.get()),
         str(address1.get()), str(pincode.get()), str(locality.get()), str(city.get()), str(po.get()),
         str(ps.get()), str(i2.get()), str(state.get()), str(cl7a.get()), str(parent.get()),
         str(parentaddress.get()), str(parentoccupation.get()), str(phone3.get()), str(email2.get()),
         str(guardian.get()), str(guardaddress.get()), str(guardoccupation.get()), str(phone4.get()),
         str(email3.get()), str(c31a.get()), str(m1.get()), str(m2.get()), str(m3.get()), str(m4.get()),
         str(m5.get()), str(m6.get()), str(answer.get()), str(s.get())))
    conn.commit()
    # Reset the whole form: text fields back to "", mark fields to "0".
    firstname.set(""), middlename.set(""), lastname.set(""), cl6a.set(""), cl6b.set(""), cl6c.set(""), i1.set(
        ""), email1.set(""), phone1.set(""), phone2.set(""), hobby.set(""), address1.set(""), pincode.set(
        ""), locality.set(""), city.set(""), po.set(""), ps.set(""), i2.set(""), state.set(""), cl7a.set(
        ""), parent.set(""), parentaddress.set(""), parentoccupation.set(""), phone3.set(""), email2.set(
        ""), guardian.set(""), guardaddress.set(""), guardoccupation.set(""), phone4.set(""), email3.set(
        ""), c31a.set(""), m1.set("0"), m2.set("0"), m3.set("0"), m4.set("0"), m5.set("0"), m6.set("0"), answer.set(
        "0"), s.set("0")
    cursor.close()
    conn.close()
    # Confirmation dialog with a timestamp.
    logged()
def DatabaseView():
    """Open the 'View Contents' window with View All / Search / Reset controls.

    The original built two byte-for-byte identical 40-column Treeviews with
    hundreds of chained heading()/column() calls; that construction is now
    factored into one helper driven by the column list.  Also fixes Search
    leaving the sqlite connection open when the search box was empty, and
    unifies the 'PO'/'PS' column captions that differed between the two
    windows (display-only change).
    """
    Database()
    global conn, cursor
    frame1 = Toplevel()
    frame1.title("View Contents")
    # Centre the small control window on the screen.
    w = 450
    h = 75
    ws = root.winfo_screenwidth()
    hs = root.winfo_screenheight()
    x = (ws / 2) - (w / 2)
    y = (hs / 2) - (h / 2)
    frame1.geometry('%dx%d+%d+%d' % (w, h, x, y))

    # One entry per STUDENT column, in table order (matches the DDL).
    columns = (
        "SNo", "FirstName", "MiddleName", "LastName", "DateOfBirth",
        "MonthOfBirth", "YearOfBirth", "Gender", "EmailID", "Contact1",
        "Contact2", "Hobbies", "PermanentAddress", "Pincode", "Locality",
        "City", "PO", "PS", "Lifestyle", "State", "Country", "ParentsName",
        "ParentsAddress", "ParentsOccupation", "ParentsContact",
        "ParentsEmail", "GuardianName", "GuardianAddress",
        "GuardianOccupation", "GuardianContact", "GuardianEmail",
        "Class12Stream", "English", "Vernacular", "Mathematics", "Physics",
        "Chemistry", "ComputerScience", "Class12Percentage",
        "Class12Aggregate",
    )

    def _make_tree(window):
        # Build the scrollable 40-column Treeview used by both result views.
        scrollbarx = Scrollbar(window, orient=HORIZONTAL)
        scrollbary = Scrollbar(window, orient=VERTICAL)
        tree = ttk.Treeview(window, columns=columns, selectmode=EXTENDED,
                            yscrollcommand=scrollbary.set,
                            xscrollcommand=scrollbarx.set)
        scrollbary.config(command=tree.yview)
        scrollbary.pack(side=RIGHT, fill=Y)
        scrollbarx.config(command=tree.xview)
        scrollbarx.pack(side=BOTTOM, fill=X)
        tree.column('#0', stretch=NO, minwidth=0, width=0)  # hide tree column
        for name in columns:
            tree.heading(name, text=name, anchor=CENTER)
            tree.column(name, stretch=NO, minwidth=0, width=140)
        tree.pack()
        return tree

    def Viewall():
        # Show every stored record in a fresh window.
        Database()
        tree = _make_tree(Toplevel())
        cursor.execute("SELECT * FROM STUDENT")
        conn.commit()
        for data in cursor.fetchall():
            tree.insert('', 'end', values=data)
        cursor.close()
        conn.close()

    def Search():
        # Show records whose first name contains the search text.
        Database()
        tree = _make_tree(Toplevel())
        if st.get() != "":
            cursor.execute("SELECT * FROM `STUDENT` WHERE `FirstName` LIKE ?",
                           ('%' + str(st.get()) + '%',))
            conn.commit()
            for data in cursor.fetchall():
                tree.insert('', 'end', values=data)
        # Close unconditionally: the original leaked the connection when
        # the search box was empty.
        cursor.close()
        conn.close()

    def Reset():
        st.set("")

    Button(frame1, text="View All", command=Viewall).pack(side=LEFT, anchor=N, padx=10, pady=10)
    Button(frame1, text="Search", command=Search).pack(side=LEFT, anchor=N, padx=10, pady=10)
    st = StringVar()
    Entry(frame1, textvariable=st, width=30).pack(side=LEFT, anchor=N, padx=5, pady=11)
    st.get()
    Button(frame1, text="Reset", command=Reset).pack(side=LEFT, anchor=N, padx=10, pady=10)
    frame1.resizable(0, 0)
def Exit():
    """Confirm with the user, then shut the application down cleanly."""
    result = tm.askquestion('Inventory Management v1.3', 'Are you sure you want to exit?', icon="warning")
    if result == 'yes':
        root.destroy()
        # conn/cursor only exist after Database() has run at least once;
        # the original raised NameError when exiting before any DB action.
        try:
            cursor.close()
            conn.close()
        except NameError:
            pass
        exit()
def Chnglog():
    """Show the version-history dialog."""
    history = ("v1.0 - Only GUI \nv1.1 - Accepts inputs and saves it to text file \n"
               "v1.2 - Open previous logs\nv1.3 - SQLite3 Database integration")
    tm.showinfo("Changelog", history)
def About():
    """Show the about dialog."""
    info = "Python GUI Project\nInventory Management v1.3"
    tm.showinfo("About", info)
root = Tk()
sizex = 5000
sizey = 4000
posx = 100
posy = 100
root.wm_geometry("%dx%d+%d+%d" % (sizex, sizey, posx, posy))
# create a drop down menu
menu = Menu(root)
root.title("Student Admission System")
root.config(menu=menu)
# file menu
file = Menu(menu, tearoff=0)
menu.add_cascade(label="File", menu=file)
file.add_command(label="Open File", command=DatabaseView)
file.add_separator()
file.add_command(label="Exit", command=Exit)
# help menu
hlp = Menu(menu, tearoff=0)
menu.add_cascade(label="Help", menu=hlp)
hlp.add_command(label="About", command=About)
hlp.add_command(label="Changelog", command=Chnglog)
myframe = Frame(root, relief=GROOVE, width=sizex, height=sizey, bd=1)
myframe.place(x=5, y=200)
img = ImageTk.PhotoImage(Image.open(path))
#The Label widget is a standard Tkinter widget used to display a text or image on the screen.
panel = Label(root, image = img)
#The Pack geometry manager packs widgets in rows or columns.
panel.place(x=40,y=30)
canvas = Canvas(myframe)
frame = Frame(canvas, bg="light blue")
myscrollbar1 = Scrollbar(myframe, orient="vertical", command=canvas.yview)
canvas.configure(yscrollcommand=myscrollbar1.set)
myscrollbar1.pack(side="right", fill="y")
myscrollbar2 = Scrollbar(myframe, orient="horizontal", command=canvas.xview)
canvas.configure(xscrollcommand=myscrollbar2.set)
myscrollbar2.pack(side="bottom", fill="x")
canvas.pack(side="left")
canvas.create_window((0, 0), window=frame, anchor='nw')
frame.bind("<Configure>", myfunction)
# data()
root.configure(bg="black")
label = Label(root, text="APPLICATION FORM OF ST.THOMAS' COLLEGE ")
label.config(font=("Baskerville Old Face", 34, 'bold'), fg="blue")
label.place(x=220, y=75)
l4s = Label(frame, text="Personal Details :-", bg="green", fg="yellow")
l4s.config(font=("Courier", 25, 'bold'))
l4s.grid(row=3, column=0, pady=50, sticky="W")
l5 = Label(frame, text="First Name", bg="light blue")
l5.config(font=("Aeril", 20))
l5.grid(row=5, column=0)
firstname = StringVar()
el5a = Entry(frame, width=30, textvariable=firstname)
el5a.config(font=("Aeril", 15))
el5a.bind('<Leave>',Errorcheck1)
el5a.grid(row=5, column=1, sticky="W", columnspan=2)
l5b = Label(frame, text="Middle Name", bg="light blue")
l5b.config(font=("Aeril", 20))
l5b.grid(row=6, column=0, pady=50)
middlename = StringVar()
el5b = Entry(frame, width=30, textvariable=middlename)
el5b.config(font=("Aeril", 15))
el5b.bind('<Leave>',Errorcheck2)
el5b.grid(row=6, column=1, sticky="W", columnspan=2)
l5c = Label(frame, text="Last Name", bg="light blue")
l5c.config(font=("Aeril", 20))
l5c.grid(row=7, column=0)
lastname = StringVar()
el5c = Entry(frame, width=30, textvariable=lastname)
el5c.config(font=("Aeril", 15))
el5c.bind('<Leave>',Errorcheck3)
el5c.grid(row=7, column=1, sticky="W", columnspan=2)
# DATE OF BIRTH
l6 = Label(frame, text="Date Of Birth", bg="light blue")
l6.config(font=("Aerial", 20))
l6.grid(row=8, column=0, pady=50)
cl6a = ttk.Combobox(frame, values=[i for i in range(1, 32)])
cl6a.set("DATE")
cl6a.bind("<<ComboboxSelected>>")
cl6a.config(font=("Aerial", 15), width='15')
cl6a.grid(row=8, column=1, sticky="W", columnspan=2)
cl6b = ttk.Combobox(frame,
values=["January", "February", "March", "April", "May", "June", "July", "August", "September",
"October", "November", "December"])
cl6b.set("MONTH")
cl6b.bind("<<ComboboxSelected>>")
cl6b.config(font=("Aerial", 15), width='15')
cl6b.place(x=690, y=411)
cl6c = ttk.Combobox(frame, values=[i for i in range(1975, 2019)])
cl6c.bind('<Leave>',Errorcheck4)
cl6c.set("YEAR")
cl6c.bind("<<ComboboxSelected>>")
cl6c.config(font=("Aerial", 15), width='15')
cl6c.place(x=920, y=411)
# GENDER
l7 = Label(frame, text="Gender", bg="light blue")
l7.config(font=("Aerial", 20))
l7.grid(row=9, column=0)
i1 = StringVar()
r1 = Radiobutton(frame, text="Male", value="Male", variable=i1)
r1.config(font=("Aerial", 15))
r1.grid(row=9, column=1, sticky="W", columnspan=2)
r2 = Radiobutton(frame, text="Female", value="Female", variable=i1)
r2.config(font=("Aerial", 15))
r2.place(x=610, y=496)
r3 = Radiobutton(frame, text="Others", value="Others", variable=i1)
r3.config(font=("Aerial", 15))
r3.place(x=780, y=496)
# EMAIL
l8 = Label(frame, text="Email ID", bg="light blue")
l8.config(font=("Aerial", 20))
l8.grid(row=10, column=0, pady=40)
email1 = StringVar()
el8 = Entry(frame, width=50, textvariable=email1)
el8.config(font=("Aeril", 15))
el8.grid(row=10, column=1, sticky="W")
# CONTACT NO 1
l9 = Label(frame, text="Contact Number 1", bg="light blue")
l9.config(font=("Aerial", 20))
l9.grid(row=11, column=0)
phone1 = StringVar()
el9 = Entry(frame, width=30, textvariable=phone1)
el9.bind('<Leave>',Errorcheck5)
el9.config(font=("Aeril", 15))
el9.grid(row=11, column=1, sticky="W")
# CONTACT NO 2
l10 = Label(frame, text="Contact Number 2", bg="light blue")
l10.config(font=("Aerial", 20))
l10.grid(row=12, column=0, pady=40)
phone2 = StringVar()
el10 = Entry(frame, width=30, textvariable=phone2)
el10.config(font=("Aeril", 15))
el10.bind('<Leave>',Errorcheck6)
el10.grid(row=12, column=1, sticky="W")
# HOBBIES
l11 = Label(frame, text="Hobbies", bg="light blue")
l11.config(font=("Aerial", 20))
l11.grid(row=14, column=0)
hobby = StringVar()
el11 = Entry(frame, width=50, textvariable=hobby)
el11.config(font=("Aeril", 15))
el11.grid(row=14, column=1, sticky="W")
l4s = Label(frame, text="Residential Details :-", bg="green", fg="yellow")
l4s.config(font=("Courier", 25, 'bold'))
l4s.grid(row=15, column=0, pady=50)
# PERMANENT ADDRESS
l12 = Label(frame, text="Permanent Address", bg="light blue")
l12.config(font=("Aerial", 20))
l12.grid(row=17, column=0)
address1 = StringVar()
el12 = Entry(frame, width=80, textvariable=address1)
el12.config(font=("Aeril", 15))
el12.grid(row=17, column=1, sticky="W")
# PINCODE
l13 = Label(frame, text="Pincode", bg="light blue")
l13.config(font=("Aerial", 20))
l13.grid(row=18, column=0, pady=50)
pincode = StringVar()
el13 = Entry(frame, width=15, textvariable=pincode)
el13.config(font=("Aeril", 15))
el13.grid(row=18, column=1, sticky="W")
# LOCALITY
l14 = Label(frame, text="Locality", bg="light blue")
l14.config(font=("Aerial", 20))
l14.grid(row=20, column=0)
locality = StringVar()
el14 = Entry(frame, width=20, textvariable=locality)
el14.config(font=("Aeril", 15))
el14.grid(row=20, column=1, sticky="W")
# CITY
l12 = Label(frame, text="City", bg="light blue")
l12.config(font=("Aerial", 20))
l12.grid(row=22, column=0, pady=45)
city = StringVar()
el12 = Entry(frame, width=20, textvariable=city)
el12.config(font=("Aeril", 15))
el12.grid(row=22, column=1, sticky="W")
# PO
l13 = Label(frame, text="Post Office(P.O)", bg="light blue")
l13.config(font=("Aerial", 20))
l13.grid(row=24, column=0)
po = StringVar()
el13 = Entry(frame, width=20, textvariable=po)
el13.config(font=("Aeril", 15))
el13.place(x=462, y=1335)
# PS
l14 = Label(frame, text="Police Station(P.S)", bg="light blue")
l14.config(font=("Aerial", 20))
l14.place(x=850, y=1330)
ps = StringVar()
el14 = Entry(frame, width=20, textvariable=ps)
el14.config(font=("Aeril", 15))
el14.place(x=1182, y=1335)
# Urban/rural
l15 = Label(frame, text="Lifestyle", bg="light blue")
l15.config(font=("Aerial", 20))
l15.grid(row=30, column=0, pady=45)
i2 = StringVar()
r1 = Radiobutton(frame, text="Urban", value="Urban", variable=i2)
r1.config(font=("Aerial", 15))
r1.grid(row=30, column=1, sticky="W", columnspan=2)
r2 = Radiobutton(frame, text="Rural", value="Rural", variable=i2)
r2.config(font=("Aerial", 15))
r2.place(x=600, y=1413)
# State
l16 = Label(frame, text="State", bg="light blue")
l16.config(font=("Aerial", 20,))
l16.grid(row=31, column=0, pady=10)
state = StringVar()
el16 = Entry(frame, width=20, textvariable=state)
el16.config(font=("Aeril", 15))
el16.grid(row=31, column=1, sticky="W")
# Country
l17 = Label(frame, text="Country", bg="light blue")
l17.config(font=("Aerial", 20,))
l17.grid(row=32, column=0, pady=30)
cl7a = ttk.Combobox(frame, values=["Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua & Barbuda",
"Argentina", "Armenia", "Australia", "Austria", "Azerbaijan", "Bahamas",
"Bahrai", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin",
"Bhutan", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil", "Brunei",
"Bulgaria", "Burkina Faso", "Burundi", "Cabo Verde", "Cambodia", "Cameroon",
"Canada", "Central African Republic (CAR)", "Chad", "Chile", "China", "Colombia",
"Comoros", "Costa Rica", "Cote d'Ivoire", "Croatia", "Cuba", "Cyprus", "Czechia",
"Denmark", "Djibouti", "Dominica", "Dominican Republic", "Ecuador", "Egypt",
"El Salvador", "Equatorial Guinea", "Eritrea", "Estonia",
"Eswatini (formerly Swaziland)", "Ethiopia", "Fiji", "Finland", "France",
"Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Greece", "Grenada",
"Guatemala", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Honduras", "Hungary",
"Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland", "Israel", "Italy",
"Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Kosovo",
"Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia",
"Libya", "Liechtenstein", "Lithuania", "Luxembourg", "Madagascar", "Malawi",
"Malaysia", "Maldives", "Mali", "Malt", "Marshall Islands", "Mauritius",
"Mexico", "Micronesia", "Moldova", "Monaco", "Mongolia", "Montenegro", "Morocco",
"Mozambique", "Myanmar(formerly Burma)", "Namibia", "Nauru"
, "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "North Korea",
"North Macedonia (formerly Macedonia)", "Norway", "Oman", "Pakistan", "Palau",
"Palestine", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines",
"Poland", "Portugal", "Qatar", "Romania", "Russia", "Rwanda",
"Saint Kitts and Nevis", "Saint Lucia", "Saint Vincent and the Grenadines",
"Samoa", "San Marino", "Sao Tome and Principe", "Saudi Arabia", "Senegal",
"Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia",
"Solomon Islands", "Somalia", "South Africa", "South Korea", "South Sudan",
"Spain", "Sri Lanka", "Sudan", "Suriname", "Sweden", "Switzerland", "Syria",
"Taiwan", "Tajikistan", "Tanzania", "Thailand", "Timor-Leste", "Togo,Tonga",
"Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu", "Uganda",
"Ukraine", "United Arab Emirates (UAE)", "United Kingdom (UK)",
"United States of America (USA)", "Uruguay", "Uzbekistan", "Vanuatu",
"Vatican City (Holy See)", "Venezuela", "Vietnam", "Yemen", "Zambia",
"Zimbabwe"])
cl7a.set("Select A Country")
cl7a.bind("<<ComboboxSelected>>")
cl7a.config(font=("Aerial", 15), width='30')
cl7a.grid(row=32, column=1, sticky="W", columnspan=2)
l18s = Label(frame, text="Parents' Details :-")
l18s.config(font=("Courier", 25, 'bold'))
l18s.grid(row=33, column=0, pady=40, sticky="W")
# Parent's name
l19 = Label(frame, text="Parents Name", bg="light blue")
l19.config(font=("Aerial", 20,))
l19.grid(row=34, column=0, pady=10)
parent = StringVar()
el19 = Entry(frame, width=20, textvariable=parent)
el19.config(font=("Aeril", 15))
el19.grid(row=34, column=1, sticky="W")
# Parent's address
l20 = Label(frame, text="Parents Address", bg="light blue")
l20.config(font=("Aerial", 20,))
l20.grid(row=35, column=0, pady=30)
parentaddress = StringVar()
el20 = Entry(frame, width=30, textvariable=parentaddress)
el20.config(font=("Aeril", 15))
el20.grid(row=35, column=1, sticky="W")
# Parent's occupation
l21 = Label(frame, text="Parents Occupation", bg="light blue")
l21.config(font=("Aerial", 20,))
l21.grid(row=36, column=0, pady=20)
parentoccupation = StringVar()
el21 = Entry(frame, width=20, textvariable=parentoccupation)
el21.config(font=("Aeril", 15))
el21.grid(row=36, column=1, sticky="W")
# Parents' contact
l22 = Label(frame, text="Parents Contact", bg="light blue")
l22.config(font=("Aerial", 20,))
l22.grid(row=37, column=0, pady=20)
phone3 = StringVar()
el22 = Entry(frame, width=20, textvariable=phone3)
el22.config(font=("Aeril", 15))
el22.bind('<Leave>',Errorcheck7)
el22.grid(row=37, column=1, sticky="W")
# Parents' email
l23 = Label(frame, text="Parents Email", bg="light blue")
l23.config(font=("Aerial", 20,))
l23.grid(row=38, column=0, pady=20)
email2 = StringVar()
el23 = Entry(frame, width=20, textvariable=email2)
el23.config(font=("Aeril", 15))
el23.grid(row=38, column=1, sticky="W")
# Guardian's Name
l24 = Label(frame, text="Guardian Name", bg="light blue")
l24.config(font=("Aerial", 20,))
l24.grid(row=39, column=0, pady=30)
guardian = StringVar()
el24 = Entry(frame, width=20, textvariable=guardian)
el24.config(font=("Aeril", 15))
el24.grid(row=39, column=1, sticky="W")
# Guardian's address
l25 = Label(frame, text="Guardian Address", bg="light blue")
l25.config(font=("Aerial", 20,))
l25.grid(row=40, column=0, pady=20)
guardaddress = StringVar()
el25 = Entry(frame, width=30, textvariable=guardaddress)
el25.config(font=("Aeril", 15))
el25.grid(row=40, column=1, sticky="W")
# Guardians' occupation
l26 = Label(frame, text="Guardian Occupation", bg="light blue")
l26.config(font=("Aerial", 20,))
l26.grid(row=41, column=0, pady=20)
guardoccupation = StringVar()
el26 = Entry(frame, width=20, textvariable=guardoccupation)
el26.config(font=("Aeril", 15))
el26.grid(row=41, column=1, sticky="W")
# Guardians' contact
l27 = Label(frame, text="Guardian Contact", bg="light blue")
l27.config(font=("Aerial", 20,))
l27.grid(row=42, column=0, pady=20)
phone4 = StringVar()
el27 = Entry(frame, width=20, textvariable=phone4)
el27.config(font=("Aeril", 15))
el27.bind('<Leave>',Errorcheck8)
el27.grid(row=42, column=1, sticky="W")
# Guardians' email
l28 = Label(frame, text="Guardian Email", bg="light blue")
l28.config(font=("Aerial", 20,))
l28.grid(row=43, column=0, pady=20)
email3 = StringVar()
el28 = Entry(frame, width=20, textvariable=email3)
el28.config(font=("Aeril", 15))
el28.grid(row=43, column=1, sticky="W")
l29s = Label(frame, text="Educational Details :-", bg="green", fg="yellow")
l29s.config(font=("Courier", 25, 'bold'))
l29s.grid(row=44, column=0, pady=40, sticky="W")
# Stream
l30 = Label(frame, text="Class 12 Stream", bg="light blue")
l30.config(font=("Aerial", 20,))
l30.grid(row=45, column=0, pady=30)
c31a = ttk.Combobox(frame, values=["PMC-Comp", "PMC-B", "PMC-Comm", "PMC-Arts"])
c31a.set("Class 12 Stream")
c31a.bind("<<ComboboxSelected>>")
c31a.config(font=("Aerial", 15), width='20')
c31a.grid(row=45, column=1, sticky="W", columnspan=2)
l30 = Label(frame, text="According to selection , choose your subjects and enter corresponding marks",
bg="light blue")
l30.config(font=("Aerial", 20,))
l30.grid(row=46, column=0, pady=30, columnspan=3, sticky="W")
m1 = IntVar()
m2 = IntVar()
m3 = IntVar()
m4 = IntVar()
m5 = IntVar()
m6 = IntVar()
answer = IntVar()
s = IntVar()
cb1 = Checkbutton(frame, text="English")
cb1.config(font=("Aerial", 15))
cb1.grid(row=47, column=0)
cben1 = Entry(frame, width=10, textvariable=m1)
cben1.config(font=("Aeril", 15))
cben1.bind("<Leave>", Numberonly1)
cben1.grid(row=47, column=1, sticky="W")
cb2 = Checkbutton(frame, text="Vernacular")
cb2.config(font=("Aerial", 15))
cb2.grid(row=48, column=0, pady=45)
cben2 = Entry(frame, width=10, textvariable=m2)
cben2.config(font=("Aeril", 15))
cben2.bind("<Leave>", Numberonly2)
cben2.grid(row=48, column=1, sticky="W")
cb3 = Checkbutton(frame, text="Mathematics")
cb3.config(font=("Aerial", 15))
cb3.grid(row=49, column=0, pady=15)
cben3 = Entry(frame, width=10, textvariable=m3)
cben3.config(font=("Aeril", 15))
cben3.bind("<Leave>", Numberonly3)
cben3.grid(row=49, column=1, sticky="W")
cb4 = Checkbutton(frame, text="Physics")
cb4.config(font=("Aerial", 15))
cb4.grid(row=50, column=0, pady=15)
cben4 = Entry(frame, width=10, textvariable=m4)
cben4.config(font=("Aeril", 15))
cben4.bind("<Leave>", Numberonly4)
cben4.grid(row=50, column=1, sticky="W")
cb5 = Checkbutton(frame, text="Chemistry")
cb5.config(font=("Aerial", 15))
cb5.grid(row=51, column=0, pady=15)
cben5 = Entry(frame, width=10, textvariable=m5)
cben5.config(font=("Aeril", 15))
cben5.bind("<Leave>", Numberonly5)
cben5.grid(row=51, column=1, sticky="W")
cb6 = Checkbutton(frame, text="Computer_Science")
cb6.config(font=("Aerial", 15))
cb6.grid(row=52, column=0, pady=15)
cben6 = Entry(frame, width=10, textvariable=m6)
cben6.config(font=("Aeril", 15))
cben6.bind("<Leave>", Numberonly6)
cben6.grid(row=52, column=1, sticky="W")
cal_but = Button(frame, padx=10, bd=7, font=("Helvetica", 10, "bold"), width=15, text="Calculate Percentage",
bg="blue", command=Numberonly16).grid(row=62, column=0, pady=10)
l35 = Label(frame, text="Class 12 percentage", bg="light blue")
l35.config(font=("Aerial", 20,))
l35.grid(row=53, column=0, pady=30)
cben16 = Entry(frame, width=10, textvariable=answer, state=DISABLED)
cben16.config(font=("Aeril", 15))
cben16.grid(row=53, column=1, sticky="W")
l36 = Label(frame, text="Class 12 Aggregate", bg="light blue")
l36.config(font=("Aerial", 20,))
l36.grid(row=54, column=0, pady=30)
cben17 = Entry(frame, width=10, textvariable=s, state=DISABLED)
cben17.config(font=("Aeril", 15))
cben17.grid(row=54, column=1, sticky="W")
cb19 = Checkbutton(frame,
text="I agree to the terms and conditions and hereby declare to abide by the rules and regulations of the college",
bg="light green")
cb19.config(font=("Aerial", 15))
cb19.grid(row=66, column=0, pady=15, columnspan=3)
sub_but = Button(frame, padx=10, bd=7, font=("Helvetica", 10, "bold"), width=15, text="SUBMIT", bg="red",
fg="white", command=DatabaseAdd).grid(row=67, column=0, padx=100)
# Thanks
l16p = Label(frame, text="Thank", bg="light blue")
l16p.config(font=("Aerial", 20))
l16p.grid(row=400, column=750)
# You
l15 = Label(frame, text="You", bg="light blue")
l15.config(font=("Aerial", 20))
l15.grid(row=400, column=800)
# So much
l15 = Label(frame, text="So Much Visit Again", bg="light blue")
l15.config(font=("Aerial", 20))
l15.grid(row=400, column=850)
root.mainloop()
| Adrish1999/Python-GUI | Reg_Form_Without_Login.py | Reg_Form_Without_Login.py | py | 54,535 | python | en | code | 0 | github-code | 36 |
74307505383 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 18 10:23:50 2017
@author: lcp5y3
"""
#----------------------------------------------------------------------------
# file of function which allow to decode data from uart protocole
# CRUBS_ll
#-----------------------------------------------------------------------------
#----------------------short--------------------------------------------------
# Sample buffers filled by the read_* decoders (one list per telemetry channel).
distance=[]
angle=[]
temps=[]
cmd_d=[]
cmd_a=[]
theta=[]
#-------------------------- PID gains (floats) -------------------------------
p_dist=0
i_dist=0
d_dist=0
p_ang=0
i_ang=0
d_ang=0
#------------ dispatch tables: protocol address -> destination buffer --------
# Integer placeholders mark addresses that have no buffer assigned yet.
char_table=[0,1,2,3,4,5,6]
int_table=[0,1,2,distance,angle,cmd_d]
short_table=[0,1,2,3,4,5,cmd_a]
flt_table=[0,1,2,3,p_dist,i_dist,d_dist,7,8,9,p_ang,i_ang,d_ang,theta]
#---------------- protocol parameters ----------------------------------------
pdt = 0.01 # time step used when displaying the time axis
b_int = 32
b_char = 8
b_short = 16
b_flt = 32
# 2-bit type tags packed into the header byte.
ch_mask = 0
sht_mask = 1
int_mask = 2
flt_mask = 3
byte_mask = 255
flt_coef = 1000  # floats travel as int(value * flt_coef)
seuil_max = 1000000000
size_int = 6
size_char = 3
size_short = 4
start_b = 252  # frame start byte
stop_b = 244  # frame stop byte
#-----------------------------------------------------------------------------
# function two's complement
#-----------------------------------------------------------------------------
def complementA2(variable, nb_bit):
    """Interpret *variable* as an nb_bit two's-complement word and return the signed value."""
    ones = (1 << nb_bit) - 1
    return -((variable - 1) ^ ones)
#transforme char en byte read like int
def char_to_byte(trame):
    """Replace, in place, every character of *trame* with its ordinal byte value."""
    trame[:] = [ord(ch) for ch in trame]
def checksum(data):
    """Return the one-byte checksum of *data* (sum of all bytes, masked to 8 bits)."""
    # sum() iterates the list directly; the old sum(data[:]) made a needless copy.
    return sum(data) & byte_mask
def base_temps(longueur):
    """Rebuild the global *temps* axis: *longueur* samples spaced bt seconds apart."""
    bt = 0.01
    temps[:] = [bt * (step + 1) for step in range(longueur)]
def clear():
    """Empty the global sample buffers in place.

    NOTE(review): cmd_a is not reset here -- confirm that is intentional.
    """
    for buffer in (distance, temps, angle, cmd_d, theta):
        buffer[:] = []
#-----------------------------------------------------------------------------
#reading functions
#-----------------------------------------------------------------------------
#read ca char with the protocole CRUBS_ll-------------------------------------
def read_char(trame,adresse,signe): # original note: sign handling still to be done here
    # Store the single payload byte; *signe* is currently ignored.
    char_table[adresse].append(trame[1])
#read an short with the protocole CRUBS_ll-------------------------------------
def read_sht(trame,adresse,signe):
    """Decode a big-endian short from *trame* into short_table[adresse].

    A non-zero *signe* marks the raw value as two's-complement negative.
    """
    raw = 0
    for byte in trame:
        raw = (raw << 8) + byte
    decoded = raw if signe == 0 else complementA2(raw, b_short)
    short_table[adresse].append(decoded)
#read an int with the protocole CRUBS_ll--------------------------------------
def read_int(trame,adresse,signe):
    """Decode a big-endian int from *trame* into int_table[adresse].

    A non-zero *signe* marks the raw value as two's-complement negative.
    """
    raw = 0
    for byte in trame:
        raw = (raw << 8) + byte
    decoded = raw if signe == 0 else complementA2(raw, b_int)
    int_table[adresse].append(decoded)
#read an int with the protocole CRUBS_ll--------------------------------------
def read_flt(data,adresse,signe):
    """Decode a scaled float (sent as int(value * flt_coef), big-endian) into flt_table[adresse]."""
    raw = 0
    for byte in data:
        raw = (raw << 8) + byte
    if signe != 0:
        raw = complementA2(raw, b_flt)
    flt_table[adresse].append(raw / flt_coef)
#function to detect the end of a trame---------------------------------------
def eot(trame):
    """Return True when *trame* is the protocol stop byte."""
    return trame == stop_b
def eo_transmit(trame):
    """Detect the end-of-transmission trailer.

    Returns True (and logs) when the last three bytes sum to 311 and the final
    byte is 100, False when a three-byte tail exists but does not match, and
    None when fewer than three bytes have been received.
    """
    if len(trame) < 3:
        return None
    if sum(trame[-3:]) == 311 and trame[-1] == 100:
        print("fin de transmission")
        return True
    return False
#-----------------------------------------------------------------------------
# sending function
#-----------------------------------------------------------------------------
#function to add the start/stop byte
def ss_byte(data):
    """Frame *data* in place: prepend the start byte, append the stop byte."""
    data.insert(0, start_b)
    data.append(stop_b)
# function to send a char-----------------------------------------------------
def send_char(data,adresse,char_data):
    """Serialize *data* as a CRUBS_ll char frame into *char_data* (in place).

    Header byte layout: adresse, then a sign bit, then the 2-bit type tag.
    """
    char_data[:]=[] # reset the output buffer
    # build the address / sign / type header byte
    char_data.append(adresse)
    if(data<0):
        char_data[0]=(char_data[0]<<1)+1
        # complementA2 is its own inverse here: it maps a negative value back
        # to its raw 8-bit two's-complement pattern.
        data = complementA2(data,b_char)
    else:
        char_data[0]=char_data[0]<<1
    char_data[0]=(char_data[0]<<2)+ch_mask
    # payload byte, checksum, then start/stop framing
    char_data.append(data)
    char_data.append(checksum(char_data[:]))
    ss_byte(char_data) # add start/stop bytes
# function to send a short----------------------------------------------------
def send_sht(data,adresse,sht_data):
    """Serialize *data* as a CRUBS_ll short frame into *sht_data* (in place)."""
    sht_data[:]=[]
    # build the address / sign / type header byte
    sht_data.append(adresse)
    if(data<0):
        sht_data[0]=(sht_data[0]<<1)+1
        data = complementA2(data,b_short)
        print(hex(data)) #debug  -- NOTE(review): leftover debug print; confirm it can go
    else:
        sht_data[0] = sht_data[0]<<1
    sht_data[0]=(sht_data[0]<<2)+sht_mask
    # payload: 2 big-endian bytes
    sht_data.append(data >> 8)
    sht_data.append(data & byte_mask)
    sht_data.append(checksum(sht_data[:]))
    # add start/stop framing
    ss_byte(sht_data)
#function to send an int------------------------------------------------------
def send_int(data,adresse,int_data):
    """Serialize *data* as a CRUBS_ll int frame into *int_data* (in place).

    Header byte = (adresse, sign bit, 2-bit type tag); payload = 4 big-endian
    bytes; then checksum, then start/stop framing.
    """
    int_data[:] = []  # reset the output buffer
    # build the address / sign / type header byte
    int_data.append(adresse)
    if data < 0:
        int_data[0] = (int_data[0] << 1) + 1
        data = complementA2(data, b_int)
    else:
        int_data[0] = int_data[0] << 1
    int_data[0] = (int_data[0] << 2) + int_mask
    # payload: 4 big-endian bytes
    int_data.append(data >> 24)
    int_data.append((data >> 16) & byte_mask)
    int_data.append((data >> 8) & byte_mask)
    # BUG FIX: the low byte was written as (data & 15), keeping only the low
    # nibble and corrupting bits 4-7; read_int expects a full byte.
    int_data.append(data & byte_mask)
    int_data.append(checksum(int_data[:]))
    ss_byte(int_data)  # add start/stop framing
#function to send a float-----------------------------------------------------
def send_flt(data,adresse,flt_data):
    """Serialize float *data* as a CRUBS_ll frame into *flt_data* (in place).

    The value travels as int(data * flt_coef) in 4 big-endian bytes, mirroring
    read_flt which divides by flt_coef after decoding.
    """
    flt_data[:] = []
    # BUG FIX: scale to an integer BEFORE sign handling. The old code applied
    # complementA2 to the raw float, which raises TypeError (xor on a float)
    # for any negative non-integer input and did not round-trip with read_flt.
    data = int(data * flt_coef)
    # build the address / sign / type header byte
    flt_data.append(adresse)
    if data < 0:
        flt_data[0] = (flt_data[0] << 1) + 1
        data = complementA2(data, b_flt)
    else:
        flt_data[0] = flt_data[0] << 1
    flt_data[0] = (flt_data[0] << 2) + flt_mask
    # payload: 4 big-endian bytes
    flt_data.append(data >> 24)
    flt_data.append((data >> 16) & byte_mask)
    flt_data.append((data >> 8) & byte_mask)
    # BUG FIX: low byte was (data & 15), which dropped bits 4-7.
    flt_data.append(data & byte_mask)
    flt_data.append(checksum(flt_data[:]))
    ss_byte(flt_data)  # add start/stop framing
#debug function---------------------------------------------------------------
def print_list(liste):
    """Debug helper: print each element of *liste* in binary."""
    for element in liste:
        print(bin(element))
| lcp5y3/tenchWichSpeak | pyqt/CRUBS_ll_decode.py | CRUBS_ll_decode.py | py | 7,281 | python | en | code | 0 | github-code | 36 |
22778807898 | import copy
import numpy as np
import random
from collections import defaultdict
from torch.utils.data.sampler import Sampler
class RandomClassSampler(Sampler):
    """Randomly samples N classes each with K instances to
    form a minibatch of size N*K.
    Modified from https://github.com/KaiyangZhou/deep-person-reid.
    Args:
        data_source (list): list of Datums.
        batch_size (int): batch size.
        n_ins (int): number of instances per class to sample in a minibatch.
    """
    def __init__(self, data_source, batch_size, n_ins):
        if batch_size < n_ins:
            raise ValueError(
                "batch_size={} must be no less "
                "than n_ins={}".format(batch_size, n_ins)
            )
        self.data_source = data_source
        self.batch_size = batch_size
        self.n_ins = n_ins
        # number of distinct classes represented in each minibatch
        self.ncls_per_batch = self.batch_size // self.n_ins
        # label -> list of dataset indices carrying that label
        self.index_dic = defaultdict(list)
        for index, item in enumerate(data_source):
            self.index_dic[item.label].append(index)
        self.labels = list(self.index_dic.keys())
        assert len(self.labels) >= self.ncls_per_batch
        # estimate number of images in an epoch by running one full iteration
        self.length = len(list(self.__iter__()))
    def __iter__(self):
        """Yield dataset indices grouped so every batch holds ncls_per_batch
        classes with n_ins instances each (classes with too few samples are
        over-sampled with replacement)."""
        batch_idxs_dict = defaultdict(list)
        for label in self.labels:
            idxs = copy.deepcopy(self.index_dic[label])
            if len(idxs) < self.n_ins:
                idxs = np.random.choice(idxs, size=self.n_ins, replace=True)
            random.shuffle(idxs)
            # chop this label's indices into chunks of n_ins
            batch_idxs = []
            for idx in idxs:
                batch_idxs.append(idx)
                if len(batch_idxs) == self.n_ins:
                    batch_idxs_dict[label].append(batch_idxs)
                    batch_idxs = []
        # repeatedly draw ncls_per_batch labels and emit one chunk per label
        # until too few labels have chunks left for a full batch
        avai_labels = copy.deepcopy(self.labels)
        final_idxs = []
        while len(avai_labels) >= self.ncls_per_batch:
            selected_labels = random.sample(avai_labels, self.ncls_per_batch)
            for label in selected_labels:
                batch_idxs = batch_idxs_dict[label].pop(0)
                final_idxs.extend(batch_idxs)
                if len(batch_idxs_dict[label]) == 0:
                    avai_labels.remove(label)
        return iter(final_idxs)
    def __len__(self):
        # number of indices produced per epoch (estimated in __init__)
        return self.length
| MaXuSun/domainext | domainext/data/samplers/random_class.py | random_class.py | py | 2,346 | python | en | code | 8 | github-code | 36 |
29326071622 | # coding=utf-8
import matplotlib.pyplot as plt
from gensim.models import Word2Vec
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_curve, auc
import data_processing
import globe
import word2vec_gensim_train
# Load the raw data (positive / negative sentiment files).
# pos_file_path = '/home/zhangxin/work/workplace_python/DeepNaturalLanguageProcessing/DeepNLP/data/test3.txt'
# neg_file_path = '/home/zhangxin/work/workplace_python/DeepNaturalLanguageProcessing/DeepNLP/data/test2.txt'
pos_file_path = '/Users/li/workshop/DataSet/sentiment/train/result_pos.txt'
neg_file_path = '/Users/li/workshop/DataSet/sentiment/train/result_neg.txt'
tmp = data_processing.read_data(pos_file_path, neg_file_path)
res = data_processing.data_split(tmp[0], tmp[1])
x_train = res[0]
x_test = res[1]
label_train = res[2]
label_test = res[3]
x_train = data_processing.text_clean(x_train)
x_test = data_processing.text_clean(x_test)
# Build document vectors from the pre-trained word2vec model.
n_dim = globe.n_dim
# model_path = '/home/zhangxin/work/workplace_python/DeepNaturalLanguageProcessing/DeepNLP/word2vecmodel/mymodel'
model_path = globe.model_path
word2vec_model = Word2Vec.load(model_path)
vecs = word2vec_gensim_train.text_vecs(x_train, x_test, n_dim, word2vec_model)
train_vecs = vecs[0]
test_vecs = vecs[1]
# Train the classifier: logistic regression fitted by SGD with an L1 penalty.
lr = SGDClassifier(loss='log', penalty='l1')
lr.fit(train_vecs, label_train)
print('Test Accuracy: %.2f' % lr.score(test_vecs, label_test))
# Plot the ROC curve of the positive-class probabilities on the test set.
pred_probas = lr.predict_proba(test_vecs)[:, 1]
fpr, tpr, _ = roc_curve(label_test, pred_probas)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='area = %.2f' %roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.legend(loc='lower right')
plt.show()
| STHSF/DeepNaturalLanguageProcessing | TextClassification/sentiment_analysis/sentiment_analysis_zh/word2vec_classify_run.py | word2vec_classify_run.py | py | 1,700 | python | en | code | 16 | github-code | 36 |
30466599177 | class Solution:
    def read(self, buf, n):
        """Read up to *n* characters into *buf* using read4; return the count read."""
        temp = [''] * 4  # scratch buffer for each read4 call
        index = 0
        while True:
            count = read4(temp)
            size = min(count, n - index)  # how many of this chunk still fit in buf
            for i in range(size):  # copy the chunk from temp into buf
                buf[index] = temp[i]
                index += 1  # index tracks how many chars are stored so far
            if index == n or count < 4:  # stop when buf is full or the file is exhausted
                return index
| dundunmao/LeetCode2019 | 157 Read N Characters Given Read4.py | 157 Read N Characters Given Read4.py | py | 609 | python | zh | code | 0 | github-code | 36 |
1406747066 | #Your task is to complete this function
#Your should return the required output
class Solution:
    def maxLen(self, n, arr):
        """Return the length of the longest contiguous subarray of arr summing to zero."""
        best = 0
        running = 0
        first_seen = {}  # first index at which each non-zero prefix sum occurred
        for idx, value in enumerate(arr):
            running += value
            if running == 0:
                # the whole prefix [0..idx] sums to zero -- longest possible ending here
                best = idx + 1
            elif running in first_seen:
                # the slice after the earlier occurrence sums to zero
                best = max(best, idx - first_seen[running])
            else:
                first_seen[running] = idx
        return best
#{
# Driver Code Starts
if __name__=='__main__':
    t= int(input())  # number of test cases
    for i in range(t):
        n = int(input())  # array length
        arr = list(map(int, input().strip().split()))
        ob = Solution()
        print(ob.maxLen(n ,arr))
# Contributed by: Harshit Sidhwa
# } Driver Code Ends
# } Driver Code Ends | anishgupta675/Striver_SDE_Sheet | Arrays_Part_IV/Largest_Subarray_with_K_sum/Solution.py | Solution.py | py | 863 | python | en | code | 0 | github-code | 36 |
34972782273 |
from .helpers import flattenToSet, console
from .nodes import Nodes
from .locality import Locality
from .nodefeature import NodeFeatures
from .edgefeature import EdgeFeatures
from .computed import Computeds
from .text import Text
from ..search.search import Search
# Maps every public API member name to (section head, doc anchor, doc page);
# consumed by Api.makeAvailableIn() to build the grouped documentation index.
API_REFS = dict(
    AllComputeds=("Computed", "computedall", "computed-data"),
    AllEdges=("Features", "edgeall", "edge-features"),
    AllFeatures=("Features", "nodeall", "node-features"),
    C=("Computed", "computed", "computed-data"),
    Call=("Computed", "computedall", "computed-data"),
    Computed=("Computed", "computed", "computed-data"),
    ComputedString=("Computed", "computedstr", "computed-data"),
    Cs=("Computed", "computedstr", "computed-data"),
    E=("Features", "edge", "edge-features"),
    Eall=("Features", "edgeall", "edge-features"),
    Edge=("Features", "edge", "edge-features"),
    EdgeString=("Features", "edgestr", "edge-features"),
    Es=("Features", "edgestr", "edge-features"),
    F=("Features", "node", "node-features"),
    Fall=("Features", "nodeall", "node-features"),
    Feature=("Features", "node", "node-features"),
    FeatureString=("Features", "nodestr", "node-features"),
    Fs=("Features", "nodestr", "node-features"),
    L=("Locality", "locality", "locality"),
    Locality=("Locality", "locality", "locality"),
    N=("Nodes", "nodes", "navigating-nodes"),
    Nodes=("Nodes", "nodes", "navigating-nodes"),
    S=("Search", "search", "search"),
    Search=("Search", "search", "search"),
    T=("Text", "text", "text"),
    TF=("Fabric", "fabric", "loading"),
    Text=("Text", "text", "text"),
)
class Api(object):
    """Central access object of the dataset API.

    Bundles the feature accessors (F/E/C), mirrors logging/timing helpers from
    TF.tmObj onto TF, and provides loading and name-export utilities.
    """
    def __init__(self, TF):
        self.TF = TF
        self.ignored = tuple(sorted(TF.featuresIgnored))
        TF.ignored = self.ignored
        # Feature containers plus their long-name aliases.
        self.F = NodeFeatures()
        self.Feature = self.F
        self.E = EdgeFeatures()
        self.Edge = self.E
        self.C = Computeds()
        self.Computed = self.C
        # Re-export the timing/messaging helpers onto TF for convenience.
        tmObj = TF.tmObj
        TF.silentOn = tmObj.silentOn
        TF.silentOff = tmObj.silentOff
        TF.isSilent = tmObj.isSilent
        TF.setSilent = tmObj.setSilent
        TF.info = tmObj.info
        TF.warning = tmObj.warning
        TF.error = tmObj.error
        TF.cache = tmObj.cache
        TF.reset = tmObj.reset
        TF.indent = tmObj.indent
        TF.loadLog = tmObj.cache
        TF.ensureLoaded = self.ensureLoaded
        TF.makeAvailableIn = self.makeAvailableIn
        # Long-name aliases for the string-based and listing accessors.
        setattr(self, "FeatureString", self.Fs)
        setattr(self, "EdgeString", self.Es)
        setattr(self, "ComputedString", self.Cs)
        setattr(self, "AllFeatures", self.Fall)
        setattr(self, "AllEdges", self.Eall)
        setattr(self, "AllComputeds", self.Call)
    def Fs(self, fName):
        """Return the node feature named *fName*, or None (with an error) if not loaded."""
        if not hasattr(self.F, fName):
            self.TF.error(f'Node feature "{fName}" not loaded')
            return None
        return getattr(self.F, fName)
    def Es(self, fName):
        """Return the edge feature named *fName*, or None (with an error) if not loaded."""
        if not hasattr(self.E, fName):
            self.TF.error(f'Edge feature "{fName}" not loaded')
            return None
        return getattr(self.E, fName)
    def Cs(self, fName):
        """Return the computed feature named *fName*, or None (with an error) if not loaded."""
        if not hasattr(self.C, fName):
            self.TF.error(f'Computed feature "{fName}" not loaded')
            return None
        return getattr(self.C, fName)
    def Fall(self):
        """Sorted names of all loaded node features."""
        return sorted(x[0] for x in self.F.__dict__.items())
    def Eall(self):
        """Sorted names of all loaded edge features."""
        return sorted(x[0] for x in self.E.__dict__.items())
    def Call(self):
        """Sorted names of all loaded computed features."""
        return sorted(x[0] for x in self.C.__dict__.items())
    def makeAvailableIn(self, scope):
        """Inject all public API members into *scope* and return grouped doc info."""
        for member in dir(self):
            if "_" not in member and member[0].isupper():
                scope[member] = getattr(self, member)
                if member not in API_REFS:
                    console(f'WARNING: API member "{member}" not documented')
        grouped = {}
        for (member, (head, sub, ref)) in API_REFS.items():
            grouped.setdefault(ref, {}).setdefault((head, sub), []).append(member)
        # grouped
        # node-features=>(Features, node)=>[F, ...]
        docs = []
        for (ref, groups) in sorted(grouped.items()):
            chunks = []
            for ((head, sub), members) in sorted(groups.items()):
                chunks.append(" ".join(sorted(members, key=lambda x: (len(x), x))))
            docs.append((head, ref, tuple(chunks)))
        return docs
    # docs
    # (Features, node-features, ('F ...', ...))
    def ensureLoaded(self, features):
        """Load any features in *features* that are not yet loaded; return the loaded set."""
        F = self.F
        E = self.E
        TF = self.TF
        warning = TF.warning
        needToLoad = set()
        loadedFeatures = set()
        for fName in sorted(flattenToSet(features)):
            fObj = TF.features.get(fName, None)
            if not fObj:
                warning(f'Cannot load feature "{fName}": not in dataset')
                continue
            if fObj.dataLoaded and (hasattr(F, fName) or hasattr(E, fName)):
                loadedFeatures.add(fName)
            else:
                needToLoad.add(fName)
        if len(needToLoad):
            TF.load(
                needToLoad, add=True, silent="deep",
            )
            loadedFeatures |= needToLoad
        return loadedFeatures
def addOtype(api):
    """Attach .all (ordered node types) and .support (type -> node range) to api.F.otype."""
    levels = api.C.levels.data
    setattr(api.F.otype, "all", tuple(level[0] for level in levels))
    setattr(api.F.otype, "support", {level[0]: (level[2], level[3]) for level in levels})
def addLocality(api):
    """Create the Locality API and expose it as both api.L and api.Locality."""
    api.L = api.Locality = Locality(api)
def addNodes(api):
    """Create the Nodes API and expose it as both api.N and api.Nodes."""
    api.N = api.Nodes = Nodes(api)
def addText(api):
    """Create the Text API and expose it as both api.T and api.Text."""
    api.T = api.Text = Text(api)
def addSearch(api, silent):
    """Create the Search API (with the given verbosity) as api.S / api.Search."""
    api.S = api.Search = Search(api, silent)
| aarek-eng/txtpy | txtpy/core/api.py | api.py | py | 5,762 | python | en | code | 1 | github-code | 36 |
201309717 | #name introduction
"""
Topic: Programming Logic and Design
Author: Viernes, Michael
Submitted to: Mr. Madrigalejos
"""
"""
# Getter functions (NOT USED FOR THE MOMENT FOR HOMEWORK 04).
def getName():
name = input("Your name: ")
return name
def getAge():
age = input("Your age: ")
return age
def getAddress():
address = input("Your address: ")
return address
"""
def getInfo(): # gets name, age, address.
    """Prompt on stdin for name, age and address; return them as a 3-tuple of str."""
    # The old code pre-initialized name/age/address to "" and immediately
    # overwrote them; those dead assignments are removed.
    name = input("Your name: ")
    age = input("Your age: ")
    address = input("Your address: ")
    return name, age, address # end getInfo()
def printInfo(user_name, user_age, user_address):
    """Print the standard self-introduction line for the given details."""
    introduction = f"\n\nHi, my name is {user_name}. I am {user_age} years old and I live in {user_address}.\n"
    print(introduction)
"""
def decorator(decorFunc): # This will be added at some point.
def consoleDecorator():
return
return consoleDecorator
"""
def main(): # REDEFINED main program.
    """Run one introduction round: print the banner, prompt for the user's
    details via getInfo(), then print the greeting via printInfo().

    (The original docstring held dead, commented-out array-based code.)
    """
    print("\n--------------------")
    print("\tIntroduction")
    print("--------------------")
    print("\tRequesting Personal Information: \n\n")
    m_name ,m_age ,m_address = getInfo()
    printInfo(m_name, m_age, m_address)
printInfo(m_name, m_age, m_address)
# Keep introducing users until Q is entered.
while True:
    main()  # Calls main program.
    # 'choice' avoids shadowing the builtin quit(); input() already returns a
    # str, so the old str() wrapper and the dead 'quit = "None"' priming are gone.
    choice = input("Press Q to quit: ")
    if choice.upper() == "Q":
        print(
            """Thank you for your participation.\n"""
        )
        break
| MichaelViernes271/PLD-Homework-04 | name-intro.py | name-intro.py | py | 1,869 | python | en | code | 1 | github-code | 36 |
19226676283 | import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import dash_bootstrap_components as dbc
from app import app
from apps import general_functions as gf
#from apps_igf import func_gral
# Page body: a filter column (RFC / year / month range / document type /
# currency) next to an amounts panel, then an invoices panel below.
main_layout = dbc.Container([
    dbc.Row([
        # Filter controls column
        dbc.Col([
            html.P("Selecciona un RFC:"),
            dcc.Dropdown(
                id="gral_drp_0",
                options=[{"label": "RFC001", "value":"RFC001"},
                         {"label": "RFC002", "value":"RFC002"},
                         {"label": "RFC003", "value":"RFC003"}]
            ),
            html.Br(),
            html.P("Año:", className="control_label"),
            dcc.Dropdown(
                id="gral_drp_1",
                options=[{"label": "2020", "value":"2020"},
                         {"label": "2019", "value":"2019"},
                         {"label": "2018", "value":"2018"}],
            ),
            html.Br(),
            html.P("Rango de meses:", className="control_label"),
            dcc.RangeSlider(
                id="gral_drp_2",
                min=1,
                max=12,
                value=[1, 12],
            ),
            html.Br(),
            html.P("Tipo de Documento:", className="control_label"),
            dcc.Dropdown(
                id="gral_drp_3",
                options=[{"label": "Pago", "value":"Pago"},
                         {"label": "Egreso", "value":"Egreso"},
                         {"label": "Ingreso", "value":"Ingreso"}],
            ),
            html.Br(),
            html.P("Tipo de Moneda:", className="control_label"),
            dcc.RadioItems(
                id="gral_drp_4",
                options=[
                    {"label": "Pesos MXN ", "value": "MXN"},
                    {"label": "Dolares USD ", "value": "USD"},],
                labelStyle={"display": "inline-block"},
            ),
        ],className='col-md-3', style={'border-radius': '5px','background-color': '#f9f9f9'}),
        # Amounts panel: emission/reception totals plus a graph
        dbc.Col([
            html.H1("MONTOS", className='text-center'),
            gf.mosaico("#00cc96", "Monto Total Emision", "gral_msc_mon_1"),
            gf.mosaico("#00cc96", "Monto Total Recepcion", "gral_msc_mon_2"),
            html.Div(
                [dcc.Graph(id="gral_grf_mts_1",
                           config=dict(displayModeBar=False))],
            ),
        ],className='col-md-8', width={'offset':1})
    ]),
    html.Br(),
    dbc.Row([
        # Invoices panel: issued/received counts plus a graph
        dbc.Col([
            html.H1("FACTURAS", className='text-center'),
            gf.mosaico("#00cc96", "Cantidad de Facturas Emitidas", "gral_msc_fac_1"),
            gf.mosaico("#00cc96", "Cantidad de Facturas Recibidas", "gral_msc_fac_2"),
            html.Div(
                [dcc.Graph(id="gral_grf_fac_1",
                           config=dict(displayModeBar=False))],
            ),
        ],className='col-md-8', width={'offset':4})
    ])
],fluid=True)
# Page wrapper: the whole view is the single container defined above.
layout = html.Div([
    html.Div([main_layout]),
])
| jGarciaGz/bocetos | gral2.py | gral2.py | py | 3,189 | python | en | code | 0 | github-code | 36 |
35396901278 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from twitter.common.collections import OrderedSet
from twitter.common.dirutil.fileset import Fileset
from twitter.common.lang import Compatibility
def assert_list(obj, expected_type=Compatibility.string, can_be_none=True, default=(),
                allowable=(list, Fileset, OrderedSet, set, tuple), raise_type=ValueError):
  """
  Validate that a BUILD-file parameter is an acceptable list-like of
  *expected_type* items and return its contents as a plain list.
  :param obj : the object that may be a list. It will pass if it is of type in allowable.
  :param expected_type : this is the expected type of the returned list contents.
  :param can_be_none : this defines whether or not the obj can be None. If True, return default.
  :param default : this is the default to return if can_be_none is True and obj is None.
  :param allowable : the acceptable types for obj. We do not want to allow any iterable (eg string).
  :param raise_type : the error to throw if the type is not correct.
  """
  value = obj
  if value is None:
    if not can_be_none:
      raise raise_type('Expected an object of acceptable type %s, received None and can_be_none is False' % allowable)
    value = list(default)
  # Reject anything that is not one of the whitelisted container types
  # (a plain iterable such as a string must not pass).
  if not any(isinstance(value, typ) for typ in allowable):
    raise raise_type('Expected an object of acceptable type %s, received %s instead' % (allowable, value))
  items = list(value)
  for item in items:
    if not isinstance(item, expected_type):
      raise raise_type('Expected a list containing values of type %s, instead got a value %s of %s' %
                       (expected_type, item, item.__class__))
  return items
| fakeNetflix/square-repo-pants | src/python/pants/base/validation.py | validation.py | py | 1,754 | python | en | code | 0 | github-code | 36 |
3511362879 | import re
def name_score(name):
    """Sum of alphabetical positions (A=1 ... Z=26) of the letters in *name*."""
    base = ord('A') - 1
    return sum(ord(letter) - base for letter in name)
# Project Euler 22: total of (alphabetical rank) * (name score) over all names.
# Fixes: the file handle was never closed (now a with-block) and the rank
# counter was maintained by hand (now enumerate(start=1)).
with open("p022_names.txt") as names_file:
    raw = names_file.read()
name_list = sorted(re.findall("\"(.*)\"", chunk)[0] for chunk in raw.split(","))
total = sum(rank * name_score(name) for rank, name in enumerate(name_list, start=1))
print(total)
| PetraVidnerova/euler | 22.py | 22.py | py | 396 | python | en | code | 0 | github-code | 36 |
27698021659 | # -*- coding: utf-8 -*-#
'''
# Name: dnn_regression-keras
# Description:
# Author: super
# Date: 2020/6/2
'''
from HelperClass2.MnistImageDataReader import *
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def load_data():
    """Load the ch09 regression dataset and return its three splits.

    Returns (x_train, y_train, x_test, y_test, x_val, y_val) as produced by
    the project's DataReader_2_0 after shuffling and carving out a validation
    set. Normalization is deliberately left disabled (see commented calls).
    """
    train_file = "../data/ch09.train.npz"
    test_file = "../data/ch09.test.npz"
    dataReader = DataReader_2_0(train_file, test_file)
    dataReader.ReadData()
    # dr.NormalizeX()
    # dr.NormalizeY(YNormalizationMethod.Regression)
    dataReader.Shuffle()
    dataReader.GenerateValidationSet()
    x_train, y_train = dataReader.XTrain, dataReader.YTrain
    x_test, y_test = dataReader.XTest, dataReader.YTest
    x_val, y_val = dataReader.XDev, dataReader.YDev
    return x_train, y_train, x_test, y_test, x_val, y_val
def build_model():
model = Sequential()
model.add(Dense(4, activation='sigmoid', input_shape=(1, )))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='Adam',
loss='mean_squared_error')
return model
#画出训练过程中训练和验证的精度与损失
def draw_train_history(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
if __name__ == '__main__':
x_train, y_train, x_test, y_test, x_val, y_val = load_data()
# print(x_train.shape)
# print(x_test.shape)
# print(x_val.shape)
model = build_model()
history = model.fit(x_train, y_train, epochs=50, batch_size=10, validation_data=(x_val, y_val))
draw_train_history(history)
loss = model.evaluate(x_test, y_test)
print("test loss: {}".format(loss))
weights = model.get_weights()
print("weights: ", weights) | Knowledge-Precipitation-Tribe/Neural-network | code/DNN/dnn_regression-keras.py | dnn_regression-keras.py | py | 1,937 | python | en | code | 3 | github-code | 36 |
74031939303 | import json
import sys
import aes_functions
import rsa_functions
from exceptions.Exceptions import IncorrectData
from socket_class import SOCKET_SIMPLE_TCP
def receiveAESMessage(s):
return s.receive(), s.receive(), s.receive()
def checkMessageGCM(key, iv, cif, mac):
res = aes_functions.decipherAES_GCM(key, iv, cif, mac)
if res is not False:
return res
else:
print("AIUDAAAA :(")
print("Corrupted Message")
def sendAESMessage(socket, criptograma, mac, nonce):
socket.send(criptograma)
socket.send(mac)
socket.send(nonce)
def bob_socket(port):
return SOCKET_SIMPLE_TCP('127.0.0.1', port)
class Bob:
def __init__(self):
self.name = "Bob"
self.port = 5552
self.PK_BOB = rsa_functions.create_RSAKey()
self.KBT = aes_functions.create_AESKey()
self.KPT = rsa_functions.load_RSAKey_Public("TTP.pub")
def savePK(self):
return rsa_functions.save_RSAKey_Public("Bob.pub", self.PK_BOB)
if __name__ == '__main__':
"""--STEP 0--"""
bob = Bob()
bob.savePK()
print(bob.PK_BOB.public_key().export_key())
try:
socket = bob_socket(bob.port)
socket.connect()
except Exception as e:
sys.exit(f"An error occurred creating the socket with TTP: {e}")
"""--STEP 2--"""
print("Establishing a connection with TTP...")
try:
engineKAT = aes_functions.startAES_GCM(bob.KBT)
print("Sending data to TTP...")
message = [bob.name, bob.KBT.hex()]
json_AT = json.dumps(message)
print("Message B -> T (decryption): " + json_AT)
# Encrypt data
encrypted_message = rsa_functions.cipherRSA_OAEP(json_AT.encode("utf-8"), bob.KPT.public_key())
encrypted_signature = rsa_functions.signatureRSA_PSS(bob.KBT.hex().encode("utf-8"), bob.PK_BOB)
# Send encrypted data
socket.send(encrypted_message)
socket.send(encrypted_signature)
except Exception as e:
socket.close()
sys.exit(f"An error occurred in step 2: {e}")
finally:
print("END STEP 2")
input("Press any key to continue")
"""--Step 5--"""
try:
socket = bob_socket(5555)
socket.listen()
except Exception as e:
sys.exit(f"An error occurred creating the socket with Alice: {e}")
try:
print("Waiting for Alice...")
msg = socket.receive()
cipher_BT, mac_BT, iv_BT, cif_AB, mc_AB, iv_AB = json.loads(msg)
decrypted_message = checkMessageGCM(bob.KBT, bytes.fromhex(iv_BT), bytes.fromhex(cipher_BT),
bytes.fromhex(mac_BT))
TS, KAB = json.loads(decrypted_message.decode('utf-8'))
KAB = bytearray.fromhex(KAB)
decrypted_message = checkMessageGCM(KAB, bytes.fromhex(iv_AB), bytes.fromhex(cif_AB),
bytes.fromhex(mc_AB))
sessionName, aux = json.loads(decrypted_message)
if sessionName != 'Alice' and aux != TS:
raise IncorrectData("Possible data modification during communication")
else:
print("Reliable data, continued")
except Exception as e:
socket.close()
sys.exit(f"An error occurred in step 5: {e}")
finally:
print("END STEP 5")
input("Press any key to continue")
"""--Step 6--"""
try:
resolution = float(TS) + 1
engineKAB = aes_functions.startAES_GCM(KAB)
cif, mac, iv = aes_functions.cipherAES_GCM(engineKAB, str(resolution).encode("utf-8"))
sendAESMessage(socket, cif, mac, iv)
except Exception as e:
socket.close()
sys.exit(f"An error occurred in step 6: {e}")
finally:
print("END STEP 6")
input("Press any key to continue")
"""--Step 7--"""
try:
print("Waiting for Alice")
cif, mac, iv = receiveAESMessage(socket)
textoClaro = checkMessageGCM(KAB, iv, cif, mac)
msg = textoClaro.decode("utf-8")
print("Message ->" + msg)
except Exception as e:
socket.close()
sys.exit(f"An error occurred in step 7: {e}")
finally:
print("END STEP 7")
input("Press any key to continue")
"""--Step 8--"""
try:
msg = "Hello Word!"
engineKAB = aes_functions.startAES_GCM(KAB)
cif, mac, iv = aes_functions.cipherAES_GCM(engineKAB, msg.encode("utf-8"))
sendAESMessage(socket, cif, mac, iv)
except Exception as e:
socket.close()
sys.exit(f"An error occurred in step 8: {e}")
finally:
print("END STEP 8")
| makrron/simplified-kerberos-protocol | p-b.py | p-b.py | py | 4,633 | python | en | code | 0 | github-code | 36 |
71903311144 | import torch.nn as nn
import torch
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader
from prior_learning.toy_env.toyloader import toyenv_Dataset
size = 8
seq_len = 32
categories = 16
batch_size = 128
feature_dim = 16
features = np.random.random((categories, feature_dim))
train_loader = DataLoader(toyenv_Dataset(features, size, seq_len, categories), batch_size = batch_size, num_workers= 40, shuffle = True)
net = nn.Sequential(
nn.Linear(feature_dim, 32),
nn.ReLU(),
nn.Linear(32, 16),
nn.ReLU(),
nn.Linear(16, 1)
)
net.cuda()
criteria = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=1e-3)
reg_sum = 0
loss_sum = 0
for i, data in enumerate(train_loader):
if i == 29999:
optimizer = optim.Adam(net.parameters(), lr=1e-4)
blocks, masks, rewards = [d.cuda() for d in data]
blocks = blocks.view(batch_size * seq_len, feature_dim)
rewards_hat = net(blocks)
rewards_hat = rewards_hat.view(batch_size, seq_len)
reg = torch.mean(torch.abs(rewards_hat)) * 0.01
rewards_hat = torch.sum(rewards_hat * masks, 1)
loss = criteria(rewards_hat, rewards) + reg
loss_sum += loss.item()
reg_sum += reg.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 2000 == 1999:
print('[{}] loss: {}, reg: {}'.format(i + 1, loss_sum / 100, reg_sum / 100))
loss_sum = 0
reg_sum = 0
if i % 10000 == 9999:
result = net(torch.from_numpy(features).float().cuda()).flatten().detach().cpu().numpy()
print('=' * 40)
print(result)
print('='*40)
| buoyancy99/sap | prior_learning/toy_env/train_toy.py | train_toy.py | py | 1,633 | python | en | code | 1 | github-code | 36 |
28924241951 | #11238. Fibo
"""
피보나치 수와 최대공약수와 유사한 문제.
gcd(a,b)%M= gcd(a%M, b%M)이 성립하는진 사실 잘 모르겠지만..
그러지 않고선 메모리 초과가 날 것 같다.
gcd(Fib(m),Fib(n))=Fib(gcd(m,n))이라고 한다.
이에 대한 증명은 구글링을 통해 공부해보자. (재밌어보인다)
"""
big_num=1000000007
#행렬 a,b가 주어졌을때 그 행렬곱을 구하는 함수
def matmul(a,b):
row=len(a); common=len(b); column=len(b[0])
c=[[0]*column for _ in range(row)]
for i in range(row):
for k in range(column):
for j in range(common):
c[i][k]+= a[i][j] * b[j][k]
c[i][k]%=big_num
return c
#분할정복을 통해 행렬의 K제곱을 구하는 함수
def powerMatrix(N,K):
if K==1:
return [[ ele %big_num for ele in row] for row in N ]
else:
if K%2==0:
partial=powerMatrix(N,K//2)
result=matmul(partial,partial)
return result
else:
partial=powerMatrix(N,(K-1)//2)
tmp=matmul(partial,partial)
return matmul(tmp,N)
#Operation start
import sys
import math
input=sys.stdin.readline
T=int(input().rstrip())
for _ in range(T):
n,m=map(int,input().rstrip().split())
result=math.gcd(n,m)
initial=[[0],[1]]
matrix=[[0,1],[1,1]]
#fib(N),fib(N+1)이 각각 있을 것
if result==1: final_matrix=initial
else: final_matrix=matmul(powerMatrix(matrix,result-1),initial)
final_result=final_matrix[-1][-1]
print(final_result) | GuSangmo/BOJ_practice | PS/DP/matrix_DP/11238.py | 11238.py | py | 1,587 | python | ko | code | 0 | github-code | 36 |
42850936844 | from django.urls import path
from . import views
urlpatterns = [
path('register/', views.registerPage, name='register'),
path('login/', views.loginPage, name='login'),
path('logout/', views.logoutUser, name='logout'),
path('event_create/', views.event_create, name='event_create'),
path('event_manager/', views.event_manager, name='event_manager'),
path('event_update/<str:pk>/', views.event_update, name='event_update'),
path('event_delete/<str:pk>/', views.event_delete, name='event_delete'),
] | Barnacle322/esoapp | eventsmanager/eventcreation/urls.py | urls.py | py | 525 | python | en | code | 0 | github-code | 36 |
31061019305 |
from ..utils import Object
class CancelUploadFile(Object):
"""
Stops the uploading of a file. Supported only for files uploaded by using uploadFile. For other files the behavior is undefined
Attributes:
ID (:obj:`str`): ``CancelUploadFile``
Args:
file_id (:obj:`int`):
Identifier of the file to stop uploading
Returns:
Ok
Raises:
:class:`telegram.Error`
"""
ID = "cancelUploadFile"
def __init__(self, file_id, extra=None, **kwargs):
self.extra = extra
self.file_id = file_id # int
@staticmethod
def read(q: dict, *args) -> "CancelUploadFile":
file_id = q.get('file_id')
return CancelUploadFile(file_id)
| iTeam-co/pytglib | pytglib/api/functions/cancel_upload_file.py | cancel_upload_file.py | py | 735 | python | en | code | 20 | github-code | 36 |
27977418436 | #!/usr/bin/env python
import config
import json
import requests
import sys
"""
Copyright (c) 2020, Cisco Systems, Inc. and/or its affiliates
Creates webhooks in a repo upon release using
GitHub API v3 POST /repos/:owner/:repo/hooks
Requires a file with repo names, one per line,
and a personal access token with access to each repo.
Usage:
python create_webhook.py devnet_repos.txt
"""
def get_webhook(gh_orgname, repo_name, gh_username, gh_api_key):
api_uri = "https://api.github.com/repos/{}/{}/hooks".format(gh_orgname, repo_name)
print(api_uri)
session = requests.Session()
session.auth = (gh_username, gh_api_key)
try:
gethooks = session.get(api_uri)
print(json.dumps(gethooks.json(), indent=4))
except:
print(gethooks.status_code)
print("Response text: {}".format(gethooks.text))
def post_create_webhook(gh_orgname, repo_name, gh_username, gh_api_key, gh_webhook_url, gh_secret):
api_uri = "https://api.github.com/repos/{}/{}/hooks".format(gh_orgname, repo_name)
print("API endpoint: {}".format(api_uri))
print("Username: {}".format(gh_username))
print("API Key: {}".format(gh_api_key))
print("Secret for payload: {}".format(gh_secret))
try:
headers = {'User-Agent': '{}'.format(gh_username),
'Content-Type': 'application/json',
'Authorization': 'token {}'.format(gh_api_key)
}
print(headers)
payload = {
'name': 'web',
'active': True,
'events': ['release'],
'config': {
'url': '{}'.format(gh_webhook_url),
'content_type': 'json',
'secret': '{}'.format(gh_secret),
'insecure_ssl': '0'
}
}
session = requests.Session()
makehooks = requests.Request('POST', api_uri, json=payload, headers=headers).prepare()
resp = session.send(makehooks)
print(resp.status_code)
print(json.dumps(resp.json(), indent=4))
except:
print(resp.status_code)
print("Response text: {}".format(resp.text))
sys.exit()
def main(filename):
if not len(args) == 1:
print("Enter the filename for the file that contains the list of repos, one per line")
return
filename = args[0]
# Read data in from a text list of all LL repo names
repolist = []
with open(filename) as f:
repolist = f.readlines()
for repo in repolist:
repo_name = repo.rstrip('\n')
print("Working on this repo: " + repo_name)
#getresponse = get_webhook(config.gh_orgname, repo_name, config.gh_username, config.gh_api_key)
postresponse = post_create_webhook(config.gh_orgname, repo_name, config.gh_username, config.gh_api_key, config.gh_webhook_url, config.gh_secret)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| justwriteclick/gh-webhooks | create_webhook.py | create_webhook.py | py | 3,009 | python | en | code | 2 | github-code | 36 |
32694277113 | import forecast
import send_sms
from datetime import datetime
# Since the api call is made at 6:00 AM, hourly_forecast[0] is 6 AM
def main():
startTimes = [8, 8, 8, 8, 8]
endTimes = [18, 16, 18, 18, 10]
date = datetime.today()
dayOfWeek = date.weekday()
message = ""
phone_number = "+19257877379"
hourly_forecast = forecast.get_hourly_forecast("CA", "Goleta")
for i in range(5):
if (dayOfWeek == i):
minTemp = int(hourly_forecast[startTimes[dayOfWeek] - 6]['temp']['english'])
maxTemp = int(hourly_forecast[startTimes[dayOfWeek] - 6]['temp']['english'])
minTempTime = startTimes[dayOfWeek]
maxTempTime = endTimes[dayOfWeek]
for j in range(startTimes[dayOfWeek] - 6, endTimes[dayOfWeek] - 5):
if ("Rain" in hourly_forecast[j]['condition']):
message += "Rain forecasted at " + str(j % 12) + ":00. "
if (int(hourly_forecast[j]['temp']['english']) < minTemp):
minTemp = int(hourly_forecast[j]['temp']['english'])
minTempTime = j + 6
if (int(hourly_forecast[j]['temp']['english']) > maxTemp):
maxTemp = int(hourly_forecast[j]['temp']['english'])
maxTempTime = j + 6
message += "Min temp today is " + str(minTemp) + " at " \
+ str(minTempTime) + ":00. "
message += "Max temp today is " + str(maxTemp) + " at " \
+ str(maxTempTime) + ":00. "
#print(message)
send_sms.send_message(phone_number, message)
# checked hours should depend on day of the week
if __name__ == '__main__':
main()
| kailashbaas/Weather-SMS | main.py | main.py | py | 1,681 | python | en | code | 0 | github-code | 36 |
4579701597 | from django.http import JsonResponse
from django.views.generic import View
from .models import Scraper
from .validators import currency_serializer, get_valid_data
class ScraperAPI(View):
def get(self, *args, **kwargs):
currencies = Scraper.objects.all()
data = {"scrapers": list(map(currency_serializer, currencies))}
return JsonResponse(data)
def post(self, *args, **kwargs):
data, is_valid = get_valid_data('POST', self.request.body)
if not is_valid:
return JsonResponse(data, status=400)
if Scraper.objects.filter(currency=data['currency']).count() != 0:
return JsonResponse({"error": "This currency already exists"}, status=400)
scraper = Scraper.objects.create(currency=data['currency'], frequency=data['frequency'])
scraper.values.create(value=0)
data = {
"id" : scraper.id,
"created_at": scraper.create_at,
"currency" : scraper.currency,
"frequency" : scraper.frequency
}
return JsonResponse(data)
def put(self, *args, **kwargs):
data, is_valid = get_valid_data('PUT', self.request.body)
if not is_valid:
return JsonResponse(data, status=400)
if Scraper.objects.filter(pk=data['id']).count() == 0:
return JsonResponse({"error": "This Scraper not exists"}, status=400)
Scraper.objects.filter(pk=int(data['id'])).update(frequency=int(data['frequency']))
data = {"msg": "Scraper updated"}
return JsonResponse(data)
def delete(self, *args, **kwargs):
data, is_valid = get_valid_data('DELETE', self.request.body)
if not is_valid:
return JsonResponse(data, status=400)
if Scraper.objects.filter(pk=data['id']).count() == 0:
return JsonResponse({"error": "This Scraper not exists"}, status=400)
Scraper.objects.filter(pk=data['id']).delete()
data = {"msg": "Scraper deleted"}
return JsonResponse(data)
| chvilches/rg-corp | api/views.py | views.py | py | 2,052 | python | en | code | 0 | github-code | 36 |
4108394927 | from sys import stdin
input = stdin.readline
moves = [[1, 0], [0, 1], [1, 1], [-1, 1]]
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
for _ in range(5):
r, c = [int(x) for x in input().split()]
grid = [input()[:-1] for _ in range(r)]
words = set()
for _ in range(int(input())):
before = input()[:-1]
after = ""
for i in before:
if i in alphabet:
after += i
words.add(after)
visited = set()
for a in range(r):
for b in range(c):
for move in moves:
path = set()
word = ""
x, y = a, b
while True:
if 0 <= x < r and 0 <= y < c:
path.add((x, y))
word += grid[x][y]
if word in words or word[::-1] in words:
visited = visited.union(path)
break
x += move[0]
y += move[1]
else:
break
sentence = ""
for i in range(r):
for j in range(c):
if (i, j) not in visited:
sentence += grid[i][j]
print(sentence)
| AAZZAZRON/DMOJ-Solutions | ecoo14r1p3.py | ecoo14r1p3.py | py | 1,243 | python | en | code | 1 | github-code | 36 |
32967623992 | from django import forms
from django.core.exceptions import ValidationError
from arcana_app.models import Driver, Truck, Trailer, Insurance, Freight
class DateInput(forms.DateInput):
input_type = 'date'
class TimeInput(forms.TimeInput):
input_type = 'time'
# class CheckboxInput(forms.CheckboxInput):
# input_type = 'checkbox'
class AddDriverForm(forms.ModelForm):
class Meta:
model = Driver
fields = '__all__'
widgets = {
'birth_date': DateInput(),
}
def __init__(self, *args, **kwargs):
super(AddDriverForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class AddTruckForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddTruckForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class Meta:
model = Truck
fields = '__all__'
widgets = {
'begin_MOT': DateInput(),
'expire_MOT': DateInput(),
}
# widgets = {
# 'has_actual_MOT': forms.CheckboxInput(
# attrs={'class': 'required checkbox form-select', 'disabled': 'disabled or true'}),
# }
class AddTrailerForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddTrailerForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class Meta:
model = Trailer
fields = '__all__'
widgets = {
'begin_MOT': DateInput(),
'expire_MOT': DateInput(),
}
class AddInsuranceForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddInsuranceForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class Meta:
model = Insurance
fields = '__all__'
def clean(self):
data = super().clean()
if not data['begin_date'] <= data['end_date']:
raise ValidationError("Begin date can't be earlier than end date!")
return data
class AddFreightForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddFreightForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class Meta:
model = Freight
fields = '__all__'
widgets = {
'date_of_loading': DateInput(),
'date_of_unloading': DateInput(),
'hour_of_loading': TimeInput(),
'hour_of_unloading': TimeInput(),
}
| KamilNurzynski/Arcana | arcana_app/forms.py | forms.py | py | 2,848 | python | en | code | 1 | github-code | 36 |
29788143583 | import os
from extension.constants import ENV_OPTION_PREFIX
from extension.interface import ExtensionModules
class MockExtensionModules(ExtensionModules):
def inputs(self):
return []
def outputs(self):
return []
def generate_inputs(self, data):
pass
def generate_outputs(self, data):
pass
def test_properties(tmp_path):
name = 'foo'
root = str(tmp_path)
metadata = {}
config = {}
builder = MockExtensionModules(name, root, metadata, config)
assert builder.name is name
assert builder.root is root
assert builder.metadata is metadata
assert builder.config is config
def test_get_env_option(tmp_path):
builder = MockExtensionModules('foo', str(tmp_path), {}, {})
option = os.urandom(4).hex()
value = '9000'
env_var = f'{ENV_OPTION_PREFIX}{builder.name}_{option}'.upper()
assert builder.get_env_option(option) == ''
os.environ[env_var] = value
try:
assert builder.get_env_option(option) == value
finally:
del os.environ[env_var]
assert builder.get_env_option(option) == ''
def test_required_methods(tmp_path):
# This is just for code coverage
builder = MockExtensionModules('foo', str(tmp_path), {}, {})
builder.inputs()
builder.outputs()
builder.generate_inputs({})
builder.generate_outputs({})
| ofek/extensionlib | tests/test_interface.py | test_interface.py | py | 1,371 | python | en | code | 19 | github-code | 36 |
70212652263 | import keras
import keras_cv
import keras_core as keras
import tensorflow as tf
images = tf.ones(shape=(1, 512, 512, 3))
labels = {
"boxes": [
[
[0, 0, 100, 100],
[100, 100, 200, 200],
[300, 300, 100, 100],
]
],
"classes": [[1, 1, 1]],
}
model = keras_cv.models.YOLOV8Detector(
num_classes=20,
bounding_box_format="xywh",
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_m_backbone_coco"
),
fpn_depth=2.
)
# Evaluate model
model(images)
# Get predictions using the model
model.predict(images)
# Train model
model.compile(
classification_loss='binary_crossentropy',
box_loss='ciou',
optimizer=tf.optimizers.SGD(global_clipnorm=10.0),
jit_compile=False,
)
model.fit(images, labels) | kevinmccall/cs4 | finalproject/kerastest.py | kerastest.py | py | 804 | python | en | code | 0 | github-code | 36 |
9491434540 | import tests.hakoblog # noqa: F401
from hakoblog.db import DB
from hakoblog.loader.user import UserLoader
from hakoblog.action.blog import BlogAction
from hakoblog.loader.blog import BlogLoader
from tests.util import random_string, create_user, global_user
def test_create():
db = DB()
user = create_user()
title = random_string(10)
blog_id = BlogAction.create(
db,
owner_id=user.id,
title=title,
)
found_blog = BlogLoader.find_by_id(db, blog_id)
assert found_blog.id == blog_id
assert found_blog.owner_id == user.id
assert found_blog.title == title
def test_ensure_global_blog_created():
db = DB()
with global_user(random_string(10)) as global_user_name:
assert UserLoader.find_by_name(db, global_user_name) is None
blog = BlogAction.ensure_global_blog_created(db)
found_user = UserLoader.find_by_name(db, global_user_name)
assert blog.owner_id == found_user.id
# Check no exeception raises
blog_again = BlogAction.ensure_global_blog_created(db)
assert blog_again.id == blog.id
| hakobe/hakoblog-python | tests/action/test_blog.py | test_blog.py | py | 1,116 | python | en | code | 10 | github-code | 36 |
36558187570 | import heapq
from typing import List
def topKFrequent(nums: List[int], k: int) -> List[int]: # Verified on Leetcode
frequencies = {}
for num in nums:
if num not in frequencies:
frequencies[num] = 1
else:
frequencies[num] += 1
temp = []
for num, f in frequencies.items():
temp.append((f, num))
min_heap = temp[:k]
heapq.heapify(min_heap)
for item in temp[k:]:
if item[0] > min_heap[0][0]:
heapq.heapreplace(min_heap, item)
return list(map(lambda x: x[1], min_heap))
if __name__ == "__main__":
print(topKFrequent([1, 1, 1, 2, 2, 3], 2)) | InderdeepSync/grokking-coding-interview | top_k_elements/top_k_frequent_elements.py | top_k_frequent_elements.py | py | 646 | python | en | code | 1 | github-code | 36 |
25651671407 |
from typing import Iterable, Tuple, TypeVar, Callable, Any, List, Dict, Union
import math
import numpy as np
import os.path
import torch
import torchaudio
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import warnings
import pandas as pd
import plots
from utils import validate_audio
# Useful references for the dataloading using iterable datasets:
# https://medium.com/speechmatics/how-to-build-a-streaming-dataloader-with-pytorch-a66dd891d9dd
# https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset
# https://discuss.pytorch.org/t/example-for-torch-utils-data-iterabledataset/101175/13
# https://github.com/pytorch/pytorch/issues/13246#issuecomment-905703662
# https://discuss.pytorch.org/t/implementing-an-infinite-loop-dataset-dataloader-combo/35567/4
def interpolate(x, ratio):
    """Upsample predictions along the time axis by frame repetition.

    Each of the `time_steps` frames is duplicated `ratio` consecutive
    times so the output matches the temporal resolution of the targets.

    :param x: Tensor of shape (batch_size, time_steps, class_num).
    :param ratio: Integer upsampling factor.
    :return: Tensor of shape (batch_size, time_steps * ratio, class_num).
    """
    batch, frames, n_classes = x.shape  # keep the 3-D input contract explicit
    # Insert a repeat axis, broadcast it to `ratio`, then fold it back into time.
    widened = x.unsqueeze(2).expand(batch, frames, ratio, n_classes)
    return widened.reshape(batch, frames * ratio, n_classes)
def convert_output_format_polar_to_cartesian(in_dict):
    """Convert per-frame event lists from polar angles to unit-sphere cartesian.

    :param in_dict: dict mapping frame index -> list of events
        [class, track, azimuth_deg, elevation_deg].
    :return: dict mapping frame index -> list of events
        [class, track, x, y, z] on the unit sphere.
    """
    out_dict = {}
    for frame_cnt, events in in_dict.items():
        converted = []
        for event in events:
            azi_rad = event[2] * np.pi / 180
            ele_rad = event[3] * np.pi / 180.
            # cos(elevation) is the length of the unit vector projected onto the XY plane.
            xy_proj = np.cos(ele_rad)
            converted.append([event[0], event[1],
                              np.cos(azi_rad) * xy_proj,
                              np.sin(azi_rad) * xy_proj,
                              np.sin(ele_rad)])
        out_dict[frame_cnt] = converted
    return out_dict
def _read_audio(fname: str, directory_root: str, resampler: Union[torch.nn.Sequential, None], trim_seconds: int = -1) -> Tuple[torch.Tensor, int, float]:
    """Load one wav file, optionally trimmed and resampled.

    :param fname: Path of the wav file, relative to directory_root.
    :param directory_root: Dataset root directory.
    :param resampler: Optional torch module applied to the loaded waveform
        (e.g. a torchaudio resampling transform); None to skip.
    :param trim_seconds: How many seconds to load; -1 loads the full file.
    :return: (audio tensor [channels, samples] as float32,
              sample rate of the file on disk,
              duration in seconds of the loaded, pre-resampling audio).
    """
    fpath = os.path.join(directory_root,
                         fname)
    metadata = torchaudio.info(fpath)
    # torchaudio.load treats num_frames=-1 as "read everything"; otherwise
    # convert the requested seconds to frames at the file's native rate.
    num_frames = trim_seconds if trim_seconds == -1 else trim_seconds * metadata.sample_rate
    this_audio, fs = torchaudio.load(fpath, num_frames=num_frames)
    duration_seconds = this_audio.shape[-1] / fs
    assert validate_audio(this_audio), f'ERROR: {fname} audio is not valid.'
    if resampler is not None:
        this_audio = resampler(this_audio)
    # NOTE(review): fs and duration_seconds describe the file *before*
    # resampling — confirm callers expect that.
    # torchaudio.load already returns a Tensor, so use .to() instead of
    # torch.tensor(tensor), which copies the data and emits a UserWarning.
    return this_audio.to(torch.float), fs, duration_seconds
def _read_time_array(fname: str, directory_root: str) -> List:
    """Load the complete metadata event array for one wav file.

    The csv path is derived from the wav path by mapping the 'mic'/'foa'
    folder name to 'metadata' and the 'wav' extension to 'csv'. This is
    the raw, unparsed event list covering the whole recording.

    :param fname: Path of the wav file, relative to directory_root.
    :param directory_root: Dataset root directory.
    :return: numpy array with one metadata row per event.
    """
    audio_path = os.path.join(directory_root,
                              fname)
    csv_path = (audio_path.replace('mic', 'metadata')
                          .replace('foa', 'metadata')
                          .replace('wav', 'csv'))
    return pd.read_csv(csv_path, header=None).values
def load_output_format_file(fname: str, directory_root: str):
    """
    Adapted from the official DCASE baseline.
    Loads a DCASE output-format csv file and returns it in dictionary form.

    The csv path is derived from the wav path by mapping the 'mic'/'foa'
    folder name to 'metadata' and the 'wav' extension to 'csv'.

    :param fname: Path of the wav file, relative to directory_root.
    :param directory_root: Dataset root directory.
    :return: dict mapping frame index -> list of events, each event being
        [class, track, azimuth, elevation] for 5-column (polar) rows, or
        [class, track, x, y, z] for 6-column (cartesian) rows.
    """
    fpath = os.path.join(directory_root,
                         fname)
    fpath_csv = fpath.replace('mic', 'metadata').replace('foa', 'metadata').replace('wav', 'csv')
    _output_dict = {}
    # 'with' guarantees the handle is closed even if a malformed row raises.
    with open(fpath_csv, 'r') as _fid:
        for _line in _fid:
            _words = _line.strip().split(',')
            if not _words[0]:
                continue  # tolerate blank lines instead of crashing on int('')
            _frame_ind = int(_words[0])
            if _frame_ind not in _output_dict:
                _output_dict[_frame_ind] = []
            if len(_words) == 5:  # polar coordinates
                _output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4])])
            elif len(_words) == 6:  # cartesian coordinates
                _output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4]), float(_words[5])])
    return _output_dict
def _add_rotated_label_each_frame(label, time_array4frame_event, start_frame, rotation_pattern=None):
    """Write one event's (x, y, z) DOA unit vector into the label tensor. (From Sony.)

    The event's azimuth/elevation (degrees) are optionally augmented by
    `rotation_pattern` = [azi_reflection, azi_rotation, ele_reflection],
    converted to a cartesian unit vector, and written into 10 consecutive
    label frames starting at `start_frame` for the event's class.

    :param label: np.ndarray of shape (3, num_classes, num_frames), modified in place.
    :param time_array4frame_event: one metadata row [frame, class, track, azimuth, elevation].
    :param start_frame: first label frame to write.
    :param rotation_pattern: optional augmentation triple; falsy means identity.
    :return: the (mutated) label array.
    """
    cls = time_array4frame_event[1]
    azi = time_array4frame_event[3] / 180 * np.pi
    ele = time_array4frame_event[4] / 180 * np.pi
    azi_refl, azi_rot, ele_refl = rotation_pattern if rotation_pattern else (1, 0, 1)
    azi = azi_refl * azi + azi_rot
    ele = ele_refl * ele
    frames = slice(start_frame, start_frame + 10)
    label[0, cls, frames] = np.cos(ele) * np.cos(azi)
    label[1, cls, frames] = np.cos(ele) * np.sin(azi)
    label[2, cls, frames] = np.sin(ele)
    return label
def _get_labels(time_array, start_sec, fs, chunk_size_audio, rotation_pattern=None, multi_track=False, num_classes=12):
    """
    Build the frame-wise XYZ activity label tensor for one audio chunk.

    Metadata rows in `time_array` follow the DCASE format:
    [frame number (int)], [active class index (int)], [track number index (int)], [azimuth (int)], [elevation (int)]
    Frame, class, and track enumeration begins at 0. Frames correspond to a temporal resolution of 100msec.
    Azimuth and elevation angles are given in degrees, rounded to the closest integer value, with azimuth and
    elevation being zero at the front, azimuth ϕ∈[−180∘,180∘], and elevation θ∈[−90∘,90∘]. Note that the azimuth
    angle is increasing counter-clockwise (ϕ=90∘ at the left).

    :param time_array: metadata rows for the whole file (np.ndarray, format above).
    :param start_sec: chunk start inside the file, in seconds.
    :param fs: audio sampling rate in Hz.
    :param chunk_size_audio: chunk length in audio samples (chunk_size_audio / fs = seconds).
    :param rotation_pattern: optional [azi_reflection, azi_rotation, ele_reflection]
        augmentation, forwarded to _add_rotated_label_each_frame.
    :param multi_track: if True, labels are kept separate per overlapping source and
        stacked on a leading axis -- marked "not ready" below; use with caution.
    :param num_classes: number of sound event classes.
    :return: np.ndarray of shape (3, num_classes, num_frame), or
        (3, 3, num_classes, num_frame) when multi_track is True.
    """
    # This 100 is the sampling frequency of the labels
    # And the 10 for index_diff stuff, is the desired sampling frequency, to match the spectrograms.
    # So the spectrograms use a step_size = 240, with fs = 24000, which is 10 ms
    # Therefore, here they have 100 / 10 = 10
    # My intuition is that a different step_size, would require to change this
    # TODO Is this really ok? Needs verification
    num_axis = 3  # X, Y, Z
    num_class = num_classes
    num_frame = round(chunk_size_audio / fs * 100) + 1  # Each frame == 100 ms (0.1 seconds)
    label = np.zeros([num_axis, num_class, num_frame])
    end_sec = start_sec + chunk_size_audio / fs
    # Fractional part of start_sec at 0.1 s resolution; used later to crop the
    # wide buffer so the labels align with the exact chunk start.
    index_diff = int(math.modf(start_sec * 10)[0] * 10)  # get second decimal place
    num_frame_wide = (int(np.ceil(end_sec * 10)) - int(np.floor(start_sec * 10)) + 1) * 10
    # "+ 1" is buffer for numerical error, such as index_diff=3 and num_frame_wide=130
    if not multi_track:
        # Single-track: all simultaneous events share one (3, class, frame) buffer.
        label_wide = np.zeros([num_axis, num_class, num_frame_wide])
        # Iterate over 100 ms metadata frames overlapping the chunk; each one
        # fills 10 label frames (10 ms resolution) in the wide buffer.
        for index, frame in enumerate(range(int(np.floor(start_sec * 10)), int(np.ceil(end_sec * 10)))):
            # Metadata rows belonging to this 100 ms frame.
            time_array4frame = time_array[time_array[:, 0] == frame]
            # NOTE: frames with more than 3 simultaneous events fall through all
            # branches and are silently skipped (same below for multi-track).
            if time_array4frame.shape == (1, 5):
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[0], index * 10,
                                                           rotation_pattern)
            elif time_array4frame.shape == (2, 5):
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[0], index * 10,
                                                           rotation_pattern)
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[1], index * 10,
                                                           rotation_pattern)
            elif time_array4frame.shape == (3, 5):
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[0], index * 10,
                                                           rotation_pattern)
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[1], index * 10,
                                                           rotation_pattern)
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[2], index * 10,
                                                           rotation_pattern)
        # Crop the wide buffer to the exact chunk boundaries.
        label = label_wide[:, :, index_diff: index_diff + num_frame]
    else:
        # TODO This is not ready
        # Multi-track: one buffer per simultaneous source, so overlapping events
        # of the same class are kept apart instead of overwriting each other.
        label_wide_1 = np.zeros([num_axis, num_class, num_frame_wide])
        label_wide_2 = np.zeros([num_axis, num_class, num_frame_wide])
        label_wide_3 = np.zeros([num_axis, num_class, num_frame_wide])
        for index, frame in enumerate(range(int(np.floor(start_sec * 10)), int(np.ceil(end_sec * 10)))):
            time_array4frame = time_array[time_array[:, 0] == frame]
            if time_array4frame.shape == (1, 5):
                label_wide_1 = _add_rotated_label_each_frame(label_wide_1, time_array4frame[0], index * 10,
                                                             rotation_pattern)
            elif time_array4frame.shape == (2, 5):
                label_wide_1 = _add_rotated_label_each_frame(label_wide_1, time_array4frame[0], index * 10,
                                                             rotation_pattern)
                label_wide_2 = _add_rotated_label_each_frame(label_wide_2, time_array4frame[1], index * 10,
                                                             rotation_pattern)
            elif time_array4frame.shape == (3, 5):
                label_wide_1 = _add_rotated_label_each_frame(label_wide_1, time_array4frame[0], index * 10,
                                                             rotation_pattern)
                label_wide_2 = _add_rotated_label_each_frame(label_wide_2, time_array4frame[1], index * 10,
                                                             rotation_pattern)
                label_wide_3 = _add_rotated_label_each_frame(label_wide_3, time_array4frame[2], index * 10,
                                                             rotation_pattern)
        # Stack the per-track crops on a new leading axis: (3 tracks, 3 axes, class, frame).
        label = np.stack((
            label_wide_1[:, :, index_diff: index_diff + num_frame],
            label_wide_2[:, :, index_diff: index_diff + num_frame],
            label_wide_3[:, :, index_diff: index_diff + num_frame]
        ))
    return (label)
def _read_fnames(directory_root: str, list_dataset: str) -> List:
    """Read the wav filenames listed in a dataset split file.

    Each line of `<directory_root>/list_dataset/<list_dataset>` names one wav
    file of the split. Filenames are returned relative to directory_root,
    stripping a redundant "<dataset_dir>/" prefix when present. No audio or
    labels are loaded here; this only prepares the file list.

    :param directory_root: Dataset root directory.
    :param list_dataset: Filename of the split list inside 'list_dataset/'.
    :return: list of wav paths relative to directory_root.
    """
    fpath = os.path.join(directory_root,
                         'list_dataset',
                         list_dataset)
    # Loop-invariant: the dataset directory name, used to strip absolute-ish prefixes.
    parent_dir = directory_root.split('/')[-1] + '/'
    fnames = []
    for fname in pd.read_table(fpath, header=None).values.tolist():
        # pd.read_table yields one single-element list per row.
        if isinstance(fname, list):  # builtin check; isinstance(x, typing.List) is deprecated
            fname = fname[0]
        if parent_dir in fname:
            fname = fname.replace(parent_dir, '')
        fnames.append(fname)
    return fnames
def get_adpit_labels_for_file(_desc_file: Dict, _nb_label_frames: int, num_classes: int = 13) -> np.ndarray:
    """
    ADAPTED from cls_feature_class of the baseline, with modifications to remove the dependency to the class.
    Reads a description file and returns classification based SED labels and regression based DOA labels
    for multi-ACCDOA with Auxiliary Duplicating Permutation Invariant Training (ADPIT).

    :param _desc_file: metadata description, mapping frame index -> list of active events,
                       where each event is indexable as (class_id, track_id, x, y, z)
    :param _nb_label_frames: number of label frames to generate
    :param num_classes: number of sound event classes
    :return: label_mat: of dimension [nb_frames, 6, 4(=act+XYZ), max_classes]
    """
    se_label = np.zeros((_nb_label_frames, 6, num_classes))  # [nb_frames, 6, max_classes]
    x_label = np.zeros((_nb_label_frames, 6, num_classes))
    y_label = np.zeros((_nb_label_frames, 6, num_classes))
    z_label = np.zeros((_nb_label_frames, 6, num_classes))

    def _write_group(frame_ind, events):
        """Write a group of same-class events into their ADPIT track slots.

        Slot layout along the 2nd axis: 0 = single event (a0); 1-2 = two
        overlapping events (b0, b1); 3-5 = three overlapping events (c0-c2).
        Only the first three events of a group are kept, as in the baseline.
        """
        if len(events) == 1:
            slots = (0,)
        elif len(events) == 2:
            slots = (1, 2)
        else:
            slots = (3, 4, 5)
        for slot, event in zip(slots, events):
            se_label[frame_ind, slot, event[0]] = 1
            x_label[frame_ind, slot, event[0]] = event[2]
            y_label[frame_ind, slot, event[0]] = event[3]
            z_label[frame_ind, slot, event[0]] = event[4]

    for frame_ind, active_event_list in _desc_file.items():
        if frame_ind < _nb_label_frames:
            active_event_list.sort(key=lambda x: x[0])  # sort for ov from the same class
            active_event_list_per_class = []
            for i, active_event in enumerate(active_event_list):
                active_event_list_per_class.append(active_event)
                # Flush the accumulated group when this is the last event overall,
                # or when the next event belongs to a different class.
                if i == len(active_event_list) - 1 or active_event[0] != active_event_list[i + 1][0]:
                    _write_group(frame_ind, active_event_list_per_class)
                    active_event_list_per_class = []

    label_mat = np.stack((se_label, x_label, y_label, z_label), axis=2)  # [nb_frames, 6, 4(=act+XYZ), max_classes]
    return label_mat
def get_labels_for_file(_desc_file, _nb_label_frames, num_classes: int = 13):
    """
    ADAPTED from cls_feature_class of the baseline, with modifications to remove the dependency to the class.
    Reads a description file and returns single-track ACCDOA labels, where class
    activity is encoded as the norm of the (x, y, z) DOA vector.

    :param _desc_file: metadata description, mapping frame index -> list of active events,
                       where each event is indexable as (class_id, track_id, x, y, z)
    :param _nb_label_frames: number of label frames (100 ms resolution) to generate
    :param num_classes: number of sound event classes
    :return: np.ndarray (float32) of shape [3(=XYZ), num_classes, _nb_label_frames * 10],
             i.e. upsampled 10x along time (100 ms -> 10 ms frames)
    """
    x_label = np.zeros((_nb_label_frames, num_classes))
    y_label = np.zeros((_nb_label_frames, num_classes))
    z_label = np.zeros((_nb_label_frames, num_classes))

    for frame_ind, active_event_list in _desc_file.items():
        if frame_ind < _nb_label_frames:
            for active_event in active_event_list:
                x_label[frame_ind, active_event[0]] = active_event[2]
                y_label[frame_ind, active_event[0]] = active_event[3]
                z_label[frame_ind, active_event[0]] = active_event[4]

    # Reformat as ACCDOA: [3(=XYZ), num_classes, nb_frames].
    # float32 matches the dtype of the downstream audio features.
    output = np.stack([x_label, y_label, z_label]).transpose(0, 2, 1).astype(np.float32)
    output = torch.from_numpy(output)

    # Normalize DOA vectors to unit norm; inactive frames (zero vectors) stay zero.
    norm = torch.linalg.vector_norm(output, ord=2, dim=-3)
    output = output / (norm + 1e-10)
    if torch.any(torch.isnan(output)):
        raise ValueError('ERROR: NaNs in the output labels')

    # Upsample labels from 100 ms to 10 ms frames to match the audio features.
    # TODO This is better than interpolation, but still gets bad when the output size is large.
    output = torch.repeat_interleave(output, 10, dim=-1)
    return output.numpy()
def _random_slice(audio: torch.Tensor, fs: int, chunk_size_audio: float, trim_wavs: int, clip_length_seconds: int = 60) \
-> Tuple[torch.Tensor, int]:
"""Returns a random slice of an audio and the corresponding starting time in sencods (useful to extract labels) """
# Now we do it in seconds
if trim_wavs > 0:
star_min_sec, start_max_sec = 2, math.floor(trim_wavs - (chunk_size_audio/fs + 2))
else:
star_min_sec, start_max_sec = 0, math.floor(clip_length_seconds - chunk_size_audio/fs)
if star_min_sec == start_max_sec:
start_sec = star_min_sec
else:
start_sec = np.round(np.random.randint(star_min_sec,
min((audio.shape[-1] - chunk_size_audio / 2) / fs, start_max_sec),
1))[0]
start_index = start_sec * fs
sliced_audio = audio[:, start_index: start_index + round(chunk_size_audio)]
return sliced_audio, start_sec
def _fixed_slice(audio: torch.Tensor, fs: int, chunk_size_audio: float) -> Tuple[torch.Tensor, int]:
"""Returns a fixed slice of an audio and its corresponding time array (label)"""
start_sec = 5 # Hardcoded start at 5 seconds
start_sample = start_sec * fs
sliced_audio = audio[:, start_sample : int(start_sample + chunk_size_audio)]
return sliced_audio, start_sec
class resampler(nn.Sequential):
    """Nearest-neighbour resampling module wrapping ``F.interpolate``."""

    def __init__(self, scale_factor=(1, 0.1)):
        super().__init__()
        # Scale factors for the spatial dims, e.g. keep classes, downsample time 10x.
        self.scale_factor = scale_factor

    def forward(self, input):
        return nn.functional.interpolate(input, scale_factor=self.scale_factor, mode='nearest')
class InfiniteDataLoader(DataLoader):
    '''DataLoader that keeps yielding batches even after the dataset is exhausted.

    Useful when the dataset's ``__getitem__`` returns a random slice, so the
    notion of an "epoch" is replaced by an endless stream of batches.

    Ref:
        https://gist.github.com/MFreidank/821cc87b012c53fade03b0c7aba13958
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep a live iterator over the dataset; refreshed whenever it runs out.
        self.dataset_iterator = super().__iter__()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.dataset_iterator)
        except StopIteration:
            # Dataset exhausted: start over with a fresh iterator.
            self.dataset_iterator = super().__iter__()
            return next(self.dataset_iterator)
class DCASE_SELD_Dataset(Dataset):
    """Dataset for the DCASE SELD (Task3), supports version 2021 and 2022.

    This dataset first loads all the audio and labels to memory.
    In the getitem, it returns a slice, from wavs.
    This dataset is a map dataset, so each "epoch" will see each wav file only once.
    But the slice of each wav can be randomly selected.
    The audios and labels are stored in memory, but the slices are computed at runtime.

    Parameters:
        directory_root - Path to the directory that contains 'foa_dev', 'metadata', 'list_dataset'
        list_dataset - File with the wav filenames that we want to load. Filenames are relative to directory_root.
        trim_wavs - Trim wavs to this number of seconds when loading audio, so we load shorter wavs.
        chunk_size - Size of the chunks (slices) returned in getitem. In samples.
        chunk_mode - {'full', 'fixed', 'random'} Where the getitem:
            - full - Returns the full wav and labels. Useful for validation, and to compute statistics.
            - fixed - Returns a slice at fixed start time of each wav. Useful for debugging.
            - random - Returns a random slice each time.
        return_fname - Returns fname during the getitem
        multi_track - Enables multi-track ACCDOA for the labels
        num_classes - Number of sound event classes in the labels.
        ignore_labels - Use this to avoid returning labels in the get item. Useful for evaluation mode when there are no labels.
        labels_backend - Method to extract the labels. Currently baseline works best, as it is based on the official baseline code.
        pad_labels - Pads the labels by one extra frame, to match the frame count of padded spectrograms.
    """
    def __init__(self,
                 directory_root: str = './data/',
                 list_dataset: str = 'dcase2021t3_foa_overfit_vrgpu.txt',
                 trim_wavs: float = -1,  # in seconds
                 chunk_size: int = 48000,  # in samples
                 chunk_mode: str = 'fixed',
                 return_fname: bool = False,
                 multi_track: bool = False,
                 num_classes: int = 13,
                 ignore_labels: bool = False,
                 labels_backend: str = 'sony',
                 pad_labels: bool = True):
        super().__init__()
        self.directory_root = directory_root
        self.list_dataset = list_dataset  # list of wav filenames , e.g. './data_dcase2021_task3/foa_dev/dev-val/fold5_room1_mix001.wav'
        self.chunk_size_audio = chunk_size
        self.chunk_mode = chunk_mode
        self.trim_wavs = trim_wavs  # Trims the inputs wavs to the selected length in seconds
        self.return_fname = return_fname
        self.multi_track = multi_track
        self.num_classes = num_classes
        self.ignore_labels = ignore_labels  # This is to avoid loading labels. Useful when doing evaluation.
        self.labels_backend = labels_backend  # Code to use when extracting labels from CSVs. For multitrack, we need the baseline. {'sony', 'baseline'}
        self.pad_labels = pad_labels  # This is just to take into account that spectrograms will pad. Use when backend == baseline, and model = CRNN
        self.resampler = None

        if self.multi_track and self.labels_backend == 'sony':
            warnings.warn('WARNING: When using multi-track labels, we should use the baseline back end.')

        self._fnames = []
        self._audios = {}          # fname -> full waveform tensor
        self.durations = {}        # fname -> duration in seconds
        self._fs = {}  # Per wav
        self._time_array_dict = {}  # Per wav

        # Load full wavs and time_arrays to memory
        self._fnames = _read_fnames(directory_root=self.directory_root, list_dataset=self.list_dataset)
        for fname in self._fnames:
            audio, fs, duration = _read_audio(fname=fname, directory_root=self.directory_root,
                                              resampler=self.resampler, trim_seconds=self.trim_wavs)
            if not self.ignore_labels:
                if self.labels_backend == 'sony':
                    # Sony backend keeps the raw event list; labels are rendered in __getitem__.
                    time_array = _read_time_array(fname=fname, directory_root=self.directory_root)
                elif self.labels_backend == 'baseline':
                    # Baseline backend renders the full label tensor up front.
                    time_array = load_output_format_file(fname=fname, directory_root=self.directory_root)
                    time_array = convert_output_format_polar_to_cartesian(time_array)
                    if self.multi_track:
                        # NOTE(review): multi-track uses duration * 100 frames while the
                        # single-track path uses duration * 10 — confirm this is intended.
                        time_array = get_adpit_labels_for_file(_desc_file=time_array, _nb_label_frames=math.ceil(duration * 100),
                                                               num_classes=self.num_classes)
                    else:
                        time_array = get_labels_for_file(_desc_file=time_array, _nb_label_frames=math.ceil(duration * 10), num_classes=num_classes)
                self._time_array_dict[fname] = time_array
            self._audios[fname] = audio
            self._fs[fname] = fs
            self.durations[fname] = duration
        self.__validate()
        print(self)

    def __validate(self):
        """Sanity check that all per-wav dictionaries are consistent with the fname list."""
        assert len(self._fnames) == len(self._audios), 'Fnames and audios should have the same count'
        assert len(self._fnames) == len(self.durations), 'Fnames and durations should have the same count'
        assert len(self._fnames) == len(self._fs), 'Fnames and fs should have the same count'
        if not self.ignore_labels:
            assert len(self._fnames) == len(self._time_array_dict), 'Fnames and time_arrays should have the same count'

    def __len__(self):
        return len(self._fnames)

    def get_fnames(self):
        """Return the list of wav filenames loaded by this dataset."""
        return self._fnames

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of unique wav files : {}\n'.format(len(self._fnames))
        fmt_str += '    Root Location: {}\n'.format(self.directory_root)
        fmt_str += '    List of files: {}\n'.format(self.list_dataset)
        fmt_str += '    Chunk size: {}\n'.format(self.chunk_size_audio)
        fmt_str += '    Chunk Mode: {}\n'.format(self.chunk_mode)
        fmt_str += '    Trim audio: {}\n'.format(self.trim_wavs)
        fmt_str += '    Multi_track: {}\n'.format(self.multi_track)
        fmt_str += '    Ignore labels: {}\n'.format(self.ignore_labels)
        fmt_str += '    Labels Backend: {}\n'.format(self.labels_backend)
        return fmt_str

    def __getitem__(self, item):
        """Return (audio_slice, labels[, fname]) for the wav at index ``item``."""
        fname = self._fnames[item]
        audio = self._audios[fname]
        fs = self._fs[fname]
        duration = self.durations[fname]
        if not self.ignore_labels:
            time_array = self._time_array_dict[fname]
        else:
            time_array = None

        # Select a slice
        # NOTE(review): if chunk_mode is not one of {'fixed', 'random', 'full'},
        # start_sec and labels_duration are undefined — confirm callers validate it.
        if self.chunk_mode == 'fixed':
            audio, start_sec = _fixed_slice(audio, fs, chunk_size_audio=self.chunk_size_audio)
            labels_duration = self.chunk_size_audio
        elif self.chunk_mode == 'random':
            audio, start_sec = _random_slice(audio, fs, chunk_size_audio=self.chunk_size_audio, trim_wavs=self.trim_wavs, clip_length_seconds=duration)
            labels_duration = self.chunk_size_audio
        elif self.chunk_mode == 'full':
            start_sec = 0
            labels_duration = audio.shape[-1]
        if not self.ignore_labels:
            if self.labels_backend == 'sony':
                label = _get_labels(time_array, start_sec=start_sec, fs=fs, chunk_size_audio=labels_duration, rotation_pattern=None,
                                    multi_track=self.multi_track, num_classes=self.num_classes)
            elif self.labels_backend == 'custom':
                label = _get_labels_custom(time_array, start_sec=start_sec, fs=fs, chunk_size_audio=labels_duration, num_classes=self.num_classes)
            else:
                # Baseline backend: labels were pre-rendered in __init__, just slice them.
                if not self.multi_track:
                    # Labels are at 10 ms frames here (100 frames per second).
                    start_frame = int(start_sec) * 10
                    end_frame = start_frame + round(labels_duration / fs * 100)
                    if self.pad_labels:
                        # One extra frame to match the frame count of a padded spectrogram.
                        end_frame += 1
                    label = time_array[..., start_frame: end_frame]
                else:
                    # TODO Hardcoded fs for labels at 100 ms
                    start_frame = int(start_sec) * 10
                    end_frame = start_frame + math.ceil(labels_duration / fs * 100) + 1
                    if end_frame > time_array.shape[0]:
                        # Requested slice runs past the label array: zero-pad at the end.
                        label = np.concatenate([time_array, np.zeros([end_frame - start_frame - time_array.shape[0], *time_array.shape[1:]])], axis=0)
                    else:
                        label = time_array[start_frame: end_frame, ...]
                        if label.shape[0] < end_frame - start_frame:
                            label = np.concatenate([label, np.zeros([end_frame - start_frame - label.shape[0], *label.shape[1:]])], axis=0)
        else:
            # Evaluation mode without labels: return a dummy placeholder.
            label = np.empty(1)

        if self.return_fname:
            return audio, torch.from_numpy(label.astype(np.float32)), fname
        else:
            return audio, torch.from_numpy(label.astype(np.float32))
def test_dataset_train_iteration(num_iters=100, batch_size=32, num_workers=4):
    """Manual test of a typical train iteration with the map dataset and the infinite dataloader.

    The main idea is that we don't have epochs, but iterations.
    This supports batching, and multiple workers.
    Each "epoch" samples each wav only once, but with the infinite dataloader we iterate forever.
    Requires the hardcoded cluster dataset path below; plots slice counts and wav durations.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    from itertools import islice

    dataset_train = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                       list_dataset='dcase2022_devtrain_all.txt',
                                       chunk_size=int(24000 * 1.27),
                                       chunk_mode='random',
                                       trim_wavs=-1,
                                       return_fname=True)
    loader_train = InfiniteDataLoader(dataset_train, batch_size=batch_size, num_workers=num_workers, shuffle=True, drop_last=True)

    # Begin iterating; count how often each wav appears across batches.
    ctr = 0
    ctr_fnames = {}
    for (x_batch, y_batch, fnames) in islice(loader_train, num_iters):
        if ctr < 5:
            print(f'iter: {ctr}')
            print(f'x_batch.shape: {x_batch.shape}')
            print(f'y_batch.shape: {y_batch.shape}')
            print(torch.mean(x_batch, dim=(-1, -2)))
        for fname in fnames:
            if fname in ctr_fnames:
                ctr_fnames[fname] += 1
            else:
                ctr_fnames[fname] = 1
        ctr += 1
        if ctr > 5:
            break

    # Display counter of how many times each wav was sliced
    print(f'There are {len(ctr_fnames)} unique fnames.')
    f, ax = plt.subplots(figsize=(10, 15))
    df = pd.DataFrame(list(ctr_fnames.items()))
    df.columns = ['fname', 'count']
    sns.barplot(x="count", y="fname", data=df,
                label="count", color="b")
    sns.despine(left=True, bottom=True)
    plt.show()

    # Display wav durations
    f, ax = plt.subplots(figsize=(10, 15))
    df = pd.DataFrame(list(dataset_train.durations.items()))
    df.columns = ['fname', 'duration']
    sns.barplot(x="duration", y="fname", data=df,
                label="duration", color="b")
    sns.despine(left=True, bottom=True)
    plt.show()
def _get_padders(chunk_size_seconds: float = 1.27,
duration_seconds: float = 60.0,
overlap: float = 0.5,
audio_fs=24000, labels_fs=100):
# Wavs:
fs = audio_fs
audio_full_size = fs * duration_seconds
audio_chunk_size = round(fs * chunk_size_seconds)
###audio_pad_size = math.ceil(audio_full_size / audio_chunk_size) + math.ceil(audio_fs / labels_fs * 1)
audio_pad_size = (math.ceil(audio_full_size / audio_chunk_size) * audio_chunk_size) - audio_full_size
audio_padder = nn.ConstantPad1d(padding=(0, audio_pad_size), value=0.0)
audio_step_size = math.floor(audio_chunk_size * overlap)
# Labels:
labels_fs = labels_fs # 100 --> 10 ms
labels_full_size = labels_fs * duration_seconds
labels_chunk_size = round(labels_fs * chunk_size_seconds) + 1
labels_pad_size = math.ceil(labels_full_size / labels_chunk_size) * labels_chunk_size - labels_full_size
labels_padder = nn.ConstantPad2d(padding=(0, labels_pad_size, 0, 0), value=0.0)
labels_step_size = math.ceil(labels_chunk_size * overlap)
# Additional padding, in case the labels are shorter than the audio
while True:
#num_chunks_audio = math.ceil(audio_full_size / audio_chunk_size)
#num_chunks_labels = math.ceil(labels_full_size / labels_chunk_size)
num_chunks_audio = (audio_full_size + audio_pad_size) / audio_chunk_size
num_chunks_labels = (labels_full_size + labels_pad_size) / labels_chunk_size
if num_chunks_labels < num_chunks_audio:
labels_pad_size += labels_chunk_size
labels_padder = nn.ConstantPad2d(padding=(0, labels_pad_size, 0, 0), value=0.0)
else:
break
audio_padding = {'padder': audio_padder,
'chunk_size': audio_chunk_size,
'hop_size': audio_step_size,
'full_size': audio_full_size}
labels_padding = {'padder': labels_padder,
'chunk_size': labels_chunk_size,
'hop_size': labels_step_size,
'full_size': labels_full_size}
return audio_padding, labels_padding
def test_validation_clean():
    """Manual test of the validation flow over full-length wavs.

    The idea is to iterate the full wavs to get the predictions:
    we get full-length audio and labels from the dataset, split them into
    chunks manually, then iterate over wavs using a dataloader for each one.
    Other useful functions: torch.chunk, torch.split.
    """
    batch_size = 32  # This depends on GPU memory
    dataset = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                 list_dataset='dcase2022_devtrain_all.txt',
                                 chunk_mode='full',
                                 trim_wavs=-1,
                                 return_fname=True)
    spec = torchaudio.transforms.Spectrogram(
        n_fft=512,
        win_length=512,
        hop_length=240,
    )
    all_labels = []
    print(f'Iterating {len(dataset)} fnames in dataset.')
    for i in range(len(dataset)):
        # Analyze audio in full size
        audio, labels, fname = dataset[i]
        duration = dataset.durations[fname]
        all_labels.append(labels)
        print(f'Full audio:')
        print(audio.shape)
        print(f'Full spec:')
        print(spec(audio).shape)
        print(f'Full labels:')
        print(labels.shape)

        audio_padding, labels_padding = _get_padders(chunk_size_seconds=1.27,
                                                     duration_seconds=math.floor(duration),
                                                     overlap=1,  # Other values e.g. 32/128 are ok,
                                                     audio_fs=24000,
                                                     labels_fs=100)

        # To process audio in GPU, split into chunks (that can be overlapped)
        audio = audio_padding['padder'](audio)
        audio_chunks = audio.unfold(dimension=1, size=audio_padding['chunk_size'],
                                    step=audio_padding['hop_size']).permute((1, 0, 2))
        labels = labels_padding['padder'](labels)
        labels_chunks = labels.unfold(dimension=-1, size=labels_padding['chunk_size'],
                                      step=labels_padding['hop_size']).permute((2,0,1,3))
        print(f'Full padded audio:')
        print(audio.shape)
        print(f'Full padded labels:')
        print(labels.shape)

        tmp = torch.utils.data.TensorDataset(audio_chunks, labels_chunks)
        loader = DataLoader(tmp, batch_size=batch_size, shuffle=False, drop_last=False)  # Loader per wav to get batches
        for ctr, (audio, labels) in enumerate(loader):
            print(f'Processing batch {ctr}')
            outo = spec(audio)
            print(f'Audio shape = {audio.shape}')
            print(f'Spec shape = {outo.shape}')
            print(f'Labels shape = {labels.shape}')
            assert outo.shape[-1] == labels.shape[-1], \
                'Wrong shapes, the spectrogram and labels should have the same number of frames. Check paddings and step size'

    # Analysis of labels
    count_active_classes(all_labels)
    # breaks in wav 43 or 42 with overlap
def test_validation_histograms():
    """Manual test that collects full-length labels and plots class-activity histograms.

    Same flow as test_validation_clean, but without chunking: we only iterate
    the full wavs and gather their labels, which is very useful to analyze the
    label statistics. (Update 15.06.2022)
    """
    batch_size = 32  # This depends on GPU memory
    dataset = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                 list_dataset='dcase2022_devtrain_all.txt',
                                 chunk_mode='full',
                                 trim_wavs=-1,
                                 return_fname=True,
                                 num_classes=13,
                                 multi_track=False)
    spec = torchaudio.transforms.Spectrogram(
        n_fft=512,
        win_length=512,
        hop_length=240,
    )
    all_labels = []
    print(f'Iterating {len(dataset)} fnames in dataset.')
    for i in range(len(dataset)):
        # Analyze audio in full size
        audio, labels, fname = dataset[i]
        all_labels.append(labels)

    # Analysis of labels: whole dataset, then a single wav (which also plots).
    count_active_classes(all_labels)
    count_active_classes(all_labels[0:1])
def count_active_classes(all_labels: List, detection_threshold=0.5):
    """ Useful function to get the histogram of active classes per frames.

    A class is counted as active in a frame when the norm of its DOA vector
    exceeds ``detection_threshold``.

    Tip: Call it with only one label to get the plot.
        count_active_classes(all_labels[0:1])
    """
    import plots
    import matplotlib.pyplot as plt
    import seaborn as sns

    if len(all_labels) == 1:
        # Single wav: also plot the label cross sections.
        plots.plot_labels_cross_sections(all_labels[0], n_classes=list(range(all_labels[0].shape[-2])), plot_cartesian=True)
        plots.plot_labels(all_labels[0], n_classes=list(range(all_labels[0].shape[-2])), savefig=False, plot_cartesian=True)

    all_count_detections = {}
    for i in range(len(all_labels)):
        this_label = all_labels[i]
        # Activity is encoded as the norm of the (x, y, z) vector per class and frame.
        vec_norms = torch.linalg.vector_norm(this_label, ord=2, dim=-3)
        for cls in range(this_label.shape[-2]):
            mask_detected_events = vec_norms[cls, :] > detection_threshold  # detected events for this class
            tmp_events = this_label[..., cls, mask_detected_events]
            this_count_detections = mask_detected_events.nonzero(as_tuple=False)
            if cls in all_count_detections.keys():
                all_count_detections[cls] += len(this_count_detections)
            else:
                all_count_detections[cls] = len(this_count_detections)

    # Bar plot of active-frame counts per class.
    f, ax = plt.subplots(figsize=(10, 15))
    df = pd.DataFrame(list(all_count_detections.items()))
    df.columns = ['class_id', 'count']
    g = sns.barplot(x="class_id", y="count", data=df,
                    label="class_id", color="b")
    sns.despine(left=False, bottom=False)
    #g.set_yscale("log")
    plt.show()
def test_multi_track():
    """ Here I should test (manually):
     - chunk_mode: {'fixed', 'random', 'full'}
     - multi_track: True, False
     - labels_backend: {'sony', 'baseline'}

    Update 21.07.2022. Both backends look good for single ACCDOA. At least they look the same.
    """
    dataset = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                 list_dataset='dcase2022_devtrain_debug.txt',
                                 chunk_mode='full',  # test sony and baseline
                                 chunk_size=30480,  # 30480, 61200, 122640, 144000, with fixed chunk
                                 trim_wavs=-1,
                                 return_fname=True,
                                 num_classes=13,
                                 multi_track=False,  # test sony and baseline
                                 labels_backend='baseline',  # test sony and baseline
                                 pad_labels=True)  # True only for spectrograms
    audio, labels, fname = dataset[0]
    # Multi-track labels have an extra leading track dimension; pick one track to plot.
    if len(labels.shape) > 3:
        this_label = labels[2]
    else:
        this_label = labels
    plots.plot_labels(this_label)
    # Deliberate early stop for this manual test; code after this never runs.
    raise ValueError
    # This still fails when using full wavs, and backend baseline
    # the size is not correct, I guess it is cropping somewhere
    # note that the vanilla multitrack puts all the activity in the first track
    a = 1
def compare_backends():
    """Manually compare the 'sony' and 'baseline' label backends on one wav.

    Compares in both directions: downsampling the sony labels 10x to the
    baseline frame rate, and upsampling the baseline labels 10x to the sony
    frame rate, printing the squared error in each case.
    """
    wav_id = 42
    dataset_sony = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                      list_dataset='dcase2022_devtrain_all.txt',
                                      chunk_mode='full',  # test sony and baseline
                                      chunk_size=30480,
                                      trim_wavs=-1,
                                      return_fname=True,
                                      num_classes=13,
                                      multi_track=False,  # test sony and baseline
                                      labels_backend='sony')  # test sony and baseline
    dataset_baseline = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                          list_dataset='dcase2022_devtrain_all.txt',
                                          chunk_mode='full',  # test sony and baseline
                                          chunk_size=30480,
                                          trim_wavs=-1,
                                          return_fname=True,
                                          num_classes=13,
                                          multi_track=False,  # test sony and baseline
                                          labels_backend='baseline')  # test sony and baseline

    audio_sony, labels_sony, fname_sony = dataset_sony[wav_id]
    audio_base, labels_base, fname_base = dataset_baseline[wav_id]

    # Small nearest-neighbour resampler to bring both label sets to the same frame rate.
    class t_transform(nn.Sequential):
        def __init__(self, scale_factor=(1, 0.1)):
            super().__init__()
            print(f'helloo, {scale_factor}')
            self.scale_factor = scale_factor

        def forward(self, input):
            out = nn.functional.interpolate(input, scale_factor=self.scale_factor, mode='nearest')
            return out

    # Direction 1: downsample sony labels 10x and compare against baseline.
    target_transform = t_transform()
    labels_sony_downsample = target_transform(labels_sony[None, ...])[0]
    labels_sony_padded = torch.zeros_like(labels_base)
    labels_sony_padded[:, :, 0:labels_sony_downsample.shape[-1]] = labels_sony_downsample
    error = (labels_base - labels_sony_padded) ** 2
    print(f'Error = {error.sum()}')

    # Direction 2: upsample baseline labels 10x and compare against sony.
    target_transform2 = t_transform(scale_factor=(1, 10))
    labels_base_padded = target_transform2(labels_base[None, ...])[0]
    labels_sony_padded = torch.zeros_like(labels_base_padded)
    labels_sony_padded[:, :, 0:labels_sony.shape[-1]] = labels_sony
    error = (labels_base_padded - labels_sony_padded) ** 2
    print(f'Error = {error.sum()}')
def compare_backends_no_pad():
    """Manually compare the 'sony' and 'baseline' label backends over a whole dataset.

    Update 22.07.2022
    This seems ok for now, there is a slight mismatch between total length when using the backends,
    and there is a big problem with the sony backend, that it is chopping up some events.
    But for now I can work with the baseline backend.
    """
    # NOTE(review): the two dcase2022 datasets below are immediately shadowed by
    # the dcase2021 datasets that follow — they are loaded and then discarded.
    # Confirm whether this first pair should be removed.
    dataset_sony = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                      list_dataset='dcase2022_devtrain_all.txt',
                                      chunk_mode='full',  # test sony and baseline
                                      chunk_size=30480,
                                      trim_wavs=-1,
                                      return_fname=True,
                                      num_classes=13,
                                      multi_track=False,  # test sony and baseline
                                      labels_backend='sony')  # test sony and baseline
    dataset_baseline = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                          list_dataset='dcase2022_devtrain_all.txt',
                                          chunk_mode='full',  # test sony and baseline
                                          chunk_size=30480,
                                          trim_wavs=-1,
                                          return_fname=True,
                                          num_classes=13,
                                          multi_track=False,  # test sony and baseline
                                          labels_backend='baseline')  # test sony and baseline

    dataset_sony = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2021_task3',
                                      list_dataset='dcase2021t3_foa_devtest.txt',
                                      chunk_mode='full',  # test sony and baseline
                                      chunk_size=30480,
                                      trim_wavs=-1,
                                      return_fname=True,
                                      num_classes=13,
                                      multi_track=False,  # test sony and baseline
                                      labels_backend='sony')  # test sony and baseline
    dataset_baseline = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2021_task3',
                                          list_dataset='dcase2021t3_foa_devtest.txt',
                                          chunk_mode='full',  # test sony and baseline
                                          chunk_size=30480,
                                          trim_wavs=-1,
                                          return_fname=True,
                                          num_classes=13,
                                          multi_track=False,  # test sony and baseline
                                          labels_backend='baseline')  # test sony and baseline

    # Per-wav squared error between backends (sony labels cropped to baseline length).
    for wav_id in range(len(dataset_sony)):
        audio_sony, labels_sony, fname_sony = dataset_sony[wav_id]
        audio_base, labels_base, fname_base = dataset_baseline[wav_id]
        error = (labels_sony[..., 0:labels_base.shape[-1]] - labels_base) ** 2
        print(f'Error = {error.sum()}')

    # Look at some of them
    wav_id = 1
    audio_sony, labels_sony, fname_sony = dataset_sony[wav_id]
    audio_base, labels_base, fname_base = dataset_baseline[wav_id]
    plots.plot_labels_cross_sections(labels_sony, title='Sony')
    plots.plot_labels_cross_sections(labels_base, title='Baseline')
if __name__ == '__main__':
    from utils import seed_everything

    # Seed all RNGs so the manual tests are reproducible.
    seed_everything(1234, mode='balanced')

    # NOTE(review): test_multi_track() ends with a deliberate `raise ValueError`,
    # so the calls below it never run — confirm this is intended.
    test_multi_track()
    test_validation_histograms()
    test_dataset_train_iteration()  # OK, I am happy
    test_validation_clean()  # seems ok, except when using overlaps
    print('End of test')
| rfalcon100/seld_dcase2022_ric | dataset/dcase_dataset.py | dcase_dataset.py | py | 51,279 | python | en | code | 6 | github-code | 36 |
31429204981 | #2021.06.22
#소수 구하기
import math
def isprime(num):
    """Return True if num is a prime number.

    Any number below 2 (including 0 and negatives) is not prime; checking
    this first also avoids math.sqrt() failing on negative input.
    Trial division only needs to test divisors up to sqrt(num).
    """
    if num < 2:
        return False
    n = int(math.sqrt(num))
    for i in range(2, n + 1):
        if num % i == 0:
            return False
    return True
# Read the inclusive range [start, end] from stdin and print every prime in it.
start, end = map(int, input().split())
for candidate in range(start, end + 1):
    if isprime(candidate):
        print(candidate)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.