seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35196259562 | import logging
import random as rand
from enum import Enum
import numpy as np
from numpy import array as arr
from numpy import concatenate as cat
import scipy.io as sio
from scipy.misc import imread, imresize
class Batch(Enum):
    """Keys of the batch dict built by PoseDataset.make_batch().

    The integer values give each tensor a stable positional id.
    """
    inputs = 0
    part_score_targets = 1
    part_score_weights = 2
    locref_targets = 3
    locref_mask = 4
    pairwise_targets = 5
    pairwise_mask = 6
    data_item = 7
def mirror_joints_map(all_joints, num_joints):
    """Return an index map that swaps each symmetric joint pair.

    Groups in `all_joints` with exactly two entries are treated as
    left/right pairs; every other joint maps to itself.
    """
    mapping = np.arange(num_joints)
    for group in all_joints:
        if len(group) == 2:
            left, right = group
            mapping[left], mapping[right] = right, left
    return mapping
def extend_crop(crop, crop_pad, image_size):
    """Grow a [x0, y0, x1, y1] crop by crop_pad, clamped to the image.

    image_size is indexed as (..., height, width); the crop is modified
    in place and also returned.
    """
    h_limit = image_size[1] - 1
    w_limit = image_size[2] - 1
    crop[0] = max(0, crop[0] - crop_pad)
    crop[1] = max(0, crop[1] - crop_pad)
    crop[2] = min(w_limit, crop[2] + crop_pad)
    crop[3] = min(h_limit, crop[3] + crop_pad)
    return crop
def data_to_input(data):
    """Add a leading batch axis and cast to float64 for network input."""
    return np.asarray(data, dtype=float)[np.newaxis, ...]
def collect_pairwise_stats(joint_id, coords):
    """Accumulate pairwise joint offsets per (start, end) joint-id pair.

    For every person and every ordered pair of distinct annotated joints,
    appends the offset [dx, dy] from the start joint to the end joint.

    Returns a dict mapping (j_id_start, j_id_end) -> list of [dx, dy].
    """
    pairwise_stats = {}
    for person_id in range(len(coords)):
        person_joint_ids = joint_id[person_id]
        person_coords = coords[person_id]
        num_joints = len(person_joint_ids)
        for k_start in range(num_joints):
            j_id_start = person_joint_ids[k_start]
            # .item() replaces np.asscalar, which was removed in NumPy >= 1.23
            j_x_start = person_coords[k_start, 0].item()
            j_y_start = person_coords[k_start, 1].item()
            for k_end in range(num_joints):
                if k_start == k_end:
                    continue
                j_id_end = person_joint_ids[k_end]
                j_x_end = person_coords[k_end, 0].item()
                j_y_end = person_coords[k_end, 1].item()
                key = (j_id_start, j_id_end)
                pairwise_stats.setdefault(key, []).append(
                    [j_x_end - j_x_start, j_y_end - j_y_start])
    return pairwise_stats
def load_pairwise_stats(cfg):
    """Load pairwise offset statistics from the .mat file cfg.pairwise_stats_fn.

    Returns {(start_id, end_id): {"mean": ..., "std": ...}}, with both
    statistics rescaled by cfg.global_scale.
    """
    mat_stats = sio.loadmat(cfg.pairwise_stats_fn)
    pairwise_stats = {}
    for idx, edge in enumerate(mat_stats['graph']):
        pairwise_stats[tuple(edge)] = {
            "mean": mat_stats['means'][idx] * cfg.global_scale,
            "std": mat_stats['std_devs'][idx] * cfg.global_scale,
        }
    return pairwise_stats
def get_pairwise_index(j_id, j_id_end, num_joints):
    """Flat channel index of the directed joint pair (j_id -> j_id_end).

    Each joint owns num_joints - 1 outgoing slots; the slot for j_id_end
    skips over j_id itself.
    """
    slot = j_id_end - 1 if j_id < j_id_end else j_id_end
    return (num_joints - 1) * j_id + slot
class DataItem:
    """Plain attribute container for one annotated image.

    Populated by PoseDataset.load_dataset() with: image_id, im_path,
    im_size, and optionally joints and crop.
    """
    pass
class PoseDataset:
    """Iterates an annotated pose dataset and builds training batches."""

    def __init__(self, cfg):
        """Initialize from a config object.

        cfg must provide at least: dataset (annotation path or falsy),
        mirror, all_joints, num_joints, shuffle, pairwise_stats_collect
        and pairwise_predict (plus the fields read by other methods).
        """
        self.cfg = cfg
        # Empty dataset when no annotation file is configured.
        self.data = self.load_dataset() if cfg.dataset else []
        self.num_images = len(self.data)
        if self.cfg.mirror:
            self.symmetric_joints = mirror_joints_map(cfg.all_joints, cfg.num_joints)
        self.curr_img = 0
        self.set_shuffle(cfg.shuffle)
        self.set_pairwise_stats_collect(cfg.pairwise_stats_collect)
        if self.cfg.pairwise_predict:
            self.pairwise_stats = load_pairwise_stats(self.cfg)
def load_dataset(self):
    """Parse the Matlab annotation file cfg.dataset into DataItem objects.

    Side effects: stores the raw loadmat result in self.raw_data and sets
    self.has_gt (False when any sample lacks joint annotations).
    """
    cfg = self.cfg
    file_name = cfg.dataset
    # Load Matlab file dataset annotation
    mlab = sio.loadmat(file_name)
    self.raw_data = mlab
    mlab = mlab['dataset']
    num_images = mlab.shape[1]
    data = []
    has_gt = True
    for i in range(num_images):
        sample = mlab[0, i]
        item = DataItem()
        item.image_id = i
        item.im_path = sample[0][0]
        item.im_size = sample[1][0]
        if len(sample) >= 3:
            joints = sample[2][0][0]
            joint_id = joints[:, 0]
            # make sure joint ids are 0-indexed
            if joint_id.size != 0:
                # Every id must be below num_joints; the original used
                # .any(), which only required a single id to be in range.
                assert (joint_id < cfg.num_joints).all()
            joints[:, 0] = joint_id
            item.joints = [joints]
        else:
            has_gt = False
        if cfg.crop:
            crop = sample[3][0] - 1  # Matlab annotations are 1-indexed
            item.crop = extend_crop(crop, cfg.crop_pad, item.im_size)
        data.append(item)
    self.has_gt = has_gt
    return data
def num_keypoints(self):
return self.cfg.num_joints
def set_test_mode(self, test_mode):
self.has_gt = not test_mode
def set_shuffle(self, shuffle):
self.shuffle = shuffle
if not shuffle:
assert not self.cfg.mirror
self.image_indices = np.arange(self.num_images)
def set_pairwise_stats_collect(self, pairwise_stats_collect):
self.pairwise_stats_collect = pairwise_stats_collect
if self.pairwise_stats_collect:
assert self.get_scale() == 1.0
def mirror_joint_coords(self, joints, image_width):
    """Flip the x column (index 1) horizontally, in place, and return it.

    joints rows are [joint_id, x, y]; only x changes under mirroring.
    """
    joints[:, 1] = (image_width - 1) - joints[:, 1]
    return joints
def mirror_joints(self, joints, symmetric_joints, image_width):
# joint ids are 0 indexed
res = np.copy(joints)
res = self.mirror_joint_coords(res, image_width)
# swap the joint_id for a symmetric one
joint_id = joints[:, 0].astype(int)
res[:, 0] = symmetric_joints[joint_id]
return res
def shuffle_images(self):
num_images = self.num_images
if self.cfg.mirror:
image_indices = np.random.permutation(num_images * 2)
self.mirrored = image_indices >= num_images
image_indices[self.mirrored] = image_indices[self.mirrored] - num_images
self.image_indices = image_indices
else:
self.image_indices = np.random.permutation(num_images)
def num_training_samples(self):
num = self.num_images
if self.cfg.mirror:
num *= 2
return num
def next_training_sample(self):
if self.curr_img == 0 and self.shuffle:
self.shuffle_images()
curr_img = self.curr_img
self.curr_img = (self.curr_img + 1) % self.num_training_samples()
imidx = self.image_indices[curr_img]
mirror = self.cfg.mirror and self.mirrored[curr_img]
return imidx, mirror
def get_training_sample(self, imidx):
return self.data[imidx]
def get_scale(self):
cfg = self.cfg
scale = cfg.global_scale
if hasattr(cfg, 'scale_jitter_lo') and hasattr(cfg, 'scale_jitter_up'):
scale_jitter = rand.uniform(cfg.scale_jitter_lo, cfg.scale_jitter_up)
scale *= scale_jitter
return scale
def next_batch(self):
    """Keep drawing samples until one has a valid size, then build its batch."""
    while True:
        imidx, mirror = self.next_training_sample()
        data_item = self.get_training_sample(imidx)
        scale = self.get_scale()
        # Skip images that are too small, or too large once scaled.
        if not self.is_valid_size(data_item.im_size, scale):
            continue
        return self.make_batch(data_item, scale, mirror)
def is_valid_size(self, image_size, scale):
im_width = image_size[2]
im_height = image_size[1]
max_input_size = 100
if im_height < max_input_size or im_width < max_input_size:
return False
if hasattr(self.cfg, 'max_input_size'):
max_input_size = self.cfg.max_input_size
input_width = im_width * scale
input_height = im_height * scale
if input_height * input_width > max_input_size * max_input_size:
return False
return True
def make_batch(self, data_item, scale, mirror):
    """Load one image, apply crop/scale/mirror, and build the training batch.

    Returns a dict keyed by Batch members; every array gets a leading
    batch axis via data_to_input, and Batch.data_item keeps the source item.
    NOTE(review): imread/imresize come from scipy.misc, which was removed
    in SciPy >= 1.3 — confirm the pinned SciPy version.
    """
    im_file = data_item.im_path
    logging.debug('image %s', im_file)
    logging.debug('mirror %r', mirror)
    image = imread(im_file, mode='RGB')
    if self.has_gt:
        joints = np.copy(data_item.joints)
    if self.cfg.crop:
        crop = data_item.crop
        image = image[crop[1]:crop[3] + 1, crop[0]:crop[2] + 1, :]
        if self.has_gt:
            # Shift joint x/y into the cropped coordinate frame.
            joints[:, 1:3] -= crop[0:2].astype(joints.dtype)
    img = imresize(image, scale) if scale != 1 else image
    scaled_img_size = arr(img.shape[0:2])
    if mirror:
        img = np.fliplr(img)
    batch = {Batch.inputs: img}
    if self.has_gt:
        stride = self.cfg.stride
        if mirror:
            joints = [self.mirror_joints(person_joints, self.symmetric_joints, image.shape[1]) for person_joints in
                      joints]
        # Score-map size: image size / stride, rounded up to an even number.
        sm_size = np.ceil(scaled_img_size / (stride * 2)).astype(int) * 2
        scaled_joints = [person_joints[:, 1:3] * scale for person_joints in joints]
        joint_id = [person_joints[:, 0].astype(int) for person_joints in joints]
        batch = self.compute_targets_and_weights(joint_id, scaled_joints, data_item, sm_size, scale, batch)
        if self.pairwise_stats_collect:
            data_item.pairwise_stats = collect_pairwise_stats(joint_id, scaled_joints)
    batch = {key: data_to_input(data) for (key, data) in batch.items()}
    batch[Batch.data_item] = data_item
    return batch
def set_locref(self, locref_map, locref_mask, locref_scale, i, j, j_id, dx, dy):
    """Write the scaled (dx, dy) refinement target for joint j_id at cell (j, i)."""
    x_ch = 2 * j_id
    y_ch = x_ch + 1
    locref_mask[j, i, x_ch] = 1
    locref_mask[j, i, y_ch] = 1
    locref_map[j, i, x_ch] = dx * locref_scale
    locref_map[j, i, y_ch] = dy * locref_scale
def set_pairwise_map(self, pairwise_map, pairwise_mask, i, j, j_id, j_id_end, coords, pt_x, pt_y, person_id, k_end):
    """Write the normalized offset from grid cell (j, i) to joint k_end.

    The offset is standardized with the precomputed mean/std stored in
    self.pairwise_stats for the (j_id, j_id_end) joint pair.
    """
    num_joints = self.cfg.num_joints
    joint_pt = coords[person_id][k_end, :]
    # .item() replaces np.asscalar, which was removed in NumPy >= 1.23
    j_x_end = joint_pt[0].item()
    j_y_end = joint_pt[1].item()
    pair_id = get_pairwise_index(j_id, j_id_end, num_joints)
    stats = self.pairwise_stats[(j_id, j_id_end)]
    dx = j_x_end - pt_x
    dy = j_y_end - pt_y
    pairwise_mask[j, i, pair_id * 2 + 0] = 1
    pairwise_mask[j, i, pair_id * 2 + 1] = 1
    pairwise_map[j, i, pair_id * 2 + 0] = (dx - stats["mean"][0]) / stats["std"][0]
    pairwise_map[j, i, pair_id * 2 + 1] = (dy - stats["mean"][1]) / stats["std"][1]
def compute_targets_and_weights(self, joint_id, coords, data_item, size, scale, batch):
    """Build score maps plus location-refinement and pairwise targets/masks.

    For each annotated joint, marks every score-map cell within
    cfg.pos_dist_thresh * scale of the joint as positive and writes the
    refinement offset for the closest joint claiming that cell. Results
    are added to `batch` under the corresponding Batch keys.
    """
    stride = self.cfg.stride
    dist_thresh = self.cfg.pos_dist_thresh * scale
    num_joints = self.cfg.num_joints
    half_stride = stride / 2
    scmap = np.zeros(cat([size, arr([num_joints])]))
    locref_shape = cat([size, arr([num_joints * 2])])
    locref_mask = np.zeros(locref_shape)
    locref_map = np.zeros(locref_shape)
    pairwise_shape = cat([size, arr([num_joints * (num_joints - 1) * 2])])
    pairwise_mask = np.zeros(pairwise_shape)
    pairwise_map = np.zeros(pairwise_shape)
    dist_thresh_sq = dist_thresh ** 2
    width = size[1]
    height = size[0]
    # Hoisted: constant over the whole loop nest.
    locref_scale = 1.0 / self.cfg.locref_stdev
    for person_id in range(len(coords)):
        for k, j_id in enumerate(joint_id[person_id]):
            joint_pt = coords[person_id][k, :]
            # .item() replaces np.asscalar (removed in NumPy >= 1.23)
            j_x = joint_pt[0].item()
            j_y = joint_pt[1].item()
            # don't loop over entire heatmap, but just relevant locations
            j_x_sm = round((j_x - half_stride) / stride)
            j_y_sm = round((j_y - half_stride) / stride)
            min_x = round(max(j_x_sm - dist_thresh - 1, 0))
            max_x = round(min(j_x_sm + dist_thresh + 1, width - 1))
            min_y = round(max(j_y_sm - dist_thresh - 1, 0))
            max_y = round(min(j_y_sm + dist_thresh + 1, height - 1))
            for j in range(min_y, max_y + 1):
                pt_y = j * stride + half_stride
                for i in range(min_x, max_x + 1):
                    pt_x = i * stride + half_stride
                    dx = j_x - pt_x
                    dy = j_y - pt_y
                    dist = dx ** 2 + dy ** 2
                    if dist <= dist_thresh_sq:
                        current_normalized_dist = dist * locref_scale ** 2
                        prev_normalized_dist = locref_map[j, i, j_id * 2 + 0] ** 2 + \
                                               locref_map[j, i, j_id * 2 + 1] ** 2
                        # Contested cells keep the targets of the closest joint.
                        update_scores = (scmap[j, i, j_id] == 0) or prev_normalized_dist > current_normalized_dist
                        if self.cfg.location_refinement and update_scores:
                            self.set_locref(locref_map, locref_mask, locref_scale, i, j, j_id, dx, dy)
                        if self.cfg.pairwise_predict and update_scores:
                            for k_end, j_id_end in enumerate(joint_id[person_id]):
                                if k != k_end:
                                    self.set_pairwise_map(pairwise_map, pairwise_mask, i, j, j_id, j_id_end,
                                                          coords, pt_x, pt_y, person_id, k_end)
                        scmap[j, i, j_id] = 1
    scmap_weights = self.compute_scmap_weights(scmap.shape, joint_id, data_item)
    # Update batch
    batch.update({
        Batch.part_score_targets: scmap,
        Batch.part_score_weights: scmap_weights
    })
    if self.cfg.location_refinement:
        batch.update({
            Batch.locref_targets: locref_map,
            Batch.locref_mask: locref_mask
        })
    if self.cfg.pairwise_predict:
        batch.update({
            Batch.pairwise_targets: pairwise_map,
            Batch.pairwise_mask: pairwise_mask
        })
    return batch
def compute_scmap_weights(self, scmap_shape, joint_id, data_item):
    """Per-channel loss weights for the score map.

    With cfg.weigh_only_present_joints, only channels of joints annotated
    for at least one person get weight 1; otherwise every cell weighs 1.
    """
    cfg = self.cfg
    if cfg.weigh_only_present_joints:
        weights = np.zeros(scmap_shape)
        for person_joint_id in joint_id:
            for j_id in person_joint_id:
                weights[:, :, j_id] = 1.0
    else:
        weights = np.ones(scmap_shape)
return weights | eldar/pose-tensorflow | dataset/pose_dataset.py | pose_dataset.py | py | 14,519 | python | en | code | 1,127 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.asscalar",
"line_n... |
23600896108 | import sqlite3
import qrcode
import wx
import wx.aui
import wx.lib.agw.aui as aui
import wx.adv
from datetime import datetime
from dateutil.relativedelta import relativedelta
import bcrypt
import cv2
import phonenumbers
from phonenumbers import carrier
from phonenumbers.phonenumberutil import number_type
import smtplib, ssl
from email.mime.text import MIMEText
from random import randint
import re
# Port for SMTP over SSL (implicit TLS).
server_port = 465
# Pattern used to validate e-mail addresses during registration.
regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
# Global for the current user
currentUser = []
# Global for the current visit id
currentVisitId = None
# Global for the current exercises list
currentExercises = []
# Global for the visit ids for the current user
visitsIds = []
# Connecting to the database and allowing foreign keys
conn = sqlite3.connect('FitnessManiaDb')
conn.execute("PRAGMA foreign_keys = 1")
# Adding cursor of the database
c = conn.cursor()
# Creating tables of the database: users, users_memberships, users_visits and users_training
c.execute('''
CREATE TABLE IF NOT EXISTS users
([id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [username] TEXT, [password] TEXT, [firstname] TEXT, [lastname] TEXT, [sex] TEXT, [age] TEXT, [email] TEXT, [phone_number] TEXT, [admin_flag] BOOL)
''')
c.execute('''
CREATE TABLE IF NOT EXISTS users_memberships
([mem_id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL ,[users_id] INTEGER , [mem_start] TEXT, [mem_end] TEXT, [purchase_date] TEXT, [mem_type] TEXT,
[money_paid] INTEGER, [is_valid] INTEGER,
FOREIGN KEY (users_id) REFERENCES users (id))
''')
c.execute('''
CREATE TABLE IF NOT EXISTS users_visits
([visits_id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL ,[users_id] INTEGER , [visits_start] TEXT, [visits_end] TEXT, [weight_start] TEXT, [weight_end] TEXT,
FOREIGN KEY (users_id) REFERENCES users (id))
''')
c.execute('''
CREATE TABLE IF NOT EXISTS users_training
([training_id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [visits_id] INTEGER, [users_id] INTEGER , [category] TEXT, [exercise] TEXT, [sets] TEXT, [reps] TEXT, [mins] TEXT,
FOREIGN KEY (users_id) REFERENCES users (id),
FOREIGN KEY (visits_id) REFERENCES users_visits (visits_id))
''')
c.execute('''
CREATE TABLE IF NOT EXISTS exercises
([exercise_id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [exercise_name] TEXT, [exercise_type] TEXT, [sets_reps] BOOL, [exercise_img] TEXT)
''')
# Class that contains the logic for registering and logging in/out of a user
class Login(wx.Panel):
    """Login/registration panel: builds the credential form and wires handlers."""

    def __init__(self, parent):
        """Create the login view widgets and bind their button handlers."""
        wx.Panel.__init__(self, parent)
        # BUTTONS-START
        self.registerBtn = wx.Button(
            self, label='Register', pos=(500, 350), size=(200, 26))
        self.loginBtn = wx.Button(
            self, label='Login', pos=(500, 240), size=(200, 26))
        self.scanQrBtn = wx.Button(
            self, label='Scan QR', pos=(500, 400), size=(200, 26))
        # BUTTONS-END
        # STATICTEXT-START
        self.usernameTxt = wx.StaticText(
            self, label="Username:", pos=(370, 161), size=(100, 26))
        self.passwordTxt = wx.StaticText(
            self, label="Password:", pos=(370, 201), size=(100, 26))
        self.not_regi_text = wx.StaticText(
            self, label="Not Registered?", pos=(513, 310), size=(200, 26))
        self.fail_login = wx.StaticText(
            self, label="", pos=(520, 100), size=(200, 26))
        self.welcomeUser = wx.StaticText(
            self, label="", pos=(440, 50), size=(200, 26))
        # The following labels start empty; the register view fills them in.
        self.firstNameTxt = wx.StaticText(
            self, label="", pos=(370, 241), size=(100, 26))
        self.lastNameTxt = wx.StaticText(
            self, label="", pos=(370, 281), size=(100, 26))
        self.sexTxt = wx.StaticText(
            self, label="", pos=(370, 321), size=(100, 26))
        self.ageTxt = wx.StaticText(
            self, label="", pos=(370, 361), size=(100, 26))
        self.emailTxt = wx.StaticText(
            self, label="", pos=(370, 401), size=(100, 26))
        self.phoneNumberTxt = wx.StaticText(
            self, label="", pos=(370, 441), size=(100, 26))
        # STATICTEXT-END
        # TEXTCTRL-START
        self.usernameCtrl = wx.TextCtrl(self, pos=(500, 160), size=(200, 26))
        self.passwordCtrl = wx.TextCtrl(self, pos=(
            500, 200), style=(wx.TE_PASSWORD), size=(200, 26))
        # TEXTCTRL-END
        # FONTS-START
        self.font = wx.Font(15, family=wx.FONTFAMILY_MODERN, style=0, weight=70,
                            underline=False, faceName="", encoding=wx.FONTENCODING_DEFAULT)
        self.font.SetWeight(wx.BOLD)
        self.usernameTxt.SetFont(self.font)
        self.passwordTxt.SetFont(self.font)
        self.not_regi_text.SetFont(self.font)
        self.fail_login.SetFont(self.font)
        self.welcomeUser.SetFont(self.font)
        self.firstNameTxt.SetFont(self.font)
        self.lastNameTxt.SetFont(self.font)
        self.sexTxt.SetFont(self.font)
        self.ageTxt.SetFont(self.font)
        self.emailTxt.SetFont(self.font)
        self.phoneNumberTxt.SetFont(self.font)
        self.loginBtn.SetFont(self.font)
        self.registerBtn.SetFont(self.font)
        self.scanQrBtn.SetFont(self.font)
        # FONTS-END
        # BINDING-START
        self.loginBtn.Bind(wx.EVT_BUTTON, self.onLogin)
        self.registerBtn.Bind(wx.EVT_BUTTON, self.onRegister)
        self.scanQrBtn.Bind(wx.EVT_BUTTON, self.onScanQR)
        # self.loginBtn.Bind(wx.EVT_BUTTON, self.onValidEmail)
        # BINDING-END
def onAllowPhone(self, event):
    """Key filter for the phone field: digits, '+' and backspace only.

    Calling event.Skip() lets an allowed key through; anything else is
    swallowed by returning without skipping.
    """
    key = event.GetKeyCode()
    allowed = set(range(ord('0'), ord('9') + 1)) | {ord('+'), ord('\010')}
    if key in allowed:
        event.Skip()
    return
def randomCode(self, num):
    """Return a random integer with exactly `num` digits (for e-mail validation)."""
    low = 10 ** (num - 1)
    high = 10 ** num - 1
    return randint(low, high)
def onValidation(self, event):
    """Show (or recreate) the validation-code text box and its Check button."""
    # Drop any previous instances of the controls before recreating them.
    try:
        self.checkCtrl.Destroy()
        self.checkBtn.Destroy()
    except:
        pass
    self.checkBtn = wx.Button(
        self, label='Check', pos=(370, 521), size=(150, 26))
    self.checkCtrl = wx.TextCtrl(self, pos=(370, 481), size=(150, 26))
    self.checkBtn.SetFont(self.font)
    self.checkBtn.Bind(wx.EVT_BUTTON, self.onValidEmail)  # we bind it with onValidEmail
    self.acceptRegisterBtn.SetLabel("Send again")
def onValidEmail(self, event):
    """Check the typed validation code; on match, hash the password and save the user.

    The first registered account gets the admin flag. On success the
    register widgets are torn down and the login flow is re-entered.
    """
    try:
        getCode = int(self.checkCtrl.GetValue())
    except ValueError:
        # Non-numeric input previously raised and killed the handler.
        self.welcomeUser.SetLabel("Wrong validation code, try again")
        return
    if getCode == self.code:  # We check if the code is valid
        self.checkCtrl.Destroy()
        self.checkBtn.Destroy()
        strPassowrd = str(self.passwordCtrl.GetValue()).encode()
        genSalt = bcrypt.gensalt()
        hashedPassword = bcrypt.hashpw(strPassowrd, genSalt)  # we hash the registration password
        finalPass = hashedPassword.decode('utf8')
        QUERY = "SELECT * FROM USERS"
        c.execute(QUERY)
        checkFirst = c.fetchall()
        # The very first registered account becomes an administrator;
        # this replaces two near-identical INSERT branches.
        isAdmin = len(checkFirst) == 0
        c.execute("INSERT INTO USERS VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (None, self.usernameCtrl.GetValue(), finalPass, self.firstNameCtrl.GetValue(
        ), self.lastNameCtrl.GetValue(), self.sexCombo.GetValue(), self.ageCtrl.GetValue(), self.emailCtrl.GetValue(), self.phoneNumberCtrl.GetValue(), isAdmin))
        conn.commit()
        # Destroys the objects not needed for the view
        try:
            self.firstNameCtrl.Destroy()
            self.lastNameCtrl.Destroy()
            self.sexCombo.Destroy()
            self.ageCtrl.Destroy()
            self.emailCtrl.Destroy()
            self.phoneNumberCtrl.Destroy()
            self.acceptRegisterBtn.Destroy()
            self.firstNameTxt.Destroy()
            self.lastNameTxt.Destroy()
            self.sexTxt.Destroy()
            self.ageTxt.Destroy()
            self.emailTxt.Destroy()
            self.phoneNumberTxt.Destroy()
            self.backBtn.Destroy()
        except:
            pass
        self.onLogin(None)  # we get the user to the login screen
    else:
        self.welcomeUser.SetLabel("Wrong validation code, try again")  # fail on inputing validation code
def onScanQR(self, event):
    """Log in by scanning a QR code holding "username\\npassword-hash".

    Opens the default webcam, waits until a QR code decodes (or 'x' is
    pressed), then matches the decoded credentials against the users table
    and switches to the welcome view on success.
    """
    try:
        # We try to open the webcam and wait for data
        decodedData = ""
        capture = cv2.VideoCapture(0)
        detectQR = cv2.QRCodeDetector()
        cam = True
        while cam:  # while the webcam is on
            cam, shownImg = capture.read()
            data, _, _ = detectQR.detectAndDecode(shownImg)
            if data:  # if there is any data we get it
                decodedData = data
                break  # break after data is granted
            cv2.imshow("Scan your QR", shownImg)
            if cv2.waitKey(1) == ord('x'):
                break
        # Release the camera handle (it was previously leaked).
        capture.release()
        if decodedData:  # the decoded data is username and hashed password
            decodedData = decodedData.split('\n')
            username = decodedData[0]
            password = decodedData[1]
            # Parameterized query: the scanned text is untrusted input and
            # must never be concatenated into SQL.
            c.execute("SELECT * FROM USERS WHERE username = ? AND password = ?",
                      (username, password))
            resultUser = c.fetchall()
            if len(resultUser) == 1:  # credentials found, continue to welcome page
                global currentUser
                currentUser = resultUser  # we set the currentuser
                try:
                    self.loginBtn.Destroy()
                    self.registerBtn.Destroy()
                    self.not_regi_text.Destroy()
                    self.fail_login.Destroy()
                except:
                    pass
                self.usernameCtrl.Destroy()
                self.usernameTxt.Destroy()
                self.passwordCtrl.Destroy()
                self.passwordTxt.Destroy()
                # Setting labels or creating new static texts for the updated view
                self.welcomeUser.SetLabel(
                    "Welcome")
                self.userDetailsTxt = wx.StaticText(
                    self, label='', pos=(520, 80), size=(300, 26))
                self.userDetailsTxt.SetLabel(
                    "" + currentUser[0][3] + " " + currentUser[0][4] + "!")
                welcomeUserPosition = wx.Point(560, 50)
                self.welcomeUser.SetPosition(welcomeUserPosition)
                self.logoutBtn = wx.Button(
                    self, label='Logout', pos=(500, 130), size=(200, 26))
                # Binding the logoutBtn to the onLogout function
                self.logoutBtn.Bind(wx.EVT_BUTTON, self.onLogout)
                # Setting fonts
                self.logoutBtn.SetFont(self.font)
                self.userDetailsTxt.SetFont(self.font)
                # Enabling tabs and setting the Login tab to be Logout along with image change
                notebook.EnableTab(1, True)
                notebook.EnableTab(2, True)
                notebook.SetPageText(0, 'Logout')
                notebook.SetPageImage(0, 4)
                if currentUser[0][9] == 1:  # admin rights enable the admin panel
                    Sport_club.adminTab = AdminPanel(notebook)
                    notebook.AddPage(Sport_club.adminTab, "AdminPanel")
                    notebook.EnableTab(4, True)
                # QR Code is generated
                self.qrUser(event)  # we show the user the qr code
                # We refresh the list of visits in the visits tab by simulating an event
                evt = wx.FocusEvent(
                    wx.EVT_LIST_ITEM_MIDDLE_CLICK.evtType[0], dateList.GetId())
                wx.PostEvent(dateList.GetEventHandler(), evt)
                cv2.destroyAllWindows()  # close all cv windows
            else:
                self.fail_login.SetLabel("Failed to scan QR Code, Try again")
                cv2.destroyAllWindows()  # close all cv windows
    except:
        self.fail_login.SetLabel("Failed to scan QR Code, Try again")
def qrUser(self, event):
    """Render and display the current user's login QR code.

    The code encodes "username\\npassword-hash", saved to
    qrcodeCurrentUser.png and shown as a static bitmap.
    """
    try:
        try:
            self.scanQrBtn.Destroy()
        except:
            pass
        qrcodeStr = ""
        # Parameterized query instead of string-built SQL.
        c.execute("SELECT username, password From users Where id = ?",
                  (currentUser[0][0],))
        resultInfo = c.fetchall()
        for x in resultInfo:
            qrcodeStr = x[0] + "\n" + x[1]
        qr = qrcode.QRCode(version=1, box_size=5, border=5)
        qr.add_data(qrcodeStr)  # add the data to the qr
        qr.make(fit=True)
        Qrimg = qr.make_image(fill='black', back_color='white')
        Qrimg.save('qrcodeCurrentUser.png')  # save it as an image
        self.png = wx.StaticBitmap(
            self, -1, wx.Bitmap("qrcodeCurrentUser.png", wx.BITMAP_TYPE_ANY), pos=(470, 200))  # show it using StaticBitmap
    except:
        pass
def onLogin(self, event):
    """Authenticate the typed credentials and switch to the welcome view.

    Looks up the bcrypt hash by username, verifies the typed password, and
    on success loads the full user row, rebuilds the UI and enables tabs.
    """
    # Getting the values from the username and password fields
    self.username_inp = self.usernameCtrl.GetValue()
    self.password_inp = self.passwordCtrl.GetValue()
    global currentUser
    strPassowrd = str(self.passwordCtrl.GetValue()).encode()  # encode the typed password
    # Parameterized query: never concatenate user input into SQL.
    c.execute("SELECT password FROM USERS WHERE username = ?", (self.username_inp,))
    getHashedPass = c.fetchall()
    if len(getHashedPass) == 1:  # we check if there is any user with this username
        for x in getHashedPass:
            passW = x[0].encode('utf8')  # encode the stored hash to utf8
            if bcrypt.checkpw(strPassowrd, passW):  # typed password matches the hash
                print("Match")
                # Querying the database for the current user
                c.execute("SELECT * FROM USERS WHERE username = ? AND password = ?",
                          (self.username_inp, str(x[0])))
                queryResult = c.fetchall()
                # Setting the global of the current user to the query result
                currentUser = queryResult
                # If currentUser is admin, create AdminPanel
                if currentUser[0][9] == 1:
                    Sport_club.adminTab = AdminPanel(notebook)
                    notebook.AddPage(Sport_club.adminTab, "AdminPanel")
                    notebook.EnableTab(4, True)
                # we get the user to the welcome page
                try:
                    self.loginBtn.Destroy()
                    self.registerBtn.Destroy()
                    self.not_regi_text.Destroy()
                    self.fail_login.Destroy()
                    self.scanQrBtn.Destroy()
                except:
                    pass
                self.usernameCtrl.Destroy()
                self.usernameTxt.Destroy()
                self.passwordCtrl.Destroy()
                self.passwordTxt.Destroy()
                # Setting labels or creating new static texts for the updated view
                self.welcomeUser.SetLabel(
                    "Welcome")
                self.userDetailsTxt = wx.StaticText(
                    self, label='', pos=(520, 80), size=(300, 26))
                self.userDetailsTxt.SetLabel(
                    "" + currentUser[0][3] + " " + currentUser[0][4] + "!")
                welcomeUserPosition = wx.Point(560, 50)
                self.welcomeUser.SetPosition(welcomeUserPosition)
                self.logoutBtn = wx.Button(
                    self, label='Logout', pos=(500, 130), size=(200, 26))
                # Binding the logoutBtn to the onLogout function
                self.logoutBtn.Bind(wx.EVT_BUTTON, self.onLogout)
                # Setting fonts
                self.logoutBtn.SetFont(self.font)
                self.userDetailsTxt.SetFont(self.font)
                # Enabling tabs and setting the Login tab to be Logout along with image change
                notebook.EnableTab(1, True)
                notebook.EnableTab(2, True)
                notebook.SetPageText(0, 'Logout')
                notebook.SetPageImage(0, 4)
                # QR Code is generated
                self.qrUser(event)
                # We refresh the list of visits in the visits tab by simulating an event
                evt = wx.FocusEvent(
                    wx.EVT_LIST_ITEM_MIDDLE_CLICK.evtType[0], dateList.GetId())
                wx.PostEvent(dateList.GetEventHandler(), evt)
            else:
                self.fail_login.SetLabel("Failed Login")
    else:
        self.fail_login.SetLabel("Failed Login")
def onRegister(self, event):
    """Switch the panel from the login view to the registration form."""
    # We destroy the objects that are not needed for the register display
    self.loginBtn.Destroy()
    self.registerBtn.Destroy()
    self.not_regi_text.Destroy()
    self.fail_login.Destroy()
    self.scanQrBtn.Destroy()
    # We set labels or create if necessary the required objects
    try:
        self.welcomeUser.SetLabel(
            "Please fill in your registration details.")
        self.welcomeUser.SetPosition(wx.Point(370, 100))
        self.firstNameTxt.SetLabel("First name:")
        self.lastNameTxt.SetLabel("Last name:")
        self.sexTxt.SetLabel("Sex:")
        self.ageTxt.SetLabel("Age:")
        self.emailTxt.SetLabel("Email:")
        self.phoneNumberTxt.SetLabel("Phone Number:")
    except:
        # Labels were destroyed earlier (e.g. after a logout); recreate them.
        self.firstNameTxt = wx.StaticText(
            self, label="First name:", pos=(370, 241), size=(100, 26))
        self.lastNameTxt = wx.StaticText(
            self, label="Last name:", pos=(370, 281), size=(100, 26))
        self.sexTxt = wx.StaticText(
            self, label="Sex:", pos=(370, 321), size=(100, 26))
        self.ageTxt = wx.StaticText(
            self, label="Age:", pos=(370, 361), size=(100, 26))
        self.emailTxt = wx.StaticText(
            self, label="Email:", pos=(370, 401), size=(100, 26))
        self.phoneNumberTxt = wx.StaticText(
            self, label="Phone number:", pos=(370, 441), size=(100, 26))
    self.welcomeUser.SetFont(self.font)
    self.firstNameTxt.SetFont(self.font)
    self.lastNameTxt.SetFont(self.font)
    self.sexTxt.SetFont(self.font)
    self.ageTxt.SetFont(self.font)
    self.emailTxt.SetFont(self.font)
    self.phoneNumberTxt.SetFont(self.font)
    # Setting the position of the controls
    self.usernameCtrl.SetPosition(wx.Point(550, 160))
    self.passwordCtrl.SetPosition(wx.Point(550, 201))
    # Making new TextCtrls for the view
    self.firstNameCtrl = wx.TextCtrl(self, pos=(550, 241), size=(200, 26))
    self.lastNameCtrl = wx.TextCtrl(self, pos=(550, 281), size=(200, 26))
    self.sexCombo = wx.ComboBox(self, pos=(550, 321), size=(
        200, 26), style=(wx.CB_READONLY), choices=['Male', 'Female'])
    self.ageCtrl = wx.SpinCtrl(self, pos=(550, 355), size=(200, 35), style=(wx.CB_READONLY))
    self.emailCtrl = wx.TextCtrl(self, pos=(550, 401), size=(200, 26))
    self.phoneNumberCtrl = wx.TextCtrl(
        self, pos=(550, 441), size=(200, 26))
    # Restrict phone input to digits / '+' / backspace.
    self.phoneNumberCtrl.Bind(wx.EVT_CHAR, self.onAllowPhone)
    # Making new Buttons for the view
    self.acceptRegisterBtn = wx.Button(
        self, label='Register', pos=(550, 481), size=(200, 26))
    self.acceptRegisterBtn.Bind(wx.EVT_BUTTON, self.acceptRegister)
    self.backBtn = wx.Button(
        self, label='Back', pos=(550, 521), size=(200, 26))
    # Setting fonts
    self.backBtn.SetFont(self.font)
    self.acceptRegisterBtn.SetFont(self.font)
    # Binding the backBtn button
    self.backBtn.Bind(wx.EVT_BUTTON, self.onBackBtn)
def onEmailcheck(self, input):
    """Return True when `input` fully matches the module-level e-mail regex."""
    return re.fullmatch(regex, input) is not None
def acceptRegister(self, event):
    """Validate the registration form, then e-mail a confirmation code.

    Checks, in order: all fields filled, phone number parseable, e-mail
    matches the pattern, username not already taken. On success sends the
    code via SMTP-over-SSL and reveals the validation controls.
    """
    # Checking if the inputs are empty
    inputValues = [self.usernameCtrl.GetValue(), self.passwordCtrl.GetValue(), self.firstNameCtrl.GetValue(
    ), self.lastNameCtrl.GetValue(), self.sexCombo.GetValue(), self.ageCtrl.GetValue(), self.emailCtrl.GetValue(), self.phoneNumberCtrl.GetValue()]
    isEmpty = False
    for x in inputValues:
        if len(str(x)) < 1:
            isEmpty = True
    if not isEmpty:  # if all the fields are written
        try:  # check if the phone number is valid
            carrier._is_mobile(number_type(phonenumbers.parse(self.phoneNumberCtrl.GetValue())))
            # check if the email is valid
            if self.onEmailcheck(self.emailCtrl.GetValue()):
                # check if the username already exists — parameterized
                # query, since the username is untrusted input
                c.execute("SELECT username FROM users WHERE username = ?",
                          (self.usernameCtrl.GetValue(),))
                isRegistered = c.fetchall()
                if not isRegistered:
                    smtp_server = "smtp.gmail.com"  # server domain
                    senderEmail = "YourEmail@gmail.com"  # the email of the club
                    receiverEmail = self.emailCtrl.GetValue()  # we get the users email
                    # SECURITY NOTE(review): credentials are hard-coded in
                    # source; move them to configuration outside the repo.
                    password = "Your password"  # hardcoded password of the email of the club
                    randomActivCode = self.randomCode(4)  # generate a code for the receiver
                    self.code = randomActivCode
                    # The message for the receiver
                    message = MIMEText("Hello, " + self.firstNameCtrl.GetValue() + " " + self.lastNameCtrl.GetValue() + '\n\n' + "Welcome to Sport Mania Club" + "\n" + "Your code is " + "[" + str(randomActivCode) + "]"
                                       + "\n" + "Please insert your code in the newly appeared text box and press on Check!" + "\n\n" + "We wish you a good day." + "\n" + "NOTE: (If you didn't register, please disregard this message)" + "\n\n" + "Kind regards,"
                                       + "\n" + "Sport Mania Club.")
                    message['Subject'] = "Validation Code from Sport Mania"
                    message['From'] = "sportmclub@gmail.com"
                    message['To'] = receiverEmail
                    serverContext = ssl.create_default_context()  # Create a new context with secure default settings
                    with smtplib.SMTP_SSL(smtp_server, server_port, context=serverContext) as access:
                        access.login(senderEmail, password)  # login using the club email
                        access.sendmail(senderEmail, receiverEmail, message.as_string())  # send the msg to the user
                    self.welcomeUser.SetLabel("Please insert the code from your email inbox")
                    self.onValidation(None)  # we call the validation function
                else:
                    self.welcomeUser.SetLabel(
                        "User is already registered.")
            else:
                self.welcomeUser.SetLabel(
                    "Email is not valid.")
        except:
            self.welcomeUser.SetLabel(
                "Phone number is not valid.")
    else:
        self.welcomeUser.SetLabel(
            "Please fill in all fields.")
def onBackBtn(self, event):
    """Leave the registration form and restore the plain login view."""
    # Destroys the objects not needed for the view
    self.firstNameCtrl.Destroy()
    self.lastNameCtrl.Destroy()
    self.sexCombo.Destroy()
    self.ageCtrl.Destroy()
    self.emailCtrl.Destroy()
    self.phoneNumberCtrl.Destroy()
    self.acceptRegisterBtn.Destroy()
    self.welcomeUser.Destroy()
    self.firstNameTxt.Destroy()
    self.lastNameTxt.Destroy()
    self.sexTxt.Destroy()
    self.ageTxt.Destroy()
    self.emailTxt.Destroy()
    self.phoneNumberTxt.Destroy()
    self.backBtn.Destroy()
    # The validation controls only exist if a code was already sent.
    try:
        self.checkBtn.Destroy()
        self.checkCtrl.Destroy()
    except:
        pass
    # Sets position of controls
    self.usernameCtrl.SetPosition(wx.Point(500, 160))
    self.passwordCtrl.SetPosition(wx.Point(500, 200))
    # Recreates the login and register buttons
    self.loginBtn = wx.Button(
        self, label='Login', pos=(500, 240), size=(200, 26))
    self.registerBtn = wx.Button(
        self, label='Register', pos=(500, 350), size=(200, 26))
    self.scanQrBtn = wx.Button(
        self, label='Scan QR', pos=(500, 400), size=(200, 26))
    # Recreates the static texts for the current view
    self.not_regi_text = wx.StaticText(
        self, label="Not registered?", pos=(513, 310), size=(200, 26))
    self.fail_login = wx.StaticText(
        self, label="", pos=(520, 100), size=(200, 26))
    self.welcomeUser = wx.StaticText(
        self, label="", pos=(440, 50), size=(200, 26))
    self.firstNameTxt = wx.StaticText(
        self, label="", pos=(370, 241), size=(100, 26))
    self.lastNameTxt = wx.StaticText(
        self, label="", pos=(370, 281), size=(100, 26))
    self.sexTxt = wx.StaticText(
        self, label="", pos=(370, 321), size=(100, 26))
    self.ageTxt = wx.StaticText(
        self, label="", pos=(370, 361), size=(100, 26))
    self.emailTxt = wx.StaticText(
        self, label="", pos=(370, 401), size=(100, 26))
    self.phoneNumberTxt = wx.StaticText(
        self, label="", pos=(370, 441), size=(100, 26))
    # Sets fonts
    self.not_regi_text.SetFont(self.font)
    self.fail_login.SetFont(self.font)
    self.welcomeUser.SetFont(self.font)
    self.firstNameTxt.SetFont(self.font)
    self.lastNameTxt.SetFont(self.font)
    self.sexTxt.SetFont(self.font)
    self.ageTxt.SetFont(self.font)
    self.emailTxt.SetFont(self.font)
    self.phoneNumberTxt.SetFont(self.font)
    self.loginBtn.SetFont(self.font)
    self.registerBtn.SetFont(self.font)
    self.scanQrBtn.SetFont(self.font)
    # Binding buttons
    self.registerBtn.Bind(wx.EVT_BUTTON, self.onRegister)
    self.loginBtn.Bind(wx.EVT_BUTTON, self.onLogin)
    self.scanQrBtn.Bind(wx.EVT_BUTTON, self.onScanQR)
    def onLogout(self, event):
        """Tear down the logged-in view and rebuild the login form.

        Destroys the admin tab (if present), recreates the login widgets,
        clears the global currentUser, disables the member-only tabs and
        restores the login tab's title and icon.
        """
        # If the AdminPanel tab exists (added for admin users), delete it
        if notebook.GetPageText(4)=="AdminPanel":
            notebook.DeletePage(4)
        self.welcomeUser.SetLabel("")
        # Sets Buttons for the current view
        self.loginBtn = wx.Button(
            self, label='Login', pos=(500, 240), size=(200, 26))
        self.registerBtn = wx.Button(
            self, label='Register', pos=(500, 350), size=(200, 26))
        self.scanQrBtn = wx.Button(
            self, label='Scan QR', pos=(500, 400), size=(200, 26))
        # Sets StaticTexts for the current view
        self.not_regi_text = wx.StaticText(
            self, label="Not registered?", pos=(513, 310), size=(200, 26))
        self.fail_login = wx.StaticText(
            self, label="", pos=(520, 100), size=(200, 26))
        self.usernameTxt = wx.StaticText(
            self, label="Username:", pos=(370, 161), size=(100, 26))
        self.passwordTxt = wx.StaticText(
            self, label="Password:", pos=(370, 201), size=(100, 26))
        # Sets TextCtrls for the current view
        self.usernameCtrl = wx.TextCtrl(self, pos=(500, 160), size=(200, 26))
        self.passwordCtrl = wx.TextCtrl(self, pos=(
            500, 200), style=(wx.TE_PASSWORD), size=(200, 26))
        # Sets fonts
        self.usernameTxt.SetFont(self.font)
        self.passwordTxt.SetFont(self.font)
        self.not_regi_text.SetFont(self.font)
        self.fail_login.SetFont(self.font)
        self.welcomeUser.SetFont(self.font)
        self.loginBtn.SetFont(self.font)
        self.registerBtn.SetFont(self.font)
        self.scanQrBtn.SetFont(self.font)
        # Destroys the logoutBtn
        self.logoutBtn.Destroy()
        # Binds the Buttons
        self.registerBtn.Bind(wx.EVT_BUTTON, self.onRegister)
        self.loginBtn.Bind(wx.EVT_BUTTON, self.onLogin)
        self.scanQrBtn.Bind(wx.EVT_BUTTON, self.onScanQR)
        # Sets the currentUser to be empty
        global currentUser
        currentUser = []
        # Disables the other tabs after logging out
        notebook.EnableTab(1, False)
        notebook.EnableTab(2, False)
        notebook.EnableTab(3, False)
        # Destroys the QR Code and StaticText for the first and lastname
        self.png.Destroy()
        self.userDetailsTxt.Destroy()
        # Changes the tab text to Login and returns the login image
        notebook.SetPageText(0, 'Login')
        notebook.SetPageImage(0, 0)
# Class that contains the logic for buying a membership, checkin in and out and displaying the membership history
# Class that contains the logic for buying a membership, checking in and out and displaying the membership history
class Memberships(wx.Panel):
    """Tab for buying a membership, checking in/out and viewing membership history.

    Relies on module-level globals: `c`/`conn` (sqlite3 cursor/connection),
    `notebook`, `currentUser`, `currentVisitId`, `dateList`, `exerHist`.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        # STATICTEXT-START
        self.buyTxt = wx.StaticText(
            self, label="Please choose the type of the Membership:", pos=(10, 10), size=(500, 26))
        self.dateTxt = wx.StaticText(
            self, label="Select start date:", pos=(10, 60), size=(230, 26))
        self.memTypeTxt = wx.StaticText(
            self, label="Select Membership:", pos=(10, 120), size=(230, 26))
        self.monthCountTxt = wx.StaticText(
            self, label="Number of Months:", pos=(10, 180), size=(230, 26))
        self.priceTxt = wx.StaticText(
            self, label="Total Price($): 8", pos=(10, 240), size=(230, 26))
        self.checkInTxt = wx.StaticText(
            self, label="Already a Client? Check IN!", pos=(10, 360), size=(400, 26))
        self.checkOutTxt = wx.StaticText(
            self, label="Check OUT after each visit!", pos=(10, 480), size=(400, 26))
        self.weightStartTxt = wx.StaticText(
            self, label="Weight:", pos=(10, 400), size=(100, 26))
        self.weightEndTxt = wx.StaticText(
            self, label="Weight:", pos=(10, 520), size=(100, 26))
        self.failBuyTxt = wx.StaticText(
            self, label="", pos=(200, 300), size=(100, 26))
        # STATICTEXT-END
        # TEXTCTRL-START
        self.weightStartCtrl = wx.TextCtrl(self, pos=(10, 440), size=(100, 26))
        self.weightEndCtrl = wx.TextCtrl(self, pos=(10, 560), size=(100, 26))
        # TEXTCTRL-END
        # DATEPICKERCTRL-START
        self.datepickerStart = wx.adv.DatePickerCtrl(
            self, pos=(250, 60), size=(200, 26), style=wx.adv.DP_DROPDOWN)
        # Sets the range of the datepicker so the user can't choose a date that is before today
        self.datepickerStart.SetRange(
            datetime.date(datetime.now()), datetime.max)
        # DATEPICKERCTRL-END
        # COMBOBOX-START
        self.MonthsChoice = wx.SpinCtrl(self, pos=(250, 176), size=(200, 35), style=(wx.CB_READONLY))
        self.MonthsChoice.SetRange(1, 11)
        self.MembershipType = wx.ComboBox(self, pos=(250, 120), style=(wx.CB_READONLY), size=(
            200, 26), choices=['One time use', 'Monthly', 'Yearly'])
        # Setting the choices to be the first of every list
        self.MembershipType.SetSelection(0)
        # Disabling MonthsChoice since first selection is One time use
        self.MonthsChoice.Disable()
        # COMBOBOX-END
        # LISTCTRL-START
        self.userHist = wx.ListCtrl(self, pos=(650, 20), size=(
            600, 400), style=wx.LC_REPORT | wx.BORDER_SUNKEN)
        # Inserting the columns for the ListCtrl
        self.userHist.InsertColumn(0, 'Type', width=100)
        self.userHist.InsertColumn(1, 'Activation Date', width=100)
        self.userHist.InsertColumn(2, 'End Date', width=100)
        self.userHist.InsertColumn(3, 'Purchase Date', width=150)
        self.userHist.InsertColumn(4, 'Valid')
        self.userHist.InsertColumn(5, 'Price')
        # LISTCTRL-END
        # BUTTONS-START
        self.buyBtn = wx.Button(
            self, label='Buy', pos=(10, 300), size=(130, 30))
        self.checkInBtn = wx.Button(
            self, label='Check IN', pos=(150, 440), size=(130, 30))
        self.checkOutBtn = wx.Button(
            self, label='Check OUT', pos=(150, 560), size=(130, 30))
        # BUTTONS-END
        # FONTS-START
        self.font = wx.Font(15, family=wx.FONTFAMILY_MODERN, style=0, weight=70,
                            underline=False, faceName="", encoding=wx.FONTENCODING_DEFAULT)
        self.font.SetWeight(wx.BOLD)
        self.buyBtn.SetForegroundColour(wx.Colour(204, 127, 50))
        self.checkInBtn.SetForegroundColour(wx.Colour(0, 255, 0))
        self.checkOutBtn.SetForegroundColour(wx.Colour(255, 0, 0))
        self.buyTxt.SetFont(self.font)
        self.priceTxt.SetFont(self.font)
        self.dateTxt.SetFont(self.font)
        self.memTypeTxt.SetFont(self.font)
        self.monthCountTxt.SetFont(self.font)
        self.buyBtn.SetFont(self.font)
        self.checkInTxt.SetFont(self.font)
        self.checkOutTxt.SetFont(self.font)
        self.checkInBtn.SetFont(self.font)
        self.checkOutBtn.SetFont(self.font)
        self.weightStartTxt.SetFont(self.font)
        self.weightEndTxt.SetFont(self.font)
        self.failBuyTxt.SetFont(self.font)
        # FONTS-END
        # BINDING-START
        self.buyBtn.Bind(wx.EVT_BUTTON, self.onBuy)
        self.checkInBtn.Bind(wx.EVT_BUTTON, self.onCheckIn)
        self.checkOutBtn.Bind(wx.EVT_BUTTON, self.onCheckOut)
        self.MembershipType.Bind(wx.EVT_TEXT, self.onMemberType)
        self.MonthsChoice.Bind(wx.EVT_TEXT, self.onPriceChange)
        # Refreshes the membership history list when the tab changes
        notebook.Bind(aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.onHistory)
        # BINDING-END
        # Sets default price, default monthCount and checks if the memberships are valid
        self.price = 8
        self.monthCount = int(self.MonthsChoice.GetValue())
        self.checkValid(self)
        # Disabling objects that are not needed yet
        self.checkOutBtn.Disable()
        self.weightEndCtrl.Disable()
        self.datepickerStart.Disable()

    def checkValid(self, event):
        """Mark every membership whose end date has passed as not valid."""
        # Query for getting the end dates and ids of all memberships
        QUERY = "SELECT mem_end, mem_id FROM users_memberships"
        c.execute(QUERY)
        resultDatesQuery = c.fetchall()
        # The time the user logged in
        timeLogIn = datetime.now()
        expiredIds = []
        for x in resultDatesQuery:
            # The database stores end dates at midnight while datetime.now()
            # carries a time component, so add a day before comparing.
            if (datetime.strptime(x[0], "%Y-%m-%d") + relativedelta(days=1)) < timeLogIn:
                expiredIds.append(x[1])
        # Nothing expired: skip the update — "IN ()" is a SQL syntax error
        if not expiredIds:
            return
        # Parameterized update instead of interpolating the list's repr
        placeholders = ",".join("?" * len(expiredIds))
        c.execute("UPDATE users_memberships SET is_valid = 0 WHERE mem_id IN (" +
                  placeholders + ")", expiredIds)
        conn.commit()

    def onCheckIn(self, event):
        """Start a visit for the current user if they hold a valid membership."""
        initialWeight = self.weightStartCtrl.GetValue()
        # Parameterized query to check for a valid membership for the current user
        c.execute("SELECT is_valid FROM users_memberships WHERE is_valid = 1 AND users_id = ?",
                  (currentUser[0][0],))
        validMem = c.fetchall()
        # If there is a valid membership the check in continues, otherwise we don't do anything
        if validMem:
            timeCheckIn = datetime.now()
            timeCheckIn = str(timeCheckIn).split('.')
            timeCheckIn = timeCheckIn[0]
            # Inserts into the database the details except the visit's end and ending weight
            c.execute("INSERT INTO USERS_VISITS VALUES (?, ?, ?, ?, ?, ?)",
                      (None, currentUser[0][0], timeCheckIn, "", str(initialWeight), ""))
            conn.commit()
            # We get the id of the last inserted visit and set it to the global currentVisitId
            c.execute("SELECT last_insert_rowid()")
            visitIdQuery = c.fetchall()
            global currentVisitId
            currentVisitId = visitIdQuery[0][0]
            # Disabling and enabling the necessary objects
            self.checkInBtn.Disable()
            self.weightStartCtrl.Disable()
            self.checkOutBtn.Enable()
            self.weightEndCtrl.Enable()
            notebook.EnableTab(3, True)
            notebook.EnableTab(0, False)
            self.failBuyTxt.SetLabel("")

    def onCheckOut(self, event):
        """Close the current visit: store end time/weight and reset the UI."""
        endingWeight = self.weightEndCtrl.GetValue()
        try:
            # One time use memberships expire on checkout
            updateValid = """Update users_memberships set is_valid = ? WHERE mem_type = ? AND users_id = ?"""
            updateValidData = (0, 'One time use', str(currentUser[0][0]))
            c.execute(updateValid, updateValidData)
            conn.commit()
            # Gets the time of the check out
            timeCheckOut = datetime.now()
            timeCheckOut = str(timeCheckOut).split('.')
            timeCheckOut = timeCheckOut[0]
            # Updates the users_visits table to set the end date and end weight
            global currentVisitId
            updateQuery = """Update USERS_VISITS set visits_end = ?, weight_end = ? where visits_id = ?"""
            updateData = (timeCheckOut, endingWeight, currentVisitId)
            c.execute(updateQuery, updateData)
            conn.commit()
            # Empties the currentVisitId
            currentVisitId = None
            # Enabling and disabling the required objects
            self.checkInBtn.Enable()
            self.weightStartCtrl.Enable()
            notebook.EnableTab(3, False)
            self.checkOutBtn.Disable()
            self.weightEndCtrl.Disable()
            # Deleting the exercises that were in the exercise tab
            exerHist.DeleteAllItems()
            # Updating the membership history
            self.onHistory(event)
            # Updating the visit list in the visit tab by posting a synthetic event
            evt = wx.FocusEvent(
                wx.EVT_LIST_ITEM_MIDDLE_CLICK.evtType[0], dateList.GetId())
            wx.PostEvent(dateList.GetEventHandler(), evt)
            # Enabling the logout tab
            notebook.EnableTab(0, True)
            self.failBuyTxt.SetLabel("")
            # If the check out is caused by closing the program we exit
            if event.GetEventType() == wx.EVT_CLOSE.typeId:
                wx.Exit()
        except Exception:
            # Best-effort: still exit if the checkout came from closing the app
            if event.GetEventType() == wx.EVT_CLOSE.typeId:
                wx.Exit()

    def onBuy(self, event):
        """Buy a membership of the selected type, unless one is already valid."""
        # Checks if there is a valid membership (parameterized query)
        c.execute("SELECT is_valid FROM users_memberships WHERE is_valid = 1 AND users_id = ?",
                  (currentUser[0][0],))
        validMem = c.fetchall()
        # If there is no valid one we can proceed with buying
        if not validMem:
            # The datepicker returns months like 2021-1-21, so zero-pad the month
            resultDate = ''
            if (self.datepickerStart.GetValue().GetMonth() + 1) < 10:
                resultDate = '0' + \
                    str(self.datepickerStart.GetValue().GetMonth() + 1)
            else:
                resultDate = str(
                    self.datepickerStart.GetValue().GetMonth() + 1)
            datepickerValue = str(self.datepickerStart.GetValue().GetYear(
            )) + "-" + resultDate + "-" + str(self.datepickerStart.GetValue().GetDay())
            # Turning the resulting string to datetime
            dateStartObj = datetime.strptime(datepickerValue, '%Y-%m-%d')
            # Getting the membership type
            memberType = self.MembershipType.GetSelection()
            self.MembershipType.SetSelection(memberType)
            # Adds the selected amount of months to calculate end date of monthly type
            dateAfterMonthEnd = (
                dateStartObj + relativedelta(months=self.monthCount))
            dateAfterMonthEnd = str(dateAfterMonthEnd).split(' ')
            dateAfterMonthEnd = dateAfterMonthEnd[0]
            # Adds a year to the end date of yearly type
            dateAfterYear = (dateStartObj + relativedelta(months=12))
            dateAfterYear = str(dateAfterYear).split(' ')
            dateAfterYear = dateAfterYear[0]
            # Converts the start date to string so we can input into database
            dateStartObj = str(dateStartObj).split(' ')
            dateStartObj = dateStartObj[0]
            # Gets the date of purchase along with hours, minutes and seconds
            dateOfPurchase = datetime.now()
            dateAfterDay = (dateOfPurchase + relativedelta(days=1))
            dateOfPurchase = str(dateOfPurchase).split('.')
            dateOfPurchase = dateOfPurchase[0]
            # Sets the one time use start date to be today
            dateOfOneTimeUse = dateOfPurchase.split(' ')
            dateOfOneTimeUse = dateOfOneTimeUse[0]
            # Sets the one time use to be valid for one day only
            dateAfterDay = str(dateAfterDay).split(' ')
            dateAfterDay = dateAfterDay[0]
            try:
                # Inserts into the database the required fields depending on the membership type
                if self.MembershipType.GetSelection() == 1:  # monthly
                    c.execute("INSERT INTO USERS_MEMBERSHIPS VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                              (None, currentUser[0][0], dateStartObj, dateAfterMonthEnd, dateOfPurchase, 'Monthly', self.price, 1))
                    conn.commit()
                elif self.MembershipType.GetSelection() == 2:  # yearly
                    c.execute("INSERT INTO USERS_MEMBERSHIPS VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                              (None, currentUser[0][0], dateStartObj, dateAfterYear, dateOfPurchase, 'Yearly', self.price, 1))
                    conn.commit()
                elif self.MembershipType.GetSelection() == 0:  # one time use
                    c.execute("INSERT INTO USERS_MEMBERSHIPS VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                              (None, currentUser[0][0], dateOfOneTimeUse, dateAfterDay, dateOfPurchase, 'One time use', self.price, 1))
                    conn.commit()
                # Refreshes the membership history list
                self.onHistory(event)
                self.failBuyTxt.SetLabel("")
            except Exception:
                # Deliberate best-effort: a failed purchase leaves the UI untouched
                pass
        else:
            self.failBuyTxt.SetLabel("Already with valid Membership!")

    def onHistory(self, event):
        """Refill the membership-history list for the current user."""
        try:
            # Clears the membership list
            self.userHist.DeleteAllItems()
            # Selects the memberships for the current user (parameterized)
            c.execute("SELECT mem_type, mem_start, mem_end, purchase_date, is_valid, money_paid "
                      "FROM users_memberships WHERE users_id = ?", (currentUser[0][0],))
            histQuery = c.fetchall()
            index = 0
            for x in histQuery:
                # Inserts into the membership list the results from the query
                self.userHist.InsertItem(index, x[0])
                self.userHist.SetItem(index, 1, x[1])
                self.userHist.SetItem(index, 2, x[2])
                self.userHist.SetItem(index, 3, x[3])
                self.userHist.SetItem(index, 4, str(x[4]))
                self.userHist.SetItem(index, 5, str(x[5]))
                index += 1
        except Exception:
            # Fires on tab changes before login, when currentUser is empty
            pass

    def onPriceChange(self, event):
        """Recalculate the price when the month count changes."""
        self.monthCount = int(self.MonthsChoice.GetValue())
        if self.MembershipType.GetSelection() == 1:  # monthly
            self.price = 34 * self.monthCount
        elif self.MembershipType.GetSelection() == 2:  # yearly
            self.price = 377
        elif self.MembershipType.GetSelection() == 0:  # one time use
            self.price = 8
        self.priceTxt.SetLabel("Total Price($): " + str(self.price))

    def onMemberType(self, event):
        """Recalculate the price and toggle inputs when the type changes."""
        if self.MembershipType.GetSelection() == 1:  # monthly
            self.MonthsChoice.Enable()
            self.monthCount = int(self.MonthsChoice.GetValue())
            self.price = 34 * self.monthCount
            self.priceTxt.SetLabel("Total Price($): " + str(self.price))
            self.datepickerStart.Enable()
        else:
            self.MonthsChoice.Disable()
            if self.MembershipType.GetSelection() == 2:  # yearly
                self.price = 377
                self.datepickerStart.Enable()
            elif self.MembershipType.GetSelection() == 0:  # one time use
                self.price = 8
                self.datepickerStart.Disable()
            self.priceTxt.SetLabel("Total Price($): " + str(self.price))
# Class that contains logic for displaying the visits history along with the exercises of each visit
class Visits(wx.Panel):
    """Tab showing finished visits and the exercises logged during each one.

    Relies on module-level globals: `c` (sqlite3 cursor), `currentUser`,
    and publishes `dateList` / `visitsIds` as globals for other tabs.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        # LISTCTRL-START
        # List for displaying the visits (global: refreshed from other tabs)
        global dateList
        dateList = wx.ListCtrl(self, pos=(60, 26), size=(
            500, 500), style=wx.LC_REPORT | wx.BORDER_SUNKEN)
        dateList.InsertColumn(0, 'Start Date', width=150)
        dateList.InsertColumn(1, 'End Date', width=150)
        dateList.InsertColumn(2, 'Start Weight', width=100)
        dateList.InsertColumn(3, 'End Weight', width=100)
        # List for displaying the exercises on each visit
        self.exercisesList = wx.ListCtrl(self, pos=(700, 26), size=(
            500, 500), style=wx.LC_REPORT | wx.BORDER_SUNKEN)
        self.exercisesList.InsertColumn(0, 'Exercises', width=150)
        self.exercisesList.InsertColumn(1, 'Type', width=150)
        self.exercisesList.InsertColumn(2, 'Sets', width=50)
        self.exercisesList.InsertColumn(3, 'Reps', width=50)
        self.exercisesList.InsertColumn(4, 'Minutes', width=100)
        # LISTCTRL-END
        # BINDING-START
        dateList.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onExercisesHist)
        # Middle-click events are posted programmatically to force a refresh
        dateList.Bind(wx.EVT_LIST_ITEM_MIDDLE_CLICK, self.onDatesHist)
        # BINDING-END

    def onDatesHist(self, event):
        """Refill the visit list with the current user's finished visits."""
        # Clears the list before adding new items
        dateList.DeleteAllItems()
        # Only visits that have ended (the current one is excluded until checkout)
        c.execute("SELECT visits_id, visits_start, visits_end, weight_start, weight_end "
                  "FROM USERS_VISITS WHERE visits_end != '' AND users_id = ?",
                  (currentUser[0][0],))
        wayInDates = c.fetchall()
        # Keep the visit ids in a global, aligned with the list rows,
        # so onExercisesHist can map a clicked row back to its visit
        global visitsIds
        visitsIds = []
        index = 0
        for x in wayInDates:
            dateList.InsertItem(index, x[1])
            dateList.SetItem(index, 1, x[2])
            dateList.SetItem(index, 2, x[3])
            dateList.SetItem(index, 3, x[4])
            index += 1
            visitsIds.append(x[0])

    def onExercisesHist(self, event):
        """Show the exercises logged during the clicked visit."""
        try:
            # We clear the exercises list
            self.exercisesList.DeleteAllItems()
            # We get the visit that was clicked
            clickedItem = dateList.GetFocusedItem()
            # Exercises recorded for that visit id (parameterized query)
            c.execute("SELECT category, exercise, sets, reps, mins "
                      "FROM users_training WHERE visits_id = ?",
                      (visitsIds[clickedItem],))
            histQuery = c.fetchall()
            index = 0
            # We insert into the exercises list the results of the query
            for x in histQuery:
                self.exercisesList.InsertItem(index, x[0])
                self.exercisesList.SetItem(index, 1, x[1])
                self.exercisesList.SetItem(index, 2, x[2])
                self.exercisesList.SetItem(index, 3, x[3])
                self.exercisesList.SetItem(index, 4, x[4])
                index += 1
        except Exception:
            # Best-effort: fires with no selection / before visitsIds exists
            pass
# Class that contains logic for saving exercises, along with removing and clearing them
# Class that contains logic for saving exercises, along with removing and clearing them
class Exercises(wx.Panel):
    """Tab for logging exercises during a visit (save/remove/clear).

    Relies on module-level globals: `c`/`conn` (sqlite3 cursor/connection),
    `currentUser`, `currentVisitId`, `currentExercises`, and publishes the
    global `exerHist` list control.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        # Distinct exercise names from the exercises table feed the ComboBox.
        # fetchall() avoids the fragile repr-slicing of fetchone() tuples,
        # which mis-parsed names containing quotes.
        c.execute('SELECT DISTINCT exercise_name FROM exercises')
        self.exercise_choices = [row[0] for row in c.fetchall()]
        # COMBOBOX-START
        self.exerciseChoices = wx.ComboBox(self, pos=(250, 60), size=(200, 26), style=(wx.CB_READONLY), choices=self.exercise_choices)
        self.variantChoice = wx.ComboBox(self, pos=(
            250, 130), size=(200, 26), style=(wx.CB_READONLY), choices=[])
        # COMBOBOX-END
        # Free-text inputs shown instead of the ComboBoxes for custom exercises
        self.alternativeExercise = wx.TextCtrl(self, pos=(250, 60), size=(200, 26))
        self.alternativeType = wx.TextCtrl(self, pos=(250, 130), size=(200, 26))
        self.alternativeExercise.Hide()
        self.alternativeType.Hide()
        self.alternative_exercise_flag = False
        self.ImageToBit = wx.Image('images/default_image.jpeg', wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        self.MainPicture = wx.StaticBitmap(self, -1, self.ImageToBit, (700, 50), (self.ImageToBit.GetWidth(), self.ImageToBit.GetHeight()))
        self.workoutBox = wx.StaticBox(self, label='Workout Exercises', pos=(5, 420), size=(1272, 200))
        # BUTTONS-START
        self.saveEx = wx.Button(
            self, label='Save', pos=(10, 450), size=(150, 26))
        self.altEx = wx.Button(
            self, label='Alternative Exercise', pos=(5, 385), size=(200, 26))
        self.removeEx = wx.Button(
            self, label='Remove', pos=(10, 516), size=(150, 26))
        self.clearEx = wx.Button(
            self, label='Clear', pos=(10, 582), size=(150, 26))
        # BUTTONS-END
        # LISTCTRL-START
        global exerHist
        exerHist = wx.ListCtrl(self, pos=(170, 450), size=(1070, 160), style=wx.LC_REPORT | wx.BORDER_SUNKEN)
        exerHist.InsertColumn(0, 'Exercises', width=150)
        exerHist.InsertColumn(1, 'Type', width=150)
        exerHist.InsertColumn(2, 'Sets', width=50)
        exerHist.InsertColumn(3, 'Reps', width=50)
        exerHist.InsertColumn(4, 'Minutes', width=100)
        # LISTCTRL-END
        # STATICTEXT-START
        self.subjTxt = wx.StaticText(
            self, label="Please enter you Exercises:", pos=(5, 1), size=(500, 26))
        self.catagoryTxt = wx.StaticText(
            self, label="Choose a Category:", pos=(5, 60), size=(230, 26))
        self.exercisesTxt = wx.StaticText(
            self, label="Choose a Exercises:", pos=(5, 130), size=(230, 26))
        self.setsTxt = wx.StaticText(
            self, label="Number of Sets:", pos=(5, 200), size=(180, 26))
        self.repsTxt = wx.StaticText(
            self, label="Number of Reps:", pos=(5, 270), size=(180, 26))
        self.minsTxt = wx.StaticText(
            self, label="Time (Mins):", pos=(5, 340), size=(150, 26))
        # STATICTEXT-END
        # TEXTCTRL-START
        self.inputSets = wx.SpinCtrl(self, pos=(250, 196), size=(200, 35), style=(wx.CB_READONLY))
        self.inputReps = wx.SpinCtrl(self, pos=(250, 266), size=(200, 35), style=(wx.CB_READONLY))
        self.inputMins = wx.SpinCtrl(self, pos=(250, 337), size=(200, 35), style=(wx.CB_READONLY))
        # TEXTCTRL-END
        # FONTS-START
        self.font = wx.Font(15, family=wx.FONTFAMILY_MODERN, style=0, weight=70,
                            underline=False, faceName="", encoding=wx.FONTENCODING_DEFAULT)
        self.font.SetWeight(wx.BOLD)
        self.subjTxt.SetFont(self.font)
        self.catagoryTxt.SetFont(self.font)
        self.exercisesTxt.SetFont(self.font)
        self.setsTxt.SetFont(self.font)
        self.repsTxt.SetFont(self.font)
        self.minsTxt.SetFont(self.font)
        self.saveEx.SetFont(self.font)
        self.removeEx.SetFont(self.font)
        self.clearEx.SetFont(self.font)
        # FONTS-END
        # DISABLE-START
        self.inputReps.Disable()
        self.inputSets.Disable()
        self.inputMins.Disable()
        self.variantChoice.Disable()
        # DISABLE-END
        # COLOUR-START
        self.saveEx.SetForegroundColour(wx.Colour(0, 255, 0))
        self.removeEx.SetForegroundColour(wx.Colour(255, 0, 0))
        self.clearEx.SetForegroundColour(wx.Colour(255, 0, 0))
        # COLOUR-END
        # BINDING-START
        self.exerciseChoices.Bind(wx.EVT_COMBOBOX, self.onExercises)
        self.variantChoice.Bind(wx.EVT_TEXT, self.onVariant)
        self.saveEx.Bind(wx.EVT_BUTTON, self.onSave)
        self.removeEx.Bind(wx.EVT_BUTTON, self.onRemove)
        self.clearEx.Bind(wx.EVT_BUTTON, self.onClearEx)
        self.altEx.Bind(wx.EVT_BUTTON, self.onAlternativeEx)
        # BINDING-END

    def onClearEx(self, event):
        """Delete every exercise logged for the current visit."""
        # Parameterized delete instead of string concatenation
        c.execute("DELETE FROM users_training WHERE visits_id = ?",
                  (currentVisitId,))
        conn.commit()
        # Clears the exercises list and global currentExercises
        exerHist.DeleteAllItems()
        currentExercises.clear()

    def onRemove(self, event):
        """Remove the selected exercise from the list and the database."""
        try:
            # Gets the selected item and deletes it from the list
            selecteditem = exerHist.GetFocusedItem()
            exerHist.DeleteItem(selecteditem)
            # Deletes the exercise with the training id of the selected item
            c.execute("DELETE FROM users_training WHERE training_id = ?",
                      (currentExercises[selecteditem],))
            currentExercises.pop(selecteditem)
            # Commit so the deletion survives even if no later commit happens
            conn.commit()
        except Exception:
            # Best-effort: fires when nothing is selected
            pass

    def onHistExercises(self, event):
        """Refill the exercise list with everything logged this visit."""
        try:
            # Clears the exercises list
            exerHist.DeleteAllItems()
            # Selects the exercises for the current visit (parameterized)
            c.execute("SELECT category, exercise, sets, reps, mins "
                      "FROM users_training WHERE visits_id = ?",
                      (currentVisitId,))
            histQuery = c.fetchall()
            index = 0
            # Inserts into the list the result of the query
            for x in histQuery:
                exerHist.InsertItem(index, x[0])
                exerHist.SetItem(index, 1, x[1])
                exerHist.SetItem(index, 2, x[2])
                exerHist.SetItem(index, 3, x[3])
                exerHist.SetItem(index, 4, x[4])
                index += 1
        except Exception:
            # Best-effort: fires before a visit has started
            pass

    def onSave(self, event):
        """Store the selected (or custom) exercise for the current visit."""
        # Restore the ComboBox view in case the alternative inputs were shown
        self.alternativeExercise.Hide()
        self.alternativeType.Hide()
        self.exerciseChoices.Show()
        self.variantChoice.Show()
        if not self.alternative_exercise_flag:
            if self.exerciseChoices.GetValue() != '':
                try:
                    # Inserts into the database the selected training
                    c.execute("INSERT INTO users_training VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                              (None, currentVisitId, currentUser[0][0], self.exerciseChoices.GetValue(), self.variantChoice.GetValue(), self.inputSets.GetValue(), self.inputReps.GetValue(), self.inputMins.GetValue()))
                    conn.commit()
                    # Refreshes the exercises list
                    self.onHistExercises(event)
                    # Remember the new row id so onRemove can map list rows to ids
                    c.execute("SELECT last_insert_rowid()")
                    exercisesQuery = c.fetchall()
                    global currentExercises
                    currentExercises.append(exercisesQuery[0][0])
                except Exception:
                    pass
        else:
            alternative_exercise = str(self.alternativeExercise.GetValue())
            alternative_type = str(self.alternativeType.GetValue())
            if alternative_exercise and alternative_type:
                try:
                    # Inserts into the database the custom training
                    c.execute("INSERT INTO users_training VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                              (None, currentVisitId, currentUser[0][0], alternative_exercise, alternative_type, self.inputSets.GetValue(), self.inputReps.GetValue(), self.inputMins.GetValue()))
                    conn.commit()
                    # Refreshes the exercises list
                    self.onHistExercises(event)
                    # Remember the new row id so onRemove can map list rows to ids
                    c.execute("SELECT last_insert_rowid()")
                    exercisesQuery = c.fetchall()
                    currentExercises.append(exercisesQuery[0][0])
                except Exception:
                    pass
        self.alternative_exercise_flag = False

    def onExercises(self, event):
        """Rebuild the type ComboBox for the chosen exercise category."""
        exerciseChoice = self.exerciseChoices.GetSelection()
        try:
            self.variantChoice.Destroy()
        except Exception:
            pass
        # Exercise types for the selected name (parameterized; fetchall
        # replaces the old repr-slicing fetchone loop)
        c.execute("SELECT DISTINCT exercise_type FROM exercises WHERE exercise_name = ?",
                  (self.exercise_choices[exerciseChoice],))
        exercise_types = [row[0] for row in c.fetchall()]
        self.variantChoice = wx.ComboBox(self, pos=(250, 130), size=(200, 26), style=(wx.CB_READONLY), choices=exercise_types)
        self.variantChoice.SetSelection(0)
        self.variantChoice.Bind(wx.EVT_TEXT, self.onVariant)
        exerciseType = self.variantChoice.GetSelection()
        c.execute("SELECT sets_reps FROM exercises WHERE exercise_name = ? AND exercise_type = ?",
                  (self.exercise_choices[exerciseChoice], exercise_types[exerciseType]))
        row = c.fetchone()
        sr_flag = str(row[0]) if row else ''
        self.onVariant(event)
        # Cardio machines (sets_reps flag '0') only take minutes
        if sr_flag == '0':
            try:
                self.inputMins.Enable()
                self.inputSets.SetValue(0)
                self.inputReps.SetValue(0)
                self.inputReps.Disable()
                self.inputSets.Disable()
            except Exception:
                pass
        # Enables inputs for everything else
        else:
            self.inputReps.Enable()
            self.inputSets.Enable()
            self.inputMins.Enable()

    def onAlternativeEx(self, event):
        """Swap the ComboBoxes for free-text inputs for a custom exercise."""
        self.alternative_exercise_flag = True
        self.exerciseChoices.Hide()
        self.variantChoice.Hide()
        self.alternativeExercise.Show()
        self.alternativeType.Show()
        self.inputReps.Enable()
        self.inputSets.Enable()
        self.inputMins.Enable()

    def onVariant(self, event):
        """Show the image associated with the selected exercise type."""
        exercise_name = self.variantChoice.GetStringSelection()
        c.execute("SELECT exercise_img FROM exercises WHERE exercise_type = ?",
                  (exercise_name,))
        row = c.fetchone()
        exercise_img_file = str(row[0]) if row else ''
        # Fall back to the default picture when no image is configured
        if exercise_img_file == "None" or not exercise_img_file:
            try:
                self.MainPicture.Destroy()
                self.ImageToBit = wx.Image('images/default_image.jpeg', wx.BITMAP_TYPE_ANY).ConvertToBitmap()
                self.MainPicture = wx.StaticBitmap(self, -1, self.ImageToBit, (700, 50), (self.ImageToBit.GetWidth(), self.ImageToBit.GetHeight()))
            except Exception:
                pass
        else:
            try:
                self.MainPicture.Destroy()
                self.ImageToBit = wx.Image(exercise_img_file, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
                self.MainPicture = wx.StaticBitmap(self, -1, self.ImageToBit, (700, 20), (self.ImageToBit.GetWidth(), self.ImageToBit.GetHeight()))
            except Exception:
                pass
class AdminPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
#Create ListCtrl for exercise database
self.exerciseList = wx.ListCtrl(self, pos=(10,10), size=(1260, 500), style=wx.LC_REPORT | wx.BORDER_SUNKEN)
self.exerciseList.InsertColumn(0, 'Exercise ID', width=100)
self.exerciseList.InsertColumn(1, 'Exercise Name', width=345)
self.exerciseList.InsertColumn(2, 'Exercise Type', width=345)
self.exerciseList.InsertColumn(3, 'Sets&Reps Applicable?', width=170)
self.exerciseList.InsertColumn(4, 'Exercise Image File', width=300)
#Create buttons
self.addexBtn = wx.Button(self, label="Add", pos=(10, 525), size=(100,26))
self.editexBtn = wx.Button(self, label="Edit", pos=(120, 525), size=(100,26))
self.delexBtn = wx.Button(self, label="Delete", pos=(230, 525), size=(100,26))
self.refreshBtn = wx.Button(self, label="Refresh Exercises", pos=(340, 525), size=(150,26))
self.grantBtn = wx.Button(self, label="Grant admin Rights", pos=(810, 525), size=(150,26))
self.msgtxtTxt = wx.StaticText(self, label="", pos=(500,530), size=(300,26))
#Create TextCtrls for input data
self.additionGroup = wx.StaticBox(self, label="Add/Edit/Delete Exercise Parameters", pos=(10, 560), size=(1260, 60))
self.exidCtrl = wx.TextCtrl(self, pos=(20, 585), size=(90,26))
self.exnameCtrl = wx.TextCtrl(self, pos=(115, 585), size=(330,26))
self.extypeCtrl = wx.TextCtrl(self, pos=(450, 585), size=(330,26))
self.setsrepsCtrl = wx.TextCtrl(self, pos=(785, 585), size=(175,26))
self.imageCtrl = wx.TextCtrl(self, pos=(965, 585), size=(295,26))
self.grantCtrl = wx.TextCtrl(self, pos=(970, 525), size=(290,26))
#Fill in the ListCtrl with existing exercises from exercise database
c.execute("SELECT * FROM exercises ORDER BY exercise_name")
exercise_ad = c.fetchall()
index = 0
for x in exercise_ad:
self.exerciseList.InsertItem(index, str(x[0]))
self.exerciseList.SetItem(index, 1, x[1])
self.exerciseList.SetItem(index, 2, x[2])
self.exerciseList.SetItem(index, 3, str(x[3]))
self.exerciseList.SetItem(index, 4, x[4])
index += 1
#BINDING START
self.exerciseList.Bind(wx.EVT_LIST_ITEM_SELECTED, self.adminSelect)
self.addexBtn.Bind(wx.EVT_BUTTON, self.adminAdd)
self.editexBtn.Bind(wx.EVT_BUTTON, self.adminEdit)
self.delexBtn.Bind(wx.EVT_BUTTON, self.adminDelete)
self.refreshBtn.Bind(wx.EVT_BUTTON, self.adminRefresh)
self.grantBtn.Bind(wx.EVT_BUTTON, self.adminGrant)
#BINDING END
def adminSelect(self, event):
#Clear input fields
self.exidCtrl.Clear()
self.exnameCtrl.Clear()
self.extypeCtrl.Clear()
self.setsrepsCtrl.Clear()
self.imageCtrl.Clear()
#Get data from list item
clickedItem = self.exerciseList.GetFocusedItem()
id = self.exerciseList.GetItem(clickedItem, 0)
name = self.exerciseList.GetItem(clickedItem, 1)
type = self.exerciseList.GetItem(clickedItem, 2)
flag = self.exerciseList.GetItem(clickedItem, 3)
imgaddr = self.exerciseList.GetItem(clickedItem, 4)
#Fill in the gotten data into corresponding input fields
self.exidCtrl.write(id.GetText())
self.exnameCtrl.write(name.GetText())
self.extypeCtrl.write(type.GetText())
self.setsrepsCtrl.write(flag.GetText())
self.imageCtrl.write(imgaddr.GetText())
def adminAdd(self, event):
#Clear the message
self.msgtxtTxt.SetLabel("")
try:
#Get values from input fields
id = int(self.exidCtrl.GetValue())
name = str(self.exnameCtrl.GetValue())
type = str(self.extypeCtrl.GetValue())
flag = str(self.setsrepsCtrl.GetValue())
try:
imgaddr = str(self.imageCtrl.GetValue())
except:
imgaddr = 'None'
except:
self.msgtxtTxt.SetLabel("Fields must be filled")
else:
#Check if all the fields are filled
if name and type and flag and imgaddr:
#Try to insert the given data into the exercises table
try:
c.execute("INSERT INTO exercises VALUES (?, ?, ?, ?, ?)", (id, name, type, flag, imgaddr))
conn.commit()
except:
#if there is an error with insertion(most likely ID is not unique), get the max ID value from the table, add 1 to it
#and suggest it as new id by filling it into id input field
c.execute("SELECT MAX(exercise_id) FROM exercises")
exercise_id = c.fetchone()
index = len(exercise_id) - 1
str_id = str(exercise_id)
size = len(str_id)
str_id = str_id[1:size-2]
id = int(str_id) + 1
self.exidCtrl.Clear()
self.exidCtrl.write(str(id))
self.msgtxtTxt.SetLabel("ID must be unique, here's a suggestion")
else:
#if insertion was successful, update exercise list and show message of success
self.exerciseList.DeleteAllItems()
c.execute("SELECT * FROM exercises ORDER BY exercise_name")
exercise_ad = c.fetchall()
index = 0
for x in exercise_ad:
self.exerciseList.InsertItem(index, str(x[0]))
self.exerciseList.SetItem(index, 1, x[1])
self.exerciseList.SetItem(index, 2, x[2])
self.exerciseList.SetItem(index, 3, str(x[3]))
self.exerciseList.SetItem(index, 4, x[4])
index += 1
self.msgtxtTxt.SetLabel("New exercise added!")
else:
self.msgtxtTxt.SetLabel("All fields must be filled")
def adminEdit(self, event):
#Clear the message
self.msgtxtTxt.SetLabel("")
try:
#Get values from input fields
id = int(self.exidCtrl.GetValue())
name = str(self.exnameCtrl.GetValue())
type = str(self.extypeCtrl.GetValue())
flag = self.setsrepsCtrl.GetValue()
try:
imgaddr = str(self.imageCtrl.GetValue())
except:
imgaddr = 'None'
except:
self.msgtxtTxt.SetLabel("Fields must be filled")
else:
#Check if all the fields are filled
if name and type and flag and imgaddr:
#Select row from exercises with input id
QUERY = "SELECT exercise_id FROM exercises WHERE exercise_id='"+str(id)+"'"
c.execute(QUERY)
#If it exists, update it, and show success message
if c.fetchall():
QUERY = "UPDATE exercises SET exercise_name='"+name+"', exercise_type='"+type+"', sets_reps='"+flag+"', exercise_img='"+imgaddr+"' WHERE exercise_id='"+str(id)+"'"
c.execute(QUERY)
conn.commit()
self.exerciseList.DeleteAllItems()
c.execute("SELECT * FROM exercises ORDER BY exercise_name")
exercise_ad = c.fetchall()
index = 0
for x in exercise_ad:
self.exerciseList.InsertItem(index, str(x[0]))
self.exerciseList.SetItem(index, 1, x[1])
self.exerciseList.SetItem(index, 2, x[2])
self.exerciseList.SetItem(index, 3, str(x[3]))
self.exerciseList.SetItem(index, 4, x[4])
index += 1
self.msgtxtTxt.SetLabel("Edit successful!")
else:
#if row with input ID not found, show unsuccess message
self.msgtxtTxt.SetLabel("Exercise not found. Check ID!")
def adminDelete(self, event):
#Clear message
self.msgtxtTxt.SetLabel("")
try:
#Get input id
id = int(self.exidCtrl.GetValue())
except:
self.msgtxtTxt.SetLabel("Enter ID")
else:
#Select from exercises row with input id
QUERY = "SELECT exercise_id FROM exercises WHERE exercise_id='"+str(id)+"'"
c.execute(QUERY)
#If it exists, delete the row, show success message, and clear input fields
if c.fetchall():
QUERY = "DELETE FROM exercises WHERE exercise_id='"+str(id)+"'"
c.execute(QUERY)
conn.commit()
self.exerciseList.DeleteAllItems()
c.execute("SELECT * FROM exercises ORDER BY exercise_name")
exercise_ad = c.fetchall()
index = 0
for x in exercise_ad:
self.exerciseList.InsertItem(index, str(x[0]))
self.exerciseList.SetItem(index, 1, x[1])
self.exerciseList.SetItem(index, 2, x[2])
self.exerciseList.SetItem(index, 3, str(x[3]))
self.exerciseList.SetItem(index, 4, x[4])
index += 1
self.exidCtrl.Clear()
self.exnameCtrl.Clear()
self.extypeCtrl.Clear()
self.setsrepsCtrl.Clear()
self.imageCtrl.Clear()
self.msgtxtTxt.SetLabel("Exercise deleted successfully!")
else:
#if row with input ID not found, show unsuccess message
self.msgtxtTxt.SetLabel("Exercise not found. Check ID!")
def adminRefresh(self, event):
notebook.DeletePage(3)
Sport_club.exercisesTab = Exercises(notebook)
notebook.InsertPage(3, Sport_club.exercisesTab, "Exercises")
notebook.SetPageImage(3, 3)
global currentVisitId
if currentVisitId:
notebook.EnableTab(3, True)
else:
notebook.EnableTab(3, False)
def adminGrant(self, event):
username = self.grantCtrl.GetValue()
try:
QUERY = "SELECT admin_flag FROM users WHERE username='"+username+"'"
c.execute(QUERY)
flag = c.fetchone()
if flag[0]==0:
QUERY = "UPDATE users SET admin_flag=1 WHERE username='"+username+"'"
c.execute(QUERY)
elif flag[0]==1:
QUERY = "UPDATE users SET admin_flag=0 WHERE username='"+username+"'"
c.execute(QUERY)
except:
self.msgtxtTxt.SetLabel("Something's wrong, didn't grant")
#def inputCheck
# Main parent class that makes the notebook required for different pages and initializes every class other than it to be a page
class Sport_club(wx.Frame):
    """Main application window: builds the AUI notebook and its four tab pages."""

    def __init__(self, parent, title):
        super(Sport_club, self).__init__(parent, title=title, size=(1280, 720))
        # The notebook allows having pages that are changed by clicking tabs.
        style = aui.AUI_NB_TAB_SPLIT
        global notebook
        notebook = aui.AuiNotebook(self, agwStyle=style)
        # The pages are instances of Login, Memberships, Visits and Exercises.
        self.loginTab = Login(notebook)
        self.membershipsTab = Memberships(notebook)
        self.visitsTab = Visits(notebook)
        self.exercisesTab = Exercises(notebook)
        notebook.AddPage(self.loginTab, "Login")
        notebook.AddPage(self.membershipsTab, "Memberships")
        notebook.AddPage(self.visitsTab, "Visits")
        notebook.AddPage(self.exercisesTab, "Exercises")
        # Initially only the login tab is enabled.
        for tab in (1, 2, 3):
            notebook.EnableTab(tab, False)
        # If the user exits before checking out, check out automatically.
        self.Bind(wx.EVT_CLOSE, self.membershipsTab.onCheckOut)
        # Window icon.
        windowIcon = wx.Icon('icons/sportClubIcon.ico', wx.BITMAP_TYPE_ICO)
        self.SetIcon(windowIcon)
        # Build the tab icons: load each .ico, convert it to a 32x32 bitmap,
        # and collect the bitmaps in an ImageList in tab order.
        imageList = wx.ImageList(width=32, height=32, mask=False, initialCount=1)
        icon_files = (
            'icons/loginIcon.ico',
            'icons/membershipsIcon.ico',
            'icons/visitsIcon.ico',
            'icons/exerciseIcon.ico',
            'icons/logoutIcon.ico',
        )
        for path in icon_files:
            bitmap = wx.Bitmap(32, 32)
            bitmap.CopyFromIcon(wx.Icon(path, wx.BITMAP_TYPE_ICO))
            imageList.Add(bitmap)
        notebook.SetImageList(imageList)
        # Assign icon i to tab i for the four visible pages.
        for page in range(4):
            notebook.SetPageImage(page, page)
        self.Show()
# Main loop of the program
app = wx.App()  # the wx application object must exist before any window
sportClubWindow = Sport_club(None, 'Fitness Mania')  # create and show the main frame
app.MainLoop()  # blocks until the last top-level window closes
| najiar/Python-Fitness-App | Fitness_Project.py | Fitness_Project.py | py | 79,776 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "wx.Panel.__init__",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_... |
8596463464 | from django.urls import path
from .views import HomePageView, SearchResultsView
from phones import views
urlpatterns = [
    path('', HomePageView.as_view(), name='home'),
    path('search/', SearchResultsView.as_view(), name='search_results'),
    path('create/', views.create),
    path('edit/<int:id>/', views.edit),
    path('delete/<int:id>/', views.delete),
    # NOTE(review): this '' route duplicates the 'home' route above; Django
    # resolves the first match, so views.button_back is never reached via a
    # URL (only reverse('button_back') would use it). Give it a distinct
    # prefix (e.g. 'back/') or remove it — confirm intent.
    path('', views.button_back, name='button_back'),
]
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.HomePageView.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.HomePageView",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django... |
72265753383 | """covert_dataset.py
- demo: '../../demo/to-hf/convert_dataset.demo.ipynb'
"""
import os
import re
import json
import pandas as pd
def remove_java_comments(codedata: str) -> str:
    """Strip Java comments from source text.

    Removes /* ... */ block comments (non-greedy, spanning lines), then
    // line comments, then collapses double newlines in a single pass.
    Note: comment markers inside string literals are not distinguished.
    """
    block_comment = re.compile(r"/\*(.*?)\*/", re.MULTILINE | re.DOTALL)
    line_comment = re.compile(r"//.*")
    without_blocks = block_comment.sub("", codedata)
    without_lines = line_comment.sub("", without_blocks)
    return re.sub(r"\n\n", "\n", without_lines)
def read_source_file(sourceRoot, projectName, versionName, filename, postfix=".java"):
    """Read one source file from <sourceRoot>/<project>/<version>/<filename><postfix>.

    Returns:
        (lines, exist): the file contents as UTF-8 text (empty string when
        the file is missing or unreadable) and whether the file exists.
    """
    filepath = os.path.join(sourceRoot, projectName, versionName, filename + postfix)
    lines, exist = "", False
    if os.path.exists(filepath):
        exist = True
        try:
            with open(filepath, encoding="utf-8") as f:
                lines = f.read()
        except (OSError, UnicodeDecodeError):
            # Unreadable or non-UTF-8 file: keep exist=True, return "".
            # (Was a silent bare `except`; narrowed to the real failure modes.)
            pass
    return lines, exist
def process_version(label_df, prefixDict, sourceRoot, projectName, versionName):
    """Attach source-code text to every labeled row of one project version.

    For each row, the dotted class name in 'name1' is turned into a file
    path (optionally under the project's package prefix), the file is read
    and stripped of comments, and rows whose file is missing are dropped.
    Returns a copy of label_df with a 'data' column and without 'name1'.
    """
    contents = []
    found = []
    prefix = prefixDict[projectName]
    for row_idx in range(label_df.shape[0]):
        dotted = label_df.iloc[row_idx]["name1"]
        # Dotted package path -> slash-separated relative file path.
        if len(prefix) > 0:
            relpath = "/".join(prefix.split(".") + dotted.split("."))
        else:
            relpath = "/".join(dotted.split("."))
        text, exists = read_source_file(sourceRoot, projectName, versionName, relpath)
        contents.append(remove_java_comments(text))
        found.append(exists)
    out = label_df.copy()
    out["data"] = contents
    out["exist"] = found
    # Keep only rows whose source file was actually found.
    out = out[out["exist"]]
    return out.drop(columns=["name1", "exist"])
def process_project(
    projectName, prefixDict, sourceRoot, labelRoot, saveRoot, to_compress=True
):
    """Process every labeled version of one project and save it as CSV.

    Each <labelRoot>/<project>/<version>.csv is joined with its source code
    via process_version() and written to <saveRoot>/compressed/*.csv.gz
    (gzip level 1, fixed mtime for reproducible archives) or <saveRoot>/csv/.
    """
    proj_labelRoot = os.path.join(labelRoot, projectName)
    versionNames = [
        filename.split(".csv")[0] for filename in os.listdir(proj_labelRoot)
    ]
    for versionName in versionNames:
        labelpath = os.path.join(proj_labelRoot, versionName + ".csv")
        label_df = pd.read_csv(labelpath)
        df = process_version(label_df, prefixDict, sourceRoot, projectName, versionName)
        # Choose output directory / filename / compression settings.
        if to_compress:
            saveRoot_real = os.path.join(saveRoot, "compressed")
            savepath = os.path.join(saveRoot_real, versionName + ".csv.gz")
            compression = {"method": "gzip", "compresslevel": 1, "mtime": 1}
        else:
            saveRoot_real = os.path.join(saveRoot, "csv")
            savepath = os.path.join(saveRoot_real, versionName + ".csv")
            compression = None
        # exist_ok avoids a race between the existence check and creation.
        os.makedirs(saveRoot_real, exist_ok=True)
        df.to_csv(savepath, index=False, compression=compression)
        print(f"processed {versionName}: size={df.shape[0]}")
def process_all_projects(sourceRoot, labelRoot, saveRoot, prefixPath, to_compress=True):
    """Run process_project() for every project directory under labelRoot.

    prefixPath is a JSON file mapping project name -> Java package prefix.
    """
    # Explicit encoding: JSON is UTF-8; don't depend on the platform default.
    with open(prefixPath, encoding="utf-8") as f:
        prefixDict = json.load(f)
    for projectName in os.listdir(labelRoot):
        process_project(
            projectName, prefixDict, sourceRoot, labelRoot, saveRoot, to_compress
        )
def process_split(splitDict, prefixDict, sourceRoot, labelRoot, shuffle=True):
    """Build one dataframe covering all (project, version) pairs in splitDict.

    Each version's label CSV is joined with its source via process_version();
    the frames are concatenated and optionally shuffled (fresh 0..n-1 index).
    """
    frames = []
    for projectName, versionNames in splitDict.items():
        proj_labelRoot = os.path.join(labelRoot, projectName)
        for versionName in versionNames:
            labelpath = os.path.join(proj_labelRoot, versionName + ".csv")
            label_df = pd.read_csv(labelpath)
            frames.append(
                process_version(label_df, prefixDict, sourceRoot, projectName, versionName)
            )
    split_df = pd.concat(frames)
    if shuffle:
        # sample(frac=1) shuffles all rows; reset_index drops the old order.
        split_df = split_df.sample(frac=1).reset_index(drop=True)
    return split_df
def process_train_test_splits(
    labelRoot, testSplitFile, prefixPath, saveRoot, to_compress=True, shuffle=True
):
    """Split all versions into train/test and save the two merged CSVs.

    testSplitFile maps project -> the single version held out for testing;
    every other version of that project goes to the train split.

    NOTE(review): this function reads the module-level ``sourceRoot`` global
    rather than taking it as a parameter — confirm, and consider threading
    it through explicitly.
    """
    # Load the test split: {project: version} -> {project: [version]}.
    with open(testSplitFile, encoding="utf-8") as f:
        testSplitDict = json.load(f)
    testSplitDict = {k: [v] for k, v in testSplitDict.items()}
    # Every version not held out for testing belongs to the train split.
    trainSplitDict = {}
    for projectName in os.listdir(labelRoot):
        versionNames = [
            filename.split(".csv")[0]
            for filename in os.listdir(os.path.join(labelRoot, projectName))
        ]
        trainSplitDict[projectName] = [
            version
            for version in versionNames
            if version not in testSplitDict[projectName]
        ]
    # Build the two split dataframes.
    with open(prefixPath, encoding="utf-8") as f:
        prefixDict = json.load(f)
    train_split_df = process_split(
        trainSplitDict, prefixDict, sourceRoot, labelRoot, shuffle
    )
    test_split_df = process_split(
        testSplitDict, prefixDict, sourceRoot, labelRoot, shuffle
    )
    # Save both dataframes next to each other.
    if to_compress:
        saveRoot_real = os.path.join(saveRoot, "compressed")
        savepath_train = os.path.join(saveRoot_real, "train-split.csv.gz")
        savepath_test = os.path.join(saveRoot_real, "test-split.csv.gz")
        compression = {"method": "gzip", "compresslevel": 1, "mtime": 1}
    else:
        saveRoot_real = os.path.join(saveRoot, "csv")
        savepath_train = os.path.join(saveRoot_real, "train-split.csv")
        savepath_test = os.path.join(saveRoot_real, "test-split.csv")
        compression = None
    os.makedirs(saveRoot_real, exist_ok=True)
    train_split_df.to_csv(savepath_train, index=False, compression=compression)
    test_split_df.to_csv(savepath_test, index=False, compression=compression)
    print(
        f"processed: size(train-split)={train_split_df.shape[0]}, size(test-split)={test_split_df.shape[0]}"
    )
if __name__ == "__main__":
    # Input locations (relative to this script's directory).
    dataRoot = os.path.abspath("../../../PROMISE/")
    sourceRoot = os.path.join(dataRoot, "source-code")
    labelRoot = os.path.join(dataRoot, "labeled-data")
    prefixPath = os.path.join(dataRoot, "resources", "code-prefix.json")
    testSplitFile = os.path.join(dataRoot, 'resources', 'test-split.json')
    # Output locations for the HuggingFace-style dataset.
    saveRoot = os.path.abspath("../../../promise-dataset-hf/")
    saveRoot_projs = os.path.join(saveRoot, "projects")
    saveRoot_splits = os.path.join(saveRoot, "splits")
    # Write gzip-compressed CSVs; shuffle rows within each split.
    to_compress = True
    shuffle_splits = True
    print("\n>> Processing by projects >>\n")
    process_all_projects(sourceRoot, labelRoot, saveRoot_projs, prefixPath, to_compress)
    print("\n>> Processing by splits >>\n")
    process_train_test_splits(labelRoot, testSplitFile, prefixPath, saveRoot_splits, to_compress, shuffle=shuffle_splits)
| jalaxy33/PROMISE-dataset | preprocessed/src/to-hf/convert_dataset.py | convert_dataset.py | py | 7,027 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "re.DOTALL",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 1... |
41834816410 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 15 15:09:57 2021
@author: ko-ko
"""
import os
import sys
import numpy as np
import openpyxl as pyxl
from scipy import interpolate as interp
import math
"""------------------------------------------------------------"""
""" """
""" 入出力処理:ブック名のみ入力or.pyにドラッグアンドドロップ """
""" ブック名入力はカレントディレクトリにあるもののみ対応する """
""" 先頭の./は省くこと """
""" """
"""------------------------------------------------------------"""
# Workbook to read. A .xlsm/.xlsx path may also be passed as the first CLI
# argument (e.g. by dragging the workbook onto this .py file); only files in
# the current directory are supported when typing the name directly.
bookpath = "airfoilplotter.xlsm"
bookname = bookpath[:-5]
"""------------------------------------------------------------"""
if not (len(sys.argv) == 1):
    if (sys.argv[1][-4:] == "xlsm") or (sys.argv[1][-4:] == "xlsx"):
        bookpath = sys.argv[1]
        # Drop the 5-character extension (".xlsm"/".xlsx"). The directory
        # part is kept so the output folder is created next to the workbook.
        # (Two intermediate assignments here were dead stores — removed.)
        bookname = bookpath[:-5]
print("ブックパス")
print(bookpath)
print("出力ファイル名")
print(bookname)
# Open the workbook (cached cell values, not formulas) and make sure the
# output directory exists.
strcbook = pyxl.load_workbook(bookpath, data_only=True)
if not (os.path.isdir(bookname)):
    os.makedirs(bookname)
"""主翼翼型名と諸元取得"""
maindata = strcbook["Python読み込み用データ"] #諸元のシート
main_b = maindata["B2"].value*1000 #主翼スパン[mm]
main_cr = maindata["B3"].value*1000 #主翼ルートコード[mm]
main_cm = maindata["B4"].value*1000 #主翼ミッドコード[mm]
main_ct = maindata["B5"].value*1000 #主翼チップコード[mm]
main_yt = maindata["B6"].value*1000 #テーパ変更点[mm]
main_alpha = maindata["B7"].value*np.pi/180 #迎角[rad]
xspar = maindata["B8"].value #桁位置[-]
spar_dim = maindata["B9"].value #桁径[mm]
"""主翼翼型点群取得"""
main_airfoil_name = maindata["D1"].value #主翼翼形名
main_xy_len = maindata["G4"].value #点群の組数
main_x = [maindata["D" + str(i+4)].value for i in range(main_xy_len)] #主翼の点群x
main_y = [maindata["E" + str(i+4)].value for i in range(main_xy_len)] #主翼の点群y
main_xyu_len = maindata["N4"].value #上面点群の組数
main_xyl_len = maindata["O4"].value #下面点群の組数
main_xu = [maindata["I" + str(i+4)].value for i in range(main_xyu_len)] #主翼の上面x
main_yu = [maindata["J" + str(i+4)].value for i in range(main_xyu_len)] #主翼の上面y
main_xl = [maindata["K" + str(i+4)].value for i in range(main_xyl_len)] #主翼の下面x
main_yl = [maindata["L" + str(i+4)].value for i in range(main_xyl_len)] #主翼の下面y
"""補間関数作成"""
main_yu_func = interp.interp1d(main_xu,main_yu,kind="linear",fill_value="extrapolate") #上面の高さの関数
main_yl_func = interp.interp1d(main_xl, main_yl,kind="linear",fill_value="extrapolate") #下面の高さの関数
xtmp = np.arange(0,1+0.01,0.01)
main_yu = main_yu_func(xtmp)
main_yl = main_yl_func(xtmp)
main_camber = (main_yu + main_yl)/2
main_thikness = main_yu - main_yl
main_camber_func = interp.interp1d(xtmp, main_camber,kind="linear",fill_value="extrapolate") #キャンバラインの高さの関数
main_thikness_func = interp.interp1d(xtmp, main_camber,kind="linear",fill_value="extrapolate") #厚みの関数
"""リブ配置取得"""
main_rib_n = maindata["Q2"].value #主翼半分のリブ数
main_riblocation = [maindata["Q" + str(i+4)].value for i in range(main_rib_n)] #主翼リブ位置[mm]
del maindata
"""リブ位置でのコード長を計算"""
if main_yt == 0:
main_c_list = [main_cr + (main_ct - main_cr) / (main_b / 2 - 0) * y for y in main_riblocation]
else:
main_c_list = [main_cr + (main_cm - main_cr) / (main_yt - 0) * y if y <= main_yt else main_cm + (main_ct - main_cm) / (main_b/2 - main_yt) * (y - main_yt) for y in main_riblocation]
"""------------------------------------------------------------"""
""" """
""" 製図 """
""" """
"""------------------------------------------------------------"""
"""主翼リブ製図"""
#コード長ごとにファイルを作成・書き込み
with open(bookname + "/" + main_airfoil_name + "_main_rib.scr","w") as scr:
offset = 0
for c in main_c_list:
main_xc = [x*c for x in main_x]
main_yc = [y*c for y in main_y]
"""コード長倍をかける"""
xtmpc = xtmp*c #コード長座標系
main_yuc = main_yu_func(xtmp)*c
main_yuc_func = interp.interp1d(xtmpc,main_yuc)
main_ylc = main_yl_func(xtmpc)*c
main_ylc_func = interp.interp1d(xtmpc,main_ylc)
main_camberc = main_camber_func(xtmp)*c
main_camberc_func = interp.interp1d(xtmpc,main_camberc)
main_thiknessc = main_thikness_func(xtmp)*c
main_thiknessc_func = interp.interp1d(xtmpc,main_thiknessc)
#翼型描画
scr.write("spline\n")
for (xi,yi) in zip(main_xc,main_yc):
scr.write(str(xi - c*xspar) + "," + str(yi + offset) + "\n")
scr.write("\n")
scr.write("\n")
scr.write("\n")
"""最後を閉じる"""
scr.write("line\n")
scr.write(str(main_xc[0] - c*xspar) + "," + str(main_yc[0] + offset) + "\n")
scr.write(str(main_xc[-1] - c*xspar) + "," + str(main_yc[-1] + offset) + "\n")
scr.write("\n")
"""スパー穴描画"""
scr.write("circle\n")
scr.write(str(0.0) + "," + str(main_camberc_func(c*xspar) + offset) + "\n")
scr.write("D\n")
scr.write(str(spar_dim) + "\n")
"""後縁の窪み"""
scr.write("line\n")
scr.write(str(c*(1-xspar)) + "," + str(offset+0.7) + "\n")
scr.write(str(c*(1-xspar) - 10) + "," + str(main_camberc_func(c-10) + offset + 0.7) + "\n")
scr.write(str(c*(1-xspar) - 10) + "," + str(main_camberc_func(c-10) + offset - 0.7) + "\n")
scr.write(str(c*(1-xspar)) + "," + str(offset-0.7) + "\n")
scr.write(str(c*(1-xspar)) + "," + str(offset+0.7) + "\n")
scr.write("\n")
"""治具用ライン"""
"""描画用オフセット"""
offset += 30
"""wing平面形"""
with open(bookname + "/mainwing.scr","w") as scr:
"""リブ"""
for (c, y) in zip(main_c_list,main_riblocation):
scr.write("line\n")
scr.write(str(y) + "," + str(c*xspar) + "\n")
scr.write(str(y) + "," + str(-c*(1-xspar)) + "\n")
scr.write("\n")
scr.write("line\n")
scr.write(str(-y) + "," + str(c*xspar) + "\n")
scr.write(str(-y) + "," + str(-c*(1-xspar)) + "\n")
scr.write("\n")
"""tips"""
scr.write("line\n")
scr.write(str(-main_b/2) + "," + str(main_ct*xspar) + "\n")
scr.write(str(-main_yt) + "," + str(main_cm*xspar) + "\n")
scr.write(str(0) + "," + str(main_cr*xspar) + "\n")
scr.write(str(main_yt) + "," + str(main_cm*xspar) + "\n")
scr.write(str(main_b/2) + "," + str(main_ct*xspar) + "\n")
scr.write("\n")
scr.write("line\n")
scr.write(str(-main_b/2) + "," + str(main_ct*(xspar-1)) + "\n")
scr.write(str(-main_yt) + "," + str(main_cm*(xspar-1)) + "\n")
scr.write(str(0) + "," + str(main_cr*(xspar-1)) + "\n")
scr.write(str(main_yt) + "," + str(main_cm*(xspar-1)) + "\n")
scr.write(str(main_b/2) + "," + str(main_ct*(xspar-1)) + "\n")
scr.write("\n")
scr.write("line\n")
scr.write(str(-main_b/2) + "," + str(main_ct*xspar-15) + "\n")
scr.write(str(-main_yt) + "," + str(main_cm*xspar-15) + "\n")
scr.write(str(0) + "," + str(main_cr*xspar-15) + "\n")
scr.write(str(main_yt) + "," + str(main_cm*xspar-15) + "\n")
scr.write(str(main_b/2) + "," + str(main_ct*xspar-15) + "\n")
scr.write("\n")
scr.write("line\n")
scr.write(str(-main_b/2) + "," + str(main_ct*(xspar-1)+15) + "\n")
scr.write(str(-main_yt) + "," + str(main_cm*(xspar-1)+15) + "\n")
scr.write(str(0) + "," + str(main_cr*(xspar-1)+15) + "\n")
scr.write(str(main_yt) + "," + str(main_cm*(xspar-1)+15) + "\n")
scr.write(str(main_b/2) + "," + str(main_ct*(xspar-1)+15) + "\n")
scr.write("\n")
print("Autocad script files are saved in ./airfoilprotter")
tem = input()
| FlyingSheeps/airfoilplotter | airfoilplotter.py | airfoilplotter.py | py | 8,793 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number... |
12126429354 | from email import message
from email.mime import text
from typing import Text
from cv2 import data
import pyttsx3
from requests.api import head, request #pip install pyttsx3
import speech_recognition as sr #pip install speechRecognition
import datetime
import wikipedia #pip install wikipedia
import webbrowser as web
import os
import smtplib
from email.message import EmailMessage, MIMEPart
import sys
import requests
from bs4 import BeautifulSoup
import time
import pyautogui
import cv2
import numpy as np
import PyPDF2
import operator
import keyboard
import pywhatkit
import pyjokes
import random
import bs4
from pytube import YouTube
from tkinter import Button, Entry, Label, Tk
from tkinter import StringVar
from PyDictionary import PyDictionary as diction
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from googletrans import Translator
engine = pyttsx3.init('sapi5')             # Windows SAPI5 text-to-speech driver
voices = engine.getProperty('voices')      # installed system voices
engine.setProperty('rate', 175)            # speaking rate
engine.setProperty('voice', voices[1].id)  # use the second installed voice
def speak(audio):
    """Speak the given text through the module-level TTS engine (blocking)."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the local time of day, then introduce Jarvis."""
    hour = datetime.datetime.now().hour  # .hour is already an int
    # Chained comparisons are the idiomatic range test.
    if 0 <= hour < 12:
        speak("Good Morning!")
    elif 12 <= hour < 18:
        speak("Good Afternoon!")
    else:
        speak("Good Evening!")
    speak("I am Jarvis Sir. Please tell me how may I help you")
def takeCommand():
    """Listen on the microphone and return the recognized sentence.

    Uses Google's online recognizer with Indian English. Returns the literal
    string "None" when recognition fails, so callers can substring-match
    the result safely.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        r.pause_threshold = 1  # seconds of silence that ends a phrase
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
    try:
        print("Recognizing...")
        query = r.recognize_google(audio, language='en-in')  # Google online recognition
        print(f"User said: {query}\n")
    except Exception as e:
        # print(e)
        print("Say that again please...")  # recognition failed / network error
        return "None"
    return query
def pdf_reader():
    """Read one page of the workout PDF aloud; the page number is typed by the user."""
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open('C:\\Users\\user\\Documents\\Jarvis\\pdf\\thebodyweightwarriorebookv2pdf_compress.pdf', 'rb') as book:
        pdfreader = PyPDF2.PdfFileReader(book)
        pages = pdfreader.numPages
        speak(f"Total number of pages in this book are {pages}.")
        speak("Sir Please enter the page number for me to read")
        pg = int(input("Pls enter the page number: "))
        page = pdfreader.getPage(pg)
        text = page.extractText()
    speak(text)
def news():
    """Fetch top business headlines (India) from NewsAPI and read up to ten aloud."""
    # NOTE(review): the API key is hard-coded; move it to config/env.
    main_url = "https://newsapi.org/v2/top-headlines?country=in&category=business&apiKey=50bf4c93fca5473cb9f6ea222b72a4a6"
    main_page = requests.get(main_url).json()
    articles = main_page["articles"]
    head = [ar["title"] for ar in articles]
    day = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']
    # zip() stops at the shorter sequence, so fewer than ten articles no
    # longer raises IndexError as the original index loop did.
    for ordinal, title in zip(day, head):
        speak(f"Today's {ordinal} news is {title}")
def Corona(Country):
    """Scrape worldometers for a country's COVID totals and speak them.

    The three 'maincounter-number' divs on the page are total cases,
    deaths and recoveries, in that order.
    """
    country_slug = str(Country).replace(" ", "")
    url = f"https://worldometers.info/coronavirus/country/{country_slug}/"
    result = requests.get(url)
    soups = bs4.BeautifulSoup(result.text, 'lxml')
    corona = soups.find_all('div', class_='maincounter-number')
    # Extract the text of the <span> inside each counter div.
    Data = [case.find('span').string for case in corona]
    cases, Death, recovered = Data
    speak(f"Cases : {cases}")
    speak(f"Deaths : {Death}")
    speak(f"Recovered : {recovered}")
def WhatsApp():
    """Schedule a WhatsApp message via pywhatkit.

    Sends either to a hard-coded contact (when the spoken name matches) or
    to a phone number the user dictates.
    """
    speak("Tell me the name of the person you want to send the message")
    name = takeCommand()
    if 'users-name' in name:
        speak("Tell me the nessage!")
        msg = takeCommand()
        speak("Tell me the time sir")
        speak("Time in hour!")
        hour = int(takeCommand())
        speak("Time in minutes!")
        min = int(takeCommand())
        pywhatkit.sendwhatmsg("+96892636335", msg, hour, min, 20)
        speak("Ok sir, Sending WhatsApp message!")
    else:
        speak("Tell me the phone number")
        # BUGFIX: the number must end up as a string — the original
        # concatenated the country-code string with an int, raising
        # TypeError. int() still validates the dictated digits.
        phone = str(int(takeCommand()))
        ph = 'country code' + phone
        speak("Tell me the nessage!")
        msg = takeCommand()
        speak("Tell me the time sir")
        speak("Time in hour!")
        hour = int(takeCommand())
        speak("Time in minutes!")
        min = int(takeCommand())
        pywhatkit.sendwhatmsg(ph, msg, hour, min, 20)
        speak("Ok sir, Sending WhatsApp message!")
def Music():
    """Play a song: the first local file for 'play music', otherwise YouTube."""
    speak("Tell me the name of the song")
    musicName = takeCommand()
    # NOTE(review): this listens a *second* time; only this second phrase
    # decides local vs YouTube playback, while musicName is only used for
    # the YouTube branch — confirm this flow is intended.
    query = takeCommand().lower()
    if 'play music' in query:
        music_dir = 'C:\\Users\\user\\Pictures\\Music'
        songs = os.listdir(music_dir)
        print(songs)
        os.startfile(os.path.join(music_dir, songs[0]))  # play the first file
    else:
        pywhatkit.playonyt(musicName)  # search and play on YouTube
    speak("Your song has been played. Enjoy it!")
def ChromeAuto():
    """Voice-driven Chrome shortcuts: map a spoken phrase to a browser hotkey."""
    speak("Chrome Automation Activated")
    command = takeCommand()
    # (phrase, hotkey) pairs checked in the original elif order, so
    # overlapping phrases resolve identically (first match wins).
    shortcuts = (
        ('close the tab', 'ctrl + w'),
        ('open new tab', 'ctrl + t'),
        ('back page', 'alt + Right Arrow'),
        ('forward page', 'alt + Left Arrow'),
        ('everything', 'ctrl + a'),
        ('history', 'ctrl + h'),
        ('downloads', 'ctrl + j'),
        ('window', 'ctrl + n'),
    )
    for phrase, hotkey in shortcuts:
        if phrase in command:
            keyboard.press_and_release(hotkey)
            break
def Dict():
    """Look up and speak the meaning of a spoken word via PyDictionary."""
    speak("Tell me the word you need the meaning to example: what is the meaning of yourword")
    phrase = takeCommand()
    if 'meaning' in phrase:
        # Strip the boilerplate, in the same order as before, so only the
        # target word remains.
        for filler in ("what is the", "jarvis", "meaning of"):
            phrase = phrase.replace(filler, "")
        result = diction.meaning(phrase)
        speak(f"The meaning of {phrase} is {result}")
def TaskExecution():
    """Announce user verification, dismiss any overlay, and greet the user."""
    speak("verifying user")
    pyautogui.press('esc')  # close whatever overlay/dialog is in the way
    speak("Verification is sucessful")
    speak("Welcome back.")
    wishMe()
#def Music():
# speak("Tell me the name of the song")
# musicName = takeCommand()
# query = takeCommand().lower()
# if 'play music' in query:
# music_dir = 'C:\\Users\\user\\Pictures\\Music'
# songs = os.listdir(music_dir)
# print(songs)
# os.startfile(os.path.join(music_dir, songs[0]))
# else:
# pywhatkit.playonyt(musicName)
#
# speak("Your song has been played. Enjoy it!")
#
################################################################################################################################################################
#
#def ChromeAuto():
# speak("Chrome Automation Activated")
# command = takeCommand()
# if 'close the tab' in command:
# keyboard.press_and_release('ctrl + w')
#
# elif 'open new tab' in command:
# keyboard.press_and_release('ctrl + t')
#
# elif 'back page' in command:
# keyboard.press_and_release('alt + Right Arrow')
#
# elif 'forward page' in command:
# keyboard.press_and_release('alt + Left Arrow')
#
# elif 'everything' in command:
# keyboard.press_and_release('ctrl + a')
#
# elif 'history' in command:
# keyboard.press_and_release('ctrl + h')
#
# elif 'downloads' in command:
# keyboard.press_and_release('ctrl + j')
#
# elif 'window' in command:
# keyboard.press_and_release('ctrl + n')
#
#def Dict():
# speak("Dictoinary Activated")
# speak("Tell me the word you need the meaning to example: what is the meaning of yourword")
# probl = takeCommand()
# if 'meaning' in probl:
# probl = probl.replace("what is the","")
# probl = probl.replace("jarvis","")
# probl = probl.replace("meaning of","")
# result = diction.meaning(probl)
# speak(f"The meaning of {probl} is {result}")
while True:
# if 1:
query = takeCommand().lower() #Converting user query into lower case
# Logic for executing tasks based on query
if 'wikipedia' in query: #if wikipedia found in the query then this block will be executed
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print(results)
speak(results)
# elif 'chrome' in query:
# ChromeAuto()
elif 'command' in query:
os.startfile('C:\\Users\\user\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\System Tools\\Command Prompt.exe')
elif 'music' in query:
Music()
elif 'WhatsApp' in query:
WhatsApp()
elif 'open youtube' in query:
web.open("youtube.com")
elif 'google search' in query:
speak("This is what I found for you")
query = query.replace("jarvis","")
query = query.replace("google search","")
pywhatkit.search(query)
speak("Done Sir!")
elif 'website' in query:
speak("Ok sir, Launching....")
query = query.replace("jarvis","")
query = query.replace("website","")
query = query.replace(" ","")
web1 = query.replace("open","")
web2 = 'https://www.' + web1 + '.com'
web.open(web2)
speak("Launched!")
elif 'youtube search' in query:
speak("Ok sir, this is what i found for you")
query = query.replace("jarvis","")
query = query.replace("youtube search","")
query = query.replace(" ", "")
webs = 'https://www.youtube.com/results?search_query' + query
web.open(webs)
speak("Done Sir")
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, The time is {strTime}")
elif 'start notepad' in query:
npad = "C:\\Windows\\system32\\notepad.exe"
speak("Starting Notepad Sir")
os.startfile(npad)
elif 'start code' in query or 'visual studio' in query:
vs = "C:\\Users\\user\\Desktop\\Visual Studio Code.exe"
speak("Starting Visual Studio Code sir")
os.startfile(vs)
elif 'close code' in query or 'visual studio' in query:
speak("closing Visual Studio Code sir")
os.system("TASKKILL /F /im Visual Studio Code.exe")
elif 'close notepad' in query:
speak("Closing Notepad Sir")
os.system("TASKKILL /F /im notepad.exe")
elif 'where are we' in query or 'where am i' in query:
speak("Wait sir let me check")
try:
ipAdd = requests.get('https://api.ipify.org').text
print(ipAdd)
url = 'https://get.geojs.io/v1/ip/geo/' + ipAdd + '.json'
geo_requests = requests.get(url)
geo_data = geo_requests.json
city = geo_data['city']
country = geo_data['Country']
speak(f"sir I am not sure but according to the information we are in {city} city of {country} country")
except Exception as e:
speak('Sorry sir due to an network error i am not able to find where we are')
pass
elif 'take screenshot' in query or 'screenshot' in query:
speak("Sir, please tell me the name of the screenshot file")
name = takeCommand().lower()
speak("sir, please hold the screen for a few seconds i am taking the screenshot")
time.sleep(3)
img = pyautogui.screenshot()
img.save(f"{name}.png")
speak("I am done taking the screenshot sir. You may access the screenshot here")
elif 'how are you' in query:
speak("I am fine sir, what about you")
elif 'also good' in query:
speak("Thats great to hear sir.")
elif 'fine' in query:
speak("Thats great to hear sir.")
elif 'thanks' in query or 'thank you' in query:
speak("its my plessure sir.")
elif 'volume up' in query:
pyautogui.press("volumeup")
elif 'volume down' in query:
pyautogui.press("volumedown")
elif 'mute volume' in query:
pyautogui.press("volumemute")
elif 'alarm' in query:
speak("Sir please tell me the time to set alarm. For example set alarm to 5:30 am")
tt = takeCommand()
tt = tt.replace("set alarm to", "")
tt = tt.replace(".", "")
tt = tt.upper()
import MyAlarm
MyAlarm.alarm(tt)
elif 'you can sleep' in query or 'sleep' in query or 'sleep now' in query:
speak("Ok sir, I am going to sleep you can call me anytime.")
break
elif 'internet speed' in query:
import speedtest
st = speedtest.Speedtest()
dl = st.download()
up = st.upload()
speak(f"Sir we have{dl} bit per second downloading speed and {up} bit per second uploading speed.")
print(f"Sir we have{dl} bit per second downloading speed and {up} bit per second uploading speed.")
elif 'read pdf' in query:
pdf_reader()
elif 'switch the window' in query:
pyautogui.keyDown("alt")
pyautogui.press("tab")
time.sleep(1)
pyautogui.keyUp("alt")
# elif 'dictionary' in query:
# speak("Dictionary Activated")
# speak("Tell me the word you need the meaning to example: what is the meaning of yourword")
# probl = takeCommand()
# if 'meaning' in probl:
# probl = probl.replace("what is the","")
# probl = probl.replace("jarvis","")
# probl = probl.replace("meaning of","")
# result = diction.meaning(probl)
# speak(f"The meaning of {probl} is {result}")
elif 'news' in query:
speak("Please wait sir, I am finding the latest news for you")
news()
elif 'email to name' in query:
speak("Sir what should I say")
query = takeCommand().lower()
if "send a file" in query:
email = "your email"
password = "your password"
send_to_email = 'senders email'
speak("Ok isr, what is the subject for this email?")
query = takeCommand().lower()
subject = query
speak("And sir, what is the message for this email")
query2 = takeCommand().lower()
message = query2
speak("sir please enter the correct path of the file into the shell")
file_location = input("Pls enter the path of the file here:")
speak("Please wait I am sending this email now")
msg = MIMEMultipart()
msg['From'] = email
msg['To'] = send_to_email
msg['Subject'] = subject
msg.attach(MIMEText(message, 'plain'))
filename = os.path.basename(file_location)
attachment = open(file_location, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Dispution', "attachment; filename= %s" % filename)
msg.attach(part)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(email, password)
text = msg.as_string()
server.sendmail(email, send_to_email, text)
server.quit
speak("Email has been sent to Lavya")
else:
email = "your mail"
password = "your password"
send_to_email = 'senders mail'
message = query
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(email, password)
server.sendmail(email, send_to_email, message)
server.quit
speak("Email has been sent to 'senders name'")
elif 'temperature' in query:
search = "temperature in muscat"
url = f"https://www.google.com/search?q={search}"
r = requests.get(url)
data = BeautifulSoup(r.text,"html.parser")
temp = data.find("div",class_="BNeawe").text
speak(f"current {search} is {temp}")
elif 'do some calculations' in query or 'calculation' in query or 'calculate' in query:
try:
r = sr.Recognizer()
with sr.Microphone() as source:
speak("Sir, What do you want to calculate, example 3 plus 3")
print("Listening...")
r.pause_threshold = 1
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
my_string=r.recognize_google(audio)
print(my_string)
def get_operator_fn(op):
return{
'+' : operator.add,
'-' : operator.sub,
'x' : operator.mul,
'divided' : operator.__truediv__,
}[op]
def eval_binary_expr(op1, oper, op2):
op1,op2 = int(op1), int(op2)
return get_operator_fn(oper)(op1, op2)
speak("Your result is")
speak(eval_binary_expr(*(my_string.split())))
except Exception as e:
print("Pls say that again....")
speak("Please say that again")
return "None"
return query
elif 'pause' in query:
keyboard.press('space bar')
elif 'restart' in query:
keyboard.press('0')
elif 'mute' in query:
keyboard.press('m')
elif 'skip' in query:
keyboard.press('l')
elif 'back' in query:
keyboard.press('j')
elif 'fullscreen' in query:
keyboard.press('f')
elif 'theater' in query:
keyboard.press('t')
elif 'miniplayer' in query:
keyboard.press('i')
elif 'chrome' in query:
ChromeAuto()
elif 'joke' in query:
get = pyjokes.get_joke()
speak(get)
elif 'speak game' in query:
speak("Speak sir I will repeate whatever you say!")
jj = takeCommand()
speak({jj})
elif 'dictionary' in query:
Dict()
elif 'download' in query:
root = Tk()
root.geometry('500x300')
root.resizable(0,0)
root.title("Youtube video downloader")
speak("Enter Video Url Here!")
Label(root,text= "Youtube Video Downloader",font='arial 15 bold').pack()
link = StringVar()
Label(root,text="Paste YouTube Url here:",font='arial 15 bold',).place(x=160,y=60)
Entry(root, width = 70, textvariable = link).place(x=32,y=90)
def VideoDownloader():
url = YouTube(str(link.get()))
video = url.streams.first()
video.download("C:\\Users\\user\\Documents\\Jarvis\\Downloads")
Label(root, text="Downloaded",font = "arial 15").place(x=180,y=210)
Button(root,text = "Download",font = "arial 15 bold", bg = 'pale violet red', padx = 2, command = VideoDownloader).place(x=180, y=150)
root.mainloop()
speak("Video Downloaded")
elif 'remember that' in query:
rememberMsg = query.replace("remember that","")
rememberMsg = rememberMsg.replace("jarvis","")
speak("You told me to remind you that:"+rememberMsg)
remember = open('data.txt','w')
remember.write(rememberMsg)
remember.close
elif 'search' in query:
import wikipedia as googlescrap
rememberMsg = query.replace("google search","")
rememberMsg = rememberMsg.replace("jarvis","")
speak("This is what I found on the web")
pywhatkit.search(query)
try:
result = googlescrap.summary(query,3)
speak(result)
except:
speak("Please say that again")
elif 'chat bot' in query:
speak("Chat bot enabled")
from chatbot import ChatterBot
reply = ChatterBot(query)
speak(reply)
Hello = ('hello','hey','hii','hi')
reply_Hello = ('Hello Sir , I Am Jarvis .',
"Hey , What's Up ?",
"Hey How Are You ?",
"Hello Sir , Nice To Meet You Again .",
"Of Course Sir , Hello .")
Bye = ('bye','exit','sleep','go')
reply_bye = ('Bye Sir.',
"It's Okay .",
"It Will Be Nice To Meet You .",
"Bye.",
"Thanks.",
"Okay.")
How_Are_You = ('how are you','are you fine')
reply_how = ('I Am Fine.',
"Excellent .",
"Moj Ho rhi Hai .",
"Absolutely Fine.",
"I'm Fine.",
"Thanks For Asking.")
nice = ('nice','good','thanks')
reply_nice = ('Thanks .',
"Ohh , It's Okay .",
"Thanks To You.")
Functions = ['functions','abilities','what can you do','features']
reply_Functions = ('I Can Perform Many Task Or Varieties Of Tasks , How Can I Help You ?',
'I Can Call Your G.F .',
'I Can Message Your Mom That You Are Not Studing..',
'I Can Tell Your Class Teacher That You Had Attended All The Online Classes On Insta , Facebbook etc!',
'Let Me Ask You First , How Can I Help You ?',
'If You Want Me To Tell My Features , Call : Print Features !')
sorry_reply = ("Sorry , That's Beyond My Abilities .",
"Sorry , I Can't Do That .",
"Sorry , That's Above Me.")
def ChatterBot(Text):
Text = str(Text)
for word in Text.split():
if word in Hello:
reply = random.choice(reply_Hello)
return reply
elif word in Bye:
reply = random.choice(reply_bye)
return reply
elif word in How_Are_You:
reply_ = random.choice(reply_how)
return reply_
elif word in Functions:
reply___ = random.choice(reply_Functions)
return reply___
else:
return random.choice(sorry_reply)
elif 'corona' in query or 'coronavirus' in query:
try:
speak("Which countries corona cases do you want to know?")
cccc = takeCommand()
Corona(cccc)
except:
speak("Pls say the countries name again")
if __name__ == "__main__":
    # NOTE(review): the original file's indentation was lost in extraction; the
    # face-recognition section is assumed to live inside the wake-word loop
    # (otherwise it would be unreachable after `while True`) — confirm upstream.
    while True:
        # speak("Please tell me the password")
        permission = takeCommand()
        # Pass(permission)
        if "wake up" in permission:
            TaskExecution()
        elif "goodbye" in permission or "bye" in permission:
            speak("bye bye sir!")
            sys.exit()

        # Face-recognition gate: a recognized face also triggers TaskExecution().
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read('trainer/trainer.yml')
        cascadePath = "haarcascade_frontalface_default.xml"
        faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + cascadePath)
        font = cv2.FONT_HERSHEY_SIMPLEX
        id = 1
        names = ['', 'Lavya', 'Arti', 'Ashish']  # index 0 unused; recognizer label ids start at 1
        cam = cv2.VideoCapture(1, cv2.CAP_DSHOW)
        cam.set(3, 640)  # set video frame width
        cam.set(4, 480)  # set video frame height
        # Ignore detections smaller than 10% of the frame in either dimension.
        minW = 0.1 * cam.get(3)
        minH = 0.1 * cam.get(4)
        while True:
            ret, img = cam.read()
            converted_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # BUG FIX: OpenCV's keyword is scaleFactor (camelCase);
            # `scalefactor=` raises TypeError on detectMultiScale.
            faces = faceCascade.detectMultiScale(
                converted_image,
                scaleFactor=1.2,
                minNeighbors=5,
                minSize=(int(minW), int(minH)),
            )
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 225, 0), 2)
                # predict() returns (label, confidence); lower confidence = better match.
                id, accuracy = recognizer.predict(converted_image[y:y + h, x:x + w])
                if accuracy < 100:
                    id = names[id]
                    accuracy = " {0}%".format(round(100 - accuracy))
                    TaskExecution()
                else:
                    id = "unknown"
                    accuracy = " {0}%".format(round(100 - accuracy))
                cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (225, 225, 225), 2)
                cv2.putText(img, str(accuracy), (x + 5, y + h - 5), font, 1, (225, 225, 0), 1)
            cv2.imshow('camera', img)
            k = cv2.waitKey(10) & 0xff
            if k == 27:  # press esc to stop
                break
        print("Thankyou for using the program, have a good day!")
        cam.release()
        cv2.destroyAllWindows()
#####################################################################################################################################################################################
#
# password = "python"
#
# passs = str(password)
#
# if passs==str(password_input):
#
# speak("Access granted")
#
# else:
# speak("Access denied")
## Share this code if you found it useful
| Lavya-Gohil/Jarvis | jarvis.py | jarvis.py | py | 27,563 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pyttsx3.init",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "speech_reco... |
21154413667 | #!/usr/bin/env python3
from bs4 import BeautifulSoup
import sys
import urllib.request
import os.path
import os
import smtplib
import time
class IPCheck:
    """Fetch the machine's public IP and email it to a recipient when it changes.

    The last-seen IP is cached in /tmp/ip.txt; activity is appended to
    ~/.ipcheck/log. Everything runs from the constructor so the class acts
    as a one-shot script step.
    """

    def __init__(self):
        self.oldIP = None      # IP recorded on the previous run (includes trailing newline)
        self.currentIP = None  # IP fetched on this run
        self.logFile = os.getenv('HOME') + '/.ipcheck/log'
        self.logDirectoryCheck()
        self.checkIPFile()
        self.getCurrentIP()
        self.sendIP()

    def _log(self, message):
        """Append *message* plus a local timestamp to the log file."""
        with open(self.logFile, 'a') as log:
            log.write(message + time.asctime(time.localtime()) + '\n')

    def logDirectoryCheck(self):
        """Ensure the ~/.ipcheck directory exists so the log can be written."""
        # BUG FIX: the original wrote `sys.exit` without parentheses (a no-op
        # expression) and bailed out whenever mkdir failed for any reason.
        # makedirs(exist_ok=True) covers both the missing and existing cases.
        os.makedirs(os.path.dirname(self.logFile), exist_ok=True)

    def checkIPFile(self):
        """Load the previously recorded IP from /tmp/ip.txt, if present."""
        if os.path.isfile('/tmp/ip.txt'):
            with open('/tmp/ip.txt', 'r') as IPFile:
                self.oldIP = IPFile.read()
        else:
            # First run (or /tmp cleared on reboot): log it and continue so the
            # current IP gets emailed and re-cached. The original's bare
            # `sys.exit` here was a no-op, which matches this intent.
            self._log('[WARNING] - /tmp/ip.txt does not exist. /tmp is dropped on reboot. - ')

    def getCurrentIP(self):
        """Scrape the public IP from ipchicken.com into self.currentIP."""
        try:
            response = urllib.request.urlopen('http://ipchicken.com')
            html = response.read()
            # BUG FIX: name the parser explicitly; bare BeautifulSoup(html)
            # emits a warning and may pick different parsers per machine.
            soup = BeautifulSoup(html, 'html.parser')
            ip = soup.b.prettify().split('\n')
            self.currentIP = ip[1].strip()
        except Exception:
            # BUG FIX: original `sys.exit` lacked parentheses so it never
            # exited, letting sendIP() run with currentIP still None.
            sys.exit()

    def sendIP(self):
        """Email the new IP and rewrite the cache file if the IP changed."""
        if self.oldIP == (self.currentIP + '\n'):
            # Unchanged: nothing to do. (Original had the no-op `sys.exit` bug.)
            return
        receiver = '< enter receiving email address here >'
        sender = '< enter sending email here >'
        userpass = '< enter sending password here >'
        message = 'To: ' + receiver + '\n' + 'From: ' + sender + '\n' + 'Subject: New IP Address\n\n' + self.currentIP
        try:
            smtpObj = smtplib.SMTP('smtp.gmail.com', 587)
            smtpObj.ehlo()
            smtpObj.starttls()
            smtpObj.ehlo()
            smtpObj.login(sender, userpass)
            smtpObj.sendmail(sender, receiver, message)
            smtpObj.close()
            # BUG FIX: use a context manager so the cache file is closed even
            # if the write fails part-way.
            with open('/tmp/ip.txt', 'w') as newIPFile:
                newIPFile.write(self.currentIP + '\n')
            self._log('[SUCCESS] - Successfully sent IP address to ' + sender + ' - ')
        except Exception:
            try:
                self._log('[FAIL] - Failed to send IP address in email. Check code for errors - ')
            except Exception:
                sys.exit()
if __name__ == '__main__':
    # Guard the one-shot run so importing this module does not immediately
    # hit the network and send email.
    IPCheck()
| optiseth/ipcheck | ipcheck.py | ipcheck.py | py | 2,708 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 2... |
35102278935 | import csv
from itertools import chain
from typing import Optional
from src import PrioritizedCountryTokensList
def fix_tokens_list(tokens_list: PrioritizedCountryTokensList):
    """Append a hand-curated alias group (at the lowest priority) to
    selected countries' token lists, in place."""
    handmade_aliases = {
        'united kingdom': ['uk'],
        'united states': ['usa', 'new york', 'us', 'new jersey', 'denver, co', 'ohio'],
        'ukraine': ['украина', 'kiev'],
        'china': ['hong kong', 'hongkong', 'hong kong, hong kong'],
        'india': ['pune', 'delhi'],
        'canada': ['toronto'],
        'australia': ['sydney'],
    }
    for country in tokens_list.country_list:
        aliases = handmade_aliases.get(country.name.lower())
        if aliases is None:
            continue
        # Appending as the last group means the aliases match with the
        # lowest priority.
        country.prioritized_tokens = tuple(country.prioritized_tokens) + (aliases,)
class Matcher:
    """Match free-form location strings to country names, best priority first."""

    def __init__(self, tokens_list: PrioritizedCountryTokensList):
        self._tokens_list = tokens_list
        # Highest priority index present across all countries; match() scans
        # priorities 0 .. _lowest_priority inclusive.
        group_counts = [len(country.prioritized_tokens) - 1
                        for country in tokens_list.country_list]
        self._lowest_priority = max(group_counts)

    def match(self, value: str) -> Optional[str]:
        """Return the name of the first country whose token (at the best
        available priority) occurs as a substring of *value*, else None."""
        needle = value.lower()
        for priority in range(self._lowest_priority + 1):
            for country in self._tokens_list.country_list:
                if priority >= len(country.prioritized_tokens):
                    continue
                if any(token in needle
                       for token in country.prioritized_tokens[priority]):
                    return country.name
        return None
def create_matcher(tokens_list_file_path: str) -> Matcher:
    """Load a token-list JSON file, apply the handmade alias fixes, and
    return a ready-to-use Matcher."""
    with open(tokens_list_file_path, 'r', encoding='utf-8') as input_file:
        raw = input_file.read()
    parsed = PrioritizedCountryTokensList.parse_raw(raw)
    fix_tokens_list(parsed)
    return Matcher(parsed)
def merge(reader, writer, matcher: Matcher):
    """For each (weird_location, count) row, write a (location, country) row
    when the matcher finds a country; print unmatched rows and final totals."""
    success, fail = 0, 0
    for row in reader:
        assert len(row) == 2
        weird_location = row[0]
        matched_country = matcher.match(weird_location)
        if not matched_country:
            fail += 1
            print(f'{row[1]}\t{row[0]} - Fail')
        else:
            success += 1
            writer.writerow([weird_location, matched_country])
    print(f'success: {success}')
    print(f'fail: {fail}')
if __name__ == '__main__':
    # Driver: map tab-separated weird locations (from step 2) to country names
    # using the prioritized token list produced by an earlier step.
    input_file_path = 'input/counted_weird_locations.csv'
    output_file_path = 'output/location_mappings.csv'
    tokens_list_file_path = 'output/prioritized_tokens.json'
    matcher = create_matcher(tokens_list_file_path)
    # newline='' is required by the csv module to avoid double newlines.
    with open(input_file_path, 'r', encoding='utf-8', newline='') as input_file:
        with open(output_file_path, 'w', encoding='utf-8', newline='') as output_file:
            reader = csv.reader(input_file, delimiter='\t', escapechar='\\')
            writer = csv.writer(output_file, delimiter='\t', escapechar='\\')
            merge(reader, writer, matcher)
| sergey-s-null/10s-BigDataEntrance | PythonScripts/util/location_mappings/3_merge.py | 3_merge.py | py | 3,036 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "src.PrioritizedCountryTokensList",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "itertools.chain",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "src.PrioritizedCountryTokensList",
"line_number": 31,
"usage_type": "name"
},
{
"... |
7060223293 | import tkinter as tk
from tkinter import ttk
from tkinter import filedialog as fd
from tkinter import simpledialog as sd
from tkinter import messagebox as mb
from tkinter import scrolledtext as st
from unittest import mock
from functools import reduce
from os import path
import textwrap as tw
import glob
import io
import re
import os
# Fresh module scope used when exec()-ing a student submission.
BASE_SCOPE = {
    '__name__': '__main__',
    '__doc__': None,
    '__package__': None,
    '__loader__': __loader__,
    '__spec__': None,
    '__annotations__': {},
    '__cached__': None
}

DEFAULT_PREFIX = '###'            # header-line prefix separating test-case sections
DEFAULT_FILE_FILTER = '*.py'      # glob pattern for submission files
VALID_FILE_FILTER_REGEX = r'[\w\-.*?]+\.py'

# GUI layout constants.
PADX = 6
PADY = 6
READONLY_BG = 'light gray'

INSTRUCTIONS = '''This automarker automatically runs test cases on multiple Python programs and generates a summary report. To use:
(1) Click 'Load...' and select a .txt file containing test cases.
(2) Click 'Choose Folder...' and choose the programs' location.
(3) Check 'Use subfolders' if the programs are in subfolders.
(4) Click 'Generate Report and Save Report As...' and save the report as a .txt file.
Test cases must be stored in a text file with a .txt extension. Each test case has an input section followed by an output section. Each section must begin with a header line that starts with a configurable prefix ({0} by default). The header line is only used to detect the start of a section and is otherwise ignored. The text file can contain multiple test cases by alternating between input and output sections.
For a test case, each line in the input section corresponds to a line of text that the automarker will provide when the input() function is encountered. Similarly, each line in the output section corresponds to a line of text that the program is expected to generate using the print() function. The test case is failed if the actual output generated by the program does not match the expected output exactly.'''.format(DEFAULT_PREFIX)
EXAMPLE = '''The .txt file on the left has 3 test cases for an integer addition problem. Using this file, the automarker will simulate 3 test runs for each Python program. On the right, you can see the 3 simulated test runs for a program that passes 2 out of the 3 test cases.'''
# BUG FIX: the second test case's output header was mislabeled
# "Test Case 3: Output" in the displayed example; corrected to 2.
SAMPLE = '''{0} Test Case 1: Input
1
1
{0} Test Case 1: Output
2
{0} Test Case 2: Input
1
-1
{0} Test Case 2: Output
0
{0} Test Case 3: Input
1
one
{0} Test Case 3: Output
Error'''.format(DEFAULT_PREFIX)
# Example simulated runs shown next to SAMPLE in the help dialog.
RUN1 = '''Enter x: 1
Enter y: 1
2'''
RUN2 = '''Enter x: 1
Enter y: -1
Error'''
RUN3 = '''Enter x: 1
Enter y: one
Error'''

# Status-bar message templates.
TEST_CASES_STATUS = '{0} test case(s) loaded'
TEST_CASES_STATUS_NONE = 'No test cases loaded'
TEST_CASES_TITLE = 'Test Case {0} out of {1}'
SUBMISSIONS_FOLDER_NONE = 'No folder chosen'
SUBMISSIONS_STATUS = '{0} submission(s) found'
SUBMISSIONS_STATUS_NONE = 'No submissions found'
REPORT_STATUS = 'Ready to run {0} test case(s) on {1} submission(s)'
REPORT_STATUS_NONE = 'Not ready'
# The following module code is adapted from https://github.com/foutaise/texttable/ under the MIT license.
# Copyright (C) 2003-2018 Gerome Fournier <jef(at)foutaise.org>
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class FallbackToText(Exception):
"""Used for failed conversion to float"""
pass
class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [x[:1] for x in [str(s) for s in array]]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(str, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return str(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a': self._fmt_auto,
'i': self._fmt_int,
'f': self._fmt_float,
'e': self._fmt_exp,
't': self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements"
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [self._len_cell(x) for x in self._header]
for row in self._rows:
for cell, i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0, 4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line
+ int(fill/2 + fill % 2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space,
self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(tw.wrap(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
class Executor:
    """Run pre-compiled student bytecode with input()/print() redirected to
    in-memory buffers, so test input can be fed and output captured.

    NOTE: exec() runs untrusted student code with no sandboxing — acceptable
    for a local marking tool, but do not expose this to external input.
    """

    def __init__(self, filename, bytecode, test_input):
        # BUG FIX: the original used __builtins__.__dict__.copy(), which only
        # works when __builtins__ is a module (guaranteed only in __main__);
        # when this module is imported, __builtins__ is a plain dict and that
        # expression raises AttributeError. Import the builtins module instead.
        import builtins
        self._bytecode = bytecode
        self._in = io.StringIO(test_input)   # canned stdin for the program
        self._out = io.StringIO()            # captured stdout
        self._scope = BASE_SCOPE.copy()
        self._scope['__file__'] = filename
        self._scope['__builtins__'] = vars(builtins).copy()
        # Swap in our redirected input/print for the student program.
        self._scope['__builtins__']['input'] = self._input
        self._scope['__builtins__']['print'] = self._print

    def execute(self):
        """Execute the bytecode; return everything the program printed."""
        exec(self._bytecode, self._scope, self._scope)
        return self._out.getvalue()

    def _input(self, prompt=None):
        # Serve the next canned input line; prompt is intentionally discarded.
        with mock.patch('sys.stdin', new=self._in):
            return input()

    def _print(self, *args, **kwargs):
        # Capture printed output into the buffer instead of real stdout.
        with mock.patch('sys.stdout', new=self._out):
            return print(*args, **kwargs)
class TestCase:
    """One input/expected-output pair parsed from the test-case file."""

    def __init__(self, test_input, expected_output):
        self.test_input = test_input
        self.expected_output = expected_output

    def __repr__(self):
        fields = {
            'test_input': self.test_input,
            'expected_output': self.expected_output,
        }
        return repr(fields)
class TestResult:
    """Outcome of running one TestCase: pass/fail plus the captured output."""

    def __init__(self, test_case, success, output):
        self.test_case = test_case
        self.success = success
        self.output = output

    def __repr__(self):
        fields = {
            'test_case': self.test_case,
            'success': self.success,
            'output': self.output,
        }
        return repr(fields)
class SubmissionResult:
    """Per-file result: either a list of TestResults or a compile error."""

    def __init__(self, filename, test_results=None, compile_error=None):
        self.filename = filename
        self.test_results = test_results      # None when compilation failed
        self.compile_error = compile_error    # None when compilation succeeded

    def __repr__(self):
        fields = {
            'filename': self.filename,
            'test_results': self.test_results,
            'compile_error': self.compile_error,
        }
        return repr(fields)
class Tester:
    """Compile a submission once, then run every stored test case against it."""

    def __init__(self, test_cases):
        self.test_cases = test_cases

    def test(self, filename):
        """Return a SubmissionResult covering all test cases for *filename*."""
        with open(filename) as handle:
            source = handle.read()
        try:
            bytecode = compile(source, filename, 'exec')
        except SyntaxError as err:
            # Submission doesn't even compile: report that and skip the tests.
            return SubmissionResult(filename, compile_error=err)
        outcomes = []
        for case in self.test_cases:
            runner = Executor(filename, bytecode, case.test_input)
            try:
                actual = runner.execute()
            except Exception as err:
                # Runtime error counts as a failed test; record the message.
                outcomes.append(TestResult(case, False, str(err)))
            else:
                # Trailing whitespace/newlines are ignored when comparing.
                passed = actual.rstrip() == case.expected_output.rstrip()
                outcomes.append(TestResult(case, passed, actual))
        return SubmissionResult(filename, test_results=outcomes)
class AutoMarker:
    """Model object: holds the loaded test cases, the submission file list,
    and produces the marking report. The Gui class drives it via setters."""

    def __init__(self):
        self.test_cases_raw = None        # raw text of the loaded test-case file
        self.test_cases = None            # list of TestCase, or None if not loaded
        self.prefix = DEFAULT_PREFIX      # line prefix separating test-case sections
        self.folder = None                # submissions folder, or None
        self.subfolders = False           # whether _search recurses into subfolders
        self.file_filter = DEFAULT_FILE_FILTER  # glob pattern for submission files
        self.files = None                 # sorted list of matched files, or None

    def is_ready(self):
        """True when both test cases and submission files are available."""
        return self.test_cases and self.files

    def set_test_cases_raw(self, test_cases_raw):
        """Store raw test-case text and re-parse; returns parse success."""
        self.test_cases_raw = test_cases_raw
        return self._parse()

    def set_prefix(self, prefix):
        """Change the section prefix and re-parse; returns parse success."""
        self.prefix = prefix
        return self._parse()

    def _parse(self):
        """Split raw text into alternating input/output sections.

        Sections are delimited by lines starting with the prefix. A valid
        file yields an odd number of split parts (>= 3): a leading header
        followed by input/output pairs. On failure both the raw text and the
        parsed list are cleared and False is returned.
        """
        if not self.test_cases_raw:
            self.test_cases = None
            return False
        sections = re.split(r'^' + re.escape(self.prefix) +
                            r'[^\n]*\n', self.test_cases_raw, flags=re.MULTILINE)
        if len(sections) < 3 or len(sections) % 2 == 0:
            self.test_cases_raw = None
            self.test_cases = None
            return False
        self.test_cases = []
        # sections[0] is whatever preceded the first prefix line; pairs follow.
        for i in range(1, len(sections), 2):
            self.test_cases.append(TestCase(sections[i], sections[i + 1]))
        return True

    def set_folder(self, folder):
        """Change the submissions folder and re-search; returns search success."""
        self.folder = folder
        return self._search()

    def set_subfolders(self, subfolders):
        """Toggle recursive search and re-search; returns search success."""
        self.subfolders = subfolders
        return self._search()

    def set_file_filter(self, file_filter):
        """Change the glob filter and re-search; returns search success."""
        self.file_filter = file_filter
        return self._search()

    def refresh(self):
        """Re-run the file search with current settings."""
        return self._search()

    def _search(self):
        """Glob for submission files under the configured folder.

        With subfolders enabled, a '**' component makes the glob recursive.
        Results are sorted for stable report ordering.
        """
        if not self.folder:
            self.files = None
            return False
        pattern = self.folder
        if self.subfolders:
            pattern = path.join(pattern, '**')
        pattern = path.join(pattern, self.file_filter)
        self.files = glob.glob(pattern, recursive=True)
        self.files.sort()
        return True

    def generate_report(self, f):
        """Run all test cases against all files, writing a report to *f*.

        The report is a summary table (one row per file, 1/0 per test plus a
        score) followed by a per-file section listing failed test cases.
        Returns the number of submissions that passed every test case.
        """
        tester = Tester(self.test_cases)
        results = [tester.test(filename) for filename in self.files]
        table = Texttable()
        table.header(['File name'] +
                     list(range(1, len(self.test_cases) + 1)) + ['Score'])
        perfects = 0
        for file_results in results:
            if file_results.compile_error:
                # Dashes across every test column and the score column.
                table.add_row([file_results.filename] + ['-'] * (len(self.test_cases) + 1))
                continue
            successes = [1 if result.success else 0 for result in file_results.test_results]
            score = sum(successes)
            if score == len(file_results.test_results):
                perfects += 1
            table.add_row([file_results.filename] + successes + [score])
        f.write(table.draw() + '\n\n')
        # Detail section: one table of failed cases per submission.
        for file_results in results:
            f.write(file_results.filename + '\n')
            if file_results.compile_error:
                f.write('Syntax error: ' + str(file_results.compile_error) + '\n\n')
                continue
            table = Texttable()
            table.header(['Failed Test Case', 'Input',
                          'Expected Output', 'Actual Output'])
            rows = 0
            for i in range(len(file_results.test_results)):
                result = file_results.test_results[i]
                if result.success:
                    continue
                table.add_row([i + 1, result.test_case.test_input,
                               result.test_case.expected_output, result.output])
                rows += 1
            if rows == 0:
                f.write('No failed test cases\n\n')
                continue
            f.write(table.draw() + '\n\n')
        return perfects
class Gui:
    """Tkinter front-end that wires user actions to an AutoMarker instance.

    Widget creation (make_widgets), geometry (layout_widgets) and
    model-to-view synchronization (the sync_* methods) are kept separate.
    """

    def __init__(self, automarker):
        self.automarker = automarker
        # Index of the test case shown in the viewer; None until one is loaded.
        self.current_test_case = None
        self.current_submission = None
        self.make_widgets()
        self.layout_widgets()
        # Bring every pane in line with the (initially empty) model state.
        self.sync_test_cases()
        self.sync_submissions()
        self.sync_report()

    def make_widgets(self):
        """Create every widget; geometry management happens in layout_widgets."""
        self.root = tk.Tk()
        self.root.title('automarker')
        self.menu = tk.Menu(self.root)
        self.root.config(menu=self.menu)
        self.main = ttk.Frame(self.root)
        # --- Instructions pane with a worked example ---
        self.instructions = ttk.Labelframe(self.main, text='Instructions')
        self.instructions_text = st.ScrolledText(
            self.instructions, wrap='word', background=READONLY_BG, width=60)
        self.instructions_text.insert('0.1', INSTRUCTIONS)
        self.instructions_text.config(state='disabled')
        self.example = ttk.Labelframe(self.instructions, text='Example')
        self.example_text = st.ScrolledText(
            self.example, wrap='word', background=READONLY_BG, width=60, height=5)
        self.example_text.insert('0.1', EXAMPLE)
        self.example_text.config(state='disabled')
        self.example_sample = tk.Text(
            self.example, wrap='none', background=READONLY_BG, width=20, height=15)
        self.example_sample.insert('0.1', SAMPLE)
        self.example_sample.config(state='disabled')
        self.example_run1 = tk.Text(
            self.example, wrap='none', background=READONLY_BG, width=20, height=3)
        self.example_run1.insert('0.1', RUN1)
        self.example_run1.config(state='disabled')
        self.example_run2 = tk.Text(
            self.example, wrap='none', background=READONLY_BG, width=20, height=3)
        self.example_run2.insert('0.1', RUN2)
        self.example_run2.config(state='disabled')
        self.example_run3 = tk.Text(
            self.example, wrap='none', background=READONLY_BG, width=20, height=3)
        self.example_run3.insert('0.1', RUN3)
        self.example_run3.config(state='disabled')
        self.example_run1_result = ttk.Label(self.example, text="PASS")
        self.example_run2_result = ttk.Label(self.example, text="FAIL")
        self.example_run3_result = ttk.Label(self.example, text="PASS")
        # --- Test-cases pane: load/prefix header, prev/next nav, viewer ---
        self.test_cases = ttk.Labelframe(self.main, text='Test Cases')
        self.test_cases_header = ttk.Frame(self.test_cases)
        self.test_cases_load = ttk.Button(
            self.test_cases_header, text='Load...', command=self.load_test_cases)
        self.test_cases_status = ttk.Label(self.test_cases_header)
        self.test_cases_change = ttk.Button(
            self.test_cases_header, text='Change Prefix...', command=self.change_prefix)
        self.test_cases_prefix = ttk.Label(self.test_cases_header)
        self.test_cases_nav = ttk.Frame(self.test_cases)
        self.test_cases_prev = ttk.Button(
            self.test_cases_nav, text="<", command=self.prev_test_case)
        self.test_cases_title = ttk.Label(self.test_cases_nav, anchor='center')
        self.test_cases_next = ttk.Button(
            self.test_cases_nav, text=">", command=self.next_test_case)
        self.test_cases_viewer = ttk.Frame(self.test_cases)
        self.test_cases_input_label = ttk.Label(
            self.test_cases_viewer, text='Input')
        self.test_cases_output_label = ttk.Label(
            self.test_cases_viewer, text='Expected Output')
        self.test_cases_input = st.ScrolledText(
            self.test_cases_viewer, wrap='none', background=READONLY_BG, width=30, height=10)
        self.test_cases_input.config(state='disabled')
        self.test_cases_output = st.ScrolledText(
            self.test_cases_viewer, wrap='none', background=READONLY_BG, width=30, height=10)
        self.test_cases_output.config(state='disabled')
        # --- Submissions pane: folder/filter headers, file list, preview ---
        self.submissions = ttk.Labelframe(self.main, text="Python Submissions")
        self.submissions_header1 = ttk.Frame(self.submissions)
        self.submissions_choose = ttk.Button(
            self.submissions_header1, text='Choose Folder...', command=self.choose_folder)
        self.submissions_folder = ttk.Label(self.submissions_header1)
        self.submissions_subfolders_var = tk.StringVar()
        self.submissions_subfolders = ttk.Checkbutton(
            self.submissions_header1, text='Include subfolders', variable=self.submissions_subfolders_var, onvalue='True', offvalue='False', command=self.toggle_subfolders)
        self.submissions_header2 = ttk.Frame(self.submissions)
        self.submissions_refresh = ttk.Button(
            self.submissions_header2, text='Search Again', command=self.refresh_files)
        self.submissions_status = ttk.Label(
            self.submissions_header2, text=SUBMISSIONS_STATUS_NONE)
        self.submissions_change = ttk.Button(
            self.submissions_header2, text='Change Filter...', command=self.change_file_filter)
        self.submissions_filter = ttk.Label(self.submissions_header2)
        self.submissions_preview = ttk.Frame(self.submissions)
        self.submissions_files_label = ttk.Label(
            self.submissions_preview, text='File Name')
        self.submissions_contents_label = ttk.Label(
            self.submissions_preview, text='Contents')
        self.submissions_files = ttk.Frame(self.submissions_preview)
        self.submissions_files_var = tk.StringVar()
        self.submissions_files_list = tk.Listbox(
            self.submissions_files, listvariable=self.submissions_files_var)
        self.submissions_files_list.bind('<<ListboxSelect>>', self.select_file)
        self.submissions_files_scrollbarx = ttk.Scrollbar(
            self.submissions_files, orient='horizontal', command=self.submissions_files_list.xview)
        self.submissions_files_list.config(
            xscrollcommand=self.submissions_files_scrollbarx.set)
        self.submissions_files_scrollbary = ttk.Scrollbar(
            self.submissions_files, orient='vertical', command=self.submissions_files_list.yview)
        self.submissions_files_list.config(
            yscrollcommand=self.submissions_files_scrollbary.set)
        self.submissions_contents = st.ScrolledText(
            self.submissions_preview, wrap='none', background=READONLY_BG, width=30, height=10)
        self.submissions_contents.config(state='disabled')
        # --- Report pane: generate button plus status label ---
        self.report = ttk.Frame(self.main)
        self.report_generate = ttk.Button(
            self.report, text='Run Test Cases and Save Report As...', command=self.generate_report)
        self.report_status = ttk.Label(self.report)

    def layout_widgets(self):
        """Grid every widget and configure row/column stretch behaviour."""
        common_kwargs = {
            'sticky': 'nsew', 'padx': PADX, 'pady': PADY
        }
        # Example sub-grid inside the instructions pane.
        self.example_text.grid(column=0, columnspan=3, row=0, **common_kwargs)
        self.example_sample.grid(column=0, row=1, rowspan=3, **common_kwargs)
        self.example_run1.grid(column=1, row=1, **common_kwargs)
        self.example_run2.grid(column=1, row=2, **common_kwargs)
        self.example_run3.grid(column=1, row=3, **common_kwargs)
        self.example_run1_result.grid(column=2, row=1, **common_kwargs)
        self.example_run2_result.grid(column=2, row=2, **common_kwargs)
        self.example_run3_result.grid(column=2, row=3, **common_kwargs)
        self.example.columnconfigure(0, weight=4, minsize=200)
        self.example.columnconfigure(1, weight=3, minsize=150)
        self.example.columnconfigure(2, weight=0)
        self.example.rowconfigure(0, weight=0)
        self.example.rowconfigure(1, weight=0)
        self.example.rowconfigure(2, weight=0)
        self.example.rowconfigure(3, weight=0)
        self.instructions_text.grid(column=0, row=0, **common_kwargs)
        self.example.grid(column=0, row=1, ipadx=PADX,
                          ipady=PADY, **common_kwargs)
        self.instructions.columnconfigure(0, weight=1)
        self.instructions.rowconfigure(0, weight=1)
        self.instructions.rowconfigure(1, weight=0)
        # Test-cases pane: header row, nav row, viewer row.
        self.test_cases_load.grid(column=0, row=0, **common_kwargs)
        self.test_cases_status.grid(column=1, row=0, **common_kwargs)
        self.test_cases_change.grid(column=2, row=0, **common_kwargs)
        self.test_cases_prefix.grid(column=3, row=0, **common_kwargs)
        self.test_cases_header.columnconfigure(0, weight=0)
        self.test_cases_header.columnconfigure(1, weight=1)
        self.test_cases_header.columnconfigure(2, weight=0)
        self.test_cases_header.columnconfigure(3, weight=0)
        self.test_cases_header.rowconfigure(0, weight=0)
        self.test_cases_prev.grid(column=0, row=0, **common_kwargs)
        self.test_cases_title.grid(column=1, row=0, **common_kwargs)
        self.test_cases_next.grid(column=2, row=0, **common_kwargs)
        self.test_cases_nav.columnconfigure(0, weight=0)
        self.test_cases_nav.columnconfigure(1, weight=1)
        self.test_cases_nav.columnconfigure(2, weight=0)
        self.test_cases_nav.rowconfigure(0, weight=0)
        self.test_cases_input_label.grid(column=0, row=0, **common_kwargs)
        self.test_cases_output_label.grid(column=1, row=0, **common_kwargs)
        self.test_cases_input.grid(column=0, row=1, **common_kwargs)
        self.test_cases_output.grid(column=1, row=1, **common_kwargs)
        # 'uniform' keeps input/output columns the same width.
        self.test_cases_viewer.columnconfigure(0, weight=1, uniform='viewer')
        self.test_cases_viewer.columnconfigure(1, weight=1, uniform='viewer')
        self.test_cases_viewer.rowconfigure(0, weight=0)
        self.test_cases_viewer.rowconfigure(1, weight=1)
        self.test_cases_header.grid(column=0, row=0, sticky='nsew')
        self.test_cases_nav.grid(column=0, row=1, sticky='nsew')
        self.test_cases_viewer.grid(column=0, row=2, sticky='nsew')
        self.test_cases.columnconfigure(0, weight=1)
        self.test_cases.rowconfigure(0, weight=0)
        self.test_cases.rowconfigure(1, weight=0)
        self.test_cases.rowconfigure(2, weight=1)
        # Submissions pane: two header rows plus the file list / preview split.
        self.submissions_choose.grid(column=0, row=0, **common_kwargs)
        self.submissions_folder.grid(column=1, row=0, **common_kwargs)
        self.submissions_subfolders.grid(column=2, row=0, **common_kwargs)
        self.submissions_header1.columnconfigure(0, weight=0)
        self.submissions_header1.columnconfigure(1, weight=1)
        self.submissions_header1.columnconfigure(2, weight=0)
        self.submissions_header1.rowconfigure(0, weight=0)
        self.submissions_refresh.grid(column=0, row=0, **common_kwargs)
        self.submissions_status.grid(column=1, row=0, **common_kwargs)
        self.submissions_change.grid(column=2, row=0, **common_kwargs)
        self.submissions_filter.grid(column=3, row=0, **common_kwargs)
        self.submissions_header2.columnconfigure(0, weight=0)
        self.submissions_header2.columnconfigure(1, weight=1)
        self.submissions_header2.columnconfigure(2, weight=0)
        self.submissions_header2.columnconfigure(3, weight=0)
        self.submissions_header2.rowconfigure(0, weight=0)
        self.submissions_files_list.grid(column=0, row=0, sticky='nsew')
        self.submissions_files_scrollbary.grid(column=1, row=0, sticky='nsew')
        self.submissions_files_scrollbarx.grid(column=0, row=1, sticky='nsew')
        self.submissions_files.columnconfigure(0, weight=1)
        self.submissions_files.columnconfigure(1, weight=0)
        self.submissions_files.rowconfigure(0, weight=1)
        self.submissions_files.rowconfigure(1, weight=0)
        self.submissions_files_label.grid(column=0, row=0, **common_kwargs)
        self.submissions_contents_label.grid(column=1, row=0, **common_kwargs)
        self.submissions_files.grid(column=0, row=1, **common_kwargs)
        self.submissions_contents.grid(column=1, row=1, **common_kwargs)
        self.submissions_preview.columnconfigure(0, weight=1, uniform='files')
        self.submissions_preview.columnconfigure(1, weight=1, uniform='files')
        self.submissions_preview.rowconfigure(0, weight=0)
        self.submissions_preview.rowconfigure(1, weight=1)
        self.submissions_header1.grid(column=0, row=0, sticky='nsew')
        self.submissions_header2.grid(column=0, row=1, sticky='nsew')
        self.submissions_preview.grid(column=0, row=2, sticky='nsew')
        self.submissions.columnconfigure(0, weight=1)
        self.submissions.rowconfigure(0, weight=0)
        self.submissions.rowconfigure(1, weight=0)
        self.submissions.rowconfigure(2, weight=1)
        self.report_generate.grid(column=0, row=0, **common_kwargs)
        self.report_status.grid(column=1, row=0, **common_kwargs)
        self.report.columnconfigure(0, weight=0)
        self.report.columnconfigure(1, weight=1)
        self.report.rowconfigure(0, weight=0)
        # Top-level arrangement: instructions on the left, the three work
        # panes stacked on the right.
        self.instructions.grid(column=0, row=0, rowspan=3,
                               ipadx=PADX, ipady=PADY, **common_kwargs)
        self.test_cases.grid(column=1, row=0, ipadx=PADX,
                             ipady=PADY, **common_kwargs)
        self.submissions.grid(column=1, row=1, ipadx=PADX,
                              ipady=PADY, **common_kwargs)
        self.report.grid(column=1, row=2, ipadx=PADX,
                         ipady=PADY, **common_kwargs)
        self.main.columnconfigure(0, weight=0)
        self.main.columnconfigure(1, weight=1)
        self.main.rowconfigure(0, weight=1, uniform='main')
        self.main.rowconfigure(1, weight=1, uniform='main')
        self.main.rowconfigure(2, weight=0)
        self.main.grid(column=0, row=0, sticky='nsew', ipadx=PADX, ipady=PADY)
        self.root.columnconfigure(0, weight=1)
        self.root.rowconfigure(0, weight=1)

    def run(self):
        """Enter the Tk event loop (blocks until the window closes)."""
        self.root.mainloop()

    def load_test_cases(self):
        """Prompt for a test-case file, load and parse it, then resync views."""
        filename = fd.askopenfilename(filetypes=(
            ('Text Files', '.txt'), ('All Files', '*')))
        if not filename:
            return
        filename = path.abspath(filename)
        try:
            with open(filename) as f:
                test_cases_raw = f.read()
        except OSError as e:
            mb.showerror('Error', 'Error loading test cases:\n\n' + str(e))
            return
        if self.automarker.set_test_cases_raw(test_cases_raw):
            mb.showinfo('Success', '{} test case(s) successfully loaded.'.format(
                len(self.automarker.test_cases)))
            self.current_test_case = 0
        else:
            mb.showerror(
                'Error', 'Invalid test cases. Check that the prefix is set correctly and try again.')
            self.current_test_case = None
        self.sync_test_cases()
        self.sync_report()

    def change_prefix(self):
        """Prompt for a new section prefix and re-parse loaded test cases."""
        prefix = sd.askstring(
            "Change Prefix", "Enter new prefix:", initialvalue=self.automarker.prefix)
        if not prefix:
            return
        had_test_cases = bool(self.automarker.test_cases)
        # Warn only if previously-valid test cases were invalidated by the change.
        if not self.automarker.set_prefix(prefix) and had_test_cases:
            mb.showwarning(
                'Warning', 'Existing test cases are incompatible with new prefix and have been cleared.\n\nClick \'Load...\' to load new test cases.')
        self.sync_test_cases()
        self.sync_report()

    def prev_test_case(self):
        """Step the viewer to the previous test case (clamped at 0)."""
        self.current_test_case = max(0, self.current_test_case - 1)
        self.sync_test_cases()

    def next_test_case(self):
        """Step the viewer to the next test case (clamped at the last one)."""
        self.current_test_case = min(
            len(self.automarker.test_cases) - 1, self.current_test_case + 1)
        self.sync_test_cases()

    def choose_folder(self):
        """Prompt for the submissions folder and re-run the file search."""
        folder = fd.askdirectory()
        if not folder:
            return
        folder = path.abspath(folder)
        self.automarker.set_folder(folder)
        self.sync_submissions()
        self.sync_report()

    def toggle_subfolders(self):
        """Propagate the 'include subfolders' checkbox into the model."""
        self.automarker.set_subfolders(
            self.submissions_subfolders_var.get() == 'True')
        self.sync_submissions()
        self.sync_report()

    def change_file_filter(self):
        """Prompt (repeatedly, until valid or cancelled) for a new glob filter."""
        file_filter = self.automarker.file_filter
        while True:
            file_filter = sd.askstring(
                "Change Filter", "Enter new filter:", initialvalue=file_filter)
            if not file_filter:
                return
            if re.match(VALID_FILE_FILTER_REGEX, file_filter):
                break
            mb.showerror('Error', 'Invalid filter. Please try again.')
        self.automarker.set_file_filter(file_filter)
        self.sync_submissions()
        self.sync_report()

    def refresh_files(self):
        """Re-run the submissions search with the current settings."""
        self.automarker.refresh()
        self.sync_submissions()
        self.sync_report()

    def select_file(self, event):
        """Show the selected submission's contents in the preview pane."""
        selection = self.submissions_files_list.curselection()
        if not selection:
            self._set_readonly_text(self.submissions_contents, '')
            return
        filename = self.automarker.files[selection[0]]
        try:
            with open(filename) as f:
                contents = f.read()
        except OSError as e:
            mb.showerror('Error', 'Error loading submission:\n\n' + str(e))
            # The file list may be stale (file deleted/moved); re-search.
            self.refresh_files()
            return
        self._set_readonly_text(self.submissions_contents, contents)

    def generate_report(self):
        """Ask for a save location, run all tests, write and open the report."""
        filename = fd.asksaveasfilename(filetypes=(
            ('Text Files', '.txt'), ('All Files', '*')))
        if not filename:
            return
        filename = path.abspath(filename)
        # Default to a .txt extension unless doing so would clobber a file.
        _, extension = path.splitext(filename)
        if not extension and not path.exists(filename + '.txt'):
            filename += '.txt'
        with open(filename, 'w') as f:
            perfects = self.automarker.generate_report(f)
        mb.showinfo('Success', '{} out of {} submissions passed all test cases.'.format(perfects, len(self.automarker.files)))
        # os.startfile exists on Windows only; open the report if possible.
        if hasattr(os, 'startfile'):
            os.startfile(f.name)

    def _set_readonly_text(self, widget, text):
        """Replace the full contents of a disabled Text widget with *text*."""
        widget.config(state='normal')
        widget.replace('1.0', 'end', text)
        widget.config(state='disabled')

    def sync_test_cases(self):
        """Update the test-cases pane (status, nav buttons, viewer) from the model."""
        self.test_cases_prefix.config(text=self.automarker.prefix)
        if not self.automarker.test_cases:
            self.test_cases_status.config(text=TEST_CASES_STATUS_NONE)
            self.test_cases_prev.config(state='disabled')
            self.test_cases_title.config(
                text=TEST_CASES_TITLE.format('-', '-'))
            self.test_cases_next.config(state='disabled')
            self._set_readonly_text(self.test_cases_input, '')
            self._set_readonly_text(self.test_cases_output, '')
            return
        length = len(self.automarker.test_cases)
        # Clamp the current index into the valid range after a reload.
        if self.current_test_case is None:
            self.current_test_case = 0
        if self.current_test_case >= length:
            self.current_test_case = length - 1
        current = self.automarker.test_cases[self.current_test_case]
        self.test_cases_status.config(text=TEST_CASES_STATUS.format(length))
        self.test_cases_prev.config(
            state='normal' if self.current_test_case > 0 else 'disabled')
        self.test_cases_title.config(text=TEST_CASES_TITLE.format(
            self.current_test_case + 1, length))
        self.test_cases_next.config(
            state='normal' if self.current_test_case < length - 1 else 'disabled')
        self._set_readonly_text(self.test_cases_input, current.test_input)
        self._set_readonly_text(self.test_cases_output,
                                current.expected_output)

    def sync_submissions(self):
        """Update the submissions pane (folder, filter, file list) from the model."""
        self.submissions_folder.config(
            text=self.automarker.folder if self.automarker.folder else SUBMISSIONS_FOLDER_NONE)
        self.submissions_refresh.config(
            state='normal' if self.automarker.folder else 'disabled')
        self.submissions_filter.config(text=self.automarker.file_filter)
        if not self.automarker.files:
            self.submissions_status.config(text=SUBMISSIONS_STATUS_NONE)
            self.submissions_files_var.set([])
            self.submissions_files_list.config(state='disabled')
            self._set_readonly_text(self.submissions_contents, '')
            return
        self.submissions_status.config(
            text=SUBMISSIONS_STATUS.format(len(self.automarker.files)))
        self.submissions_files_list.config(state='normal')
        self.submissions_files_var.set(self.automarker.files)
        self.submissions_files_list.select_clear(0, 'end')
        self._set_readonly_text(self.submissions_contents, '')

    def sync_report(self):
        """Enable/disable the report button depending on model readiness."""
        if not self.automarker.is_ready():
            self.report_generate.config(state='disabled')
            self.report_status.config(text=REPORT_STATUS_NONE)
            return
        self.report_generate.config(state='normal')
        self.report_status.config(text=REPORT_STATUS.format(
            len(self.automarker.test_cases), len(self.automarker.files)))
def main():
    """Build the marker model, attach the GUI, and enter the Tk main loop."""
    app = AutoMarker()
    gui = Gui(app)
    gui.run()


if __name__ == '__main__':
    # Guard so importing this module (e.g. for testing) does not launch the GUI.
    main()
| ceucomputing/automarker | automarker.py | automarker.py | py | 46,918 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "textwrap.wrap",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
... |
24248570033 | import os
import sys
import datetime as dt
import numpy as np
# Three-letter labels indexed by datetime.weekday() (0 = Monday ... 6 = Sunday).
G_WeekDays = ('MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN')
def dmy2Weekday(dmy):
    """Convert a b'DD-MM-YYYY' byte string into a weekday index (0 = Monday)."""
    text = dmy.decode('utf-8')
    parsed = dt.datetime.strptime(text, '%d-%m-%Y')
    return parsed.weekday()
def ReadData(filename):
    """Load (weekday indices, closing prices) from a comma-separated quote file.

    Column 1 holds the date (converted via dmy2Weekday), column 6 the close.
    """
    columns = np.loadtxt(filename, delimiter=',', usecols=(1, 6),
                         unpack=True, converters={1: dmy2Weekday})
    WeekDays, ClosingPrices = columns
    return WeekDays, ClosingPrices
def CalcAveragePrices(WeekDays, ClosingPrices):
    """Return a length-5 array of mean closing prices for Mon..Fri.

    WeekDays holds a weekday index (0-4) per sample; ClosingPrices the
    matching price. Element d of the result is the mean over samples with
    weekday d.
    """
    AveragePrices = np.zeros(5)
    for day in range(AveragePrices.size):
        # Boolean-mask selection of every price that fell on this weekday.
        AveragePrices[day] = ClosingPrices[WeekDays == day].mean()
    return AveragePrices
def main(argc, argv, envp):
    """Print the per-weekday average closing price, flagging the max and min."""
    WeekDays, ClosingPrices = ReadData('aapl.csv')
    AveragePrices = CalcAveragePrices(WeekDays, ClosingPrices)
    MaxIndex = np.argmax(AveragePrices)
    MinIndex = np.argmin(AveragePrices)
    for day, average_price in enumerate(AveragePrices):
        # Same marker precedence as before: max wins if both indices coincide.
        if day == MaxIndex:
            marker = '(max)'
        elif day == MinIndex:
            marker = '(min)'
        else:
            marker = ''
        print(G_WeekDays[day], ':', average_price, marker)
    return 0
if __name__ == '__main__':
    # Delegate to main() and propagate its return value as the exit status.
    sys.exit(main(len(sys.argv), sys.argv, os.environ))
| shtyi037/Python_practice | DATASCIENCE/weekdays.py | weekdays.py | py | 1,871 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy... |
74431933545 | # from the graphs, output metrics of dependency analysis.
# INPUT:
# folder_analysis (global variable, folder for analysis)
# folder_analysis/graph_set_per_domain.bin (output of 2_build_dependency.py)
# folder_analysis/graph_set_global.bin (output of 2_build_dependency.py)
# folder_analysis/domain_list.txt (used to determine the ranking of top domains)
# OUTPUT:
#
# Python 3
import sys
import os
import time
import networkx as nx
import matplotlib.pyplot as plt
import pickle
###### GLOBAL CONFIG ######
# folder_analysis = "top100k"
# os.chdir(folder_analysis)
domain_file = "topdomain10k.txt"

###### INIT ######
# Per-domain graphs produced by 2_build_dependency.py:
# Graph_set[domain] = {"general": {"graph": G, "extrasize": ..., "avgextradepth": ..., "maxextradepth": ...},
#                      "explicit": {...}, "critical": {...}, "essential": {...}}
# Use context managers so the pickle file handles are closed deterministically
# (previously `pickle.load(open(...))` leaked the descriptors).
with open("graph_set_per_domain.bin", "rb") as pickle_file:
    Graph_set = pickle.load(pickle_file)
print("[+]", len(Graph_set), "domains in the Graph set.\n")
# Global (merged) graphs: Global_graph_set = {"general": G, "explicit": G, "critical": G, "essential": G}
with open("graph_set_global.bin", "rb") as pickle_file:
    Global_graph_set = pickle.load(pickle_file)
###### MAIN ######
### Global graph analysis.
# 1. relative density = |E(Global_graph)| / |E(Global_essential)|
Global_essential_edge_count = Global_graph_set["essential"].number_of_edges()
print("[+] Count of edges in Global graph of essential:", Global_essential_edge_count)
for mode in ["general", "explicit", "critical"]:
    edge_count = Global_graph_set[mode].number_of_edges()
    RelativeDensity = edge_count / Global_essential_edge_count
    print("[+] RelativeDensity of", mode, "is", RelativeDensity)

# 2. the indegree of each node (the most depended domains).
# TODO: the distribution of global indegree.
top = 50  # number of top-ranked nodes to print.
print("\n[+] The indegree of top", top, "nodes (excluding TLDs):")
# first find the transitive closure of G_critical so indirect dependencies count too.
G = nx.transitive_closure(Global_graph_set["critical"])
node_indegree = {}
for node in G.nodes():
    node_indegree[node] = G.in_degree(node)
# sort the nodes in G by their indegree, highest first.
temp = sorted(node_indegree.items(), key=lambda x: x[1], reverse=True)
counter = 0
for item in temp:
    # Nodes without a dot are TLDs; skip them.
    if "." in item[0]:
        print("\t", item)
        counter += 1
    # BUG FIX: was `counter > top`, which printed top + 1 entries.
    if counter >= top:
        break
### Individual graph analysis.
# 1. distribution of ExtraSize, MaxExtraDepth, AvgExtraDepth.
# has_zn[mode]: number of domains with a non-empty non-essential set Zn.
# avg_zn[mode]: average |Zn| over those domains (computed below).
# max_extra_depth_under_4[mode]: domains whose max extra depth is < 4.
has_zn = {}
avg_zn = {}
max_extra_depth_under_4 = {}
for mode in ["general", "explicit", "critical"]:
    # Initialize counters up front instead of lazily inside the loop.
    has_zn[mode] = 0
    avg_zn[mode] = 0
    max_extra_depth_under_4[mode] = 0
for domain in Graph_set:
    for mode in ["general", "explicit", "critical"]:
        if Graph_set[domain][mode]["extrasize"] > 0:
            has_zn[mode] += 1
            avg_zn[mode] += Graph_set[domain][mode]["extrasize"]
        if Graph_set[domain][mode]["maxextradepth"] < 4:
            max_extra_depth_under_4[mode] += 1
# calc average; guard against division by zero when no domain has extra deps.
for mode in ["general", "explicit", "critical"]:
    if has_zn[mode]:
        avg_zn[mode] /= has_zn[mode]
print("\n[+] domains with non-essential dependency: ")
for mode in ["general", "explicit", "critical"]:
    print(mode, "count:", has_zn[mode], "pct:", has_zn[mode] / len(Graph_set), "avg:", avg_zn[mode])
# BUG FIX: the message previously said "< 3" while the counter uses "< 4".
print("\n[+] domains with max-extra-depth < 4: ")
for mode in ["general", "explicit", "critical"]:
    print(mode, "count:", max_extra_depth_under_4[mode], "pct:", max_extra_depth_under_4[mode] / len(Graph_set))
# 2. relationship between domain rank & |Zn|
# Read domain rankings; order in the file determines the x-axis position.
domain_list = []
with open(domain_file) as inputf:
    for line in inputf:
        domain_list.append(line.split("\t")[0])
# concentrate `magnitude` consecutive domains into one averaged dot.
magnitude = 10000
# extract and draw |Zn| in order.
x = []
y_general = []
y_explicit = []
y_critical = []
for i in range(0, len(domain_list), magnitude):
    # Slice instead of range(i, i + magnitude): a trailing partial chunk no
    # longer raises IndexError, and the average uses the real chunk size.
    chunk = domain_list[i:i + magnitude]
    x.append(i)
    avg_g = 0
    avg_e = 0
    avg_c = 0
    for domain in chunk:
        avg_g += Graph_set[domain]["general"]["extrasize"]
        avg_e += Graph_set[domain]["explicit"]["extrasize"]
        avg_c += Graph_set[domain]["critical"]["extrasize"]
    y_general.append(avg_g / float(len(chunk)))
    y_explicit.append(avg_e / float(len(chunk)))
    y_critical.append(avg_c / float(len(chunk)))
plt.plot(x, y_general, 'o-', color='g', label="G_general")
plt.plot(x, y_explicit, 'o-', color='b', label="G_explicit")
plt.plot(x, y_critical, 'o-', color='r', label="G_critical")
plt.xlabel("Domain ranking")
plt.ylabel("Avg # extra dependency")
plt.legend(loc = "best")
plt.show()
# 3. relationship between |Zn| and TLD.
# avg_zn[mode] = {tld: [sum_of_extrasize, domain_count], ...}
# non_empty_zn[mode] = {tld: [non_empty_count, total_count], ...}
avg_zn = {}
non_empty_zn = {}
for mode in ["general", "explicit", "critical"]:
    avg_zn[mode] = {}
    non_empty_zn[mode] = {}
    for domain in Graph_set:
        # split domains according to TLDs.
        tld = domain[domain.rfind(".") + 1:]
        if tld not in avg_zn[mode]:
            avg_zn[mode][tld] = [0, 0]  # [sum_value, domain_count]
            non_empty_zn[mode][tld] = [0, 0]  # [non_empty_count, total_count]
        avg_zn[mode][tld][0] += Graph_set[domain][mode]["extrasize"]
        avg_zn[mode][tld][1] += 1
        # check |Zn| of this domain.
        if Graph_set[domain][mode]["extrasize"] > 0:
            # Zn is non-empty.
            non_empty_zn[mode][tld][0] += 1
        # BUG FIX: the total-count denominator must grow for every domain of
        # this TLD, not only for those with a non-empty Zn (it previously sat
        # inside the `if`, making the "% non-empty" ratio below always 1.0 or
        # a division by zero).
        non_empty_zn[mode][tld][1] += 1
# draw the results. first the avg |Zn| graph per TLD.
plt.clf()
x = []
y_general = []
y_explicit = []
y_critical = []
# tld_list = ["com", "net", "org", "ru", "de", "uk", "jp", "br", "info", "pl", "cn", "fr", "it", "nl", "au", "in", "es", "eu", "cz", "ca"]
tld_list = ["com", "net", "org", "xyz", "info", "top", "cc", "co", "io", "me", "cn", "tv", "ru", "de", "uk", "jp", "br", "pl", "fr", "eu"]
for tld in tld_list:  # avg_zn["general"]:
    x.append(tld)
    y_general.append(avg_zn["general"][tld][0] / float(avg_zn["general"][tld][1]))
    y_explicit.append(avg_zn["explicit"][tld][0] / float(avg_zn["explicit"][tld][1]))
    y_critical.append(avg_zn["critical"][tld][0] / float(avg_zn["critical"][tld][1]))
plt.plot(x, y_general, 'o-', color='g', label="G_general")
plt.plot(x, y_explicit, 'o-', color='b', label="G_explicit")
plt.plot(x, y_critical, 'o-', color='r', label="G_critical")
plt.xlabel("TLD")
plt.ylabel("Avg # extra dependency")
plt.legend(loc = "best")
plt.show()
# draw the results. the ratio of domains with non-empty |Zn| graph per TLD.
# Same TLD ordering as the previous figure so the two plots line up.
plt.clf()
x = []
y_general = []
y_explicit = []
y_critical = []
tld_list = ["com", "net", "org", "xyz", "info", "top", "cc", "co", "io", "me", "cn", "tv", "ru", "de", "uk", "jp", "br", "pl", "fr", "eu"]
for tld in tld_list: # avg_zn["general"]:
    x.append(tld)
    # ratio = non_empty_zn[...][0] / non_empty_zn[...][1] per TLD.
    y_general.append(non_empty_zn["general"][tld][0] / float(non_empty_zn["general"][tld][1]))
    y_explicit.append(non_empty_zn["explicit"][tld][0] / float(non_empty_zn["explicit"][tld][1]))
    y_critical.append(non_empty_zn["critical"][tld][0] / float(non_empty_zn["critical"][tld][1]))
plt.plot(x, y_general, 'o-', color='g', label="G_general")
plt.plot(x, y_explicit, 'o-', color='b', label="G_explicit")
plt.plot(x, y_critical, 'o-', color='r', label="G_critical")
plt.xlabel("TLD")
plt.ylabel("% Domains with non-empty extra dependencies")
plt.legend(loc = "best")
plt.show()
| cess-pro/Domain_Relation | src/sf/3_analyze_dependency.py | 3_analyze_dependency.py | py | 7,500 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "networkx.transitive_closure",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot... |
26287011843 | from unittest.mock import patch
import pytest
from deboiler import Deboiler
from deboiler.dataset import ListDataset
from deboiler.models.page import ParsedPage, RawPage
@pytest.mark.parametrize("operation_mode", ["memory", "performance"])
def test_pipeline_end_to_end(operation_mode):
    """Fit/transform a mocked dataset and count how often pages are parsed.

    Memory mode re-parses pages during transform; performance mode caches
    parsed pages during fit and reuses them.
    """
    # `parse_counter` defined as global, so it can be changed within the `gen_mocked_page` function
    global parse_counter
    parse_counter = 0
    base_url = "http://www.globality.com"
    html_content = "<html></html>"
    pages_count = 10

    def gen_mocked_page():
        # Fabricate a ParsedPage with a unique URL and bump the parse counter.
        global parse_counter
        parsed_page = ParsedPage(url=f"{base_url}/{parse_counter}", content=html_content)
        parse_counter += 1
        return parsed_page

    with patch.object(RawPage, "parse") as mocked:
        mocked.side_effect = gen_mocked_page
        dataset = ListDataset(
            [
                dict(url=f"{base_url}/{n}", status=200, content=html_content)
                for n in range(pages_count)
            ],
            content_type_key=None,
        )
        deboiler = Deboiler(
            # Mocking does not work on multi-processing
            # To test memory-optimized mode with multi-processing,
            # we should rely on manual testing
            n_processes=1,
            operation_mode=operation_mode,
        )
        # During fit, each page should be parsed one time
        # So, we expect parse_counter == pages_count
        assert parse_counter == 0
        deboiler.fit(dataset)
        assert parse_counter == pages_count
        # During transform in memory-optimized mode, each page will be
        # parsed again. So we expect parse_counter == pages_count
        # For performance-optimized mode, however, parsed pages should be
        # cached during fit and reused during transform.
        # So, we expect parse_counter == 0
        parse_counter = 0
        output_pages = list(deboiler.transform(dataset))
        assert len(output_pages) == pages_count
        assert parse_counter == (operation_mode == "memory") * pages_count
| globality-corp/deboiler | deboiler/tests/test_operation_modes.py | test_operation_modes.py | py | 2,080 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "deboiler.models.page.ParsedPage",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "deboiler.models.page.RawPage",
"line_number": 26,
"usage_type": "argument"
}... |
34970482318 | import dbus
import os
import re
import time
import unittest
import six
import sys
import glob
from packaging.version import Version
import udiskstestcase
class UDisksLVMTestBase(udiskstestcase.UdisksTestCase):
    '''Shared setup and volume-group helpers for the LVM test suites below.'''

    @classmethod
    def setUpClass(cls):
        udiskstestcase.UdisksTestCase.setUpClass()
        # the whole suite is pointless without the udisks lvm2 module
        if not cls.check_module_loaded('lvm2'):
            udiskstestcase.UdisksTestCase.tearDownClass()
            raise unittest.SkipTest('Udisks module for LVM tests not loaded, skipping.')

    @classmethod
    def _get_lvm_version(cls):
        '''Return the installed LVM version parsed from `lvm version` output.

        Raises:
            RuntimeError: when the version string cannot be parsed.
        '''
        _ret, out = cls.run_command('lvm version')
        m = re.search(r'LVM version:.* ([\d\.]+)', out)
        if not m or len(m.groups()) != 1:
            raise RuntimeError('Failed to determine LVM version from: %s' % out)
        return Version(m.groups()[0])

    def _create_vg(self, vgname, devices):
        '''Create volume group `vgname` on `devices` and return its D-Bus proxy.'''
        manager = self.get_object('/Manager')
        vg_path = manager.VolumeGroupCreate(vgname, devices, self.no_options,
                                            dbus_interface=self.iface_prefix + '.Manager.LVM2')
        vg = self.bus.get_object(self.iface_prefix, vg_path)
        self.assertIsNotNone(vg)
        # this makes sure the object is fully setup (e.g. has the Properties iface)
        vgsize = self.get_property(vg, '.VolumeGroup', 'Size')
        vgsize.assertGreater(0)
        # cross-check with the system view of the VG
        ret, _out = self.run_command('vgs %s' % vgname)
        self.assertEqual(ret, 0)
        return vg

    def _remove_vg(self, vg, tear_down=False, ignore_removed=False):
        '''Delete the volume group behind the `vg` proxy.

        tear_down      -- also tear down the VG contents (passed as option to Delete)
        ignore_removed -- swallow D-Bus errors, e.g. when the VG is already gone
        '''
        try:
            vgname = self.get_property_raw(vg, '.VolumeGroup', 'Name')
            if tear_down:
                options = dbus.Dictionary(signature='sv')
                options['tear-down'] = dbus.Boolean(True)
            else:
                options = self.no_options
            vg.Delete(True, options, dbus_interface=self.iface_prefix + '.VolumeGroup')
            # the VG must no longer be visible to the system tools
            ret, _out = self.run_command('vgs %s' % vgname)
            self.assertNotEqual(ret, 0)
        except dbus.exceptions.DBusException as e:
            if not ignore_removed:
                raise e
class UdisksLVMTest(UDisksLVMTestBase):
    '''This is a basic LVM test suite'''

    def _rescan_lio_devices(self):
        ''' Bring back all vdevs that have been deleted by the test '''
        ret, out = self.run_command("for f in $(find /sys/devices -path '*tcm_loop*/scan'); do echo '- - -' >$f; done")
        if ret != 0:
            # bugfix: use %-formatting -- RuntimeError("...%s", out) stores `out`
            # as a second exception argument instead of formatting the message
            raise RuntimeError("Cannot rescan vdevs: %s" % out)
        self.udev_settle()
        # device names might have changed, need to find our vdevs again
        tcmdevs = glob.glob('/sys/devices/*tcm_loop*/tcm_loop_adapter_*/*/*/*/block/sd*')
        udiskstestcase.test_devs = self.vdevs = ['/dev/%s' % os.path.basename(p) for p in tcmdevs]
        for d in self.vdevs:
            obj = self.get_object('/block_devices/' + os.path.basename(d))
            self.assertHasIface(obj, self.iface_prefix + '.Block')

    def test_01_manager_interface(self):
        '''Test for module D-Bus Manager interface presence'''
        manager = self.get_object('/Manager')
        intro_data = manager.Introspect(self.no_options, dbus_interface='org.freedesktop.DBus.Introspectable')
        self.assertIn('interface name="%s.Manager.LVM2"' % self.iface_prefix, intro_data)

    def test_10_linear(self):
        '''Test linear (plain) LV functionality'''
        vgname = 'udisks_test_vg'

        # Use all the virtual devices but the last one
        devs = dbus.Array()
        for d in self.vdevs[:-1]:
            dev_obj = self.get_object('/block_devices/' + os.path.basename(d))
            self.assertIsNotNone(dev_obj)
            devs.append(dev_obj)
        vg = self._create_vg(vgname, devs)
        self.addCleanup(self._remove_vg, vg)

        dbus_vgname = self.get_property(vg, '.VolumeGroup', 'Name')
        dbus_vgname.assertEqual(vgname)

        # Create linear LV on the VG
        _ret, sys_vgsize = self.run_command('vgs -o size --noheadings --units=b --nosuffix %s' % vgname)
        vgsize = self.get_property(vg, '.VolumeGroup', 'Size')
        vgsize.assertEqual(int(sys_vgsize))
        _ret, sys_vgfree = self.run_command('vgs -o vg_free --noheadings --units=b --nosuffix %s' % vgname)
        vg_freesize = self.get_property(vg, '.VolumeGroup', 'FreeSize')
        vg_freesize.assertEqual(int(sys_vgfree))
        vg_freesize.assertEqual(vgsize.value)

        lvname = 'udisks_test_lv'
        lv_path = vg.CreatePlainVolume(lvname, dbus.UInt64(vgsize.value), self.no_options,
                                       dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.assertIsNotNone(lv_path)

        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, lvname))
        self.assertEqual(ret, 0)

        lv = self.bus.get_object(self.iface_prefix, lv_path)
        lv_block_path = lv.Activate(self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
        self.assertIsNotNone(lv_block_path)

        lvsize = self.get_property(lv, '.LogicalVolume', 'Size')
        lvsize.assertEqual(vgsize.value)

        # check some dbus properties
        dbus_vg = self.get_property(lv, '.LogicalVolume', 'VolumeGroup')
        dbus_vg.assertEqual(str(vg.object_path))

        dbus_name = self.get_property(lv, '.LogicalVolume', 'Name')
        dbus_name.assertEqual(lvname)

        dbus_active = self.get_property(lv, '.LogicalVolume', 'Active')
        dbus_active.assertTrue()

        dbus_type = self.get_property(lv, '.LogicalVolume', 'Type')
        dbus_type.assertEqual('block')  # type is only 'block' or 'pool'

        dbus_layout = self.get_property(lv, '.LogicalVolume', 'Layout')
        dbus_layout.assertEqual('linear')

        def assertSegs(pvs):
            # Check that there is exactly one segment per PV
            struct = self.get_property(lv, '.LogicalVolume', 'Structure').value
            self.assertEqual(struct["type"], "linear")
            self.assertNotIn("data", struct)
            self.assertNotIn("metadata", struct)
            segs = struct["segments"]
            self.assertEqual(len(segs), len(pvs))
            seg_pvs = list(map(lambda s: s[2], segs))
            for p in pvs:
                self.assertIn(p.object_path, seg_pvs)

        assertSegs(devs)

        _ret, sys_uuid = self.run_command('lvs -o uuid --no-heading %s' % os.path.join(vgname, lvname))
        dbus_uuid = self.get_property(lv, '.LogicalVolume', 'UUID')
        dbus_uuid.assertEqual(sys_uuid)

        # check that the 'BlockDevice' property is set after Activate
        lv_prop_block = self.get_property(lv, '.LogicalVolume', 'BlockDevice')
        lv_prop_block.assertEqual(lv_block_path)

        # Shrink the LV
        lv.Resize(dbus.UInt64(lvsize.value/2), self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
        lv_block = self.bus.get_object(self.iface_prefix, lv_block_path)
        self.assertIsNotNone(lv_block)
        new_lvsize = self.get_property(lv, '.LogicalVolume', 'Size')
        new_lvsize.assertLess(lvsize.value)

        # Add one more device to the VG
        new_dev_obj = self.get_object('/block_devices/' + os.path.basename(self.vdevs[-1]))
        self.assertIsNotNone(new_dev_obj)
        vg.AddDevice(new_dev_obj, self.no_options, dbus_interface=self.iface_prefix + '.VolumeGroup')
        new_vgsize = self.get_property(vg, '.VolumeGroup', 'Size')
        new_vgsize.assertGreater(vgsize.value)

        # Attempt to resize the LV to the whole VG, but specify only
        # the original PVS. This is expected to fail.
        msg = "Insufficient free space"
        with six.assertRaisesRegex(self, dbus.exceptions.DBusException, msg):
            lv.Resize(dbus.UInt64(new_vgsize.value),
                      dbus.Dictionary({'pvs': devs}, signature='sv'),
                      dbus_interface=self.iface_prefix + '.LogicalVolume')

        # Now resize the LV to the whole VG without contraints
        lv.Resize(dbus.UInt64(new_vgsize.value), self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
        new_lvsize = self.get_property(lv, '.LogicalVolume', 'Size')
        new_lvsize.assertEqual(new_vgsize.value)
        assertSegs(devs + [ new_dev_obj ])

        # rename the LV
        lvname = 'udisks_test_lv2'
        new_lvpath = lv.Rename(lvname, self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')

        # get the new (renamed) lv object
        lv = self.bus.get_object(self.iface_prefix, new_lvpath)
        self.assertIsNotNone(lv)

        dbus_name = self.get_property(lv, '.LogicalVolume', 'Name')
        dbus_name.assertEqual(lvname)

        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, lvname))
        self.assertEqual(ret, 0)

        # deactivate/activate check
        dbus_prop_active = self.get_property(lv, '.LogicalVolume', 'Active')
        dbus_prop_active.assertTrue()

        ret, _out = self.run_command('lvchange %s --activate n' % os.path.join(vgname, lvname))
        self.assertEqual(ret, 0)
        time.sleep(3)
        dbus_prop_active = self.get_property(lv, '.LogicalVolume', 'Active')
        dbus_prop_active.assertFalse()

        ret, _out = self.run_command('lvchange %s --activate y' % os.path.join(vgname, lvname))
        self.assertEqual(ret, 0)
        time.sleep(3)
        dbus_prop_active = self.get_property(lv, '.LogicalVolume', 'Active')
        dbus_prop_active.assertTrue()

        lv.Deactivate(self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
        dbus_prop_active = self.get_property(lv, '.LogicalVolume', 'Active')
        dbus_prop_active.assertFalse()

        # lvremove
        lv.Delete(self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, lvname))
        self.assertNotEqual(ret, 0)

        # make sure the lv is not on dbus
        udisks = self.get_object('')
        objects = udisks.GetManagedObjects(dbus_interface='org.freedesktop.DBus.ObjectManager')
        self.assertNotIn(new_lvpath, objects.keys())

    @udiskstestcase.tag_test(udiskstestcase.TestTags.UNSTABLE)
    def test_15_raid(self):
        '''Test raid volumes functionality'''
        vgname = 'udisks_test_vg'

        # Use all the virtual devices
        devs = dbus.Array()
        for d in self.vdevs:
            dev_obj = self.get_object('/block_devices/' + os.path.basename(d))
            self.assertIsNotNone(dev_obj)
            devs.append(dev_obj)
            self.addCleanup(self.wipe_fs, d)
        vg = self._create_vg(vgname, devs)
        self.addCleanup(self._remove_vg, vg)

        dbus_vgname = self.get_property(vg, '.VolumeGroup', 'Name')
        dbus_vgname.assertEqual(vgname)
        first_vdev_uuid = self.get_property(devs[0], '.Block', 'IdUUID').value

        # Create raid1 LV on the VG
        lvname = 'udisks_test_lv'
        vg_freesize = self.get_property(vg, '.VolumeGroup', 'FreeSize')
        vdev_size = vg_freesize.value / len(devs)
        lv_size = int(vdev_size * 0.75)
        lv_path = vg.CreatePlainVolumeWithLayout(lvname, dbus.UInt64(lv_size),
                                                 "raid1", devs[0:3],
                                                 self.no_options,
                                                 dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.assertIsNotNone(lv_path)
        lv = self.bus.get_object(self.iface_prefix, lv_path)
        # wait until the mirror legs are fully synchronized
        self.get_property(lv, '.LogicalVolume', 'SyncRatio').assertEqual(1.0, timeout=60, poll_vg=vg)

        _ret, sys_type = self.run_command('lvs -o seg_type --noheadings --nosuffix %s/%s' % (vgname, lvname))
        self.assertEqual(sys_type, "raid1")
        self.get_property(lv, '.LogicalVolume', 'Layout').assertEqual("raid1")

        def assertSegs(struct, size, pv):
            # One stripe: a linear sub-LV with (at most) one segment on `pv`
            self.assertEqual(struct["type"], "linear")
            self.assertNotIn("data", struct)
            self.assertNotIn("metadata", struct)
            if pv is not None:
                self.assertEqual(len(struct["segments"]), 1)
                if size is not None:
                    self.assertEqual(struct["segments"][0][1], size)
                self.assertEqual(struct["segments"][0][2], pv.object_path)
            else:
                # missing PV -> no segments reported
                self.assertEqual(len(struct["segments"]), 0)

        def assertRaid1Stripes(structs, size, pv1, pv2, pv3):
            self.assertEqual(len(structs), 3)
            assertSegs(structs[0], size, pv1)
            assertSegs(structs[1], size, pv2)
            assertSegs(structs[2], size, pv3)

        def assertRaid1Structure(pv1, pv2, pv3):
            struct = self.get_property(lv, '.LogicalVolume', 'Structure').value
            self.assertEqual(struct["type"], "raid1")
            self.assertEqual(struct["size"], lv_size)
            self.assertNotIn("segments", struct)
            assertRaid1Stripes(struct["data"], lv_size, pv1, pv2, pv3)
            assertRaid1Stripes(struct["metadata"], None, pv1, pv2, pv3)

        def waitRaid1Structure(pv1, pv2, pv3):
            # the structure may need a few seconds to settle; retry before failing
            for _ in range(5):
                try:
                    assertRaid1Structure(pv1, pv2, pv3)
                    return
                except AssertionError:
                    pass
                time.sleep(1)
            # Once again for the error message
            assertRaid1Structure(pv1, pv2, pv3)

        waitRaid1Structure(devs[0], devs[1], devs[2])

        # Yank out the first vdev and repair the LV with the fourth
        _ret, _output = self.run_command('echo yes >/sys/block/%s/device/delete' % os.path.basename(self.vdevs[0]))
        self.addCleanup(self._rescan_lio_devices)

        # give udisks some time to register the change
        self.run_command('udevadm trigger %s' % self.vdevs[0])
        self.udev_settle()

        _ret, sys_health = self.run_command('lvs -o health_status --noheadings --nosuffix %s/%s' % (vgname, lvname))
        self.assertEqual(sys_health, "partial")
        waitRaid1Structure(None, devs[1], devs[2])
        self.get_property(vg, '.VolumeGroup', 'MissingPhysicalVolumes').assertEqual([first_vdev_uuid])

        lv.Repair(devs[3:4], self.no_options,
                  dbus_interface=self.iface_prefix + '.LogicalVolume')

        _ret, sys_health = self.run_command('lvs -o health_status --noheadings --nosuffix %s/%s' % (vgname, lvname))
        self.assertEqual(sys_health, "")
        self.get_property(lv, '.LogicalVolume', 'SyncRatio').assertEqual(1.0, timeout=60, poll_vg=vg)
        waitRaid1Structure(devs[3], devs[1], devs[2])

        # Tell the VG that everything is alright
        vg.RemoveMissingPhysicalVolumes(self.no_options,
                                        dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.get_property(vg, '.VolumeGroup', 'MissingPhysicalVolumes').assertEqual([])

    def test_20_thin(self):
        '''Test thin volumes functionality'''
        vgname = 'udisks_test_thin_vg'

        # Use all the virtual devices
        devs = dbus.Array()
        for d in self.vdevs:
            dev_obj = self.get_object('/block_devices/' + os.path.basename(d))
            self.assertIsNotNone(dev_obj)
            devs.append(dev_obj)
        vg = self._create_vg(vgname, devs)
        self.addCleanup(self._remove_vg, vg)

        # Create thin pool on the VG
        vgsize = int(self.get_property_raw(vg, '.VolumeGroup', 'FreeSize'))
        tpname = 'udisks_test_tp'
        tp_path = vg.CreateThinPoolVolume(tpname, dbus.UInt64(vgsize), self.no_options,
                                          dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.assertIsNotNone(tp_path)

        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, tpname))
        self.assertEqual(ret, 0)

        tp = self.bus.get_object(self.iface_prefix, tp_path)
        tpsize = self.get_property(tp, '.LogicalVolume', 'Size')

        # check that we report same size as lvs (udisks includes metadata so we need to add it too)
        _ret, dsize = self.run_command('lvs -olv_size --noheadings --units=b --nosuffix %s' % os.path.join(vgname, tpname))
        _ret, msize = self.run_command('lvs -olv_metadata_size --noheadings --units=b --nosuffix %s' % os.path.join(vgname, tpname))
        tpsize.assertEqual(int(dsize.strip()) + int(msize.strip()))

        dbus_type = self.get_property(tp, '.LogicalVolume', 'Type')
        dbus_type.assertEqual("pool")

        # Create thin volume in the pool with virtual size twice the backing pool
        tvname = 'udisks_test_tv'
        tv_path = vg.CreateThinVolume(tvname, dbus.UInt64(int(tpsize.value) * 2), tp, self.no_options,
                                      dbus_interface=self.iface_prefix + '.VolumeGroup')
        tv = self.bus.get_object(self.iface_prefix, tv_path)
        self.assertIsNotNone(tv)

        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, tvname))
        self.assertEqual(ret, 0)

        # Check the block device of the thin volume
        lv_block_path = tv.Activate(self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
        lv_block = self.bus.get_object(self.iface_prefix, lv_block_path)
        self.assertIsNotNone(lv_block)

        # over-provisioned: the block device is larger than the whole VG
        blocksize = self.get_property(lv_block, '.Block', 'Size')
        blocksize.assertGreater(vgsize)

        tv_tp = self.get_property(tv, '.LogicalVolume', 'ThinPool')
        tv_tp.assertEqual(tp_path)

    def test_30_snapshot(self):
        '''Test LVM snapshoting'''
        vgname = 'udisks_test_snap_vg'

        # Use all the virtual devices
        devs = dbus.Array()
        for d in self.vdevs:
            dev_obj = self.get_object('/block_devices/' + os.path.basename(d))
            self.assertIsNotNone(dev_obj)
            devs.append(dev_obj)
        vg = self._create_vg(vgname, devs)
        self.addCleanup(self._remove_vg, vg)

        # Create the origin LV
        vgsize = int(self.get_property_raw(vg, '.VolumeGroup', 'FreeSize'))
        lvname = 'udisks_test_origin_lv'
        lv_path = vg.CreatePlainVolume(lvname, dbus.UInt64(vgsize / 2), self.no_options,
                                       dbus_interface=self.iface_prefix + '.VolumeGroup')
        lv = self.bus.get_object(self.iface_prefix, lv_path)
        self.assertIsNotNone(lv)

        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, lvname))
        self.assertEqual(ret, 0)

        # Create the LV's snapshot
        snapname = 'udisks_test_snap_lv'
        vg_freesize = int(self.get_property_raw(vg, '.VolumeGroup', 'FreeSize'))
        snap_path = lv.CreateSnapshot(snapname, vg_freesize, self.no_options,
                                      dbus_interface=self.iface_prefix + '.LogicalVolume')
        snap = self.bus.get_object(self.iface_prefix, snap_path)
        self.assertIsNotNone(snap)

        # check dbus properties
        dbus_origin = self.get_property(snap, '.LogicalVolume', 'Origin')
        dbus_origin.assertEqual(lv_path)

        dbus_name = self.get_property(snap, '.LogicalVolume', 'Name')
        dbus_name.assertEqual(snapname)

        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, snapname))
        self.assertEqual(ret, 0)

    def test_40_cache(self):
        '''Basic LVM cache test'''
        vgname = 'udisks_test_cache_vg'

        # Use all the virtual devices
        devs = dbus.Array()
        for d in self.vdevs:
            dev_obj = self.get_object('/block_devices/' + os.path.basename(d))
            self.assertIsNotNone(dev_obj)
            devs.append(dev_obj)
        vg = self._create_vg(vgname, devs)
        self.addCleanup(self._remove_vg, vg)

        # Create the origin LV
        vgsize = int(self.get_property_raw(vg, '.VolumeGroup', 'FreeSize'))
        orig_lvname = 'udisks_test_origin_lv'
        lv_path = vg.CreatePlainVolume(orig_lvname, dbus.UInt64(vgsize / 2), self.no_options,
                                       dbus_interface=self.iface_prefix + '.VolumeGroup')
        lv = self.bus.get_object(self.iface_prefix, lv_path)
        self.assertIsNotNone(lv)

        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, orig_lvname))
        self.assertEqual(ret, 0)

        # Create the caching LV
        cache_lvname = 'udisks_test_cache_lv'
        vgsize = int(self.get_property_raw(vg, '.VolumeGroup', 'FreeSize'))
        # 8 MiB reserved for the cache metadata created automatically by LVM
        lv_cache_path = vg.CreatePlainVolume(cache_lvname, dbus.UInt64((vgsize / 2) - 8 * 1024**2), self.no_options,
                                             dbus_interface=self.iface_prefix + '.VolumeGroup')
        cache_lv = self.bus.get_object(self.iface_prefix, lv_cache_path)
        self.assertIsNotNone(cache_lv)

        # Add the cache to the origin
        lv.CacheAttach('udisks_test_cache_lv', self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')

        _ret, out = self.run_command('lvs %s/%s --noheadings -o segtype' % (vgname, orig_lvname))
        self.assertEqual(out, 'cache')

        # Split the cache
        lv.CacheSplit(self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')

        _ret, out = self.run_command('lvs %s/%s --noheadings -o lv_layout' % (vgname, orig_lvname))
        self.assertEqual(out, 'linear')

        _ret, out = self.run_command('lvs %s/%s --noheadings -o lv_layout' % (vgname, cache_lvname))
        self.assertEqual(out, 'cache,pool')

    def test_50_rename_vg(self):
        ''' Test VG renaming '''
        vgname = 'udisks_test_rename_vg'

        # Use all the virtual devices
        devs = dbus.Array()
        for d in self.vdevs:
            dev_obj = self.get_object('/block_devices/' + os.path.basename(d))
            self.assertIsNotNone(dev_obj)
            devs.append(dev_obj)
        vg = self._create_vg(vgname, devs)

        vgname = 'udisks_test_rename_vg2'
        new_vgpath = vg.Rename(vgname, self.no_options, dbus_interface=self.iface_prefix + '.VolumeGroup')

        # get the new (renamed) lv object
        vg = self.bus.get_object(self.iface_prefix, new_vgpath)
        self.assertIsNotNone(vg)
        self.addCleanup(self._remove_vg, vg)

        dbus_name = self.get_property(vg, '.VolumeGroup', 'Name')
        dbus_name.assertEqual(vgname)

        ret, _out = self.run_command('vgs %s' % vgname)
        self.assertEqual(ret, 0)

    def test_60_pvs(self):
        ''' Test adding and removing PVs from VG '''
        vgname = 'udisks_test_pv_vg'

        # create vg with one pv
        old_pv = self.get_object('/block_devices/' + os.path.basename(self.vdevs[0]))
        self.assertIsNotNone(old_pv)
        vg = self._create_vg(vgname, dbus.Array([old_pv]))
        self.addCleanup(self._remove_vg, vg)

        # create an lv on it
        lvname = 'udisks_test_lv'
        lv_path = vg.CreatePlainVolume(lvname, dbus.UInt64(4 * 1024**2), self.no_options,
                                       dbus_interface=self.iface_prefix + '.VolumeGroup')
        lv = self.bus.get_object(self.iface_prefix, lv_path)
        self.assertIsNotNone(lv)

        # add a new pv to the vg
        new_pv = self.get_object('/block_devices/' + os.path.basename(self.vdevs[1]))
        vg.AddDevice(new_pv, self.no_options, dbus_interface=self.iface_prefix + '.VolumeGroup')
        _ret, out = self.run_command('pvs --noheadings -o vg_name %s' % self.vdevs[1])
        self.assertEqual(out, vgname)

        # empty the old pv (moving extents off the PV can be slow, hence the long timeout)
        vg.EmptyDevice(old_pv, self.no_options, dbus_interface=self.iface_prefix + '.VolumeGroup', timeout=120 * 100)
        _ret, pv_size = self.run_command('pvs --noheadings --units=B --nosuffix -o pv_size %s' % self.vdevs[0])
        _ret, pv_free = self.run_command('pvs --noheadings --units=B --nosuffix -o pv_free %s' % self.vdevs[0])
        self.assertEqual(pv_size, pv_free)

        # remove the old pv from the vg
        vg.RemoveDevice(old_pv, False, self.no_options, dbus_interface=self.iface_prefix + '.VolumeGroup')
        _ret, out = self.run_command('pvs --noheadings -o vg_name %s' % self.vdevs[0])
        self.assertEqual(out, '')
class UdisksLVMVDOTest(UDisksLVMTestBase):
    '''This is a basic LVM VDO test suite'''

    # backing sparse file for the loop device the VDO volumes are created on
    LOOP_DEVICE_PATH = '/var/tmp/udisks_test_disk_lvmvdo'

    @classmethod
    def setUpClass(cls):
        # skip the whole suite unless the VDO kernel module and a
        # new-enough LVM are both available
        UDisksLVMTestBase.setUpClass()
        if not cls.module_available("kvdo"):
            udiskstestcase.UdisksTestCase.tearDownClass()
            raise unittest.SkipTest('VDO kernel module not available, skipping.')
        lvm_version = cls._get_lvm_version()
        if lvm_version < Version('2.3.07'):
            udiskstestcase.UdisksTestCase.tearDownClass()
            raise unittest.SkipTest('LVM >= 2.3.07 is needed for LVM VDO, skipping.')

    def setUp(self):
        # create backing sparse file
        # VDO needs at least 5G of space and we need some room for the grow test
        # ...rumors go that vdo internally operates on 2G extents...
        self.run_command('truncate -s 8G %s' % self.LOOP_DEVICE_PATH)
        ret_code, self.dev_name = self.run_command('losetup --find --show %s' % self.LOOP_DEVICE_PATH)
        self.assertEqual(ret_code, 0)
        time.sleep(0.5)  # give udisks a moment to pick up the new loop device
        self.device = self.get_device(self.dev_name)
        self.assertIsNotNone(self.device)
        super(UdisksLVMVDOTest, self).setUp()

    def tearDown(self):
        # need to process scheduled cleanup before the backing device is torn down
        self.doCleanups()
        # tear down loop device
        self.run_command('losetup --detach %s' % self.dev_name)
        os.remove(self.LOOP_DEVICE_PATH)
        super(UdisksLVMVDOTest, self).tearDown()

    def test_create(self):
        '''Create a VDO volume and check its D-Bus properties and statistics.'''
        vgname = 'udisks_test_vdo_vg'

        # create vg on our testing device
        vg = self._create_vg(vgname, [self.device])
        self.addCleanup(self._remove_vg, vg)

        vg_free = self.get_property(vg, '.VolumeGroup', 'FreeSize')
        vg_free.assertGreater(0)

        lv_name = 'udisks_test_vdovlv'
        pool_name = 'udisks_test_vdopool'
        psize = vg_free.value
        vsize = psize * 5
        lv_path = vg.CreateVDOVolume(lv_name, pool_name, dbus.UInt64(psize), dbus.UInt64(vsize),
                                     dbus.UInt64(0), True, True, "auto", self.no_options,
                                     dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.assertIsNotNone(lv_path)

        ret, _out = self.run_command('lvs %s' % os.path.join(vgname, lv_name))
        self.assertEqual(ret, 0)

        lv = self.bus.get_object(self.iface_prefix, lv_path)
        self.assertIsNotNone(lv)
        self.assertHasIface(lv, self.iface_prefix + '.VDOVolume')

        dbus_name = self.get_property(lv, '.LogicalVolume', 'Name')
        dbus_name.assertEqual(lv_name)

        # lv size -> original 'virtual' size
        dbus_size = self.get_property(lv, '.LogicalVolume', 'Size')
        dbus_size.assertEqual(vsize)

        # VDO pool properties
        pool_path = self.get_property(lv, '.VDOVolume', 'VDOPool')
        pool_path.assertNotEqual('/')
        pool = self.bus.get_object(self.iface_prefix, pool_path.value)
        self.assertIsNotNone(pool)
        dbus_name = self.get_property(pool, '.LogicalVolume', 'Name')
        dbus_name.assertEqual(pool_name)

        # pool size -> original 'physical' size
        dbus_size = self.get_property(pool, '.LogicalVolume', 'Size')
        dbus_size.assertEqual(psize)

        dbus_type = self.get_property(lv, '.LogicalVolume', 'Type')
        dbus_type.assertNotEqual('vdopool')

        # VDO properties
        dbus_comp = self.get_property(lv, '.VDOVolume', 'Compression')
        dbus_comp.assertTrue()
        dbus_dedup = self.get_property(lv, '.VDOVolume', 'Deduplication')
        dbus_dedup.assertTrue()

        # ThinPool property should not be set
        dbus_tp = self.get_property(lv, '.LogicalVolume', 'ThinPool')
        dbus_tp.assertEqual('/')

        # get statistics and do some simple sanity check
        stats = lv.GetStatistics(self.no_options, dbus_interface=self.iface_prefix + '.VDOVolume')
        self.assertIn("writeAmplificationRatio", stats.keys())

    def test_enable_disable_compression_deduplication(self):
        '''Toggle the VDO Compression and Deduplication flags and verify each step.'''
        vgname = 'udisks_test_vdo_vg'

        # create vg on our testing device
        vg = self._create_vg(vgname, [self.device])
        self.addCleanup(self._remove_vg, vg)

        vg_free = self.get_property(vg, '.VolumeGroup', 'FreeSize')
        vg_free.assertGreater(0)

        lv_name = 'udisks_test_vdovlv'
        pool_name = 'udisks_test_vdopool'
        psize = vg_free.value
        vsize = psize * 5
        lv_path = vg.CreateVDOVolume(lv_name, pool_name, dbus.UInt64(psize), dbus.UInt64(vsize),
                                     dbus.UInt64(0), True, True, "auto", self.no_options,
                                     dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.assertIsNotNone(lv_path)

        lv = self.bus.get_object(self.iface_prefix, lv_path)
        self.assertIsNotNone(lv)

        # initial state: both compression and deduplication should be enabled
        dbus_comp = self.get_property(lv, '.VDOVolume', 'Compression')
        dbus_comp.assertTrue()
        dbus_dedup = self.get_property(lv, '.VDOVolume', 'Deduplication')
        dbus_dedup.assertTrue()

        # disable deduplication
        lv.EnableDeduplication(False, self.no_options,
                               dbus_interface=self.iface_prefix + '.VDOVolume')
        dbus_dedup = self.get_property(lv, '.VDOVolume', 'Deduplication')
        dbus_dedup.assertFalse()

        # disable compression
        lv.EnableCompression(False, self.no_options,
                             dbus_interface=self.iface_prefix + '.VDOVolume')
        dbus_comp = self.get_property(lv, '.VDOVolume', 'Compression')
        dbus_comp.assertFalse()

        # enable both again
        lv.EnableDeduplication(True, self.no_options,
                               dbus_interface=self.iface_prefix + '.VDOVolume')
        dbus_dedup = self.get_property(lv, '.VDOVolume', 'Deduplication')
        dbus_dedup.assertTrue()

        # enable compression (the original comment said "disable" -- the call enables it)
        lv.EnableCompression(True, self.no_options,
                             dbus_interface=self.iface_prefix + '.VDOVolume')
        dbus_comp = self.get_property(lv, '.VDOVolume', 'Compression')
        dbus_comp.assertTrue()

    def test_resize_logical(self):
        '''Grow the virtual (logical) size of a VDO volume.'''
        vgname = 'udisks_test_vdo_vg'

        # create vg on our testing device
        vg = self._create_vg(vgname, [self.device])
        self.addCleanup(self._remove_vg, vg)

        vg_free = self.get_property(vg, '.VolumeGroup', 'FreeSize')
        vg_free.assertGreater(0)

        lv_name = 'udisks_test_vdovlv'
        pool_name = 'udisks_test_vdopool'
        psize = vg_free.value
        vsize = psize * 2
        lv_path = vg.CreateVDOVolume(lv_name, pool_name, dbus.UInt64(psize), dbus.UInt64(vsize),
                                     dbus.UInt64(0), True, True, "auto", self.no_options,
                                     dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.assertIsNotNone(lv_path)

        lv = self.bus.get_object(self.iface_prefix, lv_path)
        self.assertIsNotNone(lv)

        lv.ResizeLogical(vsize * 5, self.no_options,
                         dbus_interface=self.iface_prefix + '.VDOVolume')
        # the LV Size property reflects the new virtual size
        dbus_size = self.get_property(lv, '.LogicalVolume', 'Size')
        dbus_size.assertEqual(vsize * 5)

    @udiskstestcase.tag_test(udiskstestcase.TestTags.UNSTABLE)
    def test_resize_physical(self):
        '''Grow the physical size (the backing VDO pool) of a VDO volume.'''
        vgname = 'udisks_test_vdo_vg'

        # create vg on our testing device
        vg = self._create_vg(vgname, [self.device])
        self.addCleanup(self._remove_vg, vg)

        # keep 2 GiB of the VG free so the pool can grow into it
        vg_free = self.get_property(vg, '.VolumeGroup', 'FreeSize')
        vg_free.assertGreater(2 * 1024**3)

        lv_name = 'udisks_test_vdovlv'
        pool_name = 'udisks_test_vdopool'
        psize = vg_free.value - 2 * 1024**3
        vsize = psize * 5
        lv_path = vg.CreateVDOVolume(lv_name, pool_name, dbus.UInt64(psize), dbus.UInt64(vsize),
                                     dbus.UInt64(0), True, True, "auto", self.no_options,
                                     dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.assertIsNotNone(lv_path)

        lv = self.bus.get_object(self.iface_prefix, lv_path)
        self.assertIsNotNone(lv)

        lv.ResizePhysical(vg_free.value, self.no_options,
                          dbus_interface=self.iface_prefix + '.VDOVolume')

        # the new physical size is reported on the VDO pool LV
        pool_path = self.get_property(lv, '.VDOVolume', 'VDOPool')
        pool_path.assertNotEqual('/')
        pool = self.bus.get_object(self.iface_prefix, pool_path.value)
        self.assertIsNotNone(pool)
        dbus_size = self.get_property(pool, '.LogicalVolume', 'Size')
        dbus_size.assertEqual(vg_free.value)
class UdisksLVMTeardownTest(UDisksLVMTestBase):
    '''Stacked LVM + LUKS automatic teardown tests'''

    # passphrase used for every LUKS container created by these tests
    PASSPHRASE = 'einszweidrei'

    def setUp(self):
        super(UdisksLVMTeardownTest, self).setUp()

    def tearDown(self):
        # need to process scheduled cleanup before the backing device is torn down
        self.doCleanups()
        super(UdisksLVMTeardownTest, self).tearDown()
    def _remove_luks(self, device, name, close=True):
        '''Lock the LUKS device (when `close` is set) and erase its signature.

        Used as cleanup, so D-Bus errors meaning "already locked/removed"
        are deliberately swallowed.
        '''
        if close:
            try:
                self.remove_file('/etc/luks-keys/%s' % name, ignore_nonexistent=True)
                device.Lock(self.no_options, dbus_interface=self.iface_prefix + '.Encrypted')
            except dbus.exceptions.DBusException as e:
                # ignore when luks is actually already locked
                if not str(e).endswith('is not unlocked') and not 'No such interface' in str(e) and \
                   not 'Object does not exist at path' in str(e):
                    raise e
        try:
            # wipe the LUKS signature ('erase' formats over the existing data)
            d = dbus.Dictionary(signature='sv')
            d['erase'] = True
            device.Format('empty', d, dbus_interface=self.iface_prefix + '.Block')
        except dbus.exceptions.DBusException as e:
            # the device may already be gone if the stack was torn down
            if not 'No such interface' in str(e) and not 'Object does not exist at path' in str(e):
                raise e
    def _init_stack(self, name):
        '''Build the PV -> VG -> LV -> LUKS -> ext4 stack used by the teardown tests.

        Stores the created proxies/paths on `self` (pv, vg, lv, lv_block,
        luks_block_path, luks_uuid, fs_uuid) and verifies that the requested
        crypttab/fstab configuration items were recorded.
        '''
        vgname = name + '_vg'
        lvname = name + '_lv'

        # backup and restore /etc/crypttab and /etc/fstab
        crypttab = self.read_file('/etc/crypttab')
        self.addCleanup(self.write_file, '/etc/crypttab', crypttab)
        fstab = self.read_file('/etc/fstab')
        self.addCleanup(self.write_file, '/etc/fstab', fstab)

        # create VG with one PV
        self.pv = self.get_object('/block_devices/' + os.path.basename(self.vdevs[0]))
        self.assertIsNotNone(self.pv)
        self.vg = self._create_vg(vgname, dbus.Array([self.pv]))
        self.vg_path = self.vg.object_path
        self.addCleanup(self._remove_vg, self.vg, tear_down=True, ignore_removed=True)

        # create an LV on it
        self.lv_path = self.vg.CreatePlainVolume(lvname, dbus.UInt64(200 * 1024**2), self.no_options,
                                                 dbus_interface=self.iface_prefix + '.VolumeGroup')
        self.lv = self.bus.get_object(self.iface_prefix, self.lv_path)
        self.assertIsNotNone(self.lv)
        self.lv_block_path = self.lv.Activate(self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
        self.assertIsNotNone(self.lv_block_path)
        self.lv_block = self.get_object(self.lv_block_path)
        self.assertIsNotNone(self.lv_block)

        # create LUKS on the LV, with crypttab/fstab config items that
        # track their parents so the teardown can remove them
        options = dbus.Dictionary(signature='sv')
        options['encrypt.type'] = 'luks2'
        options['encrypt.passphrase'] = self.PASSPHRASE
        options['label'] = 'COCKPITFS'
        options['tear-down'] = dbus.Boolean(True)
        crypttab_items = dbus.Dictionary({'name': self.str_to_ay(vgname),
                                          'options': self.str_to_ay('verify,discard'),
                                          'passphrase-contents': self.str_to_ay(self.PASSPHRASE),
                                          'track-parents': True},
                                         signature=dbus.Signature('sv'))
        fstab_items = dbus.Dictionary({'dir': self.str_to_ay(vgname),
                                       'type': self.str_to_ay('ext4'),
                                       'opts': self.str_to_ay('defaults'),
                                       'freq': 0, 'passno': 0,
                                       'track-parents': True},
                                      signature=dbus.Signature('sv'))
        options['config-items'] = dbus.Array([('crypttab', crypttab_items),
                                              ('fstab', fstab_items)])
        self.lv_block.Format('ext4', options, dbus_interface=self.iface_prefix + '.Block')
        self.addCleanup(self._remove_luks, self.lv_block, vgname)
        self.luks_uuid = self.get_property_raw(self.lv_block, '.Block', 'IdUUID')

        self.luks_block_path = self.get_property_raw(self.lv_block, '.Encrypted', 'CleartextDevice')
        luks_block = self.get_object(self.luks_block_path)
        self.assertIsNotNone(luks_block)
        self.fs_uuid = self.get_property_raw(luks_block, '.Block', 'IdUUID')

        # check for present crypttab configuration item
        conf = self.get_property(self.lv_block, '.Block', 'Configuration')
        conf.assertTrue()
        self.assertEqual(conf.value[0][0], 'crypttab')

        # check for present fstab configuration item on a cleartext block device
        conf = self.get_property(luks_block, '.Block', 'Configuration')
        conf.assertTrue()
        self.assertEqual(conf.value[0][0], 'fstab')

        child_conf = self.get_property(self.lv_block, '.Encrypted', 'ChildConfiguration')
        child_conf.assertTrue()
        self.assertEqual(child_conf.value[0][0], 'fstab')
        self.assertEqual(child_conf.value, conf.value)

        # check that fstab and crypttab records have been added
        crypttab = self.read_file('/etc/crypttab')
        self.assertIn(vgname, crypttab)
        self.assertIn(self.luks_uuid, crypttab)
        fstab = self.read_file('/etc/fstab')
        self.assertIn(vgname, fstab)
        self.assertIn(self.fs_uuid, fstab)
    def _check_torn_down_stack(self, name):
        '''Assert every object of the stack is gone and its config records removed.'''
        # check that all created objects don't exist anymore
        msg = r'Object does not exist at path|No such interface'
        with six.assertRaisesRegex(self, dbus.exceptions.DBusException, msg):
            luks_block = self.get_object(self.luks_block_path)
            self.get_property_raw(luks_block, '.Block', 'DeviceNumber')
        with six.assertRaisesRegex(self, dbus.exceptions.DBusException, msg):
            lv_block = self.get_object(self.lv_block_path)
            self.get_property_raw(lv_block, '.Block', 'DeviceNumber')
        with six.assertRaisesRegex(self, dbus.exceptions.DBusException, msg):
            # the lvm2 udisks module is not fully synchronous, see https://github.com/storaged-project/udisks/pull/814
            time.sleep(2)
            lv = self.get_object(self.lv_path)
            self.get_property_raw(lv, '.LogicalVolume', 'Name')
        with six.assertRaisesRegex(self, dbus.exceptions.DBusException, msg):
            vg = self.get_object(self.vg_path)
            self.get_property_raw(vg, '.VolumeGroup', 'Name')

        # check that fstab and crypttab records have been removed
        crypttab = self.read_file('/etc/crypttab')
        self.assertNotIn(name, crypttab)
        self.assertNotIn(self.luks_uuid, crypttab)
        fstab = self.read_file('/etc/fstab')
        self.assertNotIn(name, fstab)
        self.assertNotIn(self.fs_uuid, fstab)
@udiskstestcase.tag_test(udiskstestcase.TestTags.UNSAFE)
def test_teardown_active_vg_unlocked(self):
    ''' Test tear-down by removing the base VG (not deactivated, unlocked) '''
    name = 'udisks_test_teardown_active_vg_unlocked'
    self._init_stack(name)
    # Removing the VG with tear-down should cascade through LV, LUKS and fs.
    self._remove_vg(self.vg, tear_down=True, ignore_removed=False)
    self._check_torn_down_stack(name)
@udiskstestcase.tag_test(udiskstestcase.TestTags.UNSAFE)
def test_teardown_active_vg_locked(self):
    ''' Test tear-down by removing the base VG (not deactivated, locked) '''
    name = 'udisks_test_teardown_active_vg_locked'
    self._init_stack(name)
    # Lock the LUKS layer first; tear-down must cope with a locked device.
    self.lv_block.Lock(self.no_options, dbus_interface=self.iface_prefix + '.Encrypted')
    self._remove_vg(self.vg, tear_down=True, ignore_removed=False)
    self._check_torn_down_stack(name)
@udiskstestcase.tag_test(udiskstestcase.TestTags.UNSAFE)
def test_teardown_inactive_vg_locked(self):
    ''' Test tear-down by removing the base VG (deactivated, locked) '''
    name = 'udisks_test_teardown_inactive_locked'
    self._init_stack(name)
    # Lock the LUKS layer and deactivate the LV: the hardest teardown case.
    self.lv_block.Lock(self.no_options, dbus_interface=self.iface_prefix + '.Encrypted')
    self.lv.Deactivate(self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
    self._remove_vg(self.vg, tear_down=True, ignore_removed=False)
    self._check_torn_down_stack(name)
@udiskstestcase.tag_test(udiskstestcase.TestTags.UNSAFE)
def test_reformat_inactive_vg_locked(self):
    ''' Test tear-down by re-formatting the base PV (VG deactivated, locked) '''
    name = 'test_reformat_inactive_vg_locked'
    self._init_stack(name)
    self.lv_block.Lock(self.no_options, dbus_interface=self.iface_prefix + '.Encrypted')
    self.lv.Deactivate(self.no_options, dbus_interface=self.iface_prefix + '.LogicalVolume')
    # now reformat the PV with tear-down flag
    options = dbus.Dictionary(signature='sv')
    options['label'] = 'AFTER_TEARDOWN'
    options['tear-down'] = dbus.Boolean(True)
    self.pv.Format('ext4', options, dbus_interface=self.iface_prefix + '.Block')
    self.addCleanup(self.wipe_fs, self.vdevs[0])
    # TODO: implement proper teardown across combined LVM + LUKS stack
    # https://github.com/storaged-project/udisks/issues/781
    # check that fstab and crypttab records have been removed
    # TODO: these checks are the opposite - record shouldn't be present, once this is fixed
    # self._check_torn_down_stack(name)
    # Until the upstream issue is fixed, the stale records are expected
    # to survive the reformat, so assertIn (not assertNotIn) is used here.
    crypttab = self.read_file('/etc/crypttab')
    self.assertIn(name, crypttab)
    self.assertIn(self.luks_uuid, crypttab)
    fstab = self.read_file('/etc/fstab')
    self.assertIn(name, fstab)
    self.assertIn(self.fs_uuid, fstab)
| storaged-project/udisks | src/tests/dbus-tests/test_20_LVM.py | test_20_LVM.py | py | 42,717 | python | en | code | 302 | github-code | 36 | [
{
"api_name": "udiskstestcase.UdisksTestCase",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "udiskstestcase.UdisksTestCase.setUpClass",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "udiskstestcase.UdisksTestCase",
"line_number": 19,
"usage_typ... |
5559929525 | import requests
import json
import time

from pygame import mixer

# Cricbuzz live-commentary endpoint for the match being followed.
url = 'https://www.cricbuzz.com/match-api/21859/commentary.json'


def _play(track):
    """Play a local mp3 once through pygame's mixer."""
    mixer.init()
    mixer.music.load(track)
    mixer.music.play()


# Poll the scorecard every 5 minutes; celebrate the first six, then stop.
while True:
    try:
        current_scorecard = requests.get(url).json()['score']['prev_overs']
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt)
        # stops the script instead of playing the error sound.
        _play('error.mp3')
        break
    # NOTE(review): [-10:-1] drops the most recent character of the recent
    # overs string from the window -- confirm whether that is intended.
    if '6' in current_scorecard[-10:-1]:
        _play('six.mp3')
        break
    time.sleep(5 * 60)
| smit2k14/SWIGGY6 | SWIGGY6.py | SWIGGY6.py | py | 561 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_numb... |
42926700856 | from django.shortcuts import render
from patient.models import Patient
from .models import Need, Appointment
from .forms import AddNeedForm
def add_need(request, patient_number):
    """Display and process the "add need" form for one patient.

    Renders ``need/add_need.html``; on a valid POST a ``Need`` is created
    for the patient and ``success`` is exposed to the template via
    ``locals()``.
    """
    form = AddNeedForm(request.POST or None)
    try:
        current_patient = Patient.objects.get(id=patient_number)
    except Patient.DoesNotExist:
        # Mirror appointment_detail(): expose a flag to the template
        # instead of letting the unhandled lookup raise a 500.
        exception_raised = True
        return render(request, 'need/add_need.html', locals())
    if form.is_valid():
        need = Need(need_string=form.cleaned_data['need_string'],
                    date=form.cleaned_data['date'],
                    start=form.cleaned_data['start'],
                    end=form.cleaned_data['end'],
                    duration_heal=form.cleaned_data['duration_heal'],
                    patient=current_patient)
        need.save()
        success = True
    return render(request, 'need/add_need.html', locals())
def appointment_detail(request, id_appointment):
    """Render the detail page for one appointment.

    The template receives ``appointment`` and ``patient`` via ``locals()``;
    if the appointment does not exist, only ``exception_raised`` is set.
    """
    try:
        appointment = Appointment.objects.get(id=id_appointment)
        patient = appointment.need.patient
    except Appointment.DoesNotExist:
        exception_raised = True
    return render(request, 'event/appointment_detail.html', locals())
| guillaume-guerdoux/tournee_infirmiers | tournee_infirmiers/event/views.py | views.py | py | 1,057 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "forms.AddNeedForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "patient.models.Patient.objects.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "patient.models.Patient.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
... |
6689998525 | #!/usr/bin/env python3
import argparse
import os
import unittest
import testtools
import sys
# Make the repository root importable so provider suites (e.g. ``aws``)
# can be imported regardless of the current working directory.
PROJECT_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir)
sys.path.append(PROJECT_DIR)

parser = argparse.ArgumentParser(description="Run tests.")
parser.add_argument("--deployment", choices=["aws", "azure", "local"], nargs="+")
args = parser.parse_args()
if not args.deployment:
    # argparse leaves the attribute as None when the flag is absent;
    # normalize to an empty list so the membership checks below are safe.
    args.deployment = []
# https://stackoverflow.com/questions/22484805/a-simple-working-example-for-testtools-concurrentstreamtestsuite
class TracingStreamResult(testtools.StreamResult):
    """StreamResult that buffers per-test output and tracks overall success.

    ``all_correct`` stays True only while every status event reported is
    either "inprogress" or "success".
    """
    all_correct: bool

    def __init__(self):
        self.all_correct = True
        # Per-test captured output.  Bug fix: this used to be a class
        # attribute, i.e. a mutable dict shared (and leaked) between all
        # instances; it is now per-instance state.
        self.output = {}

    def status(self, *args, **kwargs):
        self.all_correct = self.all_correct and (kwargs["test_status"] in ["inprogress", "success"])
        if not kwargs["test_status"]:
            # Events with no status carry file_bytes: accumulate raw output.
            test_id = kwargs["test_id"]
            if test_id not in self.output:
                self.output[test_id] = b""
            self.output[test_id] += kwargs["file_bytes"]
        elif kwargs["test_status"] == "fail":
            # Print the failure and whatever output was captured for it.
            print('{0[test_id]}: {0[test_status]}'.format(kwargs))
            print('{0[test_id]}: {1}'.format(kwargs, self.output[kwargs["test_id"]].decode()))
        elif kwargs["test_status"] == "success":
            print('{0[test_id]}: {0[test_status]}'.format(kwargs))
# Collect the per-provider unittest suites selected on the command line.
cases = []
if "aws" in args.deployment:
    from aws import suite
    for case in suite.suite():
        cases.append(case)
# Flatten the suites into a single list of test cases.
tests = []
for case in cases:
    for c in case:
        tests.append(c)
# setUpClass/tearDownClass are driven manually because the tests run
# through a ConcurrentStreamTestSuite rather than a normal runner.
for test in tests:
    test.setUpClass()
concurrent_suite = testtools.ConcurrentStreamTestSuite(lambda: ((test, None) for test in tests))
result = TracingStreamResult()
result.startTestRun()
concurrent_suite.run(result)
result.stopTestRun()
for test in tests:
    test.tearDownClass()
# Exit code 0 only if every test reported inprogress/success.
sys.exit(not result.all_correct)
| spcl/serverless-benchmarks | tests/test_runner.py | test_runner.py | py | 1,892 | python | en | code | 97 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_n... |
15827619242 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import pymongo
import pickle
import numpy as np
import bson
import geojson as gj
# This change should be removed in the next server update, by which time hopefully the new geojson version will incorporate the long-term fix for their default precision
# See - jazzband/geojson#177
# See = https://github.com/e-mission/e-mission-server/pull/900/commits/d2ada640f260aad8cbcfecb81345f4087c810baa
gj.geometry.Geometry.__init__.__defaults__ = (None, False, 15)
import emission.core.wrapper.common_trip as ecwct
import emission.core.get_database as edb
import emission.storage.decorations.trip_queries as esdtq
# constants
DAYS_IN_WEEK = 7
HOURS_IN_DAY = 24
#################################################################################
############################ database functions #################################
#################################################################################
def save_common_trip(common_trip):
    """Persist a CommonTrip wrapper as a document in the common-trip DB."""
    trip_doc = {
        "user_id": common_trip.user_id,
        "start_place": common_trip.start_place,
        "end_place": common_trip.end_place,
        "start_loc": common_trip.start_loc,
        "end_loc": common_trip.end_loc,
        "trips": common_trip["trips"],
        "probabilites": _np_array_to_json_format(common_trip.probabilites),
        "start_times": common_trip["start_times"],
        "durations": common_trip["durations"],
    }
    edb.get_common_trip_db().insert(trip_doc)
def get_common_trip_from_db(user_id, start_place_id, end_place_id):
    """Fetch one common trip by (user, start place, end place) and wrap it."""
    query = {"user_id": user_id,
             "start_place": start_place_id,
             "end_place": end_place_id}
    document = edb.get_common_trip_db().find_one(query)
    return make_common_trip_from_json(document)
def get_all_common_trips_for_user(user_id):
    """Return a cursor over every stored common trip of *user_id*."""
    return edb.get_common_trip_db().find({"user_id": user_id})
def make_common_trip_from_json(json_obj):
    """Inflate a stored common-trip document into a CommonTrip wrapper.

    NOTE(review): "start_times" and "durations" are written by
    save_common_trip() but are not restored here -- confirm whether they
    should be carried over on the round trip.
    """
    probs = _json_to_np_array(json_obj["probabilites"])
    props = {
        "user_id" : json_obj["user_id"],
        "start_place" : json_obj["start_place"],
        "end_place" : json_obj["end_place"],
        "start_loc" : json_obj["start_loc"],
        "end_loc" : json_obj["end_loc"],
        "trips" : json_obj["trips"],
        "probabilites" : probs
    }
    return ecwct.CommonTrip(props)
def _np_array_to_json_format(array):
return array.tolist()
def _json_to_np_array(mongo_thing):
return np.array(mongo_thing)
def make_new_common_trip(props=None):
    """Build a CommonTrip wrapper, optionally pre-populated from *props*."""
    return ecwct.CommonTrip(props) if props else ecwct.CommonTrip()
def clear_existing_trips(user_id):
    """Delete every stored common trip belonging to *user_id*."""
    edb.get_common_trip_db().remove({'user_id': user_id})
##############################################################################
def get_weight(common_trip):
    # Weight of a common trip = number of underlying real trips.
    return len(common_trip["trips"])

def add_real_trip_id(trip, _id):
    # Record one concrete trip id on the common trip's trip list.
    trip.trips.append(_id)

def get_start_hour(section_info):
    # Hour-of-day field of the section's local start time.
    return section_info.data.start_local_dt.hour

def get_day(section_info):
    # Weekday field of the section's local start time.
    return section_info.data.start_local_dt.weekday

def get_start_time(section_info):
    # Full local start timestamp of the section.
    return section_info.data.start_local_dt

def increment_probability(trip, day, hour):
    # Bump the (day, hour) cell of the trip's start-time histogram.
    trip.probabilites[day, hour] += 1
def set_up_trips(list_of_cluster_data, user_id):
    """Rebuild the user's common trips from clustered trip data.

    Wipes previously stored common trips for *user_id*, then creates one
    CommonTrip per cluster, linked to the common places at the cluster's
    start/end coordinates, with a (day x hour) start-time histogram
    accumulated over the cluster's sections.
    """
    # Import in here to avoid recursive imports
    # TODO: This should really be moved to a separate class that creates the
    # entire graph at one time
    import emission.storage.decorations.common_place_queries as esdcpq
    clear_existing_trips(user_id)
    for dct in list_of_cluster_data:
        start_times = []
        durations = []
        start_loc = gj.Point(dct['start_coords'])
        end_loc = gj.Point(dct['end_coords'])
        start_place_id = esdcpq.get_common_place_at_location(start_loc).get_id()
        end_place_id = esdcpq.get_common_place_at_location(end_loc).get_id()
        #print 'dct["sections"].trip_id %s is' % dct["sections"][0]
        probabilites = np.zeros((DAYS_IN_WEEK, HOURS_IN_DAY))
        for sec in dct["sections"]:
            # Histogram cell = (weekday, hour) of the section's local start.
            probabilites[get_day(sec), get_start_hour(sec)] += 1
            start_times.append(get_start_time(sec))
            durations.append(sec.data.duration)
        trip = make_new_common_trip()
        trip.user_id = user_id
        trip.start_place = start_place_id
        trip.end_place = end_place_id
        trip.start_loc = start_loc
        trip.end_loc = end_loc
        trip.probabilites = probabilites
        trip.trips = [unc_trip.get_id() for unc_trip in dct["sections"]]
        trip.start_times = start_times
        trip.durations = durations
        save_common_trip(trip)
| e-mission/e-mission-server | emission/storage/decorations/common_trip_queries.py | common_trip_queries.py | py | 4,875 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "future.standard_library.install_aliases",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "geojson.geometry",
"line_number": 17,
"usage_type": "attribute"
},
{
... |
16128993395 | import time
import asyncio
import tornado.web
import tornado.ioloop
class MainHandler(tornado.web.RequestHandler):
    """Trivial handler used as a liveness check."""
    def get(self):
        self.write("Hello, world\n")

class NonBlocking(tornado.web.RequestHandler):
    """Awaits an asyncio sleep; the event loop keeps serving other requests."""
    async def get(self):
        await asyncio.sleep(10)

class Blocking(tornado.web.RequestHandler):
    """Sleeps synchronously, blocking the whole single-threaded IO loop."""
    def get(self):
        time.sleep(10)
def make_app():
    """Build the Tornado application with the three demo routes."""
    routes = [
        (r"/", MainHandler),
        (r"/non_blocking", NonBlocking),
        (r"/blocking", Blocking),
    ]
    return tornado.web.Application(routes)
if __name__ == "__main__":
    # Serve on localhost:8888 until interrupted.
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
| czasg/ScrapyLearning | czaSpider/dump/tornado学习/test.py | test.py | py | 647 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tornado.web.web",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "tornado.web.web",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
... |
11316618459 | import pandas as pd
import datetime
import json
def date_to_week(d):
    """Map a 'MM/DD/YYYY' date string to its Sunday-based week number (%U)."""
    month, day, year = (int(part) for part in d.split('/'))
    return int(datetime.date(year, month, day).strftime('%U'))
# Historical deaths data (and 2020, which will be overwritten where possible)
deaths = pd.read_csv('deaths_2020-06-24.csv')
deaths = deaths.drop(columns=['Type', 'Week Ending Date', 'Time Period', 'Suppress','Note','State Abbreviation'])
deaths = deaths.rename(columns={'Age Group': 'Slice'})
deaths = deaths.rename(columns={'Number of Deaths': 'Deaths'})
#deaths = deaths.set_index(['Jurisdiction', 'Year', 'Slice', 'Week'])
# Sum over age slices to get an "All Deaths" series per jurisdiction/year/week.
all_deaths = deaths.set_index(['Jurisdiction', 'Year', 'Slice', 'Week']).sum(level=[0, 1, 3]).reset_index()
all_deaths['Slice'] = 'All Deaths'
# Pre-2020 mean and max baselines, labelled as pseudo-"years".
mean_deaths = all_deaths[all_deaths['Year'] != 2020].set_index(['Jurisdiction', 'Year', 'Slice', 'Week']).mean(level=[0, 2, 3]).reset_index()
mean_deaths['Year'] = '2015-2019 Mean'
max_deaths = all_deaths[all_deaths['Year'] != 2020].set_index(['Jurisdiction', 'Year', 'Slice', 'Week']).max(level=[0, 2, 3]).reset_index()
max_deaths['Year'] = '2015-2019 Max'
# 2020 total deaths, and COVID deaths (filtering out other "select deaths numbers")
covid_deaths = pd.read_csv('covid19_deaths_2020-06-25.csv')
covid_deaths = covid_deaths.drop(columns=['Influenza Deaths', 'Pneumonia and COVID-19 Deaths', 'Pneumonia Deaths'])
covid_deaths = covid_deaths.drop(columns=['Percent of Expected Deaths', 'Data as of', 'Start week', 'Group'])
covid_deaths = covid_deaths.drop(columns=['Pneumonia, Influenza, or COVID-19 Deaths', 'Indicator', 'Footnote'])
covid_deaths = covid_deaths.rename(columns={'State': 'Jurisdiction'})
covid_deaths = covid_deaths.rename(columns={'End Week': 'Week'})
covid_deaths = covid_deaths.rename(columns={'Total Deaths': 'All Deaths'})
covid_deaths['Week'] = covid_deaths['Week'].map(date_to_week)
covid_deaths['Year'] = 2020
# Long format: one row per (jurisdiction, year, week, slice).
covid_deaths = covid_deaths.melt(var_name='Slice', value_vars=['COVID-19 Deaths', 'All Deaths'], value_name='Deaths', id_vars=['Jurisdiction', 'Year', 'Week'])
#covid_deaths = covid_deaths.set_index(['Jurisdiction', 'Year', 'Slice', 'Week'])
# Baseline + reported COVID deaths = implied "expected" totals.
covid_mean_implied = pd.concat([covid_deaths[covid_deaths['Slice'] == 'COVID-19 Deaths'], mean_deaths])\
    .set_index(['Jurisdiction', 'Year', 'Slice', 'Week']).sum(level=[0, 3]).reset_index()
covid_mean_implied['Year'] = '2015-2019 Mean + COVID-19 Reported'
covid_mean_implied['Slice'] = 'All Deaths'
covid_max_implied = pd.concat([covid_deaths[covid_deaths['Slice'] == 'COVID-19 Deaths'], max_deaths])\
    .set_index(['Jurisdiction', 'Year', 'Slice', 'Week']).sum(level=[0, 3]).reset_index()
covid_max_implied['Year'] = '2015-2019 Max + COVID-19 Reported'
covid_max_implied['Slice'] = 'All Deaths'
deaths_all = pd.concat([deaths, all_deaths, mean_deaths, max_deaths, covid_mean_implied, covid_max_implied, covid_deaths])
deaths_all = deaths_all.drop_duplicates(subset=['Jurisdiction', 'Year', 'Slice', 'Week'], keep='last') # Take more recent covid deaths total data over older historical dataset (which includes some 2020)
#deaths_all = deaths_all.set_index(['Jurisdiction', 'Year', 'Slice', 'Week'])
deaths_all = deaths_all.pivot_table(index='Week', columns=['Jurisdiction', 'Year', 'Slice'], values='Deaths')
# Cannot find a good pandas way to do this
# Build a nested dict keyed Jurisdiction -> Year -> Slice -> {week: deaths}.
deaths_dict = {}
for index in deaths_all:
    # deaths_dict[index] = deaths_all[index]
    traverse = deaths_dict
    for i in range(len(index) - 1):
        if index[i] not in traverse:
            traverse[index[i]] = {}
        traverse = traverse[index[i]]
    traverse[index[-1]] = deaths_all[index].dropna().to_dict()
#print(deaths)
with open('src/deaths.json', 'w') as fp:
    json.dump(deaths_dict, fp, allow_nan=False)
| AdamHickerson/covid-19-deaths | corona2.py | corona2.py | py | 3,769 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.date",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"lin... |
17454665839 | import getpass
import os
import subprocess
from paramiko.client import SSHClient, AutoAddPolicy
from paramiko.config import SSHConfig
from .transfer import Transfer
class Connection(object):
    """Light SSH connection wrapper around paramiko's SSHClient.

    Host, user and port may come from an ``[user@]host[:port]`` shorthand,
    explicit keyword arguments, or the user/system OpenSSH config files
    (which also supply hostname aliases, identity files and timeouts).
    """

    host = None
    original_host = None
    user = None
    port = None
    ssh_config = None
    connect_timeout = None
    connect_kwargs = None
    client = None
    transport = None
    _sftp = None

    def __init__(
        self,
        host,
        user=None,
        port=None,
        connect_timeout=None,
        connect_kwargs=None,
    ):
        shorthand = self.derive_shorthand(host)
        host = shorthand['host']
        err = (
            'You supplied the {} via both shorthand and kwarg! Please pick one.'  # noqa
        )
        if shorthand['user'] is not None:
            if user is not None:
                raise ValueError(err.format('user'))
            user = shorthand['user']
        if shorthand['port'] is not None:
            if port is not None:
                raise ValueError(err.format('port'))
            port = shorthand['port']
        self.ssh_config = self.load_ssh_config(host)
        self.original_host = host
        self.host = host
        if 'hostname' in self.ssh_config:
            # ssh_config may alias the given name to the real hostname.
            self.host = self.ssh_config['hostname']
        self.user = user or self.ssh_config.get('user', getpass.getuser())
        self.port = port or int(self.ssh_config.get('port', '22'))
        if connect_timeout is None:
            connect_timeout = self.ssh_config.get('connecttimeout')
        if connect_timeout is not None:
            connect_timeout = int(connect_timeout)
        self.connect_timeout = connect_timeout
        if connect_kwargs is None:
            connect_kwargs = {}
        if 'identityfile' in self.ssh_config:
            # Merge config-provided identity files into any caller-provided
            # key_filename list.
            connect_kwargs.setdefault('key_filename', [])
            connect_kwargs['key_filename'].extend(
                self.ssh_config['identityfile']
            )
        self.connect_kwargs = connect_kwargs
        client = SSHClient()
        client.set_missing_host_key_policy(AutoAddPolicy())
        self.client = client
        self.transport = None

    def derive_shorthand(self, host_string):
        """Split ``[user@]host[:port]`` into a dict with ``user``, ``host``
        and ``port`` keys (missing parts are None; port is an int)."""
        user_hostport = host_string.rsplit('@', 1)
        hostport = user_hostport.pop()
        user = user_hostport[0] if user_hostport and user_hostport[0] else None
        # IPv6: can't reliably tell where addr ends and port begins, so don't
        # try (and don't bother adding special syntax either, user should avoid
        # this situation by using port=).
        if hostport.count(':') > 1:
            host = hostport
            port = None
        # IPv4: can split on ':' reliably.
        else:
            host_port = hostport.rsplit(':', 1)
            host = host_port.pop(0) or None
            port = host_port[0] if host_port and host_port[0] else None
        if port is not None:
            port = int(port)
        return {'user': user, 'host': host, 'port': port}

    def load_ssh_config(self, host):
        """Parse the user and system OpenSSH config files and return the
        merged lookup dict for *host*."""
        ssh_config = SSHConfig()
        for path in (
            os.path.expanduser('~/.ssh/config'),
            '/etc/ssh/ssh_config'
        ):
            if os.path.isfile(path):
                with open(path) as fd:
                    ssh_config.parse(fd)
        return ssh_config.lookup(host)

    @property
    def is_connected(self):
        # A Connection is live only while its transport is active.
        return self.transport.active if self.transport else False

    def open(self):
        """Connect (idempotently), merging shorthand/kwarg/ssh-config
        parameters; raises ValueError on conflicting specifications."""
        if self.is_connected:
            return
        err = (
            "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!"  # noqa
        )
        for key in 'hostname port username'.split():
            if key in self.connect_kwargs:
                raise ValueError(err.format(key))
        if (
            'timeout' in self.connect_kwargs
            and self.connect_timeout is not None
        ):
            raise ValueError(err.format('timeout'))
        # No conflicts -> merge 'em together
        kwargs = dict(
            self.connect_kwargs,
            username=self.user,
            hostname=self.host,
            port=self.port,
        )
        if self.connect_timeout:
            kwargs['timeout'] = self.connect_timeout
        if 'key_filename' in kwargs and not kwargs['key_filename']:
            del kwargs['key_filename']
        self.client.connect(**kwargs)
        self.transport = self.client.get_transport()

    def close(self):
        """Close the underlying client connection, if any."""
        if self.is_connected:
            self.client.close()
            # Bug fix: drop the cached SFTP session tied to the transport we
            # just closed, so a later sftp() call opens a fresh one instead
            # of returning a stale, unusable client.
            self._sftp = None

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        self.close()

    def run(self, command, **kwargs):
        """Execute *command* remotely; returns paramiko's
        (stdin, stdout, stderr) triple."""
        self.open()
        return self.client.exec_command(command, **kwargs)

    def sudo(self, command, **kwargs):
        """Execute *command* remotely under sudo, reading the password from
        stdin (``-S``)."""
        self.open()
        # Bug fix: ``-p`` consumes the next word as the prompt string, so
        # the old "sudo -S -p <command>" used the command itself as the
        # prompt and executed nothing.  Supply an explicit empty prompt.
        return self.client.exec_command("sudo -S -p '' " + command, **kwargs)

    def local(self, command, **kwargs):
        """Run *command* locally through the shell; returns the exit code.

        NOTE(review): shell=True executes the raw string -- never pass
        untrusted input here.
        """
        return subprocess.call(command, shell=True, **kwargs)

    def sftp(self):
        """Return a cached SFTP session, opening the connection if needed."""
        self.open()
        if self._sftp is None:
            self._sftp = self.client.open_sftp()
        return self._sftp

    def get(self, *args, **kwargs):
        """Download via Transfer; see Transfer.get."""
        return Transfer(self).get(*args, **kwargs)

    def put(self, *args, **kwargs):
        """Upload via Transfer; see Transfer.put."""
        return Transfer(self).put(*args, **kwargs)
| Yeolar/bunder | bunder/connection.py | connection.py | py | 5,335 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "getpass.getuser",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "paramiko.client.SSHClient",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "paramiko.client.AutoAddPolicy",
"line_number": 72,
"usage_type": "call"
},
{
"api_name"... |
30180533882 | #!/bin/env python3
import sys
import re
from functools import cache
def readfile(filename):
    """Return the entire contents of *filename* as a single string."""
    with open(filename) as handle:
        contents = handle.read()
    return contents
def step1(inp):
    """Evaluate the signal that ends up on wire 'a'."""
    return recurse(inp, findwire(inp, 'a'))

def step2(inp, override):
    """Re-run the circuit with wire 'b' forced to *override*."""
    rewired = re.sub('.*-> b\n', f'{override} -> b\n', inp)
    return step1(rewired)

def findwire(inp, wire):
    """Return the expression feeding *wire* in the instruction list."""
    return re.search(f'(.*-> {wire})\n', inp).group(1).split(' -> ')[0]

@cache
def recurse(inp, instr):
    """Resolve wire references in *instr* recursively, then evaluate it."""
    resolved = []
    for token in instr.split(' '):
        if token.isalpha():
            token = recurse(inp, findwire(inp, token))
        resolved.append(str(token))
    return eval(''.join(resolved))
def main():
    """Entry point: read the puzzle input (path from argv[1], defaulting to
    input.txt), translate the gate keywords into Python operators, then
    solve both puzzle parts."""
    filename = "input.txt"
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    inp = readfile(filename)
    # NOT x is emulated as (65535 - x), the 16-bit one's complement.
    inp = inp.replace("AND", "&").replace("OR", "|").replace("LSHIFT", "<<").replace("RSHIFT", ">>").replace("NOT", "65535 -")
    step1_out = step1(inp)
    # Part 2 feeds part 1's answer back in on wire 'b'.
    step2_out = step2(inp, step1_out)
    print(f'Step 1: {step1_out}')
    print(f'Step 2: {step2_out}')

if __name__ == '__main__':
    main()
##!/bin/env python3
#
#import re
#
#with open('input.txt') as f:
# lines = f.read().splitlines()
#
#step1 = 0
#step2 = 0
#
#signals = {}
#
#for l in lines:
# inp, out = l.split(" -> ")
#
# inp = re.sub(r"([a-z]+)", r"signals['\1']", inp)
#
# inp = inp.replace("AND", "&").replace("OR", "|").replace("LSHIFT", "<<").replace("RSHIFT", ">>").replace("NOT", "65535 -")
# print(F"{out} = {inp}")
# signals[out] = eval(inp)
#
#print(signals['a'])
#
#
#
#
#print(F"Step 1: {step1}")
#print(F"Step 2: {step2}")
| reyemxela/adventofcode | 2015/07/main.py | main.py | py | 1,530 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "functools.cache",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 40,
... |
13050560084 | from __future__ import unicode_literals
"""Benchmark for SQLAlchemy.
An adaptation of Robert Brewers' ZooMark speed tests. """
import datetime
from sqlalchemy import Table, Column, Integer, Unicode, Date, \
DateTime, Time, Float, Sequence, ForeignKey, \
select, join, and_, outerjoin, func
from sqlalchemy.testing import replay_fixture
ITERATIONS = 1
class ZooMarkTest(replay_fixture.ReplayFixtureTest):
"""Runs the ZooMark and squawks if method counts vary from the norm."""
__requires__ = 'cpython',
__only_on__ = 'postgresql+psycopg2'
def _run_steps(self, ctx):
with ctx():
self._baseline_1a_populate()
with ctx():
self._baseline_2_insert()
with ctx():
self._baseline_3_properties()
with ctx():
self._baseline_4_expressions()
with ctx():
self._baseline_5_aggregates()
with ctx():
self._baseline_6_editing()
with ctx():
self._baseline_7_multiview()
def setup_engine(self):
self._baseline_1_create_tables()
def teardown_engine(self):
self._baseline_8_drop()
def _baseline_1_create_tables(self):
Table(
'Zoo',
self.metadata,
Column('ID', Integer, Sequence('zoo_id_seq'),
primary_key=True, index=True),
Column('Name', Unicode(255)),
Column('Founded', Date),
Column('Opens', Time),
Column('LastEscape', DateTime),
Column('Admission', Float),
)
Table(
'Animal',
self.metadata,
Column('ID', Integer, Sequence('animal_id_seq'),
primary_key=True),
Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True),
Column('Name', Unicode(100)),
Column('Species', Unicode(100)),
Column('Legs', Integer, default=4),
Column('LastEscape', DateTime),
Column('Lifespan', Float(4)),
Column('MotherID', Integer, ForeignKey('Animal.ID')),
Column('PreferredFoodID', Integer),
Column('AlternateFoodID', Integer),
)
self.metadata.create_all()
def _baseline_1a_populate(self):
Zoo = self.metadata.tables['Zoo']
Animal = self.metadata.tables['Animal']
engine = self.metadata.bind
wap = engine.execute(Zoo.insert(), Name='Wild Animal Park',
Founded=datetime.date(2000, 1, 1),
Opens=datetime.time(8, 15, 59),
LastEscape=datetime.datetime(
2004, 7, 29, 5, 6, 7),
Admission=4.95).inserted_primary_key[0]
sdz = engine.execute(Zoo.insert(), Name='San Diego Zoo',
Founded=datetime.date(1935, 9, 13),
Opens=datetime.time(9, 0, 0),
Admission=0).inserted_primary_key[0]
engine.execute(Zoo.insert(inline=True), Name='Montr\xe9al Biod\xf4me',
Founded=datetime.date(1992, 6, 19),
Opens=datetime.time(9, 0, 0), Admission=11.75)
seaworld = engine.execute(Zoo.insert(), Name='Sea_World',
Admission=60).inserted_primary_key[0]
# Let's add a crazy futuristic Zoo to test large date values.
engine.execute(
Zoo.insert(), Name='Luna Park',
Founded=datetime.date(2072, 7, 17),
Opens=datetime.time(0, 0, 0),
Admission=134.95).inserted_primary_key[0]
# Animals
leopardid = engine.execute(Animal.insert(), Species='Leopard',
Lifespan=73.5).inserted_primary_key[0]
engine.execute(Animal.update(Animal.c.ID == leopardid), ZooID=wap,
LastEscape=datetime.datetime(
2004, 12, 21, 8, 15, 0, 999907,)
)
engine.execute(
Animal.insert(),
Species='Lion', ZooID=wap).inserted_primary_key[0]
engine.execute(Animal.insert(), Species='Slug', Legs=1, Lifespan=.75)
engine.execute(Animal.insert(), Species='Tiger',
ZooID=sdz).inserted_primary_key[0]
# Override Legs.default with itself just to make sure it works.
engine.execute(Animal.insert(inline=True), Species='Bear', Legs=4)
engine.execute(Animal.insert(inline=True), Species='Ostrich', Legs=2,
Lifespan=103.2)
engine.execute(Animal.insert(inline=True), Species='Centipede',
Legs=100)
engine.execute(Animal.insert(), Species='Emperor Penguin',
Legs=2, ZooID=seaworld).inserted_primary_key[0]
engine.execute(Animal.insert(), Species='Adelie Penguin',
Legs=2, ZooID=seaworld).inserted_primary_key[0]
engine.execute(Animal.insert(inline=True), Species='Millipede',
Legs=1000000, ZooID=sdz)
# Add a mother and child to test relationships
bai_yun = engine.execute(
Animal.insert(),
Species='Ape',
Name='Bai Yun',
Legs=2).inserted_primary_key[0]
engine.execute(Animal.insert(inline=True), Species='Ape',
Name='Hua Mei', Legs=2, MotherID=bai_yun)
def _baseline_2_insert(self):
Animal = self.metadata.tables['Animal']
i = Animal.insert(inline=True)
for x in range(ITERATIONS):
i.execute(Species='Tick', Name='Tick %d' % x, Legs=8)
def _baseline_3_properties(self):
Zoo = self.metadata.tables['Zoo']
Animal = self.metadata.tables['Animal']
engine = self.metadata.bind
def fullobject(select):
"""Iterate over the full result row."""
return list(engine.execute(select).first())
for x in range(ITERATIONS):
# Zoos
fullobject(Zoo.select(Zoo.c.Name == 'Wild Animal Park'))
fullobject(Zoo.select(Zoo.c.Founded ==
datetime.date(1935, 9, 13)))
fullobject(Zoo.select(Zoo.c.Name ==
'Montr\xe9al Biod\xf4me'))
fullobject(Zoo.select(Zoo.c.Admission == float(60)))
# Animals
fullobject(Animal.select(Animal.c.Species == 'Leopard'))
fullobject(Animal.select(Animal.c.Species == 'Ostrich'))
fullobject(Animal.select(Animal.c.Legs == 1000000))
fullobject(Animal.select(Animal.c.Species == 'Tick'))
def _baseline_4_expressions(self):
Zoo = self.metadata.tables['Zoo']
Animal = self.metadata.tables['Animal']
engine = self.metadata.bind
def fulltable(select):
"""Iterate over the full result table."""
return [list(row) for row in engine.execute(select).fetchall()]
for x in range(ITERATIONS):
assert len(fulltable(Zoo.select())) == 5
assert len(fulltable(Animal.select())) == ITERATIONS + 12
assert len(fulltable(Animal.select(Animal.c.Legs == 4))) \
== 4
assert len(fulltable(Animal.select(Animal.c.Legs == 2))) \
== 5
assert len(
fulltable(
Animal.select(
and_(
Animal.c.Legs >= 2,
Animal.c.Legs < 20)))) == ITERATIONS + 9
assert len(fulltable(Animal.select(Animal.c.Legs > 10))) \
== 2
assert len(fulltable(Animal.select(Animal.c.Lifespan
> 70))) == 2
assert len(fulltable(Animal.select(Animal.c.Species.
startswith('L')))) == 2
assert len(fulltable(Animal.select(Animal.c.Species.
endswith('pede')))) == 2
assert len(fulltable(Animal.select(Animal.c.LastEscape
!= None))) == 1
assert len(
fulltable(
Animal.select(
None == Animal.c.LastEscape))) == ITERATIONS + 11
# In operator (containedby)
assert len(fulltable(Animal.select(Animal.c.Species.like('%pede%'
)))) == 2
assert len(
fulltable(
Animal.select(
Animal.c.Species.in_(
['Lion', 'Tiger', 'Bear'])))) == 3
# Try In with cell references
class thing(object):
pass
pet, pet2 = thing(), thing()
pet.Name, pet2.Name = 'Slug', 'Ostrich'
assert len(
fulltable(
Animal.select(
Animal.c.Species.in_([pet.Name, pet2.Name])))) == 2
# logic and other functions
assert len(fulltable(Animal.select(Animal.c.Species.like('Slug'
)))) == 1
assert len(fulltable(Animal.select(Animal.c.Species.like('%pede%'
)))) == 2
name = 'Lion'
assert len(
fulltable(
Animal.select(
func.length(
Animal.c.Species) == len(name)))) == ITERATIONS + 3
assert len(
fulltable(
Animal.select(
Animal.c.Species.like('%i%')))) == ITERATIONS + 7
# Test now(), today(), year(), month(), day()
assert len(
fulltable(
Zoo.select(
and_(
Zoo.c.Founded != None,
Zoo.c.Founded < func.current_timestamp(
_type=Date))))) == 3
assert len(
fulltable(
Animal.select(
Animal.c.LastEscape == func.current_timestamp(
_type=Date)))) == 0
assert len(
fulltable(
Animal.select(
func.date_part(
'year',
Animal.c.LastEscape) == 2004))) == 1
assert len(
fulltable(
Animal.select(
func.date_part(
'month',
Animal.c.LastEscape) == 12))) == 1
assert len(
fulltable(
Animal.select(
func.date_part(
'day',
Animal.c.LastEscape) == 21))) == 1
def _baseline_5_aggregates(self):
Animal = self.metadata.tables['Animal']
Zoo = self.metadata.tables['Zoo']
engine = self.metadata.bind
for x in range(ITERATIONS):
# views
view = engine.execute(select([Animal.c.Legs])).fetchall()
legs = sorted([x[0] for x in view])
expected = {
'Leopard': 73.5,
'Slug': .75,
'Tiger': None,
'Lion': None,
'Bear': None,
'Ostrich': 103.2,
'Centipede': None,
'Emperor Penguin': None,
'Adelie Penguin': None,
'Millipede': None,
'Ape': None,
'Tick': None,
}
for species, lifespan in engine.execute(
select([Animal.c.Species, Animal.c.Lifespan])).fetchall():
assert lifespan == expected[species]
expected = ['Montr\xe9al Biod\xf4me', 'Wild Animal Park']
e = select([Zoo.c.Name],
and_(Zoo.c.Founded != None,
Zoo.c.Founded <= func.current_timestamp(),
Zoo.c.Founded >= datetime.date(1990,
1,
1)))
values = [val[0] for val in engine.execute(e).fetchall()]
assert set(values) == set(expected)
# distinct
legs = [
x[0]
for x in engine.execute(
select([Animal.c.Legs],
distinct=True)).fetchall()]
legs.sort()
def _baseline_6_editing(self):
    """Benchmark baseline: UPDATE a row, verify the edit, then restore the
    original values and verify again — run ITERATIONS times."""
    Zoo = self.metadata.tables['Zoo']
    engine = self.metadata.bind
    for x in range(ITERATIONS):
        # Edit
        SDZ = engine.execute(Zoo.select(Zoo.c.Name == 'San Diego Zoo'
                                        )).first()
        engine.execute(
            Zoo.update(
                Zoo.c.ID == SDZ['ID']),
            Name='The San Diego Zoo',
            Founded=datetime.date(1900, 1, 1),
            Opens=datetime.time(7, 30, 0), Admission='35.00')
        # Test edits
        SDZ = engine.execute(Zoo.select(Zoo.c.Name == 'The San Diego Zoo'
                                        )).first()
        assert SDZ['Founded'] == datetime.date(1900, 1, 1), \
            SDZ['Founded']
        # Change it back
        engine.execute(Zoo.update(Zoo.c.ID == SDZ['ID'
                                                  ]), Name='San Diego Zoo',
                       Founded=datetime.date(1935, 9, 13),
                       Opens=datetime.time(9, 0, 0),
                       Admission='0')
        # Test re-edits
        SDZ = engine.execute(Zoo.select(Zoo.c.Name == 'San Diego Zoo'
                                        )).first()
        assert SDZ['Founded'] == datetime.date(1935, 9, 13)
def _baseline_7_multiview(self):
    """Benchmark baseline: multi-table SELECTs combining Zoo and Animal
    with inner and outer joins — run ITERATIONS times."""
    Zoo = self.metadata.tables['Zoo']
    Animal = self.metadata.tables['Animal']
    engine = self.metadata.bind

    def fulltable(select):
        """Iterate over the full result table."""
        return [list(row) for row in engine.execute(select).fetchall()]
    for x in range(ITERATIONS):
        fulltable(
            select(
                [Zoo.c.ID] + list(Animal.c),
                Zoo.c.Name == 'San Diego Zoo',
                from_obj=[join(Zoo, Animal)]))
        # construct (but do not execute) a single-table select
        Zoo.select(Zoo.c.Name == 'San Diego Zoo')
        fulltable(
            select(
                [Zoo.c.ID, Animal.c.ID],
                and_(
                    Zoo.c.Name == 'San Diego Zoo',
                    Animal.c.Species == 'Leopard'
                ),
                from_obj=[join(Zoo, Animal)])
        )
        # Now try the same query with INNER, LEFT, and RIGHT JOINs.
        fulltable(select([
            Zoo.c.Name, Animal.c.Species],
            from_obj=[join(Zoo, Animal)]))
        fulltable(select([
            Zoo.c.Name, Animal.c.Species],
            from_obj=[outerjoin(Zoo, Animal)]))
        # RIGHT JOIN emulated by swapping the operand order of the outer join
        fulltable(select([
            Zoo.c.Name, Animal.c.Species],
            from_obj=[outerjoin(Animal, Zoo)]))
def _baseline_8_drop(self):
    """Benchmark baseline: drop all tables created for the benchmark."""
    self.metadata.drop_all()
| lameiro/cx_oracle_on_ctypes | test/integration/3rdparty/SQLAlchemy-1.0.8/test/aaa_profiling/test_zoomark.py | test_zoomark.py | py | 15,649 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "sqlalchemy.testing.replay_fixture.ReplayFixtureTest",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.testing.replay_fixture",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 47,
"usage_t... |
14696985443 | from datasets import load_dataset
import matplotlib.pyplot as plt

# Plot the score distribution of the Cartinoe5930/KoRAE_rated dataset as a
# bar chart with 0.5-wide bins covering scores 0.0 through 10.0.
rated_KoRAE = load_dataset("Cartinoe5930/KoRAE_rated", split="train")

# Pre-seed every 0.5-step bin from 0.0 to 10.0 so empty bins still appear.
# (The previously built parallel `x_label` list was unused and is removed.)
score_result = {i * 0.5: 0 for i in range(0, 21)}
for data in rated_KoRAE:
    # NOTE(review): raises KeyError if a score is outside 0.0-10.0 or not a
    # multiple of 0.5 -- assumed guaranteed by the dataset; confirm upstream.
    score_result[float(data["score"])] += 1

x = list(score_result.keys())
y = list(score_result.values())

plt.figure(figsize=(10, 6))
plt.bar(x, y, width=0.4, align='center', alpha=0.7)
plt.xlabel('Scores')
plt.ylabel('Frequency')
plt.title('Score Distribution Plot')
plt.xticks(x, rotation=45)
plt.grid(axis='y', linestyle='--', alpha=0.6)
plt.show() | gauss5930/KoRAE | rating/score_plot.py | score_plot.py | py | 630 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datasets.load_dataset",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matpl... |
44008094218 | # Basic idea:
# Physicsy sandy sand (pixel-perfect collisions, run simulation to keep up with real time)
# Character moving around
# Tap direction (WASD) to choose stuff to pick up, tap next to throw
# Hold direction to choose stuff to morph, press other keys to morph it
import pygame
from pygame.locals import *
import numpy as np
import time

pygame.init()

# Pack (r, g, b) into one 24-bit integer colour value.
coltonum = lambda r, g, b: (((r << 8) + g) << 8) + b

# Logical resolution; the window is twice this size (2x2 pixel blow-up).
w, h = 500, 250
screensize = np.int32((w, h))
screen = pygame.Surface(screensize)
window = pygame.display.set_mode(screensize * 2)
screengrid = pygame.surfarray.pixels3d(screen)
windowgrid = pygame.surfarray.pixels3d(window)

keys = set()

# Two vertical strips of particles (nx columns x 50 rows) near the left and
# right edges; pos is an (N, 2) integer coordinate array, one row per particle.
nx = 10
pos = np.int32([[*np.tile(np.arange(0, nx, 1, "int32"), 50), *np.tile(np.arange(300 - nx, 300, 1, "int32"), 50)],
                [*np.repeat(np.arange(0, 50, 1, "int32"), nx), *np.repeat(np.arange(0, 50, 1, "int32"), nx)]]).T
mass = np.ones(pos.shape[0], "int32")
vel = np.zeros(pos.shape, "int32")
# First half of the particles moves at 20 units/tick; second half is heavier.
vel[:pos.shape[0] // 2] = 20
mass[pos.shape[0] // 2:] = 10
force = np.zeros(pos.shape, "int32")

while True:
    # --- draw: heavier particles render darker in the red channel ---
    screen.fill(0)
    screengrid[pos[:, 0], pos[:, 1], 0] = 255 - ((mass - 1) * 10)
    # try:
    #     screengrid[collpos[:, 0], collpos[:, 1], 1] = 255
    # except NameError:
    #     pass
    # Blow each logical pixel up to a 2x2 block in the window.
    for i in range(2):
        for j in range(2):
            windowgrid[i::2, j::2] = screengrid
    pygame.display.flip()
    # --- input ---
    mousepos = np.int32(pygame.mouse.get_pos())
    for e in pygame.event.get():
        if e.type == QUIT:
            quit()
        elif e.type == KEYDOWN:
            keys.add(e.key)
            if e.key == K_ESCAPE:
                quit()
        elif e.type == KEYUP:
            keys.discard(e.key)
    # time.sleep(mousepos[0] / (10 * w))
    # In order to physics in discrete space, subdivide each tick according to lcm of integer speeds of objects
    # Each object moves only in steps of 1 in x or y
    # At the beginning of the appropriate subdivision of a tick, an object will move 1
    # If, say, there are objects a and b moving at x speeds 2 and 3 (units per tick), the tick will be cut into 6:
    # 0: a b (this is the initial state)
    # 1: a b
    # 2: a b
    # 3: a b
    # 4: a b
    # 5: a b
    # 6: a b (this is the state after the tick has passed)
    # At each stage, collisions are checked and speeds altered, etc
    # Which will... probably get complicated fast
    # Each object will have to keep track of the subtick offset for its movement speed (last collision)
    # The velocity composition (and hence lcm) may change with collisions
    # Packed, resting objects will collide... a lot, which could be slow
    # Maybe keep track of columns/rows locked against the sides?
    uniquev = np.unique(np.abs(vel))
    tickdiv = np.max(uniquev)
    # Fold remaining speeds into the lcm: https://stackoverflow.com/a/42472824
    for v in uniquev[(tickdiv % uniquev) != 0]:
        tickdiv = int(tickdiv * v / np.gcd(tickdiv, v))
    i = 1
    while i < tickdiv:
        # Particles whose speed lines up with this sub-step move one unit.
        movers = ((tickdiv / i) % np.abs(vel)) == 0
        pos[movers] += np.sign(vel[movers])
        # Bounce off the screen edges, then clamp coordinates into bounds.
        vel[((pos == 0) & (vel < 0)) | ((pos == (w, h)) & (vel > 0))] *= -1
        pos = np.minimum(np.maximum(0, pos), (w - 1, h - 1))
        # Lexsort positions so coincident particles become adjacent:
        # https://stackoverflow.com/q/11528078
        a = np.lexsort([*pos.T])
        coll = np.all(pos[a[:-1]] == pos[a[1:]], axis=1)
        if np.any(coll):
            collpos = pos[a[:-1][coll]]
            # Assume that there are only 2 parties to each collision, and that all masses are equal...
            # oppcoll marks components where the pair moves in opposite directions.
            oppcoll = (vel[a[:-1][coll]] * vel[a[1:][coll]]) < 0
            newvel = vel.copy()
            # Swap the x (col 0) and y (col 1) velocity components between
            # the two members of every colliding pair (elastic exchange).
            newvel[a[:-1][coll][oppcoll[:, 0]], 0] = vel[a[1:][coll][oppcoll[:, 0]], 0]
            newvel[a[:-1][coll][oppcoll[:, 1]], 1] = vel[a[1:][coll][oppcoll[:, 1]], 1]
            newvel[a[1:][coll][oppcoll[:, 0]], 0] = vel[a[:-1][coll][oppcoll[:, 0]], 0]
            newvel[a[1:][coll][oppcoll[:, 1]], 1] = vel[a[:-1][coll][oppcoll[:, 1]], 1]
            newvel[a[:-1][coll][~oppcoll[:, 0]], 0] = vel[a[1:][coll][~oppcoll[:, 0]], 0]
            newvel[a[:-1][coll][~oppcoll[:, 1]], 1] = vel[a[1:][coll][~oppcoll[:, 1]], 1]
            newvel[a[1:][coll][~oppcoll[:, 0]], 0] = vel[a[:-1][coll][~oppcoll[:, 0]], 0]
            newvel[a[1:][coll][~oppcoll[:, 1]], 1] = vel[a[:-1][coll][~oppcoll[:, 1]], 1]
            vel[:, :] = newvel[:, :]
            if not np.all((tickdiv % vel) == 0):
                # Speeds changed: recompute the lcm and rescale the
                # current sub-step index so simulated time is preserved.
                p = i / tickdiv
                uniquev = np.unique(np.abs(vel))
                for v in uniquev[(tickdiv % uniquev) != 0]:
                    tickdiv = int(tickdiv * v / np.gcd(tickdiv, v))
                i = np.floor(p * tickdiv)
        i += 1
    # vel -= np.int32(np.ceil(vel / 100))
| ninjafrostpn/PythonProjects | Bending/Bending 3.py | Bending 3.py | py | 5,219 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
... |
42088370430 | from src.depth_transform import *
from src.audio_transform import *
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from src.dataset_utils import _load_audio_file
MAX_DEPTH = 10000
MIN_DEPTH = 0.0
def test_depth_arr(depth):
    """Run *depth* through ``transform_depth`` with a 10000 max-depth cap."""
    return transform_depth(depth, {"max_depth": 10000})
if __name__ == "__main__":
    # Visual sanity check: show the camera frame, its transformed depth map
    # and the audio spectrogram of sample 47 side by side.
    # Image
    IMAGE_EXAMPLE = Image.open("camera_47.jpeg")
    # Depth: load, normalise via test_depth_arr, and wrap as a PIL image
    DEPTH_EXAMPLE = np.load("depth_47.npy")
    DEPTH_EXAMPLE = test_depth_arr(DEPTH_EXAMPLE)
    DEPTH_IMG = Image.fromarray(DEPTH_EXAMPLE)
    # Audio: magnitude spectrogram (power=1), no dB conversion
    params = {"feature_name": "spectrogram", "n_fft": 512, "power": 1, "win_length": 128, "hop_length": 64, "to_db": False}
    AUDIO_EXAMPLE, sr = _load_audio_file("audio_47.wav", sr=44100, max_depth=MAX_DEPTH)
    AUDIO_SPEC = transform_audio(AUDIO_EXAMPLE, feature_extraction_params=params)
    fig, axs = plt.subplots(1, 3)
    im1 = axs[0].imshow(IMAGE_EXAMPLE)
    axs[0].axis('off')
    axs[0].set_title("Original Image")
    # depth is assumed normalised to [0, 1] by test_depth_arr — TODO confirm
    im2 = axs[1].imshow(DEPTH_IMG, vmin=0, vmax=1)
    axs[1].axis('off')
    axs[1].set_title("Depth")
    cbar2 = fig.colorbar(im2, ax=axs[1])
    im3 = axs[2].imshow(AUDIO_SPEC, aspect="auto", origin="lower")
    axs[2].set_title("Spectrogram")
    cbar3 = fig.colorbar(im3, ax=axs[2])
    plt.subplots_adjust(wspace=0.3)
    plt.show()
| Hadiaz1/Batvision-tf | tests/data_tests.py | data_tests.py | py | 1,376 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_n... |
14547146966 | import os
from pprint import pprint
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from app_ml.functionalities.constants import SPARK_WINDOW, MONGO_URL, WINDOW, SAVED_MODEL_PATH, POSTGRES_URL
from app_ml.functionalities.preprocessing import create_test_set
from app_ml.models.RNN import RNN
from sqlalchemy import create_engine, insert, MetaData, Table
from collections import Counter
import pymongo
import tensorflow as tf
import numpy as np
import datetime
DATES = [datetime.datetime(2021, 2, 17, 9, 13, 27), datetime.datetime(2021, 2, 18, 21, 33, 2)]
FILES = [
'opel_corsa_01',
'opel_corsa_02'
# 'peugeot_207_01'
# 'peugeot_207_02'
]
def read_driver_route_id_from_mongo():
    """Return the distinct (driver_id, route_id) pairs found in the
    ``cars.routes`` Mongo collection."""
    routes_collection = pymongo.MongoClient(MONGO_URL)['cars']['routes']
    pipeline = [
        {'$group': {'_id': {'DriverId': '$DriverId', 'RouteId': '$RouteId'}}}
    ]
    pairs = []
    for document in routes_collection.aggregate(pipeline):
        grouped = document['_id']
        pairs.append((grouped['DriverId'], grouped['RouteId']))
    return pairs
def predict_for_file(model, file):
    """Build the windowed test set for *file*, run the model on it and
    return the predicted class index per window."""
    features, labels = create_test_set(WINDOW, file)
    features, _ = model.preprocess_test(features, labels)
    return np.argmax(model.model.predict(features), axis=1)
def make_scores(results):
    """Compute (aggressive_ratio, normal_ratio) from predicted class labels.

    Parameters
    ----------
    results : iterable of int
        Predicted classes, where 0 means "aggressive" and 1 means "normal";
        any other labels are ignored.

    Returns
    -------
    tuple of float
        Fractions of aggressive and normal predictions among the 0/1 labels,
        or (0.0, 0.0) when no 0/1 labels are present (instead of raising
        ZeroDivisionError as the previous implementation did).
    """
    counts = Counter(results)
    # Counter returns 0 for missing keys, so no None-checks are needed.
    aggressive_count = counts[0]
    normal_count = counts[1]
    total_count = aggressive_count + normal_count
    if total_count == 0:
        return 0.0, 0.0
    return aggressive_count / total_count, normal_count / total_count
def make_postgres_data_list(route_id, results):
    """Turn windowed prediction results into row dicts for the ``scores``
    table: each window spans SPARK_WINDOW seconds, the first one starting
    at second 10."""
    rows = []
    window_start = 10
    for window in results:
        window_end = window_start + SPARK_WINDOW - 1
        aggressive_score, normal_score = make_scores(window)
        rows.append({
            'route_id': route_id, 'start_time': window_start, 'end_time': window_end,
            'aggressive_score': aggressive_score, 'normal_score': normal_score
        })
        window_start = window_end + 1
    return rows
def insert_to_postgres_db(route_id, driver_id, date, model, engine, file):
    """Predict driving behaviour for *file* and persist one ``routes`` row
    plus per-window ``scores`` rows into Postgres via *engine*."""
    results = predict_for_file(model, file)
    # Slice predictions into non-overlapping SPARK_WINDOW-sized chunks;
    # a trailing partial chunk (< SPARK_WINDOW items) is dropped.
    spark_windowed_results = [results[i:SPARK_WINDOW+i] for i in range(0, len(results) - SPARK_WINDOW, SPARK_WINDOW)]
    data = make_postgres_data_list(route_id, spark_windowed_results)
    # Reflect the existing table definitions from the database.
    meta = MetaData(engine)
    routes = Table('routes', meta, autoload=True)
    scores = Table('scores', meta, autoload=True)
    routes_stmt = insert(routes).values({'route_id': route_id, 'user_id': driver_id, 'date': date, 'is_active': 0})
    scores_stmt = insert(scores).values(data)
    conn = engine.connect()
    conn.execute(routes_stmt)
    conn.execute(scores_stmt)
    # explicit COMMIT because the raw connection is not in a transaction block
    conn.execute('COMMIT')
    conn.close()
def main():
    """Load (driver, route) ids from Mongo, restore the trained RNN from
    disk, and insert predictions for each file in FILES into Postgres."""
    driver_route_ids = read_driver_route_id_from_mongo()
    # (20, 15) input window shape, 2 output classes — weights loaded below.
    model = RNN((20, 15), 2)
    model.model = tf.keras.models.load_model(SAVED_MODEL_PATH, compile=False)
    engine = create_engine(POSTGRES_URL)
    # assumes driver_route_ids[i] lines up with FILES[i]/DATES[i] — TODO confirm
    for i in range(0, len(FILES)):
        insert_to_postgres_db(driver_route_ids[i][1], driver_route_ids[i][0], DATES[i], model, engine, FILES[i])
    engine.dispose()
    print('Data inserted successfully to PostgresDB')
if __name__ == '__main__':
main() | serapan/DrEYEve | app_ml/data/insert_data_to_postgres.py | insert_data_to_postgres.py | py | 3,331 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "app_ml.functiona... |
3125630169 | from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
import catboost as cb
import xgboost as xgb
import pandas as pd
def gradientBoost(X, y):
    """Grid-search a GradientBoostingRegressor over depth and tree count on
    (X, y); print the best parameters and return the refit best estimator."""
    param_grid = {'max_depth': [1, 2, 3], 'n_estimators': [100, 300, 500, 1000]}
    base_model = GradientBoostingRegressor(random_state=11)
    search = GridSearchCV(estimator=base_model, param_grid=param_grid,
                          scoring='r2', cv=3, return_train_score=True, verbose=2)
    search.fit(X, y)
    print('Best parameters for Gradient Boosting are:', search.best_params_)
    return search.best_estimator_
def catboost(X, y):
    """Grid-search a CatBoostRegressor over learning rate, iterations and
    depth on (X, y); print the best parameters and return the best estimator."""
    param_grid = {'learning_rate': [0.1, 0.5, 0.01, 0.05],
                  'iterations': [100, 300, 500, 1000],
                  'depth': [1, 2]}
    base_model = cb.CatBoostRegressor(silent=True)
    search = GridSearchCV(estimator=base_model, param_grid=param_grid,
                          scoring='r2', cv=3, return_train_score=True, verbose=2)
    search.fit(X, y)
    print('Best parameters for CatBoost are:', search.best_params_)
    return search.best_estimator_
def xgboost(X, y):
    """Grid-search an XGBRegressor over learning rate, tree count, column
    subsampling, depth and L1 regularisation on (X, y); print the best
    parameters and return the refit best estimator."""
    # Fixes vs. the previous version:
    # - 'reg:linear' is deprecated; 'reg:squarederror' is the identical
    #   squared-error objective under its current name.
    # - the kwarg was misspelled 'enable_cateorical', which XGBoost silently
    #   ignored, leaving categorical-feature support disabled.
    model = xgb.XGBRegressor(objective='reg:squarederror', enable_categorical=True)
    params = {'learning_rate': [0.1, 0.5, 0.01, 1], 'n_estimators': [10, 30, 100, 500],
              'colsample_bytree': [0.3, 0.5],
              'max_depth': [1, 2, 3, 4, 5], 'alpha': [0.1, 1, 0.01, 10]}
    search = GridSearchCV(estimator=model, param_grid=params, scoring='r2',
                          cv=3, return_train_score=True, verbose=1)
    search.fit(X, y)
    print('Best parameters for XgBoost are:', search.best_params_)
    return search.best_estimator_
| aamir09/DS2Project | PartB/models/boosting.py | boosting.py | py | 1,456 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sklearn.ensemble.GradientBoostingRegressor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "catboost.CatBoostRegressor",
"line_number": 16,
"usage_typ... |
29376670360 | import time
import unittest
from datetime import datetime
from typing import Tuple
from flask.testing import FlaskClient
from flask_socketio import SocketIOTestClient
from sqlalchemy import select
from app import db
from app import make_app
from app import socket_io
from app.authentication.models import chats, User
from app.chats import Message
from app.config import TestConfig
class SocketIOEventsTestCase(unittest.TestCase):
    """Integration tests for the '/chats/going' Socket.IO namespace:
    connect/disconnect status events, room entry/exit, message exchange
    with persistence, lazy chat creation, paginated history loading, and
    room isolation between unrelated clients."""

    def setUp(self) -> None:
        # Fresh app + schema per test; clear the memoized chat lookups so
        # cached results from a previous test's users cannot leak through.
        self.events_namespace = '/chats/going'
        self.app = make_app(TestConfig)
        self.app_context = self.app.app_context()
        self.app_context.push()
        User.is_chat_between.cache_clear()
        User.get_chat_id_by_users_ids.cache_clear()
        db.create_all()

    def tearDown(self) -> None:
        User.is_chat_between.cache_clear()
        User.get_chat_id_by_users_ids.cache_clear()
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    @staticmethod
    def init_two_clients(client1: FlaskClient, client2: FlaskClient):
        """Initializes two given clients for the next actions with socketio events: fills in their sessions with
        necessary information (registers test_user1/test_user2, logs them in
        and opens a chat page in each direction).
        :param client1: the first flask test client instance
        :param client2: the second one"""
        client1.post('/authentication/register',
                     data={'email': 'test1@gmail.com', 'username': 'test_user1',
                           'name': 'Ann1', 'password1': 'Who am I', 'password2': 'Who am I'},
                     follow_redirects=True)
        client1.post('/authentication/login', data={'email': 'test1@gmail.com',
                                                    'password': 'Who am I'},
                     follow_redirects=True)
        client2.post('/authentication/register',
                     data={'email': 'test2@gmail.com', 'username': 'test_user2',
                           'name': 'Ann2', 'password1': 'Who am I', 'password2': 'Who am I'},
                     follow_redirects=True)
        client2.post('/authentication/login', data={'email': 'test2@gmail.com',
                                                    'password': 'Who am I'})
        client1.get('/chats/begin/test_user2')
        client2.get('/chats/begin/test_user1')

    def get_socket_io_clients(self, *clients: FlaskClient) -> Tuple[SocketIOTestClient, ...]:
        """
        For each given flask client instance return socketio test client instance. The function is created only to make
        tests clearer.
        :param clients: sequence of flask test clients instances
        :type clients: FlaskClient
        :return: tuple of initialized socket io test clients.
        :rtype: tuple[SocketIOTestClient]
        """
        return tuple(
            [socket_io.test_client(self.app, namespace=self.events_namespace, flask_test_client=client) for client in
             clients])

    def test_connect_disconnect(self):
        """Connecting emits a single 'connected' status; disconnect works."""
        socket_io_client = socket_io.test_client(self.app, namespace=self.events_namespace)
        self.assertTrue(socket_io_client.is_connected(self.events_namespace))
        received = socket_io_client.get_received(self.events_namespace)
        self.assertEqual(len(received), 1)
        self.assertEqual(received[0]['name'], 'status')
        self.assertEqual(received[0]['args'], [{'message': 'connected'}])
        self.assertEqual(received[0]['namespace'], self.events_namespace)
        socket_io_client.disconnect(self.events_namespace)
        self.assertFalse(socket_io_client.is_connected(self.events_namespace))

    def test_enter_leave_room(self):
        """Room entry/exit broadcasts status messages to room members."""
        with self.app.test_client() as client1, self.app.test_client() as client2:
            self.init_two_clients(client1, client2)
            socket_io_client1, socket_io_client2 = self.get_socket_io_clients(client1, client2)
            socket_io_client1.emit('enter_room', namespace=self.events_namespace)
            socket_io_client2.emit('enter_room', namespace=self.events_namespace)
            received1 = socket_io_client1.get_received(self.events_namespace)
            received2 = socket_io_client2.get_received(self.events_namespace)
            # client1 sees its own connect, its own entry, and client2's entry
            self.assertEqual(len(received1), 3)
            self.assertEqual(received1[0]['name'], 'status')
            self.assertEqual(received1[0]['args'], [{'message': 'connected'}])
            self.assertEqual(received1[0]['namespace'], self.events_namespace)
            self.assertEqual(received1[1]['name'], 'status')
            self.assertEqual(received1[1]['args'], [{'message': 'Ann1 entered the room'}])
            self.assertEqual(received1[1]['namespace'], self.events_namespace)
            self.assertEqual(received1[2]['name'], 'status')
            self.assertEqual(received1[2]['args'], [{'message': 'Ann2 entered the room'}])
            self.assertEqual(received1[2]['namespace'], self.events_namespace)
            # client2 joined later, so it only sees its own connect and entry
            self.assertEqual(len(received2), 2)
            self.assertEqual(received2[0]['name'], 'status')
            self.assertEqual(received2[0]['args'], [{'message': 'connected'}])
            self.assertEqual(received2[0]['namespace'], self.events_namespace)
            self.assertEqual(received2[1]['name'], 'status')
            self.assertEqual(received2[1]['args'], [{'message': 'Ann2 entered the room'}])
            self.assertEqual(received2[1]['namespace'], self.events_namespace)
            socket_io_client1.emit('leave_room', namespace=self.events_namespace)
            received2 = socket_io_client2.get_received(self.events_namespace)
            self.assertEqual(len(received2), 1)
            self.assertEqual(received2[0]['name'], 'status')
            self.assertEqual(received2[0]['args'], [{'message': 'Ann1 left the room'}])
            self.assertEqual(received2[0]['namespace'], self.events_namespace)

    def test_send_messages(self):
        """'put_data' broadcasts to both peers and persists Message rows."""
        self.assertEqual(len(Message.query.all()), 0)
        with self.app.test_client() as client1, self.app.test_client() as client2:
            self.init_two_clients(client1, client2)
            socket_io_client1, socket_io_client2 = self.get_socket_io_clients(client1, client2)
            socket_io_client1.emit('enter_room', namespace=self.events_namespace)
            socket_io_client2.emit('enter_room', namespace=self.events_namespace)
            # erase status messages
            socket_io_client1.get_received(namespace=self.events_namespace)
            socket_io_client2.get_received(namespace=self.events_namespace)
            first_message_time = time.time()
            second_message_time = time.time()
            socket_io_client1.emit('put_data',
                                   {'message': 'Hello!', 'timestamp_milliseconds': first_message_time * 1000},
                                   namespace=self.events_namespace)
            socket_io_client2.emit('put_data', {'message': 'Hi!', 'timestamp_milliseconds': second_message_time * 1000},
                                   namespace=self.events_namespace)
            received1 = socket_io_client1.get_received(self.events_namespace)
            received2 = socket_io_client2.get_received(self.events_namespace)
            # both clients receive both messages, identically
            self.assertEqual(len(received1), 2)
            self.assertEqual(len(received2), 2)
            self.assertEqual(received1[0], received2[0])
            self.assertEqual(received1[1], received2[1])
            self.assertTrue(received1[0]['name'] == received1[1]['name'] == 'print_message')
            self.assertEqual(received1[0]['args'][0]['message'], 'Hello!')
            self.assertEqual(received1[1]['args'][0]['message'], 'Hi!')
            # both messages persisted with UTC timestamps and correct endpoints
            messages = Message.query.all()
            self.assertEqual(len(messages), 2)
            self.assertEqual(messages[0].text, 'Hello!')
            self.assertEqual(messages[0].datetime_writing, datetime.utcfromtimestamp(first_message_time))
            self.assertEqual(messages[0].sender_id, 1)
            self.assertEqual(messages[0].receiver_id, 2)
            self.assertEqual(messages[1].text, 'Hi!')
            self.assertEqual(messages[1].datetime_writing, datetime.utcfromtimestamp(second_message_time))
            self.assertEqual(messages[1].sender_id, 2)
            self.assertEqual(messages[1].receiver_id, 1)

    def test_create_chat_in_put_data_event(self):
        """The first 'put_data' between two users creates the chat row and
        invalidates the is_chat_between lru_cache."""
        with self.app.test_client() as client1, self.app.test_client() as client2:
            self.init_two_clients(client1, client2)
            socket_io_client1, socket_io_client2 = self.get_socket_io_clients(client1, client2)
            socket_io_client1.emit('enter_room', namespace=self.events_namespace)
            socket_io_client2.emit('enter_room', namespace=self.events_namespace)
            result = db.session.execute(select(chats))
            self.assertEqual(len(result.all()), 0)
            result.close()
            self.assertFalse(User.is_chat_between(1, 2))
            self.assertEqual(User.is_chat_between.cache_info().currsize, 1)
            socket_io_client1.emit('put_data',
                                   {'message': 'test_message', 'timestamp_milliseconds': time.time() * 1000},
                                   namespace=self.events_namespace)
            # sending the first message must clear the stale cached False
            self.assertEqual(User.is_chat_between.cache_info().currsize, 0)
            result = db.session.execute(select(chats))
            self.assertEqual(len(result.all()), 1)
            result.close()
            self.assertTrue(User.is_chat_between(1, 2))
            self.assertTrue(User.is_chat_between(2, 1))
            self.assertEqual(User.is_chat_between.cache_info().currsize, 2)
            User.is_chat_between.cache_clear()
            self.assertTrue(User.is_chat_between(1, 2))
            socket_io_client2.emit('put_data',
                                   {'message': 'test_message2', 'timestamp_milliseconds': time.time() * 1000},
                                   namespace=self.events_namespace)
            # the second message must not create a duplicate chat
            self.assertEqual(User.is_chat_between.cache_info().currsize, 2)
            result = db.session.execute(select(chats))
            self.assertEqual(len(result.all()), 1)
            result.close()
            self.assertTrue(User.is_chat_between(2, 1))
            self.assertEqual(User.is_chat_between.cache_info().currsize, 2)

    def test_get_more_messages(self):
        """'get_more_messages' pages history newest-first, capped at
        MESSAGES_PER_LOAD_EVENT, and only answers the requester."""
        messages_limit = self.app.config['MESSAGES_PER_LOAD_EVENT']
        with self.app.test_client() as client1, self.app.test_client() as client2:
            self.init_two_clients(client1, client2)
            socket_io_client1, socket_io_client2 = self.get_socket_io_clients(client1, client2)
            socket_io_client1.emit('enter_room', namespace=self.events_namespace)
            socket_io_client2.emit('enter_room', namespace=self.events_namespace)
            User.create_chat(1, 2)
            # erase status messages
            socket_io_client1.get_received(namespace=self.events_namespace)
            socket_io_client2.get_received(namespace=self.events_namespace)
            number_of_messages = 15
            # messages '0'..'14' from user 1 to user 2
            db.session.add_all(
                [Message(text=str(figure), sender_id=1, receiver_id=2) for figure in range(number_of_messages)])
            db.session.commit()
            self.assertEqual(len(Message.query.all()), number_of_messages)
            for messages_offset in range(0, number_of_messages):
                socket_io_client1.emit('get_more_messages', {'messages_offset': messages_offset},
                                       namespace=self.events_namespace)
                received1 = socket_io_client1.get_received(self.events_namespace)
                received2 = socket_io_client2.get_received(self.events_namespace)
                # the response goes only to the requesting client
                self.assertEqual(len(received2), 0)
                self.assertEqual(len(received1), 1)
                received_data = received1[0]['args'][0]
                self.assertTrue(received_data['messages_number'] <= messages_limit)
                self.assertEqual(received_data['messages_number'],
                                 number_of_messages - messages_offset if number_of_messages - messages_offset <= messages_limit else messages_limit)
            messages_offset = 10
            socket_io_client1.emit('get_more_messages', {'messages_offset': messages_offset},
                                   namespace=self.events_namespace)
            received1 = socket_io_client1.get_received(self.events_namespace)
            received_messages = received1[0]['args'][0]['messages']
            # sender's view: messages come back newest-first
            for message, message_text in zip(received_messages, reversed(range(
                    number_of_messages - messages_offset if number_of_messages - messages_offset <= messages_limit else messages_limit))):
                self.assertTrue(message['is_current_user'])
                self.assertEqual(message['message_text'], str(message_text))
            messages_offset = 10
            socket_io_client2.emit('get_more_messages', {'messages_offset': messages_offset},
                                   namespace=self.events_namespace)
            received2 = socket_io_client2.get_received(self.events_namespace)
            received_messages = received2[0]['args'][0]['messages']
            # receiver's view: the same messages are not flagged as its own
            for message in received_messages:
                self.assertFalse(message['is_current_user'])

    def test_isolated_clients_chat(self):
        """A third user chatting with user2 must not receive messages
        exchanged between user1 and user2."""
        with self.app.test_client() as client1, self.app.test_client() as client2, self.app.test_client() as client3:
            self.init_two_clients(client1, client2)
            client3.post('/authentication/register',
                         data={'email': 'test3@gmail.com', 'username': 'test_user3',
                               'name': 'Ann3', 'password1': 'Who am I', 'password2': 'Who am I'},
                         follow_redirects=True)
            client3.post('/authentication/login', data={'email': 'test3@gmail.com',
                                                        'password': 'Who am I'},
                         follow_redirects=True)
            client3.get('/chats/begin/test_user2')
            socket_io_client1, socket_io_client2, socket_io_client3 = self.get_socket_io_clients(client1, client2,
                                                                                                 client3)
            socket_io_client1.emit('enter_room', namespace=self.events_namespace)
            socket_io_client2.emit('enter_room', namespace=self.events_namespace)
            socket_io_client3.emit('enter_room', namespace=self.events_namespace)
            # erase status messages
            socket_io_client1.get_received(namespace=self.events_namespace)
            socket_io_client2.get_received(namespace=self.events_namespace)
            socket_io_client3.get_received(namespace=self.events_namespace)
            socket_io_client1.emit('put_data',
                                   {'message': 'test_message', 'timestamp_milliseconds': time.time() * 1000},
                                   namespace=self.events_namespace)
            self.assertEqual(len(socket_io_client1.get_received(self.events_namespace)), 1)
            self.assertEqual(len(socket_io_client2.get_received(self.events_namespace)), 1)
            self.assertEqual(len(socket_io_client3.get_received(self.events_namespace)), 0)
| dmytro-afanasiev/flask-simple-chats | tests/test_socketio_events.py | test_socketio_events.py | py | 15,296 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "app.make_app",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "app.config.TestConfig",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "app.aut... |
43165056383 | #from osgeo import gdal, osr, ogr # Python bindings for GDAL
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import pickle
import json
from rlxutils import subplots
from .utils import pimshow
class Chipset:
    """A folder of chips, where each chip is an ``<id>.npz`` data file
    paired with an ``<id>.metadata.pkl`` file; only ids with both files
    are exposed."""

    def __init__(self, chipset_folder=None, data=None, metadata=None):
        # NOTE(review): 'data' and 'metadata' are unused here -- they seem
        # to mirror Chip's signature; confirm before removing them.
        self.folder = chipset_folder
        self.chip_fnames = os.listdir(chipset_folder)
        chip_ids_npz = [i.split(".")[0] for i in self.chip_fnames if i.endswith(".npz")]
        chip_ids_pkl = [i.split(".")[0] for i in self.chip_fnames if i.endswith("metadata.pkl")]
        # keep the chips with both metadata and data
        self.chip_ids = [i for i in chip_ids_npz if i in chip_ids_pkl]

    def get_chip(self, chip_id):
        """Return the chip with the given id; raise ValueError if unknown."""
        if not chip_id in self.chip_ids:
            raise ValueError(f"{chip_id} does not exist")
        return Chip(self.folder, chip_id)

    def random_chip(self):
        """Return a uniformly random chip from the set."""
        chip_id = self.chip_ids[np.random.randint(len(self.chip_ids))]
        return Chip(self.folder, chip_id)

    def chips(self):
        """Yield every chip in the set, lazily loading one at a time."""
        for chip_id in self.chip_ids:
            yield Chip(self.folder, chip_id)
class Chip:
def __init__(self, chipset_folder, chip_id, data=None, metadata=None):
    """Load a chip from ``<folder>/<id>.npz`` + ``<id>.metadata.pkl``,
    or wrap pre-loaded *data*/*metadata* (both must be given together)."""
    self.chipset_folder = chipset_folder
    self.chip_id = chip_id
    assert (data is None and metadata is None) or (data is not None and metadata is not None), "'data' and 'metadata' must be both set or unset"
    if data is None:
        with np.load(f"{self.chipset_folder}/{self.chip_id}.npz") as my_file:
            self.data = my_file['arr_0']
            # close() is redundant inside the 'with' block but harmless
            my_file.close()
        with open(f"{self.chipset_folder}/{self.chip_id}.metadata.pkl", "rb") as f:
            self.metadata = pickle.load(f)
            f.close()
    else:
        self.data = data
        self.metadata = metadata
def clone(self):
    """Return a copy of this chip with its data array copied and its
    metadata dict shallow-copied."""
    return self.__class__(self.chipset_folder, self.chip_id, self.data.copy(), self.metadata.copy())
def get_varnames(self, exceptions=None):
    """Return all variable names of this chip (time-step, time-pair and
    static), excluding any names listed in *exceptions*.

    The previous signature used a mutable default (``exceptions=[]``);
    ``None`` is the safe equivalent and is backward-compatible.
    """
    exceptions = () if exceptions is None else exceptions
    names = (self.get_timestep_varnames()
             + self.get_timepair_varnames()
             + self.get_static_varnames())
    return [name for name in names if name not in exceptions]
def get_time_varnames(self):
    """Names of all time-dependent variables (time-step + time-pair)."""
    return self.get_timestep_varnames() + self.get_timepair_varnames()
def get_timestep_varnames(self):
    """Sorted unique names of variables stored as 'TS::<name>::<date>'."""
    return list(np.unique([i.split("::")[1] for i in self.metadata['variables'] if i.startswith("TS::")]))
def get_timesteps(self):
    """Sorted unique dates of the 'TS::<name>::<date>' variables."""
    return list(np.unique([i.split("::")[2] for i in self.metadata['variables'] if i.startswith("TS::")]))
def get_timepair_varnames(self):
    """Sorted unique names of variables stored as 'TP::<name>::<d1>_<d2>'."""
    return list(np.unique([i.split("::")[1] for i in self.metadata['variables'] if i.startswith("TP::")]))
def get_timepairs(self):
    """Sorted unique '<d1>_<d2>' date pairs of the 'TP::' variables."""
    return list(np.unique([i.split("::")[2] for i in self.metadata['variables'] if i.startswith("TP::")]))
def get_static_varnames(self):
    """Sorted unique names of variables stored as 'ST::<name>'."""
    return list(np.unique([i.split("::")[1] for i in self.metadata['variables'] if i.startswith("ST::")]))
def apply_with_other(self, other, func):
    """
    Apply *func* element-wise to the data of two chips that expose exactly
    the same variables.

    Parameters
    ----------
    other : Chip
        The second operand; must have the same variable names as *self*.
    func : callable
        ``func(x, y)`` where x, y and the return value are arrays of the
        same shape.

    Returns
    -------
    Chip
        A clone of *self* whose data is ``func(self.data, other.data)``.

    Notes
    -----
    The previous version recomputed the variable lists twice, raised a
    message about variable *count* while actually checking *names*, and
    ended with a dead no-op loop over the metadata; all three are fixed.
    """
    if self.get_varnames() != other.get_varnames():
        raise ValueError("chips must have the same variables")
    result = self.clone()
    result.data = func(self.data, other.data)
    return result
def apply(self, var_name, func, args):
    """
    Apply *func* to the (NaN-cleaned) data of one variable of this chip.

    *func* is called as ``func(data, **args)`` and must return a
    ``(new_data, name_tag)`` tuple; when *name_tag* is truthy, the
    variable is renamed to '<var_name>_<name_tag>' in the returned
    chip's metadata. Raises ValueError if *var_name* is not present.
    """
    try:
        assert(var_name in self.get_varnames())
    # bare except converts the AssertionError into a descriptive ValueError
    except:
        raise ValueError(f'{var_name} not in chip')
    # self.sel() is defined elsewhere in this class; it returns a
    # single-variable sub-chip -- presumably with data copied, TODO confirm.
    r = self.sel([var_name])
    r.data, name_tag = func(np.nan_to_num(r.data), **args)
    if name_tag:
        r.metadata['variables']= [name.replace(var_name,var_name+'_'+name_tag) for name in r.metadata['variables']]
    return r
def apply_across_time(self, func):
    """
    applies a function across the time dimension rendering time variables into static
    returns a new chip

    *func* must be usable with np.apply_over_axes (e.g. np.mean); each
    resulting variable is renamed 'ST::<name>__<func name>'.
    """
    tsvars = self.get_timestep_varnames()
    tpvars = self.get_timepair_varnames()
    selected_vars = tsvars + tpvars
    # reduce each time variable's stack of slices along axis 0
    new_data = np.vstack([np.apply_over_axes(func, self.sel(v).data,0) for v in selected_vars])
    new_metadata = self.metadata.copy()
    # all variables now become static
    new_metadata['variables'] = [f"ST::{i}__{func.__name__}" for i in tsvars] + [f"ST::{i}__{func.__name__}" for i in tpvars]
    # random suffix keeps the derived chip's id distinct from the source chip
    new_chip_id = self.chip_id + f"_{np.random.randint(1000000):07d}"
    new_metadata['chip_id'] = new_chip_id
    return self.__class__(self.chipset_folder, new_chip_id, new_data, new_metadata)
def diff_channels(self, tag1, tag2):
var_names = self.get_varnames()
new_metadata = self.metadata.copy()
new_metadata['variables'] =[]
new_data = []
new_vars = []
for varname in var_names:
if tag1 in varname:
v, idx = self.get_array_idxs(varnames=[varname.replace(tag1, tag2), varname])
new_data.append(np.expand_dims(np.squeeze(self.data[idx[0]]-self.data[idx[1]]),axis=0))
if tag1 in v[0]:
v = v[0].replace(tag1,'diff')
else:
v = v[0].replace(tag2,'diff')
new_metadata['variables'].append(v)
elif tag2 in varname:
pass
else:
v, idx = self.get_array_idxs(varnames=[varname])
new_data.append(np.expand_dims(np.squeeze(self.data[idx]),0))
new_metadata['variables'].append(v[0])
new_data = np.vstack(new_data)
new_chip_id = self.chip_id + f"_{np.random.randint(1000000):07d}"
new_metadata['chip_id'] = new_chip_id
return self.__class__(self.chipset_folder, new_chip_id, new_data, new_metadata)
def get_array_idxs(self, varnames=None, start_date=None, end_date=None):
if varnames is None:
varnames = self.get_varnames()
elif not type(varnames)==list:
varnames = [varnames]
vspecs = self.metadata['variables']
selected_idxs = []
selected_vars = []
for i in range(len(vspecs)):
vspec = vspecs[i]
if vspec.startswith('TS::'):
_,vname,vdate = vspec.split("::")
if vname in varnames\
and (start_date is None or start_date<=vdate)\
and (end_date is None or end_date>=vdate):
selected_idxs.append(i)
selected_vars.append(vspec)
elif vspec.startswith('TP::'):
_,vname,vdate = vspec.split("::")
vdate1, vdate2 = vdate.split("_")
if vname in varnames\
and (start_date is None or (start_date<=vdate1 and start_date<=vdate2))\
and (end_date is None or (end_date>=vdate2 and end_date>=vdate2)):
selected_idxs.append(i)
selected_vars.append(vspec)
elif vspec.startswith('ST::'):
_, vname = vspec.split("::")
if vname in varnames:
selected_idxs.append(i)
selected_vars.append(vspec)
return selected_vars, selected_idxs
def get_array(self, varnames=None, start_date=None, end_date=None):
_, selected_idxs = self.get_array_idxs(varnames, start_date, end_date)
return self.data[selected_idxs]
def plot(self, overlay=None, log=False, **kwargs):
if not 'n_cols' in kwargs:
kwargs['n_cols'] = 5
if not 'usizex' in kwargs:
kwargs['usizex'] = 4
if not 'usizey' in kwargs:
kwargs['usizey'] = 3
for ax,i in subplots(len(self.data), **kwargs):
if log and np.nanmin(self.data[i])>=0:
x = np.log10(self.data[i]+1e-4)
else:
x = self.data[i]
pimshow(x)
if np.sum(overlay):
pimshow(np.squeeze(overlay), alpha=0.2)
plt.colorbar()
varname = self.metadata['variables'][i].split("::")
if len(varname)==2:
tit = varname[1]
else:
tit = f"{varname[1]}\n{varname[2]}"
plt.title(tit)
plt.tight_layout()
def sel(self, varnames=None, start_date=None, end_date=None):
selected_vars, selected_idxs = self.get_array_idxs(varnames, start_date, end_date)
new_data = self.data[selected_idxs]
new_metadata = self.metadata.copy()
new_metadata['variables'] = selected_vars
new_chip_id = self.chip_id + f"_{np.random.randint(1000000):07d}"
new_metadata['chip_id'] = new_chip_id
return self.__class__(self.chipset_folder, new_chip_id, new_data, new_metadata)
def save_as_geotif(self, dest_folder):
from osgeo import gdal, osr, ogr
def getGeoTransform(extent, nlines, ncols):
resx = (extent[2] - extent[0]) / ncols
resy = (extent[3] - extent[1]) / nlines
return [extent[0], resx, 0, extent[3] , 0, -resy]
# Define the data extent (min. lon, min. lat, max. lon, max. lat)
extent = list(self.metadata['bounds'].values()) # South America
# Export the test array to GeoTIFF ================================================
# Get GDAL driver GeoTiff
driver = gdal.GetDriverByName('GTiff')
data = self.data
# Get dimensions
nlines = data.shape[1]
ncols = data.shape[2]
nbands = len(data)
data_type = gdal.GDT_Float32 # gdal.GDT_Float32
# Create a temp grid
#options = ['COMPRESS=JPEG', 'JPEG_QUALITY=80', 'TILED=YES']
grid_data = driver.Create('grid_data', ncols, nlines, nbands, data_type)#, options)
# Write data for each bands
for i in range(len(data)):
grid_data.GetRasterBand(i+1).WriteArray(self.data[i])
# Lat/Lon WSG84 Spatial Reference System
import os
import sys
proj_lib = "/".join(sys.executable.split("/")[:-2]+['share', 'proj'])
os.environ['PROJ_LIB']=proj_lib
srs = osr.SpatialReference()
#srs.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
srs.ImportFromEPSG(int(self.metadata['crs'].split(':')[1]))
# Setup projection and geo-transform
grid_data.SetProjection(srs.ExportToWkt())
grid_data.SetGeoTransform(getGeoTransform(extent, nlines, ncols))
# Save the file
file_name = f'{dest_folder}/{self.chip_id}.tif'
print(f'saved {file_name} with {nbands} bands')
driver.CreateCopy(file_name, grid_data, 0)
# Close the file
driver = None
grid_data = None
# Delete the temp grid
import os
os.remove('grid_data')
#===========================
@staticmethod
def concat_static(chip_list,sufixes=None):
"""
concats two chips containing only static variables
"""
if sufixes is None:
sufixes = [""] * len(chip_list)
assert len(chip_list)==len(sufixes), f"you have {len(chip_list)} chips but {len(sufixes)} sufixes"
c = chip_list[0]
p = sufixes[0]
# all variables must be static
assert len(c.get_time_varnames())==0, "chips can only contain static variables"
r = c.clone()
r.chip_id += "_concat"
r.metadata['variables'] = [f"{i}{p}" for i in r.metadata['variables']]
for i in range(1,len(chip_list)):
c = chip_list[i]
p = sufixes[i]
if c.metadata['bounds'] != r.metadata['bounds']:
raise ValueError("all chips must have the same bounds")
if c.metadata['crs'] != r.metadata['crs']:
raise ValueError("all chips must have the same crs")
r.data = np.vstack([r.data, c.data])
r.metadata['variables'] += [f"{i}{p}" for i in c.metadata['variables']]
if len(np.unique( r.metadata['variables'] )) != sum([len(i.metadata['variables'] ) for i in chip_list] ):
raise ValueError("there were overlapping variable names in the chips. use 'sufixes'")
return r | VMBoehm/SAR-landslide-detection-pretraining | src/datamodules/components/chips.py | chips.py | py | 13,357 | python | en | code | 25 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"l... |
23099937336 | import os
import sys
import urllib.request
import urllib.parse
import datetime
import time
import json
client_id ='GWWUHPgV0uguJWvgsMFu'
client_secret = 'slKycSCDf4'
# url 접속 요청 후 응답리턴함수
def getRequestUrl(url):
req = urllib.request.Request(url)
req.add_header("X-Naver-Client-Id", client_id)
req.add_header('X-Naver-Client-Secret', client_secret)
try:
res = urllib.request.urlopen(req)
if res.getcode() == 200:
print(f"[{datetime.datetime.now()} ] Url Request Success")
return res.read().decode('utf-8')
except Exception as e:
print(e)
print(f"[{datetime.datetime.now()}] Error for URL : {url}")
return None
def getNaverSearch(node, srcText, start, display):
base = 'https://openapi.naver.com/v1/search'
node = f'/{node}.json'
text = urllib.parse.quote(srcText) # url주제어 맞춰서 파싱
parmeters=f'?query={text}&start={start}&diplay={display}'
url = base + node + parmeters
resDecode = getRequestUrl(url)
if resDecode == None:
return None
else:
return json.loads(resDecode)
def getPostData(post, jsonResult, cnt):
title = post['title']
description = post['description']
org_link = post['originallink']
link = post['link']
pDate = datetime.datetime.strptime(post['pubDate'], '%a, %d %b %Y %H:%M:%S +0900')
pDate= pDate.strftime('%Y-%m-%d %H:%M:%S')
jsonResult.append({'cnt':cnt, 'title':title, 'description':description,
'org_link':org_link, 'link':link, 'pDate':pDate})
# 실행최초함수
def main():
node = 'news'
srcText = input('검색어를 입력하세요: ')
cnt = 0
jsonResult= []
jsonRes = getNaverSearch(node, srcText, 1, 50)
total = jsonRes['total']
while ((jsonRes != None) and (jsonRes['display'] != 0)):
for post in jsonRes['items']:
cnt +=1
getPostData(post, jsonResult,cnt)
# 만약 50개를 다돌았다고 치면 51개부터 시작해야하기때문에 기존 display값을 더해준다.
start = jsonRes['start'] + jsonRes['display']
jsonRes = getNaverSearch(node, srcText,start, 50)
print(f'전체 검색 : {total} 건')
# file output
with open(f'./{srcText}_naver_{node}.json',mode='w',encoding='utf-8') as outfile:
jsonFile = json.dumps(jsonResult, indent = 4, sort_keys = True, ensure_ascii=False)
outfile.write(jsonFile)
print(f'가져온 데이터 : {cnt} 건')
print(f'{srcText}_naver_{node}.json SAVED')
if __name__ == '__main__':
main()
| omago123/StudyBigData | day01/naverCrawler.py | naverCrawler.py | py | 2,636 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.request.Request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_nam... |
21683733200 | #!/usr/bin/env python3
import numpy as np
import networkx as nx
import logging
import collections
import os
import math
try:
from PySide2 import QtGui, QtCore
except ImportError:
from PySide6 import QtGui, QtCore
from functools import lru_cache
from origami.core.predict import PredictorType
from origami.core.neighbors import neighbors
from origami.core.math import partition_path
class Pens:
def __init__(self, keys, width=10):
self._pens = dict()
for i, k in enumerate(keys):
color = QtGui.QColor.fromHsv(
20 + 230 * (i / (1 + len(keys))), 200, 250)
pen = QtGui.QPen()
pen.setWidth(width)
pen.setColor(color)
pen.setCapStyle(QtCore.Qt.RoundCap)
self._pens[k] = pen
def get(self, key):
return self._pens[key]
def get_region_classes(predictors):
classes = []
for p in predictors:
if p.type == PredictorType.REGION.name:
for c in p.classes:
if c != "BACKGROUND":
classes.append((p.name, c))
return sorted(classes)
def render_separators(pixmap, separators):
pens = Pens(sorted(p[:2] for p in separators.keys()))
qp = QtGui.QPainter()
qp.begin(pixmap)
try:
qp.setOpacity(0.75)
for line_path, separator in separators.items():
qp.setPen(pens.get(line_path[:2]))
if separator.geom_type != "LineString":
logging.error(
"encountered %s while rendering separator %s" % (
separator.geom_type, line_path))
continue
pts = [QtCore.QPointF(x, y) for x, y in separator.coords]
qp.drawPolyline(pts)
finally:
qp.end()
return pixmap
def block_hsv(classes):
for i, c in enumerate(classes):
yield tuple(c), (255 * (i / (1 + len(classes))), 100, 200)
class LabelBrushes:
def __init__(self, predictors):
self._classes = get_region_classes(predictors)
@lru_cache(maxsize=32)
def brushes(self, hue=0, saturation=0, value=0, style=QtCore.Qt.SolidPattern):
brushes = dict()
for c, (h, s, v) in block_hsv(self._classes):
brushes[c] = QtGui.QBrush(
QtGui.QColor.fromHsv(
(h + hue) % 256, s + saturation, v + value),
style)
return brushes
def get_brush(self, block_path, **kwargs):
classifier, label, block_id = block_path
return self.brushes(**kwargs)[(classifier, label)]
def default_pen(color="black", width=5):
pen = QtGui.QPen()
pen.setWidth(width)
pen.setColor(QtGui.QColor(color))
pen.setCapStyle(QtCore.Qt.RoundCap)
return pen
def render_blocks(pixmap, blocks, *args, **kwargs):
contours = dict((k, b.image_space_polygon) for (k, b) in blocks.items())
return render_contours(pixmap, contours, *args, **kwargs)
_patterns = (
QtCore.Qt.SolidPattern,
QtCore.Qt.Dense1Pattern,
QtCore.Qt.Dense2Pattern,
QtCore.Qt.Dense3Pattern,
QtCore.Qt.Dense4Pattern,
QtCore.Qt.Dense5Pattern
)
def contour_patterns(contours, buffer=-5, threshold=10):
buffered_contours = dict(
(k, v.buffer(buffer)) for k, v in contours.items())
buffered_contours = dict([
(k, c.convex_hull if c.geom_type != "Polygon" else c)
for k, c in buffered_contours.items()])
neighbors_ = neighbors(buffered_contours)
apart = set()
for a, b in neighbors_.edges():
if buffered_contours[a].distance(buffered_contours[b]) > threshold:
apart.add((a, b))
for a, b in apart:
neighbors_.remove_edge(a, b)
return nx.algorithms.coloring.equitable_color(
neighbors_, 1 + max(d for _, d in neighbors_.degree()))
def render_contours(
pixmap, contours, predictors,
brushes=None, transform=None, scale=1, font_scale=1,
get_label=None, alternate=False, edges=None):
if not contours:
return pixmap
if brushes is None:
brushes = LabelBrushes(predictors)
def points(pts):
pts = np.array(pts)
if transform is not None:
pts = transform(pts)
return [QtCore.QPointF(*pt) for pt in (pts * scale)]
if alternate:
patterns = contour_patterns(contours)
else:
patterns = None
qp = QtGui.QPainter()
qp.begin(pixmap)
try:
qp.setOpacity(0.5)
label_path = collections.defaultdict(list)
for i, (block_path, contour) in enumerate(contours.items()):
path, label = get_label(block_path)
if label is not None:
label_path[path[:2]].append((label, block_path))
sorted_contours = dict()
for k in label_path.keys():
sorted_contours[k] = [(x[1], contours[x[1]]) for x in sorted(
label_path[k], key=lambda x: x[0])]
def render_contour(coords):
qp.drawPolygon(points(coords))
for k in sorted_contours.keys():
for i, (block_path, contour) in enumerate(sorted_contours[k]):
if contour.is_empty:
continue
if contour.geom_type not in ("Polygon", "MultiPolygon"):
logging.error(
"encountered %s while rendering contour %s" % (
contour.geom_type, block_path))
continue
if patterns:
style = _patterns[patterns[block_path] % len(_patterns)]
else:
style = QtCore.Qt.SolidPattern
qp.setBrush(brushes.get_brush(
block_path, style=style))
if contour.geom_type == "Polygon":
render_contour(contour.exterior.coords)
elif contour.geom_type == "MultiPolygon":
for geom in contour.geoms:
render_contour(geom.exterior.coords)
else:
raise ValueError(contour.geom_type)
qp.setBrush(QtGui.QBrush(QtGui.QColor("white")))
font = QtGui.QFont("Arial Narrow", 56 * scale * font_scale, QtGui.QFont.Bold)
qp.setFont(font)
fm = QtGui.QFontMetrics(font)
qp.setPen(default_pen(width=5 * scale * font_scale))
nodes = dict()
node_r = 50 * scale * font_scale
for block_path, contour in contours.items():
if contour.is_empty:
continue
p = points([contour.centroid.coords[0]])[0]
path, label = get_label(block_path)
qp.setBrush(brushes.get_brush(block_path, value=50))
qp.setOpacity(0.8)
qp.drawEllipse(p, node_r, node_r)
if edges:
nodes[block_path] = p
qp.setOpacity(1)
# flags=QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter does
# not work. fix it manually.
label_str = label if isinstance(label, str) else str(label)
w = fm.horizontalAdvance(label_str)
qp.drawText(p.x() - w / 2, p.y() + fm.descent(), label_str)
if edges:
qp.setOpacity(0.8)
qp.setPen(default_pen(width=10 * scale))
for p, q in edges:
coords = [nodes[p], nodes[q]]
qp.drawPolyline(coords)
finally:
qp.end()
return pixmap
def render_arrows(qp, path, pos="center", scale=1):
theta = 45
d = 25 * scale
for (x1, y1), (x2, y2) in zip(path, path[1:]):
dx = x2 - x1
dy = y2 - y1
phi = math.atan2(dy, dx)
phi1 = phi + (90 + theta) * (math.pi / 180)
phi2 = phi - (90 + theta) * (math.pi / 180)
if pos == "begin":
ax, ay = x1, x2
elif pos == "end":
ax, ay = x2, y2
elif pos == "center":
ax = (x1 + x2) / 2
ay = (y1 + y2) / 2
else:
raise ValueError(pos)
qp.drawPolyline([
QtCore.QPointF(ax + d * math.cos(phi1), ay + d * math.sin(phi1)),
QtCore.QPointF(ax, ay),
QtCore.QPointF(ax + d * math.cos(phi2), ay + d * math.sin(phi2))
])
def render_lines(
pixmap, lines, predictors, scale=1, font_scale=1,
get_label=None, show_vectors=False):
if not lines:
return pixmap
brushes = LabelBrushes(predictors)
qp = QtGui.QPainter()
qp.begin(pixmap)
try:
black_pen = default_pen(width=5 * scale)
red_pen = default_pen("#FFA500", width=7 * scale)
for i, (line_path, line) in enumerate(lines.items()):
geom_type = line.image_space_polygon.geom_type
if geom_type != "Polygon":
logging.error("encountered %s as line geometry" % geom_type)
continue
qp.setBrush(brushes.get_brush(line_path[:3], value=(i % 2) * 50))
qp.setPen(black_pen)
qp.setOpacity(0.5)
poly = QtGui.QPolygonF()
coords = np.array(line.image_space_polygon.exterior.coords) * scale
for x, y in coords:
poly.append(QtCore.QPointF(x, y))
qp.drawPolygon(poly)
if show_vectors:
p1, p2 = line.baseline
p1 = np.array(p1) * scale
p2 = np.array(p2) * scale
line_info = line.info
tess_data = line_info["tesseract_data"]
up = np.array(line_info["up"])
lh = abs(tess_data["height"]) - abs(tess_data["ascent"])
up = up * (scale * lh / np.linalg.norm(up))
qp.setOpacity(0.9)
qp.setPen(red_pen)
qp.drawPolyline([QtCore.QPointF(*p1), QtCore.QPointF(*p2)])
m = (np.array(p1) + np.array(p2)) / 2
qp.drawPolyline([QtCore.QPointF(*m), QtCore.QPointF(*(m + up))])
render_arrows(qp, [m, m + up], "end", scale=scale)
if get_label:
font = QtGui.QFont("Arial Narrow", 24 * scale * font_scale, QtGui.QFont.Bold)
qp.setFont(font)
fm = QtGui.QFontMetrics(font)
qp.setPen(default_pen(width=5 * scale * font_scale))
node_r = 25 * scale * font_scale
for i, (line_path, line) in enumerate(lines.items()):
x, y = line.image_space_polygon.centroid.coords[0]
p = QtCore.QPointF(x * scale, y * scale)
path, label = get_label(line_path)
qp.setBrush(brushes.get_brush(line_path[:3], value=50))
qp.setOpacity(0.8)
qp.drawEllipse(p, node_r, node_r)
qp.setOpacity(1)
# flags=QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter does
# not work. fix it manually.
label_str = label if isinstance(label, str) else str(label)
w = fm.horizontalAdvance(label_str)
qp.drawText(p.x() - w / 2, p.y() + fm.descent(), label_str)
finally:
qp.end()
return pixmap
def render_warped_line_paths(pixmap, lines, predictors, resolution=0.1, opacity=0.9):
classes = get_region_classes(predictors)
pens = Pens(classes)
qp = QtGui.QPainter()
qp.begin(pixmap)
try:
qp.setOpacity(opacity)
for i, (line_path, line) in enumerate(lines.items()):
#if line.confidence < 0.5:
# continue # ignore
classifier, label, block_id, line_id = line_path
path, height = line.warped_path(resolution)
pen = pens.get((classifier, label))
pen.setWidth(int(height / 3))
qp.setPen(pen)
poly = QtGui.QPolygonF()
for x, y in path:
poly.append(QtCore.QPointF(x, y))
qp.drawPolyline(poly)
finally:
qp.end()
return pixmap
def render_warped_line_confidence(pixmap, lines):
qp = QtGui.QPainter()
qp.begin(pixmap)
try:
font = QtGui.QFont("Arial Narrow", 48, QtGui.QFont.Bold)
qp.setFont(font)
fm = QtGui.QFontMetrics(font)
for i, (line_path, line) in enumerate(lines.items()):
#if line.confidence < 0.5:
# continue # ignore
path, height = line.warped_path(0.1)
qp.setOpacity(1)
if line.confidence < 0.75:
qp.setPen(default_pen("red"))
label = "%.2f" % line.confidence
w = fm.horizontalAdvance(label)
qp.drawText(
np.mean(path[:, 0]) - w / 2,
np.mean(path[:, 1]) + fm.descent(), label)
finally:
qp.end()
return pixmap
def render_paths(
pixmap, columns,
color="blue", opacity=0.5,
show_dir=False, scale=1):
if not columns:
return pixmap
qp = QtGui.QPainter()
qp.begin(pixmap)
try:
qp.setOpacity(opacity)
qp.setPen(default_pen(color, 10))
for path in columns:
path = np.array(path) * scale
poly = QtGui.QPolygonF()
for x, y in path:
poly.append(QtCore.QPointF(x, y))
qp.drawPolyline(poly)
if show_dir:
for part in partition_path(path, 200):
render_arrows(qp, part, "center", scale=scale)
finally:
qp.end()
return pixmap
| poke1024/origami | origami/batch/annotate/utils.py | utils.py | py | 11,088 | python | en | code | 69 | github-code | 36 | [
{
"api_name": "PySide6.QtGui.QColor.fromHsv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "PySide6.QtGui.QColor",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtGui",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "... |
5187515239 | from django.urls import path
from.import views
app_name ="hospital"
urlpatterns = [
path('',views.index,name="index"),
path('about',views.about,name="about"),
path('contact',views.contact,name="contact"),
path('appointment',views.appointment,name="appointment"),
path('doctorprofile',views.doctorprofile,name="doctorprofile"),
path('single',views.single,name="single"),
path('gallery',views.gallery,name="gallery"),
path('staffprofile',views.staffprofile,name="staffprofile"),
path('user_register', views.user_register, name="user_register"),
path('user_login', views.user_login, name="user_login"),
path('user_logout', views.user_logout, name="user_logout"),
path('user_welcome', views.user_welcome, name="user_welcome"),
path('profile', views.profile, name="profile"),
path('doctor1', views.doctor1, name="doctor1"),
path('doctor2', views.doctor2, name="doctor2"),
path('doctor3', views.doctor3, name="doctor3"),
path('doctor4', views.doctor4, name="doctor4"),
path('doctor5', views.doctor5, name="doctor5"),
path('doctor6', views.doctor6, name="doctor6"),
path('doctor7', views.doctor7, name="doctor7"),
path('doctor8', views.doctor8, name="doctor8"),
path('doctor9', views.doctor9, name="doctor9"),
path('doctor10', views.doctor10, name="doctor10"),
path('doctor11', views.doctor11, name="doctor11"),
path('doctor12', views.doctor12, name="doctor12"),
path('doctor13', views.doctor13, name="doctor13"),
path('doctor14', views.doctor14, name="doctor14"),
path('patientform', views.patientform, name="patientform"),
path('auth', views.auth, name="auth"),
path('authL', views.authL, name="authL"),
path('authR', views.authR, name="authR"),
path('thankyou', views.thankyou, name="thankyou"),
] | pythonhere/web | hospital/urls.py | urls.py | py | 1,823 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
26763695977 | import logging
import pandas as pd
import numpy as np
from .mapping import big_map, pivot_result_to_one_map
from .group_columns import full_id_vars, lateralisation_vars
from .melt_then_pivot_query import melt_then_pivot_query
# main function is QUERY_LATERALISATION
def gifs_lat(gif_lat_file):
"""
factor function. opens the right/left gif parcellations from excel and extracts the right/left gifs as series/list.
"""
gifs_right = gif_lat_file.loc[gif_lat_file['R'].notnull(), 'R'].copy()
gifs_left = gif_lat_file.loc[gif_lat_file['L'].notnull(), 'L'].copy()
return gifs_right, gifs_left
def summarise_overall_lat_values(df_or_row,
side_of_symptoms_signs,
pts_dominant_hemisphere_R_or_L,
Right=0,
Left=0,
**kwargs,
):
"""
Factor function for Q_L. Calculated IL, CL, DomH and NonDomH lateralisations.
"""
if 'IL' not in kwargs:
IL = df_or_row['IL'].sum()
CL = df_or_row['CL'].sum()
DomH = df_or_row['DomH'].sum()
NonDomH = df_or_row['NonDomH'].sum()
# BL =df_or['BL (Non-lateralising)'].sum()
else:
IL = kwargs['IL']
CL = kwargs['CL']
DomH = kwargs['DomH']
NonDomH = kwargs['NonDomH']
# pt input
if side_of_symptoms_signs == 'R':
Right += IL
Left += CL
elif side_of_symptoms_signs == 'L':
Right += CL
Left += IL
if pts_dominant_hemisphere_R_or_L:
if pts_dominant_hemisphere_R_or_L == 'R':
Right += DomH
Left += NonDomH
elif pts_dominant_hemisphere_R_or_L == 'L':
Right += NonDomH
Left += DomH
return Right, Left
def QUERY_LATERALISATION_GLOBAL(semiology_term, inspect_result, df, one_map, gif_lat_file,
side_of_symptoms_signs=None,
pts_dominant_hemisphere_R_or_L=None,
normalise_lat_to_loc=False):
"""
After obtaining inspect_result and clinician's filter, can use this function to determine
lateralisation.
Run this after QUERY_SEMIOLOGY OR QUERY_INTERSECTION_TERMS
inspect_result may not have a lateralising column (if all were NaNs)
Whereas QUERY_LATERALISATION goes through row by row, this is global.
---
> inspect_result is obtained from QUERY_SEMIOLOGY OR QUERY_INTERSECTION_TERMS
> df as per pivot_result_to_pixel_intensities's df
> side_of_symptoms_signs: 'R' or 'L' - side of symptoms/signs on limbs
> pts_dominant_hemisphere_R_or_L: if known from e.g. fMRI language 'R' or 'L'
>> gifs_not_lat is the same as localising_only
>> lat_only_Right/Left lateralising only data
returns:
all_combined_gifs: similar in structure to output of pivot_result_to_one_map (final step),
# s rather than pixel intensity.
but in this case, the output column is pt
num_QL_lat: Lateralising Datapoints relevant to query {semiology_term}.
Should be exactly the same as num_query_lat returned by QUERY_SEMIOLOGY.
num_QL_CL: Datapoints that lateralise contralateral to the semiology query {semiology_term}
num_QL_IL: Datapoints that lateralise ipsilaterally to the semiology query {semiology_term}
num_QL_BL: Study reports the datapoint as being Bilateral. Non-informative and not utilised.
num_QL_DomH: Semiology datapoints lateralising to the Dominant Hemisphere
num_QL_NonDomH: Semiology datapoints lateralising to Non-Dominant Hemisphere
---
NB theoretically supports normalise_lat_to_loc but hasn't been tested
Alim-Marvasti Feb 2021
"""
pd.options.mode.chained_assignment = 'raise'
df = df.copy()
logging.debug(f'\n\n Global Lateralisation enabled.')
# -------------LOTS OF CHECKS-------------
# ensure there is patient's lateralised signs and check dominant known or not
if not side_of_symptoms_signs and not pts_dominant_hemisphere_R_or_L:
# print('Please note you must determine at least one of side_of_symptoms_signs or')
# print('pts_dominant_hemisphere_R_or_L keyword arguments for lateralised data extraction.')
no_lateralising_data = True
# check there is lateralising value
try:
num_QL_lat = inspect_result['Lateralising'].sum()
if num_QL_lat > 0:
no_lateralising_data = False
logging.debug(f'\n\nLateralisation based on: {num_QL_lat.sum()} datapoints')
else:
# no lateralising data
no_lateralising_data = True
# num_QL_lat = None
# return None, None, None, None, None, None, None
except KeyError:
# logging.debug(f'No Lateralising values found for this query of the database.')
no_lateralising_data = True
# num_QL_lat = None
# return None, None, None, None, None, None, None
lat_vars = [i for i in lateralisation_vars() if i not in ['Lateralising']]
# check that the lateralising columns isn't null where it shouldn't be i.e. CL/IL/DomH/NonDomH not null:
# but not 'BL (Non-lateralising)'
# first ensure other columns all feature in this inspect_result:
inspect_result2 = inspect_result.copy()
for col in lat_vars:
if col not in inspect_result2.columns:
inspect_result2[col] = np.nan
# now can check lateralising columns isn't null where it shouldn't be:
missing_lat = inspect_result2.loc[(inspect_result2['CL'].notnull()) |
(inspect_result2['IL'].notnull()) |
(inspect_result2['DomH'].notnull()) |
(inspect_result2['NonDomH'].notnull()), :].copy()
missing_lat_null_mask = missing_lat['Lateralising'].isnull()
if not missing_lat_null_mask.all():
# logging.debug('\nNo missing Lateralising data points.')
pass
else:
logging.debug(
'The inspect_result lat col has NaNs/zero where it should not: autofilled')
df_of_missing_lats = missing_lat.loc[missing_lat_null_mask].copy()
df.loc[df_of_missing_lats.index, 'Lateralising'] = df_of_missing_lats[[
'CL', 'IL', 'DomH', 'NonDomH']].sum(axis=1)
# check columns exist (not removed in preceding notnull steps from other functions):
for col in lat_vars:
if col not in inspect_result.columns:
inspect_result[col] = 0
# -------------CHECKS END-------------
# summarise lat values
IL = inspect_result['IL']
CL = inspect_result['CL']
DomH = inspect_result['DomH']
NonDomH = inspect_result['NonDomH']
BL = inspect_result['BL (Non-lateralising)']
num_QL_CL = CL.sum()
num_QL_IL = IL.sum()
num_QL_BL = BL.sum()
num_QL_DomH = DomH.sum()
num_QL_NonDomH = NonDomH.sum()
total_QL_lat = num_QL_CL+num_QL_IL+num_QL_DomH+num_QL_NonDomH
logging.debug(f'\n\nOverall Contralateral: {num_QL_CL} datapoints')
logging.debug(f'Ipsilateral: {num_QL_IL} datapoints')
logging.debug(f'Bilateral/Non-lateralising: {num_QL_BL} datapoints. This is not utilised.')
logging.debug(f'Dominant Hemisphere: {num_QL_DomH} datapoints')
logging.debug(f'Non-Dominant Hemisphere: {num_QL_NonDomH} datapoints')
logging.debug(f'lateralising col sum: {num_QL_lat}. total_QL_lat: {total_QL_lat}.')
# Global initialisation:
gifs_right, gifs_left = gifs_lat(gif_lat_file)
# map localisations to gif parcellations all in one go (not by row)
pivot_result = melt_then_pivot_query(df, inspect_result, semiology_term)
all_combined_gifs = pivot_result_to_one_map(pivot_result, one_map)
# convert to binary R vs L values
Right, Left = \
summarise_overall_lat_values(inspect_result, side_of_symptoms_signs, pts_dominant_hemisphere_R_or_L)
Total = Right+Left
if Total == 0:
no_lateralising_data = True # in case Localising col >0 but this still zero
if Right == Left:
Right_equal_Left = True
elif Right != Left:
Right_equal_Left = False
# remove NaNs to allow div by 2 and multiplication by RR_norm:
all_combined_gifs.fillna(value=0, inplace=True)
# -------------Scenario 1: No lateralising data or equal R and L---------------
# the localising values should be split equally in half between the right and left GIFs
# this is different to the default behaviour of the original QUERY_LATERALISTION, but more intuitive
if no_lateralising_data or Right_equal_Left:
all_combined_gifs['pt #s'] = all_combined_gifs['pt #s']/2
# -------------END---------------------------------------------------------
# If lateralising data, find lowest value of R or L, proprotions and RR and RR_norm:
elif not no_lateralising_data and not Right_equal_Left:
lower_postn = np.argmin([Right, Left])
if lower_postn == 0:
isin_lower = gifs_right # reduce right sided intensities/pt #s
isin_higher = gifs_left
elif lower_postn == 1:
isin_lower = gifs_left
isin_higher = gifs_right
lower_value = [Right, Left][lower_postn]
higher_value = [Right, Left]
higher_value.remove(lower_value)
RR = lower_value / Total
OR = lower_value / higher_value
# now should be able to use above to lateralise the localising gif parcellations:
# if there are 100 localisations in one row, and only 1 IL And 3 CL, it would be too much
# to say the IL side gets one third of the CL side as number of lat is too low
# hence normalise by dividing by proportion_lateralising (which is between (0,1])
# set the scale of influence of lateralisation on the gif parcellations
# in case there are missing laterlisations vs CL/IL/Dom/NonDom numbers, use total_QL_lat:
proportion_lateralising = total_QL_lat / inspect_result['Localising'].sum()
if normalise_lat_to_loc == True:
# see comments on section above about why we should normalise
RR_norm = RR * proportion_lateralising
if RR_norm > 1:
RR_norm = 1
logging.debug('normalised RR capped at 1: lateralising > localising data')
elif normalise_lat_to_loc == False:
# default counter argument: clinically we treat lat and loc entirely separately
RR_norm = RR
# -------------Scenario 2: Unequal lateralising data: RR_norm and 1-RR_norm---------------
df_lower_lat_to_be_reduced = all_combined_gifs.loc[all_combined_gifs['Gif Parcellations'].isin(list(isin_lower))].copy()
# now make these values lower by a proportion = RR_norm (in this case RR_norm = RR as denom is 1)
reduce_these = df_lower_lat_to_be_reduced.loc[:, 'pt #s'].copy()
df_lower_lat_to_be_reduced.loc[:, 'pt #s'] = RR_norm * reduce_these
# re attribute these corrected reduced lateralised values to the entire row's data:
all_combined_gifs.loc[df_lower_lat_to_be_reduced.index, :] = df_lower_lat_to_be_reduced
# now repeat the above steps for the higher values i.e. contralateral side
df_higher_lat_to_be_reduced = all_combined_gifs.loc[all_combined_gifs['Gif Parcellations'].isin(list(isin_higher))].copy()
reduce_these = df_higher_lat_to_be_reduced.loc[:, 'pt #s'].copy()
df_higher_lat_to_be_reduced.loc[:, 'pt #s'] = (1-RR_norm) * reduce_these
all_combined_gifs.loc[df_higher_lat_to_be_reduced.index, :] = df_higher_lat_to_be_reduced
# -------------END----------------------------------------------
# pivot_table the values
fixed = all_combined_gifs.pivot_table(
columns='Gif Parcellations', values='pt #s', aggfunc='sum')
fixed2 = fixed.melt(value_name='pt #s')
fixed2.insert(0, 'Semiology Term', np.nan)
all_combined_gifs = fixed2
all_combined_gifs
return (all_combined_gifs,
num_QL_lat, num_QL_CL, num_QL_IL, num_QL_BL, num_QL_DomH, num_QL_NonDomH)
def QUERY_LAT_GLOBAL_BAYESIANPOSTERIOR(all_combined_gifs,
                                       num_QL_lat, num_QL_CL, num_QL_IL, num_QL_BL, num_QL_DomH, num_QL_NonDomH,
                                       gif_lat_file,
                                       side_of_symptoms_signs=None,
                                       pts_dominant_hemisphere_R_or_L=None,
                                       normalise_lat_to_loc=False):
    """
    After obtaining the symmetric posterior-TS only estimate, this function applies global lateralisation.
    This has to be done separately as the posterior-from-TS uses cached data, then we can't run QUERY_LATERALISATION_GLOBAL separately
    because it won't be able to use the bayes rule from cached results.
    i.e., the source of all_combined_gifs argument and the num_QL_lat etc are separate.
    This is just an adapted version of QUERY_LATERALISATION_GLOBAL.
    > all_combined_gifs as pd.DataFrame THIS IS ALTERED IN PLACE AND CHANGES PROPAGATE
    returns as QUERY_LATERALISATION_GLOBAL but probailities not pt #s despite the misnomer in the col names below
    ---
    NB should factorise both functions in future.
    Alim-Marvasti March 2021
    """
    logging.debug(f'\n\n Bayesian Global Lateralisation enabled.')
    # beacuse all_combined_gifs argument comes from Psoteroir_only_cachche.py df_to_dict_like_allcombinedgifs()
    # whereas the previous versions of Q_L calculated it using pivot_result_to_one_map()
    # so need to make cols the same
    if 'pt #s' not in all_combined_gifs.columns:
        all_combined_gifs.rename(columns={0 : 'pt #s'}, inplace=True)
    # -------------A FEW CHECKS-------------
    # ensure there is patient's lateralised signs and check dominant known or not
    if not side_of_symptoms_signs and not pts_dominant_hemisphere_R_or_L:
        no_lateralising_data = True
    # check there is lateralising value
    # NOTE(review): this check overrides the flag set above whenever num_QL_lat > 0
    if num_QL_lat > 0:
        no_lateralising_data = False
        # assumes num_QL_lat supports .sum() (pandas Series/column) — TODO confirm caller type
        logging.debug(f'\n\n(Bayesian) Global Lateralising data: {num_QL_lat.sum()} datapoints')
    else:
        # no lateralising data
        no_lateralising_data = True
    # summarise lat values
    # total of the four directional lateralising categories (BL is deliberately excluded)
    total_QL_lat = num_QL_CL + num_QL_IL + num_QL_DomH + num_QL_NonDomH
    logging.debug(f'\n\nBayesian values carried over: \nOverall Contralateral= {num_QL_CL} datapoints')
    logging.debug(f'Ipsilateral: {num_QL_IL} datapoints')
    logging.debug(f'Bilateral/Non-lateralising: {num_QL_BL} datapoints. This is not utilised.')
    logging.debug(f'Dominant Hemisphere: {num_QL_DomH} datapoints')
    logging.debug(f'Non-Dominant Hemisphere: {num_QL_NonDomH} datapoints')
    logging.debug(f'lateralising col sum: {num_QL_lat}. total_QL_lat: {total_QL_lat}.')
    # Global initialisation:
    # gifs_right/gifs_left: collections of GIF parcellation indices per hemisphere
    gifs_right, gifs_left = gifs_lat(gif_lat_file)
    # convert to binary R vs L values
    Right, Left = \
        summarise_overall_lat_values(all_combined_gifs,
                                     side_of_symptoms_signs,
                                     pts_dominant_hemisphere_R_or_L,
                                     IL=num_QL_IL,
                                     CL=num_QL_CL,
                                     DomH=num_QL_DomH,
                                     NonDomH=num_QL_NonDomH,
                                     )
    Total = Right+Left
    if Total == 0:
        no_lateralising_data = True
    if Right == Left:
        Right_equal_Left = True
    elif Right != Left:
        Right_equal_Left = False
    # remove NaNs to allow div by 2 and multiplication by RR_norm:
    all_combined_gifs.fillna(value=0, inplace=True)
    # -------------Scenario 1: No lateralising data or equal R and L---------------
    # the localising values should be split equally in half between the right and left GIFs
    # this is different to the default behaviour of the original QUERY_LATERALISTION, but more intuitive
    if no_lateralising_data or Right_equal_Left:
        logging.debug('\n\nMEGA Q_L_G_B: no_lateralising_data or Right_equal_Left')
        all_combined_gifs['pt #s'] = all_combined_gifs['pt #s']/2
    # -------------END---------------------------------------------------------
    # If lateralising data, find lowest value of R or L, proprotions and RR and RR_norm:
    elif not no_lateralising_data and not Right_equal_Left:
        # index 0 means Right is the smaller side, 1 means Left
        lower_postn = np.argmin([Right, Left])
        if lower_postn == 0:
            isin_lower = gifs_right # reduce right sided intensities/pt #s
            isin_higher = gifs_left
        elif lower_postn == 1:
            isin_lower = gifs_left
            isin_higher = gifs_right
        lower_value = [Right, Left][lower_postn]
        higher_value = [Right, Left]
        higher_value.remove(lower_value)
        # RR: relative rate of the weaker side, in (0, 0.5)
        RR = lower_value / Total
        # NOTE(review): higher_value is still a one-element *list* here; this division
        # only works if lower_value is a numpy scalar (broadcasting) — confirm, and
        # note OR is not used afterwards anyway.
        OR = lower_value / higher_value
        # now should be able to use above to lateralise the localising gif parcellations:
        # if there are 100 localisations in one row, and only 1 IL And 3 CL, it would be too much
        # to say the IL side gets one third of the CL side as number of lat is too low
        # hence normalise by dividing by proportion_lateralising (which is between (0,1])
        # # set the scale of influence of lateralisation on the gif parcellations
        # # in case there are missing laterlisations vs CL/IL/Dom/NonDom numbers, use total_QL_lat:
        # # this normalising lat to loc isn't yet used and if required for Bayesian posterior, need to ensure we pass it in as a value num_QL_LOC
        if normalise_lat_to_loc == True:
            # see comments on section above about why we should normalise
            raise Exception("normalising lateralisation to localisation isn't yet supported")
            # NOTE(review): everything below the raise is unreachable, and num_QL_LOC
            # is undefined in this scope — kept as a sketch for future support.
            proportion_lateralising = total_QL_lat / num_QL_LOC
            RR_norm = RR * proportion_lateralising
            if RR_norm > 1:
                RR_norm = 1
                logging.debug('normalised RR capped at 1: lateralising > localising data')
        elif normalise_lat_to_loc == False:
            # default counter argument: clinically we treat lat and loc entirely separately
            RR_norm = RR
        logging.debug(f'\n\nMEGA Q_LAT_GLOBAL\n\tRR_norm - \t{RR_norm}')
        # -------------Scenario 2: Unequal lateralising data: RR_norm and 1-RR_norm---------------
        df_lower_lat_to_be_reduced = all_combined_gifs.loc[all_combined_gifs.index.isin(list(isin_lower))].copy()
        # now make these values lower by a proportion = RR_norm (in this case RR_norm = RR as denom is 1)
        reduce_these = df_lower_lat_to_be_reduced.loc[:, 'pt #s'].copy()
        df_lower_lat_to_be_reduced.loc[:, 'pt #s'] = RR_norm * reduce_these
        # re attribute these corrected reduced lateralised values to the entire row's data:
        all_combined_gifs.loc[df_lower_lat_to_be_reduced.index, :] = df_lower_lat_to_be_reduced
        # now repeat the above steps for the higher values i.e. contralateral side
        df_higher_lat_to_be_reduced = all_combined_gifs.loc[all_combined_gifs.index.isin(list(isin_higher))].copy()
        reduce_these = df_higher_lat_to_be_reduced.loc[:, 'pt #s'].copy()
        df_higher_lat_to_be_reduced.loc[:, 'pt #s'] = (1-RR_norm) * reduce_these
        all_combined_gifs.loc[df_higher_lat_to_be_reduced.index, :] = df_higher_lat_to_be_reduced
    # -------------END----------------------------------------------
    # # pivot_table the values
    # fixed = all_combined_gifs.pivot_table(
    #     columns='Gif Parcellations', values='pt #s', aggfunc='sum')
    # fixed2 = fixed.melt(value_name='pt #s')
    # fixed2.insert(0, 'Semiology Term', np.nan)
    # all_combined_gifs = fixed2
    logging.debug(f'\n\n!!Bayesian Global lat returns: all_combined_gifs = {all_combined_gifs}')
return all_combined_gifs | thenineteen/Semiology-Visualisation-Tool | mega_analysis/crosstab/mega_analysis/QUERY_LATERALISATION_GLOBAL.py | QUERY_LATERALISATION_GLOBAL.py | py | 19,998 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "pandas.options",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "group_columns.late... |
12991191723 | import os
from telegram.ext import Updater
from telegram.error import BadRequest, Unauthorized
# Telegram bot wiring: the token comes from the environment; the Updater builds
# the dispatcher, from which we grab the shared Bot client used by send_message.
TOKEN = os.environ.get('TELEGRAM_TOKEN')
updater = Updater(token=TOKEN)
bot = updater.dispatcher.bot
def send_message(chat_id: int, text: str):
    """Deliver *text* to the Telegram chat *chat_id*.

    Returns True on success. Returns False when the API rejects the request
    (BadRequest) or when the bot is not authorised for that chat
    (Unauthorized — additionally reported on stdout).
    """
    try:
        bot.send_message(chat_id=chat_id, text=text)
    except BadRequest:
        return False
    except Unauthorized:
        print(f'Unauthorized: {chat_id}')
        return False
    return True
| OpenSUTD/evs-notifications | apis/telemsg/src/bot.py | bot.py | py | 456 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.Updater",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "telegram.error.BadRe... |
20298727355 | from flask import Flask, render_template, request, redirect, url_for, flash
from flask_sqlalchemy import SQLAlchemy
# Flask application backed by a local SQLite database (weight.db).
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///weight.db'
db = SQLAlchemy(app)
class WeightEntry(db.Model):
    """A single logged body-weight measurement."""
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    weight = db.Column(db.Float, nullable=False)  # weight value; units unspecified — presumably kg
@app.route('/')
def index():
    """Render the landing page listing every stored weight entry."""
    entries = WeightEntry.query.all()
    return render_template('index.html', weight_entries=entries)
@app.route('/add_weight', methods=['POST'])
def add_weight():
    """Handle the weight-submission form.

    Validates that the posted 'weight' field is a positive number, stores a
    new WeightEntry, flashes a status message, and redirects to the index.
    """
    try:
        # request.form.get returns None when the field is absent, and
        # float(None) raises TypeError (not ValueError) — catch both so a
        # missing field flashes an error instead of producing a 500.
        weight = float(request.form.get('weight'))
        if weight <= 0:
            flash('Вага повинна бути більше 0', 'danger')
        else:
            new_entry = WeightEntry(weight=weight)
            db.session.add(new_entry)
            db.session.commit()
            flash('Вага успішно додана', 'success')
    except (TypeError, ValueError):
        flash('Некоректні дані введені', 'danger')
    return redirect(url_for('index'))
if __name__ == '__main__':
    # NOTE(review): under Flask-SQLAlchemy 3.x, create_all() must run inside an
    # application context (`with app.app_context():`) — confirm the installed
    # version; under 2.x this works as written.
    db.create_all()
    # secret_key is required for flash() messages to work.
    app.secret_key = 'your_secret_key'
    app.run(debug=True)
| kozhydlo/Weight-tracking | main.py | main.py | py | 1,176 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.req... |
19415250824 | import azureml.core
from azureml.core import Workspace, Experiment
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.compute import ComputeTarget, DatabricksCompute
from azureml.exceptions import ComputeTargetException
from azureml.pipeline.steps import PythonScriptStep
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.core.dataset import Dataset
from azureml.pipeline.core.graph import PipelineParameter
from azureml.core.runconfig import RunConfiguration, CondaDependencies
import os
from azureml.core.authentication import AzureCliAuthentication
# Authenticate using the credentials cached by `az login`.
cli_auth = AzureCliAuthentication()
##from azureml.core.authentication import MsiAuthentication
##msi_auth = MsiAuthentication()
##------------- Get Workspace
subscriptionId = "<your subscription id>" # make this a parameter
resourceGroup = "<your resource group>" # make this a parameter
workspaceName = "<your ml workspace name>" # make this a parameter
ws = Workspace(subscriptionId, resourceGroup, workspaceName, auth=cli_auth)
print("Workspace => ",ws.name)
##------------- Pipeline Configuration
sourceDirectory = "./code"
remoteComputeTargetName = "default" # make this a parameter
computeTarget = ws.compute_targets[remoteComputeTargetName]
# Conda/pip environment shared by all three steps; executed inside Docker.
run_config = RunConfiguration(conda_dependencies=CondaDependencies.create(
    conda_packages=['numpy', 'pandas',
                    'scikit-learn', 'tensorflow', 'keras'],
    pip_packages=['azure', 'azureml-core',
                  'azure-storage',
                  'azure-storage-blob',
                  'azureml-dataprep']))
run_config.environment.docker.enabled = True
##------------- Pipeline Parameters
# PipelineParameters can be overridden per submission without editing this script.
datasetName = PipelineParameter(name="datasetName", default_value="qualitydataset")
datasetStorePath = PipelineParameter(name="datasetStorePath", default_value="/inputdata/train.csv")
modelName = PipelineParameter(name="modelName", default_value="quality_gbm_model.pkl")
##------------- Data preprocessing step
preprocessingStep = PythonScriptStep(name="preprocess_step",
                                     script_name="preprocess.py",
                                     source_directory=sourceDirectory,
                                     compute_target=computeTarget,
                                     arguments=[
                                         "--datasetName", datasetName,
                                         "--datasetStorePath", datasetStorePath,
                                     ],
                                     runconfig=run_config,
                                     allow_reuse=False)
print("Data preprocessing Step created")
##------------- Model Training step
trainingStep = PythonScriptStep(name="training_step",
                                script_name="train.py",
                                source_directory=sourceDirectory,
                                compute_target=computeTarget,
                                arguments=[
                                    "--datasetName", datasetName,
                                    "--modelName", modelName
                                ],
                                runconfig=run_config,
                                allow_reuse=False)
print("Model Training Step created")
##------------- Scoring and Output step
scoringStep = PythonScriptStep(name="scoring_step",
                               script_name="score.py",
                               source_directory=sourceDirectory,
                               compute_target=computeTarget,
                               arguments=[
                                   "--modelName", modelName
                               ],
                               runconfig=run_config,
                               allow_reuse=False)
print("Scoring and output Step created")
##------------- Create Pipeline
# run_after establishes preprocess -> train -> score ordering; passing only the
# final step to Pipeline pulls in its upstream dependencies.
trainingStep.run_after(preprocessingStep)
scoringStep.run_after(trainingStep)
qualityMLPipeline = Pipeline(workspace=ws, steps=[scoringStep])
print ("Quality Prediction pipeline is built")
qualityMLPipeline.validate()
print("Quality Prediction pipeline simple validation complete")
##------------- Submit an Experiement using the Pipeline
pipelineRun = Experiment(ws, 'quality_prediction_gb').submit(qualityMLPipeline)
print("Quality Prediction pipeline submitted for execution")
##------------- Publish Pipeline
#publishedPipeline = qualityMLPipeline.publish(name="NewQualityPrediction-Pipeline", description="Quality Prediction pipeline",version="0.1")
publishedPipeline = pipelineRun.publish_pipeline(name="NewQualityPrediction-Pipeline", description="Quality Prediction pipeline",version="0.1")
print("Newly published pipeline id => ",publishedPipeline.id)
print("Newly published pipeline endpoint => ", publishedPipeline.endpoint)
| jomit/SecureAzureMLWorkshop | aml_pipeline/build_pipeline.py | build_pipeline.py | py | 4,596 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "azureml.core.authentication.AzureCliAuthentication",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "azureml.core.Workspace",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "azureml.core.runconfig.RunConfiguration",
"line_number": 34,
"u... |
72502764263 | import itertools
import math
import pickle
from collections import defaultdict,Counter
import collections
import copy
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
# Example query and a small "dictionary of entities" (DoE) to segment against.
Q = 'Los The Angeles Boston Times Globe Washington Post'
DoE = {'Los Angeles Times':0, 'The Boston Globe':1,'The Washington Post':2, 'Star Tribune':3}
tokens = Q.split()
#token list
counter = collections.Counter(tokens)
#token_count
token_dictionary=dict(counter)
subset_of_doe = []
DoE_list = list(DoE.keys())
# Collect every DoE entity name that can be formed as an order-preserving
# combination of the query tokens.
for L in range(1, len(tokens)+1):
    for subset in itertools.combinations(tokens, L):
        string = ' '.join(list(subset))
        #print(string)
        if string in DoE_list:
            if string not in subset_of_doe:
                subset_of_doe.append(string)
# All subsets (including the empty one) of the matched entity names.
combination_of_subset = []
for L in range(0, len(subset_of_doe)+1):
    for subset in itertools.combinations(subset_of_doe, L):
        combination_of_subset.append(list(subset))
entities = []
# Keep only entity combinations whose combined token multiset fits within the
# token counts available in the query (the empty combination always qualifies).
for i in combination_of_subset:
    n = []
    if i != []:
        for j in i:
            n += j.split()
        counter = collections.Counter(n)
        dictionary=dict(counter)
        if(all((k in token_dictionary and token_dictionary[k]>=v) for k,v in dictionary.items())):
            entities.append(i)
    else:
        entities.append(i)
# For each feasible entity combination, the query tokens not consumed by the
# entities form the 'tokens' part of the split.
splits = {}
count = 0
for e in entities:
    splits[count] = {}
    token_list = copy.deepcopy(tokens)
    n = []
    if e != []:
        for j in e:
            n += j.split()
        for item in n:
            token_list.remove(item)
        splits[count] = {'tokens':token_list}
    else:
        splits[count] = {'tokens':token_list}
    splits[count]['entities'] = e
    count += 1
return splits | Xin1896/COMP6714 | proj1-partA/test3.py | test3.py | py | 1,707 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "col... |
18362653657 | """
Class containing the display of the Chip8 emulator.
"""
import pygame
class Chip8Display:
    """Scaled monochrome display for a Chip8 emulator, backed by pygame.

    Pixels are drawn with XOR semantics: drawing over an already-lit pixel
    clears it (and reports the collision), as the CHIP-8 DRW opcode requires.
    """

    def __init__(self, width, height, scale=10):
        ###########################
        # CONSTANTS
        ###########################
        self.BG_COLOR = (0, 0, 0)          # background colour (pixel off)
        self.MAIN_COLOR = (255, 255, 255)  # foreground colour (pixel on)
        ###########################
        # VARIABLES
        ###########################
        self.width = width    # logical display width in CHIP-8 pixels
        self.height = height  # logical display height in CHIP-8 pixels
        self.scale = scale    # integer scale factor: one logical pixel -> scale x scale screen pixels
        self.display = None   # pygame surface, created lazily by create_display()

    def create_display(self):
        """
        Creates the display of the Chip8 emulator (idempotent: the window is
        only created on the first call), then clears and refreshes it.
        """
        if self.display is None:
            self.display = pygame.display.set_mode(
                (self.width * self.scale, self.height * self.scale)
            )
            self.clear_display()
            self.update_display()

    def clear_display(self):
        """
        Clears the display of the Chip8 emulator.
        """
        self.display.fill(self.BG_COLOR)
        self.update_display()

    def update_display(self):
        """
        Updates the display of the Chip8 emulator.
        """
        pygame.display.update()

    def draw_byte(self, x_pos, y_pos, byte):
        """
        Opcode Dxyn - DRW Vx, Vy, nibble
        Draws one 8-pixel-wide sprite row at coordinate (x_pos, y_pos).
        :param x_pos: x-coordinate of the sprite row to draw.
        :param y_pos: y-coordinate of the sprite row to draw.
        :param byte: The byte to draw (MSB is the leftmost pixel).
        :return: True if any lit pixel was overwritten (collision), False otherwise.
        """
        overwritten = False
        for i in range(8):
            # Only set bits toggle pixels (XOR semantics).
            if self.is_bit_set(byte, i):
                # Wrap pixel around the screen if it goes out of bounds
                new_x_pos = (x_pos + i) % self.width
                # Adjust y_pos if out of range values are passed
                new_y_pos = (y_pos) % self.height
                if self.draw_pixel(new_x_pos, new_y_pos):
                    overwritten = True
        return overwritten

    def is_bit_set(self, byte, index):
        """
        Returns True if the bit at index is set in byte.
        Where index is 0-7, and index 0 is the most significant bit.
        :param byte: The byte to check.
        :param index: The index of the bit to check.
        :raises ValueError: if index is outside 0-7.
        :return: True if the bit is set, False otherwise.
        """
        if index < 0 or index > 7:
            raise ValueError("Index must be between 0 and 7")
        return (byte & (1 << (7 - index))) != 0

    def draw_pixel(self, x_pos, y_pos):
        """
        Draws a pixel at coordinate (x_pos, y_pos) with XOR semantics:
        a pixel that is already lit gets cleared instead.
        :param x_pos: x-coordinate of the pixel to draw.
        :param y_pos: y-coordinate of the pixel to draw.
        :return: True if pixel was overwritten (cleared), False otherwise.
        """
        overwrite = self.is_drawn_pixel_present(x_pos, y_pos)
        # Fix: previously only the draw branch refreshed the screen, so cleared
        # pixels stayed visible until some unrelated update. Both branches now
        # draw and then refresh.
        color = self.BG_COLOR if overwrite else self.MAIN_COLOR
        pygame.draw.rect(
            self.display,
            color,
            (x_pos * self.scale, y_pos * self.scale, self.scale, self.scale),
        )
        self.update_display()
        return overwrite

    def is_drawn_pixel_present(self, x_pos, y_pos):
        """
        Returns True if the pixel at (x_pos, y_pos) is already drawn.
        :param x_pos: x-coordinate of the pixel to check.
        :param y_pos: y-coordinate of the pixel to check.
        """
        return self.display.get_at((x_pos * self.scale, y_pos * self.scale)) == self.MAIN_COLOR
| trd-db/PyChip8 | chip8/Chip8Display.py | Chip8Display.py | py | 4,194 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.display.set_mode",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pyg... |
5536685399 | import json
import os
import pathlib
import sys
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtGui import QFontDatabase
from PyQt5.QtWidgets import *
import Globals
def Log(*args):
    """Lightweight severity-tagged console logger.

    args[0] is the severity level; the remaining arguments are stringified and
    joined with ' || '. The message is printed when the level is at or above
    the module Tolerance, and is returned for inspection by callers/tests
    (previously the function returned None, so this is backward-compatible).

    Severity scale:
      0 Non Issue
      1 Minor Issue
      2 Non Essential issue
      3 Essential Issue
      4 Possible Crashing Source
      5 Likely Crasing Source
    """
    Level = args[0]
    Type = "Print"
    Tolerance = 0
    Separator = ''' || '''
    # Join the payload with the separator. The previous implementation always
    # sliced len(Separator) chars off the end (for/else runs even when the loop
    # body never did), which corrupted the message when no payload was given.
    Message = f'''{Level}: ''' + Separator.join(str(a) for a in args[1:])
    if Type == "Print" and Level >= Tolerance:
        print(Message)
    return Message
class MainWindow(QMainWindow):
    """Main game window: sets the global stylesheet, loads the saved session,
    imports all engine/plugin/NPC modules, and shows the first screen."""

    # Signal emitted whenever the window is resized (see resizeEvent).
    resized = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        QFontDatabase.addApplicationFont("Resources/OtherResources/Segoe UI.ttf")
        DisclaimerAgreed = 0
        # Register this window so other modules can reach it via Globals.
        Globals.Layouts["MainF"] = self
        self.setWindowTitle('Another Chance')
        self.setWindowIcon(QtGui.QIcon('logo.jpg'))
        self.resize(1600, 1024)
        self.setMaximumWidth(1600)
        self.setMinimumWidth(1600)
        self.setMaximumHeight(1024)
        self.setMinimumHeight(700)
        # Single horizontal box that hosts every screen's GUI widget; screens
        # are shown/hidden by gotoLayout().
        self.LayoutsBox = QHBoxLayout()
        self.LayoutsWidget = QWidget(self)
        self.LayoutsWidget.setLayout(self.LayoutsBox)
        self.LayoutsWidget.setGeometry(0, 0, 1650, 1024)
        self.LayoutsBox.setContentsMargins(0, 0, 0, 0)
        # Application-wide Qt stylesheet (dark theme with yellow highlights).
        self.setStyleSheet('''
            .QWidget{
                background-color:rgb(35,35,35);
            }
            .QWidget#Transparent{
                background:none;
                border:none;
            }
            .QWidget[Color = "Dark"]{
                border:1px solid black;
                background-color:rgb(23,23,23);
            }
            .QGridLayout{
                border:none;
                background:none;
            }
            .QVBoxLayout{
                border:none;
                background:none;
            }
            .QGroupBox{
                border:none;
                background:none;
            }
            .QScrollArea{
                border:none;
                background-color:rgba(23,23,23,0)
            }
            .QScrollArea[Color = "Dark"]{
                background-color:rgb(23,23,23);
                border: 1px solid black;
            }
            .QScrollArea[Color = "Light"]{
                background-color:rgb(35,35,35);
                border: 1px solid black;
            }
            QPushButton{
                color:white;
                font-size: 14pt;
                font-family: Segoe UI;
                background-color:rgb(35,35,35);
            }
            QPushButton:hover{
                color:yellow;
            }
            QPushButton[Color = "Dark"]{
                background-color:rgb(23,23,23);
            }
            QPushButton[Color = "Light"]{
                background-color:rgb(35,35,35);
            }
            QPushButton[Enabled = "0"]{
                color:grey;
            }
            QPushButton[Enabled = "1"]{
            }
            QLabel{
                color:white;
                border:1px solid black;
                font-family: Segoe UI;
                font-size: 12pt;
            }
            QLabel:hover{
                color:yellow;
            }
            QLabel#MainTitle{
                font-size: 28pt;
            }
            QLabel#Title{
                font-size: 16pt;
            }
            QLabel#SubTitle{
                font-size: 14pt;
            }
            QLabel#SmallText{
                font-size: 10pt;
            }
            QLabel[Selected = "1"]{
                color:yellow;
            }
            QLabel[Selected = "-1"]{
                color:grey;
            }
            QLabel[Color = "Dark"]{
                background-color:rgb(23,23,23);
            }
            QLabel[Color = "Light"]{
                background-color:rgb(35,35,35);
            }
            QLabel[Color = "None"]{
                background:none;
                border:none;
            }
            QLabel[Border = "None"]{
                border:none;
            }
            QLabel[Border = "Selected"]{
                border: 1px solid yellow;
            }
            QLineEdit{
                font-size: 14pt;
                font-family: Segoe UI;
                color:white;
                border:1px solid black;
                background-color:rgb(35,35,35);
            }
            QLineEdit[Color = "Dark"]{
                background-color:rgb(23,23,23);
            }
            QLineEdit[Color = "Light"]{
                background-color:rgb(35,35,35);
            }
            QTextEdit{
                color:white;
                border:1px solid black;
                background-color:rgb(35,35,35);
            }
            QComboBox{
                background-color:rgb(23,23,23);
                color:white;
            }
            QComboBox:hover{
                color:yellow;
            }
            QComboBox QAbstractItemView {
                border: 1px solid grey;
                color: white;
                selection-color: yellow;
            }
            QRadioButton{
                color:white;
            }
            QRadioButton:hover{
                color:yellow;
            }
            QToolTip{
                background-color: rgb(23,23,23);
                color: white;
                border: 1px solid black;
            }
        ''')

        def SD(Object, Type):
            # Shared style lookup used by the screens: font family and
            # per-widget font sizes keyed on the widget's objectName.
            if Type == "Font":
                return "Segoe UI"
            if Type == "FontSize":
                if isinstance(Object, QPushButton):
                    return 14
                else:
                    ObjectName = Object.objectName()
                    if ObjectName == "MainTitle": return 20
                    elif ObjectName == "Title": return 16
                    elif ObjectName == "SubTitle": return 14
                    elif ObjectName == "SmallText": return 10
                    else: return 12
        self.StyleData = SD
        self.Log = Log
        self.LayoutsBox.LayoutsList = []
        # Restore the saved session if present; otherwise start fresh.
        try:
            with pathlib.Path.open('CurrentSession.json', 'rb') as f:
                Globals.CurrentSession = json.load(f)
        except:
            Globals.CurrentSession = {}
        CurrentPath = pathlib.Path()
        # IMPORTS THE BASIC MOD AND FUNCTIONS
        Reference = __import__("Globals")
        Reference.Initialize(self, Reference)
        Reference = __import__("SoLFunctions")
        Reference.Initialize(self, Reference)
        ModsPath = os.path.abspath(pathlib.Path() / "SoL" / "Mods")
        if ModsPath not in sys.path:
            sys.path.insert(0, ModsPath)
        try:
            Reference = __import__("BasicMod")
            Globals.References["BasicMod"] = Reference
            Reference.Initialize(self, Reference)
        except Exception as e:
            Log(5, "ERROR INITIALIZE BASIC MOD", e)
        # IMPORTS THE BASIC FILES (every .py next to main.py)
        FileList = os.listdir()
        for File in FileList:
            if File.endswith(".py") and File != "main.py":
                try:
                    Reference = __import__(File[:-3])
                    Globals.References[File[:-3]] = Reference
                    Reference.Initialize(self, Reference)
                except Exception as e:
                    Log(4, "ERROR INITIALIZE FILES", e, File)
        # IMPORTS THE PLUGIN DIRECTORIES (previously seven copy-pasted blocks;
        # same directories, skip lists and log levels as before).
        self._load_plugin_dir("Mods", "Mods", 3, skip=("BasicMod.py",))
        self._load_plugin_dir("Locations", "Locations", 2)
        self._load_plugin_dir("Commands", "Commands", 2)
        self._load_plugin_dir("Abilities", "Abilities", 2)
        self._load_plugin_dir("Traits", "Traits", 2)
        self._load_plugin_dir("Personalities", "Personalities", 2)
        self._load_plugin_dir("Clothes", "Clothes", 2)
        # IMPORTS THE NPC FUNCTIONS (one optional <ID>Functions.py per NPC dir)
        try:
            for NPCFullID in os.listdir("NPCData"):
                if pathlib.Path.is_dir(pathlib.Path() / "NPCData" / NPCFullID):
                    try:
                        FilesList = os.listdir(os.path.abspath(pathlib.Path() / "NPCData" / NPCFullID))
                        if f"{NPCFullID}Functions.py" in FilesList:
                            Path = os.path.abspath(pathlib.Path() / "NPCData" / NPCFullID)
                            if Path not in sys.path:
                                sys.path.insert(0, Path)
                            File = f"{NPCFullID}Functions.py"
                            Reference = __import__(File[:-3])
                            Globals.References[File[:-3]] = Reference
                            Reference.Initialize(self, Reference)
                    except Exception as e:
                        Log(2, "ERROR INITIALIZE NPCFunctions", e, File)
        except Exception as e:
            Log(4, "ERROR INITIALIZE NPCFunctions", e)
        # Show the disclaimer on first run, otherwise go straight to the menu.
        if DisclaimerAgreed == 0:
            self.gotoLayout("DisclaimerUI")
        else:
            self.gotoLayout("MainMenuUI")
        self.show()

    def _load_plugin_dir(self, subdir, label, inner_level, skip=()):
        """Import every .py module under SoL/<subdir>, register it in
        Globals.References and call its Initialize(self, module).

        :param subdir: directory name under SoL/ (also added to sys.path).
        :param label: name used in the 'ERROR INITIALIZE <label>' log messages.
        :param inner_level: Log severity for a single module failing to load.
        :param skip: filenames to ignore (e.g. BasicMod.py, loaded earlier).
        """
        try:
            path = os.path.abspath(pathlib.Path() / "SoL" / subdir)
            if path not in sys.path:
                sys.path.insert(0, path)
            for File in os.listdir(path):
                try:
                    if File.endswith(".py") and File not in skip:
                        Reference = __import__(File[:-3])
                        Globals.References[File[:-3]] = Reference
                        Reference.Initialize(self, Reference)
                except Exception as e:
                    Log(inner_level, f"ERROR INITIALIZE {label}", e, File)
        except Exception as e:
            Log(4, f"ERROR INITIALIZE {label}", e)
    def gotoLayout(self, Layout):
        """Switch the visible screen to the layout registered in
        Globals.Layouts under *Layout*: record navigation history, lazily
        build the target's GUI if needed, hide all other screens and show
        (after Refresh) the target."""
        try:
            Object = Globals.Layouts[Layout]
            # Remember where we came from so gotoPreviousLayout() can return.
            Object.comesFrom = Globals.LayoutsData["Active"]
            Globals.LayoutsData["Previous"] = Globals.LayoutsData["Active"]
            Globals.LayoutsData["Active"] = Object
            try:
                if Object.GUI not in self.LayoutsBox.LayoutsList:
                    self.LayoutsBox.LayoutsList.append(Object.GUI)
                    self.LayoutsBox.addWidget(Object.GUI)
            except:
                # GUI not built yet (accessing Object.GUI raised): build it,
                # then register it.
                Object.UI()
                if Object.GUI not in self.LayoutsBox.LayoutsList:
                    self.LayoutsBox.LayoutsList.append(Object.GUI)
                    self.LayoutsBox.addWidget(Object.GUI)
            for LayoutOther in self.LayoutsBox.LayoutsList:
                LayoutOther.hide()
                if LayoutOther == Object.GUI:
                    Object.Refresh()
                    try:
                        # Not every layout implements ResizeEvent; best-effort.
                        Globals.LayoutsData["Active"].ResizeEvent()
                    except:
                        ""
                    LayoutOther.show()
            self.show()
        except Exception as e:
            Log(4, "ERROR SWITCHING LAYOUT", e, Layout)
    def gotoPreviousLayout(self):
        """Return to the layout the active one was opened from (comesFrom),
        refreshing it and forwarding a resize if it supports one."""
        Globals.LayoutsData["Active"].GUI.hide()
        Globals.LayoutsData["Active"].comesFrom.GUI.show()
        Globals.LayoutsData["Active"] = Globals.LayoutsData["Active"].comesFrom
        Globals.LayoutsData["Active"].Refresh()
        try:
            # Not every layout implements ResizeEvent; best-effort.
            Globals.LayoutsData["Active"].ResizeEvent()
        except:
            ""
    def resizeEvent(self, event):
        """Qt resize hook: forward the resize to the active layout
        (best-effort), then emit the `resized` signal."""
        try:
            Globals.LayoutsData["Active"].ResizeEvent()
        except:
            ""
        self.resized.emit()
    def someFunction(self):
        """Best-effort forward to the BattleMenu layout.

        NOTE(review): 'ressEvent' looks like a typo for a resize handler —
        confirm against the BattleMenu API before relying on this."""
        try:
            Globals.Layouts["BattleMenu"].ressEvent()
        except:
            ""
# overriding key press event
def keyList(self, key):
if key == 17: # Control
key = "Control"
elif key == 16: # Shift
key = "Shift"
elif key == 13: # Enter
key = "Enter"
elif key == 18: # Alt
key = "Alt"
if key == 27: # Esc
key = "Esc"
elif key == 192: # Tilde, key to the left of 1
key = "Tilde"
elif key == 20: # Mayus
key = "Mayus"
elif key == 81: # Q
key = "Q"
elif key == 87: # W
key = "W"
elif key == 69: # E
key = "E"
elif key == 65: # A
key = "A"
elif key == 83: # S
key = "S"
elif key == 68: # D
key = "D"
elif key == 90: # Z
key = "Z"
elif key == 88: # X
key = "X"
elif key == 67: # C
key = "C"
elif key == 32: # Space
key = "Space"
elif key == 48: # 0
key = "0"
elif key == 49: # 1
key = "1"
elif key == 50: # 2
key = "2"
elif key == 51: # 3
key = "3"
elif key == 52: # 4
key = "4"
elif key == 53: # 5
key = "5"
elif key == 54: # 6
key = "6"
elif key == 55: # 7
key = "7"
elif key == 56: # 8
key = "8"
elif key == 57: # 9
key = "9"
elif key == 84: # T
key = "T"
return key
    def keyPressEvent(self, e):
        """Qt key-press hook: record the pressed key in Globals.Keys and
        handle the debug chords Ctrl+Shift+1 (previous layout) and
        Ctrl+Shift+2 (back to main menu)."""
        try:
            key = e.nativeVirtualKey()  # To prevent Shift+1 to turn into ! instead of 1
            key = self.keyList(key)
            Globals.Keys[key] = e
            # Globals.Layouts[]
            # Globals.Layouts["BattleMenu"].KeyHandling("Pressed", key, e)
        except:
            ""
        try:
            if "Control" in Globals.Keys and "Shift" in Globals.Keys and "1" in Globals.Keys:
                try:
                    self.gotoPreviousLayout()
                except:
                    ""
            if "Control" in Globals.Keys and "Shift" in Globals.Keys and "2" in Globals.Keys:
                try:
                    if Globals.LayoutsData["Active"] != Globals.Layouts["MainMenuUI"]:
                        self.gotoLayout("MainMenuUI")
                except Exception as e:
                    ""
        except Exception as e:
            ""
def keyReleaseEvent(self, e):
    """Qt key-release hook: drop the released key from Globals.Keys."""
    try:
        key = self.keyList(e.nativeVirtualKey())
        # pop with a default: a release for a key we never recorded is not
        # an error (previously a KeyError was raised and silently swallowed).
        Globals.Keys.pop(key, None)
    except Exception:
        pass
def closeEvent(self, event):
    # Intentional no-op override: the window closes without any cleanup.
    # The session-reporting/debug code below was deliberately disabled.
    ""
    # report_session()
    # pid = os.getpid()
    # print(pid)
if __name__ == "__main__":
    app = QApplication(sys.argv)
    # Bug fix: keep a reference to the window. Without one, CPython may
    # garbage-collect the QMainWindow right after construction and the UI
    # never appears (a classic PyQt pitfall).
    window = MainWindow()
    sys.exit(app.exec_())
| AntCDev/Another-Chance | main.py | main.py | py | 21,012 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QFontDatabase.addApplicationFont",
"line_number": 40,
"usage_type": "call"
},
{
... |
74424262825 | import pandas as pd
import urllib.request
import bs4 as bs
import re
class Movie_Titles:
    """Scrapes the title of every movie URL in a dataframe and stores the
    (Code, Title) pairs in the TITLES database table."""

    def __init__(self, movie_df, refresh_on_start, engine):
        self.df = movie_df                        # dataframe with a 'Code' column of movie URLs
        self.refresh_on_start = refresh_on_start  # rebuild the TITLES table before scraping
        self.engine = engine                      # SQLAlchemy engine

    def get_titles(self):
        """Obtain the titles of the movies in the movies dataframe.
        Outputs a dataframe with movie URLs and Title."""
        if self.refresh_on_start:  # was ``== True``; truthiness is equivalent
            con = self.engine.connect()
            con.execute('drop table if exists TITLES_OLD')
            con.execute('alter table if exists TITLES rename to TITLES_OLD')
            con.execute('create table TITLES ( Code varchar not null, Title varchar)')
        movie_idx = self.df['Code'].unique()
        # NOTE(review): greedy ``.*`` assumes "description" follows "name"
        # exactly once in each JSON-LD block — confirm on the scraped pages.
        pattern = '"name":(.*),"description"'
        frames = []
        for code in movie_idx:
            # Bug fix: close the HTTP response deterministically (it was
            # left open before).
            with urllib.request.urlopen(code) as source:
                soup = bs.BeautifulSoup(source, "html.parser")
            for gen in soup.find_all('script', {'type': "application/ld+json"}):
                i_json = str(gen)
            title = re.search(pattern, i_json).group(1)
            temp = pd.DataFrame.from_records([{'Code': code, 'Title': title}])
            temp.to_sql('TITLES', self.engine, if_exists='append', index=False)
            frames.append(temp)
        # Single concat instead of concatenating inside the loop (O(n) vs O(n^2)).
        df_title = pd.concat(frames) if frames else pd.DataFrame()
        print("TITLES have been updated")
        return df_title
| chiaracapuano/WhatToWatch | docker_files/update-database/Movies_Titles.py | Movies_Titles.py | py | 1,707 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_n... |
31471841488 | from typing import Optional
import pytest
from robotoff.insights.ocr.label import LABELS_REGEX
XX_BIO_XX_OCR_REGEX = LABELS_REGEX["xx-bio-xx"][0]
ES_BIO_OCR_REGEX = LABELS_REGEX["xx-bio-xx"][1]
@pytest.mark.parametrize(
    "input_str,is_match,output",
    [
        ("ES-ECO-001-AN", True, "en:es-eco-001-an"),  # full Spanish organic code
        ("ES-ECO-001", False, None),   # region suffix missing
        ("ES-ECO-001-", False, None),  # empty region suffix
        ("FR-BIO-01", False, None),    # French code: handled by another regex
    ],
)
def test_es_ocr_regex(input_str: str, is_match: bool, output: Optional[str]):
    """The ES bio regex must match only complete ES-ECO-xxx-YY codes and
    normalise matches to the canonical tag via its processing function."""
    regex = ES_BIO_OCR_REGEX.regex
    match = regex.match(input_str)
    assert (match is not None) == is_match
    if is_match:
        assert ES_BIO_OCR_REGEX.processing_func(match) == output  # type: ignore
| alexouille123/robotoff | tests/insights/ocr/test_label.py | test_label.py | py | 727 | python | en | code | null | github-code | 36 | [
{
"api_name": "robotoff.insights.ocr.label.LABELS_REGEX",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "robotoff.insights.ocr.label.LABELS_REGEX",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 20,
"usage_type": "name"... |
73313411305 | # -*- coding: utf-8 -*-
# @Time : 2022 09
# @Author : yicao
import csv
import os
import time
import numpy as np
import torch
import math
from utils import log_util
class RecordTest:
    """Accumulates per-batch test accuracy and appends the epoch average
    to a ' test.csv' file next to the log."""

    def __init__(self, log_file, test_file):
        self.log_file = log_file
        self.test_file = test_file + ' test.csv'
        self.test_acc_sum = 0.0
        self.test_cnt = 0
        # Start a fresh CSV with a header row.
        with open(self.test_file, 'w') as fh:
            csv.writer(fh).writerow(['epoch', 'Test Acc'])
        print(f"创建记录文件:{self.test_file}")

    def record_test(self, outputs, labels):
        """Fold the accuracy of one test batch into the running sums."""
        _, predicted = torch.max(outputs.data, 1)
        hits = (predicted == labels).sum().item()
        self.test_acc_sum += 100. * hits / labels.size(0)
        self.test_cnt += 1

    def print_test(self, epoch=0):
        """Log the mean accuracy over all recorded batches, append it to the
        CSV, reset the accumulators and return the value."""
        test_acc = round(self.test_acc_sum / self.test_cnt, 4)
        for line in ("---------------------Test---------------------",
                     f"Test Acc: {test_acc}%",
                     "---------------------Test---------------------"):
            self.log(line)
        with open(self.test_file, 'a+') as fh:
            csv.writer(fh).writerow([epoch, test_acc])
        self.test_acc_sum, self.test_cnt = 0, 0
        return test_acc

    def log(self, line):
        """Write one line to the shared log file via log_util."""
        log_util.log(self.log_file, line)
class RecordTrain:
    """Accumulates training accuracy and loss per batch; every
    ``print_freq`` batches the running averages are logged and appended
    to a ' train.csv' file."""

    def __init__(self, log_file, train_file, print_freq):
        self.acc_sum = 0
        self.loss_sum = 0
        self.cnt = 0                   # total batches seen so far (never reset)
        self.log_file = log_file
        self.train_file = train_file + ' train.csv'
        self.print_freq = print_freq   # batches between two reports
        self.last_time = time.time()

    def record_init(self):
        """Reset the running sums and the timer; ``cnt`` keeps counting."""
        self.acc_sum = 0
        self.loss_sum = 0
        self.last_time = time.time()

    def record_train(self, loss, outputs, labels, msg="", epoch=0):
        """Fold one training batch in; on a report boundary, log the
        averages, append them to the CSV and reset the sums."""
        _, predicted = torch.max(outputs.data, 1)
        hits = (predicted == labels).sum().item()
        self.acc_sum += 100. * hits / labels.size(0)
        self.loss_sum += loss
        if self.cnt % self.print_freq == self.print_freq - 1:
            mean_acc = round(self.acc_sum / self.print_freq, 4)
            mean_loss = round(self.loss_sum / self.print_freq, 4)
            self.log(
                f"{msg}epoch: {epoch}, iter: {self.cnt}, acc: {mean_acc}%,"
                f" loss: {mean_loss}, time: {round(time.time() - self.last_time, 3)}s")
            with open(self.train_file, 'a+') as fh:
                csv.writer(fh).writerow([epoch, self.cnt, mean_acc, mean_loss])
            self.record_init()
        self.cnt += 1

    def log(self, line):
        """Write one line to the shared log file via log_util."""
        log_util.log(self.log_file, line)
class RecordAccUtil:
    """Combined train/test recorder: logs per-batch training accuracy/loss
    and per-epoch test accuracy, each to its own CSV file."""

    def __init__(self, batch_size, train_number=1, test_number=1, print_batch=100, log_file="", csv_name=None):
        self.log_file = log_file
        self.csv_name = csv_name
        self.loss_list = []  # loss of every batch
        self.acc_list = []  # accuracy of every batch
        self.epoch = 0
        self.total_iter = 0  # current iteration count
        self.batch_size = batch_size
        self.train_number = train_number  # size of the training set
        self.test_number = test_number  # size of the test set
        self.epoch_batch_size = math.ceil(self.train_number / self.batch_size)  # batches per epoch
        self.print_batch = print_batch  # reporting period, in batches
        self.print_train_acc = 0
        self.print_loss = 0
        self.print_test_acc = 0
        self.last_time = time.time()
        self.train_csv_name = csv_name + ' train.csv'
        self.test_csv_name = csv_name + ' test.csv'
        # Both CSVs are (re)created with header rows up front.
        with open(self.train_csv_name, 'w') as f:
            csv_write = csv.writer(f)
            csv_write.writerow(['epoch', 'iter', 'acc', 'loss'])
        with open(self.test_csv_name, 'w') as f:
            csv_write = csv.writer(f)
            csv_write.writerow(['epoch', 'Test Acc'])
        print(f"创建记录文件:{self.train_csv_name}")
        print(f"创建记录文件:{self.test_csv_name}")

    def record_train(self, loss, outputs, labels, msg="", epoch=0):
        """Record one training batch (argmax accuracy); every
        ``print_batch`` iterations, log and append the running averages."""
        _, predicted = torch.max(outputs.data, 1)
        correct = (predicted == labels).sum().item()
        acc = 100. * correct / labels.size(0)
        self.acc_list.append(acc)
        self.loss_list.append(loss)
        self.print_train_acc += acc
        self.print_loss += loss
        if self.total_iter % self.print_batch == (self.print_batch - 1):
            self.log(
                f"{msg}epoch: {self.epoch}, iter: {self.total_iter}, acc: {round(self.print_train_acc / self.print_batch, 4)}%,"
                f" loss: {round(self.print_loss / self.print_batch, 4)}, time: {round(time.time() - self.last_time, 3)}s")
            with open(self.train_csv_name, 'a+') as f:
                csv_write = csv.writer(f)
                csv_write.writerow([epoch, self.total_iter, round(self.print_train_acc / self.print_batch, 4),
                                    round(self.print_loss / self.print_batch, 4)])
            self.print_train_acc, self.print_loss = 0, 0
            self.last_time = time.time()
        self.total_iter += 1
        # Derive the epoch counter from the global iteration count.
        self.epoch = int(self.total_iter / self.epoch_batch_size)

    def record_train_nll(self, loss, outputs, labels, msg="", epoch=0):
        """Variant of record_train for binary labels: counts correct
        predictions via |pred - label| instead of an equality mask."""
        label_pred = outputs.max(dim=1)[1]
        correct = len(outputs) - torch.sum(torch.abs(label_pred - labels))  # number of correct predictions
        acc = (100. * correct / labels.size(0)).item()
        self.acc_list.append(acc)
        self.loss_list.append(loss)
        self.print_train_acc += acc
        self.print_loss += loss
        if self.total_iter % self.print_batch == (self.print_batch - 1):
            self.log(
                f"{msg}epoch: {self.epoch}, iter: {self.total_iter}, acc: {round(self.print_train_acc / self.print_batch, 4)}%,"
                f" loss: {round(self.print_loss / self.print_batch, 4)}, time: {round(time.time() - self.last_time, 3)}s")
            with open(self.train_csv_name, 'a+') as f:
                csv_write = csv.writer(f)
                csv_write.writerow([epoch, self.total_iter, round(self.print_train_acc / self.print_batch, 4),
                                    round(self.print_loss / self.print_batch, 4)])
            self.print_train_acc, self.print_loss = 0, 0
            self.last_time = time.time()
        self.total_iter += 1
        self.epoch = int(self.total_iter / self.epoch_batch_size)

    def record_test(self, outputs, labels):
        # NOTE(review): this accumulates a per-batch PERCENTAGE, yet
        # print_test_accuracy divides by test_number and multiplies by 100
        # again — unlike record_test_nnl, which accumulates raw counts.
        # Confirm which convention callers rely on before changing either.
        _, predicted = torch.max(outputs.data, 1)
        correct = (predicted == labels).sum().item()
        acc = 100. * correct / labels.size(0)
        self.print_test_acc += acc

    def record_test_nnl(self, outputs, labels):
        """Accumulate the raw number of correct test predictions."""
        label_pred = outputs.max(dim=1)[1]
        acc = len(outputs) - torch.sum(torch.abs(label_pred - labels))  # number of correct predictions
        self.print_test_acc += acc.detach().cpu().numpy()

    def print_test_accuracy(self, epoch):
        """Log the epoch test accuracy, append it to the test CSV, reset the
        accumulator and return the value."""
        test_acc = round((self.print_test_acc / self.test_number) * 100, 4)
        self.log("---------------------Test---------------------")
        self.log(f"Test Acc: {test_acc}%")
        self.log("---------------------Test---------------------")
        with open(self.test_csv_name, 'a+') as f:
            csv_write = csv.writer(f)
            csv_write.writerow([epoch, test_acc])
        self.print_test_acc = 0
        return test_acc

    def log(self, line):
        """Write one line to the shared log file via log_util."""
        log_util.log(self.log_file, line)
class RecordCompressUtil:
    """Tracks the average number of transmitted (nonzero) bitmap entries,
    both per epoch and over the whole training run."""

    def __init__(self, model_length):
        self.model_length = model_length  # total parameter count of the model
        self.epoch_compress = 0           # nonzero entries accumulated this epoch
        self.epoch_idx = 0                # bitmaps recorded this epoch
        self.total_compress = 0           # sum of finished epochs' averages
        self.total_idx = 0                # number of finished epochs

    def record_compress(self, bitmaps: np.ndarray):
        """Fold the nonzero count of one compression bitmap into the epoch."""
        self.epoch_compress += np.count_nonzero(bitmaps)
        self.epoch_idx += 1

    def epoch_end(self):
        """Log the epoch's average sparsity and fold it into the run totals."""
        # NOTE(review): log_util.log() is called with a single argument here,
        # unlike the two-argument calls elsewhere in this file — confirm
        # log_util.log's signature before relying on these log lines.
        log_util.log(self.epoch_compress)
        log_util.log(self.epoch_idx)
        mean_nonzero = self.epoch_compress / self.epoch_idx
        log_util.log(mean_nonzero)
        log_util.log(f"epoch训练结束, 平均稀疏率为:{round(mean_nonzero * 100 / self.model_length, 2)}%")
        self.epoch_compress = 0
        self.epoch_idx = 0
        self.total_compress += mean_nonzero
        self.total_idx += 1

    def train_end(self):
        """Log the run-wide average sparsity across all finished epochs."""
        mean_nonzero = self.total_compress / self.total_idx
        log_util.log(f"全部训练结束, 平均稀疏率为:{round(mean_nonzero * 100 / self.model_length, 2)}%")
class RecordCompressLayerUtil:
    """Tracks, per network layer, how many entries of a compression bitmap
    are transmitted, and periodically converts the counts into per-layer
    sparsity rates (optionally appended to a CSV file)."""

    def __init__(self, optim: torch.optim.Optimizer, mod_len, print_size, create_csv=False, csv_name=None):
        self.mod_len = mod_len
        self.print_size = print_size   # bitmaps per report
        self.print_t = 0               # bitmaps seen since the last report
        self.create_csv = create_csv
        self.csv_name = csv_name
        # Derive the layer count m and each layer's flattened parameter
        # count from the optimizer's first parameter group.
        self.layer_size = []
        self.m = 0
        for param in optim.param_groups[0]['params']:
            flat = param.data.view(-1)
            self.m += 1
            self.layer_size.append(len(flat))
        self.layer_size = np.array(self.layer_size)
        self.rs = np.zeros(self.m)     # per-layer running nonzero counts
        if self.create_csv & (self.csv_name is not None):
            with open(os.path.join('results', self.csv_name), 'w') as fh:
                csv.writer(fh).writerow(range(self.m))

    def record_compress(self, bitmap: np.ndarray):
        """Accumulate per-layer nonzero counts of *bitmap*; every
        ``print_size`` calls, normalise them to rates, reset, and
        optionally append the rates to the CSV file."""
        offset = 0
        for layer, size in enumerate(self.layer_size):
            self.rs[layer] += np.count_nonzero(bitmap[offset:offset + size])
            offset += size
        self.print_t += 1
        if self.print_size == self.print_t:
            self.print_t = 0
            self.rs = self.rs / self.print_size / self.layer_size
            print_list = [round(k, 3) if round(k, 3) != 0 else k for k in self.rs]
            self.rs = np.zeros(self.m)
            if self.create_csv & (self.csv_name is not None):
                with open(os.path.join('results', self.csv_name), 'a+') as fh:
                    csv.writer(fh).writerow(print_list)
| zhengLabs/FedLSC | utils/record_util.py | record_util.py | py | 10,384 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "csv.writer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "utils.log_util.log",
"line_number... |
16636179375 | from torch.nn import BCEWithLogitsLoss
from pytorch_toolbelt import losses as L
import numpy as np
import torch
import matplotlib.pyplot as plt
def main():
    """Plot a set of binary segmentation losses, plus their first and
    second numerical derivatives, as a function of the raw logit."""
    losses = {
        "bce": BCEWithLogitsLoss(),
        # "focal": L.BinaryFocalLoss(),
        # "jaccard": L.BinaryJaccardLoss(),
        # "jaccard_log": L.BinaryJaccardLogLoss(),
        # "dice": L.BinaryDiceLoss(),
        # "dice_log": L.BinaryDiceLogLoss(),
        # "sdice": L.BinarySymmetricDiceLoss(),
        # "sdice_log": L.BinarySymmetricDiceLoss(log_loss=True),
        "bce+lovasz": L.JointLoss(BCEWithLogitsLoss(), L.BinaryLovaszLoss()),
        # "lovasz": L.BinaryLovaszLoss(),
        # "bce+jaccard": L.JointLoss(BCEWithLogitsLoss(),
        #                            L.BinaryJaccardLoss(), 1, 0.5),
        # "bce+log_jaccard": L.JointLoss(BCEWithLogitsLoss(),
        #                                L.BinaryJaccardLogLoss(), 1, 0.5),
        # "bce+log_dice": L.JointLoss(BCEWithLogitsLoss(),
        #                             L.BinaryDiceLogLoss(), 1, 0.5)
        # "reduced_focal": L.BinaryFocalLoss(reduced=True)
    }

    dx = 0.01
    # Logit sweep from -5 to 5; each row repeats one logit over a batch of 100.
    x_vec = torch.arange(-5, 5, dx).view(-1, 1).expand((-1, 100))

    f, ax = plt.subplots(3, figsize=(16, 16))

    for name, loss in losses.items():
        x_arr = []
        y_arr = []
        # All-positive target of the same batch size as one logit row.
        target = torch.tensor(1.0).view(1).expand((100))

        for x in x_vec:
            y = loss(x, target).item()
            x_arr.append(float(x[0]))
            y_arr.append(float(y))

        ax[0].plot(x_arr, y_arr, label=name)                          # loss value
        ax[1].plot(x_arr, np.gradient(y_arr, dx))                     # first derivative
        ax[2].plot(x_arr, np.gradient(np.gradient(y_arr, dx), dx))    # curvature

    f.legend()
    f.show()
if __name__ == "__main__":
    # Run the demo only when executed directly (not on import).
    main()
| BloodAxe/pytorch-toolbelt | demo/demo_losses.py | demo_losses.py | py | 1,750 | python | en | code | 1,447 | github-code | 36 | [
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytorch_toolbelt.losses.JointLoss",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytorch_toolbelt.losses",
"line_number": 19,
"usage_type": "name"
},
{
... |
34678033656 | import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(
    re.compile(r"https?://(?:www\.)?auftanken\.tv/livestream/?")
)
class AuftankenTV(Plugin):
    """Streamlink plugin for the auftanken.TV livestream."""

    # Extracts the HLS master-playlist URL from the web player page.
    _hls_url_re = re.compile(r"(https://.+?/http_adaptive_streaming/\w+\.m3u8)")
    PLAYER_URL = "https://webplayer.sbctv.ch/auftanken/"

    def get_title(self):
        # Static title: the livestream carries no per-programme metadata here.
        return "auftanken.TV Livestream"

    def _get_streams(self):
        """Fetch the player page, locate the HLS master playlist, and yield
        one (quality, stream) pair per playlist variant."""
        res = self.session.http.get(self.PLAYER_URL)
        m = self._hls_url_re.search(res.text)
        if m:
            for s in HLSStream.parse_variant_playlist(self.session, m.group(1)).items():
                yield s


# Entry point name required by Streamlink's plugin loader.
__plugin__ = AuftankenTV
| oe-mirrors/streamlink-plugins | auftanken.py | auftanken.py | py | 753 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlink.plugin.Plugin",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlink.strea... |
10476978002 | # main_before.py
# 강화학습 전에 실행하는 모듈
# 주식 데이터를 읽고, 차트 데이터와 학습 데이터를 준비하고, 주식투자 강화학습을 실행하는 모듈
import os
import logging
import settings
import datetime
from data import data_management, save_csv
from learner import Learner
def main_before_run(before_start_date, before_end_date, before_min_unit,
                    before_max_unit, before_delayed, before_learning,
                    before_balance, before_epoch, before_epsilon):
    """Train one reinforcement-learning agent per stock in the skyrocket list.

    For each stock code this configures per-stock logging, loads and
    preprocesses its chart data, restricts it to the given date range,
    trains a Learner, and saves the resulting policy network as .h5.

    Args:
        before_start_date, before_end_date: inclusive date-range filter.
        before_min_unit, before_max_unit: min/max trading units.
        before_delayed: delayed-reward threshold.
        before_learning: learning rate.
        before_balance: initial capital.
        before_epoch: number of training epochs.
        before_epsilon: initial exploration rate.
    """
    code_list = save_csv.load_skyrocket_list()
    for stock_code in code_list:
        # --- per-stock log configuration ---------------------------------
        log_dir = os.path.join(settings.BASE_DIR, 'result/logs/%s' % stock_code)
        timestr = settings.get_time_str()
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
        file_handler = logging.FileHandler(filename=os.path.join(
            log_dir, "%s_%s.log" % (stock_code, timestr)), encoding='utf-8')
        stream_handler = logging.StreamHandler()
        file_handler.setLevel(logging.DEBUG)
        stream_handler.setLevel(logging.INFO)
        # Bug fix: logging.basicConfig() is a no-op once the root logger has
        # handlers, so every stock after the first kept logging into the
        # first stock's file. Remove stale root handlers before reconfiguring.
        root_logger = logging.getLogger()
        for old_handler in root_logger.handlers[:]:
            root_logger.removeHandler(old_handler)
            old_handler.close()
        logging.basicConfig(format="%(message)s",
                            handlers=[file_handler, stream_handler], level=logging.DEBUG)

        # --- data preparation --------------------------------------------
        chart_data = data_management.load_chart_data(
            os.path.join(settings.BASE_DIR, 'data/csv_data/{}.csv'.format(stock_code)))
        prep_data = data_management.preprocess(chart_data)
        training_data = data_management.build_training_data(prep_data)

        # Restrict to the requested date range and drop incomplete rows.
        training_data = training_data[(training_data['date'] >= before_start_date) &
                                      (training_data['date'] <= before_end_date)]
        training_data = training_data.dropna()

        # Chart columns fed to the trading environment.
        features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
        chart_data = training_data[features_chart_data]

        # Ratio features fed to the policy network.
        features_training_data = [
            'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
            'close_lastclose_ratio', 'volume_lastvolume_ratio',
            'close_ma5_ratio', 'volume_ma5_ratio',
            'close_ma10_ratio', 'volume_ma10_ratio',
            'close_ma20_ratio', 'volume_ma20_ratio',
            'close_ma60_ratio', 'volume_ma60_ratio',
            'close_ma120_ratio', 'volume_ma120_ratio']
        training_data = training_data[features_training_data]

        # --- reinforcement learning --------------------------------------
        learner = Learner(
            stock_code=stock_code,                    # stock code
            chart_data=chart_data,                    # chart data
            training_data=training_data,              # training data
            min_trading_unit=before_min_unit,         # minimum trading unit
            max_trading_unit=before_max_unit,         # maximum trading unit
            delayed_reward_threshold=before_delayed,  # delayed-reward threshold
            lr=before_learning)                       # learning rate
        learner.fit(balance=before_balance,           # initial capital
                    num_epoches=before_epoch,         # number of epochs
                    discount_factor=0,                # discount factor
                    start_epsilon=before_epsilon)     # initial exploration rate

        # --- persist the trained policy network --------------------------
        date = datetime.datetime.strftime(datetime.datetime.today(), '%Y%m%d')
        model_dir = os.path.join(settings.BASE_DIR, 'result/models/%s' % stock_code)
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
        model_path = os.path.join(model_dir, 'model_%s.h5' % date)
        learner.network.save_model(model_path)
| 100th/AjouStock | main_before.py | main_before.py | py | 3,938 | python | ko | code | 23 | github-code | 36 | [
{
"api_name": "data.save_csv.load_skyrocket_list",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "data.save_csv",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path"... |
15443990274 | #!/usr/bin/python3.1
# anti-docx, a .docx-to-text converter written by Albin Stjerna
#
# Please, feel free to do what you want with this code. It is way too short
# for a proper license. :)
import io
import sys
import textwrap
import zipfile
from optparse import OptionParser
from xml.sax import ContentHandler, parse
def extract_document(fn):
    """Return a file-like object holding the XML body of the .docx at *fn*
    (the ``word/document.xml`` archive member).

    Bug fix: the original closed the ZipFile before the returned member
    handle was read, which raises "seek of closed file" on Python 3.
    The member is now read into memory while the archive is still open.
    """
    with zipfile.ZipFile(fn, "r") as wf:
        return io.BytesIO(wf.read("word/document.xml"))
def get_word_paragraphs(xmlfile):
    """Return a list of paragraph strings from the docx-formatted XML
    document in *xmlfile* (a file-like object)."""

    class tagHandler(ContentHandler):
        """SAX handler concatenating the <w:t> text runs of each <w:p>."""

        def __init__(self):
            self.paragraphMarker = "w:p"
            self.textMarker = "w:t"
            self.paragraphs = []
            self.string = ""
            self.inText = False
            self.inParagraph = False

        def startElement(self, name, attr):
            if name == self.textMarker:
                self.inText = True
            elif name == self.paragraphMarker:
                self.inParagraph = True

        def endElement(self, name):
            if name == self.textMarker:
                self.inText = False
            elif name == self.paragraphMarker:
                # Bug fix: was ``self.inParagraph == False`` — a comparison,
                # not an assignment — so the flag was never cleared.
                self.inParagraph = False
                self.paragraphs.append(self.string)
                self.string = ""

        def characters(self, ch):
            if self.inText:
                self.string += ch

    handler = tagHandler()
    parse(xmlfile, handler)
    return handler.paragraphs
def main():
    """Parse the command line and print the document's paragraphs,
    optionally wrapped to a readable width."""
    opt_parser = OptionParser()
    opt_parser.add_option("-w", "--wrap-lines",
                          action="store_true", dest="wraplines", default=False,
                          help="wrap long lines in output")
    options, args = opt_parser.parse_args()
    if len(args) != 1:
        opt_parser.error("incorrect number of arguments (did you specify a file name?)")
    wrapper = textwrap.TextWrapper()
    for paragraph in get_word_paragraphs(extract_document(args[0])):
        print(wrapper.fill(paragraph) if options.wraplines else paragraph)
if __name__ == "__main__":
    # Allow use both as a script and as an importable module.
    main()
| amandasystems/anti-docx | anti-docx.py | anti-docx.py | py | 2,244 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "xml.sax.ContentHandler",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "xml.sax.parse",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "optparse.OptionP... |
73609824105 | import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import shutil
import tikzplotlib
from cued.plotting.colormap import whitedarkjet
from cued.plotting import contourf_remove_white_lines, label_inner, init_matplotlib_config, unit, symb
from cued.utility import ConversionFactors as CoFa, rmdir_mkdir_chdir, cued_copy, chdir
from cued.kpoint_mesh import hex_mesh, rect_mesh
init_matplotlib_config()
def conditional_pdflatex(name_data, name_tex):
    """Compile *name_tex* with pdflatex (twice, so cross-references
    resolve) if a pdflatex binary is on PATH; otherwise just report that
    the .tex files are kept for manual compilation.

    Args:
        name_data: human-readable run label printed before compiling.
        name_tex: the .tex file to compile, in the current directory.
    """
    import subprocess  # local import: only needed on this code path

    print("======================")
    if shutil.which("pdflatex"):
        print("Creating PDF")
        print(name_data)
        # subprocess with an argument list avoids the shell entirely — the
        # old os.system string broke on file names with spaces or shell
        # metacharacters. Output is discarded as before (> /dev/null).
        for _ in range(2):
            subprocess.run(["pdflatex", name_tex],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL,
                           check=False)
        print("Done")
    else:
        print("No LaTeX (pdflatex) compiler found, only keeping .tex files and logo.")
        print("Can be compiled by hand by using 'pdflatex' on the .tex file.")
    print("======================")
def write_and_compile_latex_PDF(T, W, P, sys, Mpi):
    """Assemble the LaTeX summary PDF of a CUED run: copy the templates,
    render every time-/frequency-domain figure and the BZ sketch to .tikz,
    substitute the run parameters, and compile with pdflatex if available.

    Args:
        T: time-domain results (fields and currents).
        W: frequency-domain results (emission spectra).
        P: parameter object of the run.
        sys: band-structure system (energies, dipoles).
        Mpi: MPI info; the rank count appears in the summary.
    """
    t_fs = T.t*CoFa.au_to_fs
    num_points_for_plotting = 960

    # Index sets that downsample the data for plotting.
    t_idx = get_time_indices_for_plotting(T.E_field, t_fs, num_points_for_plotting)
    f_idx = get_freq_indices_for_plotting(W.freq/P.f, num_points_for_plotting, freq_max=30)
    t_idx_whole = get_indices_for_plotting_whole(t_fs, num_points_for_plotting, start=0)
    f_idx_whole = get_indices_for_plotting_whole(W.freq, num_points_for_plotting, start=f_idx[0])

    high_symmetry_path_BZ = get_symmetry_path_in_BZ(P, num_points_for_plotting)

    # Work inside a fresh directory holding all LaTeX inputs.
    latex_dir = P.header + "latex_pdf_files"
    rmdir_mkdir_chdir(latex_dir)
    cued_copy('plotting/tex_templates/CUED_summary.tex', '.')
    cued_copy('plotting/tex_templates/CUED_aliases.tex', '.')
    cued_copy('branding/logo.pdf', '.')

    write_parameters(P, Mpi)

    # Driving field in the time domain.
    tikz_time(T.E_field*CoFa.au_to_MVpcm, t_fs, t_idx, r'E-field ' + unit['E'], "Efield")
    tikz_time(T.A_field*CoFa.au_to_MVpcm*CoFa.au_to_fs, t_fs, t_idx, r'A-field ' + unit['A'], "Afield")

    K = BZ_plot(P, T.A_field)

    bandstruc_and_dipole_plot_high_symm_line(high_symmetry_path_BZ, P, num_points_for_plotting, sys)
    dipole_quiver_plots(K, P, sys)
    density_matrix_plot(P, T, K)

    # Currents parallel and orthogonal to the driving field.
    tikz_time(T.j_E_dir, t_fs, t_idx,
              r'Current $j_{\parallel}(t)$ parallel to $\bE$ in atomic units', "j_E_dir")
    tikz_time(T.j_E_dir, t_fs, t_idx_whole,
              r'Current $j_{\parallel}(t)$ parallel to $\bE$ in atomic units', "j_E_dir_whole_time")
    tikz_time(T.j_ortho, t_fs, t_idx,
              r'Current $j_{\bot}(t)$ orthogonal to $\bE$ in atomic units', "j_ortho")
    tikz_time(T.j_ortho, t_fs, t_idx_whole,
              r'Current $j_{\bot}(t)$ orthogonal to $\bE$ in atomic units', "j_ortho_whole_time")

    # Emission spectra, in several windowed variants.
    tikz_freq(W.I_E_dir, W.I_ortho, W.freq/P.f, f_idx_whole,
              r'Emission intensity in atomic units', "Emission_para_ortho_full_range", two_func=True,
              label_1="$\;I_{\parallel}(\w)$", label_2="$\;I_{\\bot}(\w)$")
    tikz_freq(W.I_E_dir, W.I_ortho, W.freq/P.f, f_idx,
              r'Emission intensity in atomic units', "Emission_para_ortho", two_func=True,
              label_1="$\;I_{\parallel}(\w)$", label_2="$\;I_{\\bot}(\w)$")
    tikz_freq(W.I_E_dir + W.I_ortho, None, W.freq/P.f, f_idx,
              r'Emission intensity in atomic units', "Emission_total", two_func=False,
              label_1="$\;I(\w) = I_{\parallel}(\w) + I_{\\bot}(\w)$")
    tikz_freq(W.I_E_dir_hann + W.I_ortho_hann, W.I_E_dir_parzen+W.I_ortho_parzen, W.freq/P.f, f_idx,
              r'Emission intensity in atomic units', "Emission_total_hann_parzen", two_func=True,
              label_1="$\;I(\w)$ with $\\bj(\w)$ computed using the Hann window",
              label_2="$\;I(\w)$ with $\\bj(\w)$ computed using the Parzen window", dashed=True)

    # Thicker lines throughout all generated .tikz files.
    replace("semithick", "thick", "*")

    conditional_pdflatex(P.header.replace('_', ' '), 'CUED_summary.tex')

    # Return to the original working directory.
    chdir()
def write_parameters(P, Mpi):
    """Substitute the run parameters into the PH-* placeholders of the
    copied CUED_summary.tex via sed-based replace() calls.

    All replacement strings below are pre-escaped for sed/shell
    (double backslashes, escaped slashes) — keep them verbatim.
    """
    if P.BZ_type == 'rectangle':
        if P.angle_inc_E_field == 0:
            replace("PH-EFIELD-DIRECTION", "$\\\\hat{e}_\\\\phi = \\\\hat{e}_x$")
        elif P.angle_inc_E_field == 90:
            replace("PH-EFIELD-DIRECTION", "$\\\\hat{e}_\\\\phi = \\\\hat{e}_y$")
        else:
            replace("PH-EFIELD-DIRECTION", "$\\\\phi = "+str(P.angle_inc_E_field)+"^\\\\circ$")
    elif P.BZ_type == 'hexagon':
        if P.align == 'K':
            replace("PH-EFIELD-DIRECTION", "$\\\\Gamma$-K direction")
        elif P.align == 'M':
            replace("PH-EFIELD-DIRECTION", "$\\\\Gamma$-M direction")

    if P.user_defined_field:
        # A user-defined field has no analytic parameters: hide that part of
        # the summary by flipping the LaTeX conditional.
        replace("iftrue", "iffalse")
    else:
        replace("PH-E0", str(P.E0_MVpcm))
        replace("PH-FREQ", str(P.f_THz))
        replace("PH-CHIRP", str(P.chirp_THz))
        # Pretty-print the CEP when it is (numerically) a multiple of pi/2.
        eps = 1.0E-13
        if P.phase > np.pi/2-eps and P.phase < np.pi/2+eps:
            replace("PH-CEP", "\\\\pi\/2")
        elif P.phase > np.pi-eps and P.phase < np.pi+eps:
            replace("PH-CEP", "\\\\pi")
        elif P.phase > 3*np.pi/2-eps and P.phase < 3*np.pi/2+eps:
            replace("PH-CEP", "3\\\\pi\/2")
        elif P.phase > 2*np.pi-eps and P.phase < 2*np.pi+eps:
            replace("PH-CEP", "2\\\\pi")
        else:
            replace("PH-CEP", str(P.phase))
        replace("PH-SIGMA", str(P.sigma_fs))
        # FWHM of a Gaussian envelope: 2*sqrt(ln 2) * sigma.
        replace("PH-FWHM", '{:.3f}'.format(P.sigma_fs*2*np.sqrt(np.log(2))))

    replace("PH-BZ", P.BZ_type)
    replace("PH-NK1", str(P.Nk1))
    replace("PH-NK2", str(P.Nk2))
    replace("PH-T2", str(P.T2_fs))
    replace("PH-RUN", '{:.1f}'.format(P.run_time))
    replace("PH-MPIRANKS", str(Mpi.size))
def tikz_time(func_of_t, time_fs, t_idx, ylabel, filename):
    """Plot *func_of_t* over *time_fs* at the indices *t_idx* and export
    the figure as <filename>.tikz for the LaTeX summary."""
    fig, axis = plt.subplots(1)
    axis.plot(time_fs[t_idx], func_of_t[t_idx], marker='')
    axis.grid(True, axis='both', ls='--')
    axis.set_xlim((time_fs[t_idx[0]], time_fs[t_idx[-1]]))
    axis.set_xlabel(unit['t'])
    axis.set_ylabel(ylabel)
    tikzplotlib.save(filename + ".tikz", axis_height=r'\figureheight',
                     axis_width=r'\figurewidth' )
    # Close explicitly so repeated calls do not accumulate open figures.
    plt.close(fig)
def tikz_freq(func_1, func_2, freq_div_f0, f_idx, ylabel, filename, two_func, \
              label_1=None, label_2=None, dashed=False):
    """Semilog plot of one or two spectra versus harmonic order, exported
    as <filename>.tikz; the tick labels are post-processed via replace().

    Args:
        func_1: first spectrum (always drawn).
        func_2: second spectrum, drawn only when *two_func* is True.
        freq_div_f0: frequency axis in units of the driving frequency.
        f_idx: index set selecting the plotted window.
        ylabel, filename: axis label and output basename.
        label_1, label_2: legend entries.
        dashed: draw the second curve dashed.
    """
    xlabel = r'Harmonic order = ' + unit['ff0']
    _fig, (ax1) = plt.subplots(1)
    _lines_exact_E_dir = ax1.semilogy(freq_div_f0[f_idx], func_1[f_idx], marker='', label=label_1)
    if two_func:
        if dashed:
            _lines_exact_E_dir = ax1.semilogy(freq_div_f0[f_idx], func_2[f_idx], marker='', label=label_2, \
                                              linestyle='--')
        else:
            _lines_exact_E_dir = ax1.semilogy(freq_div_f0[f_idx], func_2[f_idx], marker='', label=label_2)
    f_lims = (freq_div_f0[f_idx[0]], freq_div_f0[f_idx[-1]])
    ax1.grid(True, axis='both', ls='--')
    ax1.set_xlim(f_lims)
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    ax1.legend(loc='upper right')
    # One tick per harmonic order.
    ax1.set_xticks(np.arange(f_lims[1]+1))
    tikzplotlib.save(filename + ".tikz",
                     axis_height='\\figureheight',
                     axis_width ='\\figurewidth' )
    # Rewrite the tick specification in the generated .tikz: show all 30
    # tick marks but label only 1, 5, 10, 15, 20, 25, 30.
    replace("xmax=30,", "xmax=30, xtick={0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20"+\
            ",21,22,23,24,25,26,27,28,29,30}, xticklabels={,1,,,,5,,,,,10,,,,,15,,,,,20,,,,,25,,,,,30},", \
            filename=filename + ".tikz")
    # Need to explicitly close figure after writing
    plt.close(_fig)
def replace(old, new, filename="CUED_summary.tex"):
    # In-place sed substitution of every occurrence of *old* with *new*
    # in *filename* (a glob like "*" is expanded by the shell).
    # NOTE(review): *old* and *new* are inserted verbatim into a
    # shell-quoted sed expression, so callers must pre-escape sed/shell
    # metacharacters (see the double-backslash patterns in
    # write_parameters) — never pass user-derived text here.
    os.system("sed -i -e \'s/"+old+"/"+new+"/g\' "+filename)
def get_time_indices_for_plotting(E_field, time_fs, num_t_points_max):
    """Select at most roughly *num_t_points_max* evenly spaced indices that
    cover the window where |E_field| exceeds 0.1% of its peak value."""
    threshold = 1.0E-3
    envelope = np.abs(E_field)
    cutoff = threshold * np.amax(envelope)
    # First/last samples above the cutoff define the plotted window.
    first = np.argmax(envelope > cutoff)
    last = E_field.size + 1 - np.argmax(envelope[::-1] > cutoff)
    span = last - first
    stride = 1 if span < num_t_points_max else span // num_t_points_max
    return range(first, last, stride)
def get_indices_for_plotting_whole(data, num_points_for_plotting, start):
    """Evenly spaced indices from *start* up to (excluding) the last
    element of *data*, at most roughly *num_points_for_plotting* of them."""
    stride = data.size // num_points_for_plotting
    return range(start, data.size - 1, stride)
def get_freq_indices_for_plotting(freq_div_f0, num_points_for_plotting, freq_min=-1.0E-8, freq_max=30):
    """Indices covering the window (freq_min, freq_max] of the frequency
    axis (in units of the driving frequency), downsampled to at most
    roughly *num_points_for_plotting* points."""
    first = np.argmax(freq_div_f0 > freq_min)
    last = np.argmax(freq_div_f0 > freq_max)
    span = last - first
    stride = 1 if span < num_points_for_plotting else span // num_points_for_plotting
    return range(first, last, stride)
def get_symmetry_path_in_BZ(P, num_points_for_plotting):
    """Build a k-point path along the high-symmetry directions of the
    Brillouin zone: K -> Gamma -> M for hexagonal lattices, and the
    field-parallel/orthogonal edge midpoints for rectangular ones."""
    n_half = num_points_for_plotting // 2
    delta = 1 / (2 * n_half)
    # Scalar ramps along each leg; endpoints are offset by delta so the
    # Gamma point itself is never sampled.
    down_ramp = np.linspace(1.0 - delta, 0.0 + delta, num=n_half)  # 1 -> 0
    up_ramp = np.linspace(0.0 + delta, 1.0 - delta, num=n_half)    # 0 -> 1

    if P.BZ_type == 'hexagon':
        R = 4.0 * np.pi / (3 * P.a)
        r = 2.0 * np.pi / (np.sqrt(3) * P.a)
        first_leg = np.array([R, 0])                                     # K point
        second_leg = np.array([r * np.cos(-np.pi / 6), r * np.sin(-np.pi / 6)])  # M point
    elif P.BZ_type == 'rectangle':
        first_leg = 0.5 * P.length_BZ_E_dir * P.E_dir
        second_leg = 0.5 * P.length_BZ_ortho * np.array([P.E_dir[1], -P.E_dir[0]])

    path = [alpha * first_leg for alpha in down_ramp]
    path += [alpha * second_leg for alpha in up_ramp]
    return np.array(path)
def BZ_plot(P, A_field):
    """
    Function that plots the Brillouin zone

    Draws the BZ outline with its high-symmetry points, overlays the
    k-point paths (subsampled if the grid is too dense to plot), and adds
    an arrow pair showing the vector-potential excursion. Exports the
    figure as BZ.tikz and returns a small container with the BZ outline
    coordinates and plot extents.
    """
    BZ_fig = plt.figure(figsize=(10, 10))
    # Gamma point at the origin.
    plt.plot(np.array([0.0]), np.array([0.0]), color='black', marker="o", linestyle='None')
    plt.text(0.01, 0.01, symb['G'])
    default_width = 0.87

    if P.BZ_type == 'hexagon':
        # Corner (K) and edge-midpoint (M) distances of the hexagonal BZ.
        R = 4.0*np.pi/(3*P.a_angs)
        r = 2.0*np.pi/(np.sqrt(3)*P.a_angs)
        plt.plot(np.array([r*np.cos(-np.pi/6)]), np.array([r*np.sin(-np.pi/6)]), color='black', marker="o", linestyle='None')
        plt.text(r*np.cos(-np.pi/6)+0.01, r*np.sin(-np.pi/6)-0.05, symb['M'])
        plt.plot(np.array([R]), np.array([0.0]), color='black', marker="o", linestyle='None')
        plt.text(R, 0.02, r'K')
        # Outline of the first BZ plus its six neighbours.
        kx_BZ = R*np.array([1,2,1,0.5,1, 0.5,-0.5,-1, -0.5,-1,-2,-1,-0.5,-1, -0.5,0.5, 1, 0.5, 1])
        tmp = np.sqrt(3)/2
        ky_BZ = R*np.array([0,0,0,tmp,2*tmp,tmp,tmp, 2*tmp,tmp, 0, 0, 0, -tmp,-2*tmp,-tmp,-tmp,-2*tmp,-tmp,0])
        plt.plot(kx_BZ, ky_BZ, color='black' )
        length = 5.0/P.a_angs
        length_x = length
        length_y = length
        ratio_yx = default_width
        dist_to_border = 0.1*length

    elif P.BZ_type == 'rectangle':
        # polar angle of upper right point of a rectangle that is horizontally aligned
        alpha = np.arctan(P.length_BZ_ortho/P.length_BZ_E_dir)
        beta = P.angle_inc_E_field/360*2*np.pi
        dist_edge_to_Gamma = np.sqrt(P.length_BZ_E_dir**2+P.length_BZ_ortho**2)/2/CoFa.au_to_as
        # Four corners of the (rotated) rectangle, closed back to the start.
        kx_BZ = dist_edge_to_Gamma*np.array([np.cos(alpha+beta),np.cos(np.pi-alpha+beta),np.cos(alpha+beta+np.pi),np.cos(2*np.pi-alpha+beta),np.cos(alpha+beta)])
        ky_BZ = dist_edge_to_Gamma*np.array([np.sin(alpha+beta),np.sin(np.pi-alpha+beta),np.sin(alpha+beta+np.pi),np.sin(2*np.pi-alpha+beta),np.sin(alpha+beta)])
        plt.plot(kx_BZ, ky_BZ, color='black' )
        # Edge midpoints X (field-parallel) and Y (orthogonal).
        X_x = (kx_BZ[0]+kx_BZ[3])/2
        X_y = (ky_BZ[0]+ky_BZ[3])/2
        Y_x = (kx_BZ[0]+kx_BZ[1])/2
        Y_y = (ky_BZ[0]+ky_BZ[1])/2
        plt.plot(np.array([X_x,Y_x]), np.array([X_y,Y_y]), color='black', marker="o", linestyle='None')
        plt.text(X_x, X_y, symb['X'])
        plt.text(Y_x, Y_y, symb['Y'])
        dist_to_border = 0.1*max(np.amax(kx_BZ), np.amax(ky_BZ))
        length_x = np.amax(kx_BZ) + dist_to_border
        length_y = np.amax(ky_BZ) + dist_to_border
        ratio_yx = length_y/length_x*default_width

    # Caps on how many k-points are drawn; denser grids are re-meshed below.
    if P.gauge == "velocity":
        Nk1_max = 120
        Nk2_max = 30
    else:
        Nk1_max = 24
        Nk2_max = 6

    if P.Nk1 <= Nk1_max and P.Nk2 <= Nk2_max:
        printed_paths = P.paths
        Nk1_plot = P.Nk1
        Nk2_plot = P.Nk2
    else:
        # Temporarily shrink the grid in P to build a coarser mesh for
        # plotting only, then restore the original values.
        Nk1_safe = P.Nk1
        Nk2_safe = P.Nk2
        P.Nk1 = min(P.Nk1, Nk1_max)
        P.Nk2 = min(P.Nk2, Nk2_max)
        Nk1_plot = P.Nk1
        Nk2_plot = P.Nk2
        P.Nk = P.Nk1*P.Nk2
        if P.BZ_type == 'hexagon':
            dk, kweight, printed_paths, printed_mesh = hex_mesh(P)
        elif P.BZ_type == 'rectangle':
            dk, kweight, printed_paths, printed_mesh = rect_mesh(P)
        P.Nk1 = Nk1_safe
        P.Nk2 = Nk2_safe
        P.Nk = P.Nk1*P.Nk2

    plt.xlim(-length_x, length_x)
    plt.ylim(-length_y, length_y)
    plt.xlabel(unit['kx'])
    plt.ylabel(unit['ky'])

    for path in printed_paths:
        # Close each path by repeating its first point at the end.
        num_k = np.size(path[:,0])
        plot_path_x = np.zeros(num_k+1)
        plot_path_y = np.zeros(num_k+1)
        plot_path_x[0:num_k] = 1/CoFa.au_to_as*path[0:num_k, 0]
        plot_path_x[num_k] = 1/CoFa.au_to_as*path[0, 0]
        plot_path_y[0:num_k] = 1/CoFa.au_to_as*path[0:num_k, 1]
        plot_path_y[num_k] = 1/CoFa.au_to_as*path[0, 1]
        # NOTE(review): indentation of the two plot calls was reconstructed —
        # in the length gauge each path is drawn as a line AND as gray dots,
        # otherwise only as dots. Confirm against the upstream source.
        if P.gauge == "length":
            plt.plot(plot_path_x, plot_path_y)
        plt.plot(plot_path_x, plot_path_y, color='gray', marker="o", linestyle='None')

    # Vector-potential excursion, anchored near the upper-left corner:
    # green arm for the positive extreme, red for the negative one.
    A_min = np.amin(A_field)/CoFa.au_to_as
    A_max = np.amax(A_field)/CoFa.au_to_as
    A_diff = A_max - A_min

    adjusted_length_x = length_x - dist_to_border/2
    adjusted_length_y = length_y - dist_to_border/2

    anchor_A_x = -adjusted_length_x + abs(P.E_dir[0]*A_min)
    anchor_A_y = adjusted_length_y - abs(A_max*P.E_dir[1])

    neg_A_x = np.array([anchor_A_x + A_min*P.E_dir[0], anchor_A_x])
    neg_A_y = np.array([anchor_A_y + A_min*P.E_dir[1], anchor_A_y])
    pos_A_x = np.array([anchor_A_x + A_max*P.E_dir[0], anchor_A_x])
    pos_A_y = np.array([anchor_A_y + A_max*P.E_dir[1], anchor_A_y])

    anchor_A_x_array = np.array([anchor_A_x])
    anchor_A_y_array = np.array([anchor_A_y])

    plt.plot(pos_A_x, pos_A_y, color="green")
    plt.plot(neg_A_x, neg_A_y, color="red")
    plt.plot(anchor_A_x_array, anchor_A_y_array, color='black', marker="o", linestyle='None')

    tikzplotlib.save("BZ.tikz", axis_height='\\figureheight', axis_width ='\\figurewidth' )
    plt.close()

    # Post-process the generated .tikz file.
    replace("scale=0.5", "scale=1", filename="BZ.tikz")
    replace("mark size=3", "mark size=1", filename="BZ.tikz")
    replace("PH-SMALLNK1", str(Nk1_plot))
    replace("PH-SMALLNK2", str(Nk2_plot))
    replace("1.00000000000000000000", str(ratio_yx))
    replace("figureheight,", "figureheight, scale only axis=true,", filename="BZ.tikz")

    # Lightweight container for the BZ outline and plot extents.
    class BZ_plot_parameters():
        pass

    K = BZ_plot_parameters()
    K.kx_BZ = kx_BZ
    K.ky_BZ = ky_BZ
    K.length_x = length_x
    K.length_y = length_y

    return K
def bandstruc_and_dipole_plot_high_symm_line(high_symmetry_path_BZ, P, num_points_for_plotting, sys):
    """Plot band energies, |dipole| (or Berry connection) and its projection
    onto the E-field direction along a high-symmetry BZ path.

    Writes bandstructure.tikz, abs_dipole.tikz and proj_dipole.tikz via
    plot_it().  P.Nk1 is temporarily overridden with num_points_for_plotting
    and restored afterwards.
    """
    # Evaluate the eigensystem on the (finer) plotting path.
    Nk1 = P.Nk1
    P.Nk1 = num_points_for_plotting
    path = high_symmetry_path_BZ
    sys.eigensystem_dipole_path(path, P)
    P.Nk1 = Nk1
    # Cumulative |k| differences serve as the x-coordinate along the path.
    abs_k = np.sqrt(path[:,0]**2 + path[:,1]**2)
    k_in_path = np.zeros(num_points_for_plotting)
    for i_k in range(1,num_points_for_plotting):
        k_in_path[i_k] = k_in_path[i_k-1] + np.abs( abs_k[i_k] - abs_k[i_k-1] )
    # --- Band energies ---
    _fig, (ax1) = plt.subplots(1)
    for i_band in range(P.n):
        _lines_exact_E_dir = ax1.plot(k_in_path, sys.e_in_path[:,i_band]*CoFa.au_to_eV, marker='', \
                                      label="$n=$ "+str(i_band))
    plot_it(P, r"Band energy " + unit['e(k)'], "bandstructure.tikz", ax1, k_in_path)
    plt.close(_fig)
    # --- Absolute value: Berry connection (semiclassics) or dipole ---
    _fig, (ax2) = plt.subplots(1)
    d_min = 1.0E-10
    if P.dm_dynamics_method == 'semiclassics':
        for i_band in range(P.n):
            abs_connection = (np.sqrt( np.abs(sys.Ax_path[:, i_band, i_band])**2 + \
                np.abs(sys.Ay_path[:, i_band, i_band])**2 ) + 1.0e-80)*CoFa.au_to_as
            _lines_exact_E_dir= ax2.semilogy(k_in_path, abs_connection, marker='', \
                                             label="$n=$ "+str(i_band))
            d_min = max(d_min, np.amin(abs_connection))
        plot_it(P, r'Berry connection ' + unit['dn'], "abs_dipole.tikz", ax2, k_in_path, d_min)
    else:
        for i_band in range(P.n):
            for j_band in range(P.n):
                if j_band >= i_band: continue
                abs_dipole = (np.sqrt(np.abs(sys.dipole_path_x[:, i_band, j_band])**2 + \
                    np.abs(sys.dipole_path_y[:, i_band, j_band])**2) + 1.0e-80)*CoFa.au_to_as
                _lines_exact_E_dir = ax2.semilogy(k_in_path, abs_dipole, marker='', \
                                                  label="$n=$ "+str(i_band)+", $m=$ "+str(j_band))
                d_min = max(d_min, np.amin(abs_dipole))
        plot_it(P, r'Dipole ' + unit['dnm'], "abs_dipole.tikz", ax2, k_in_path, d_min)
    plt.close(_fig)
    # --- Projection onto the E-field direction ---
    _fig, (ax3) = plt.subplots(1)
    d_min = 1.0E-10
    if P.dm_dynamics_method == 'semiclassics':
        for i_band in range(P.n):
            proj_connection = (np.abs( sys.Ax_path[:,i_band,i_band]*P.E_dir[0] + \
                sys.Ay_path[:, i_band, i_band]*P.E_dir[1] ) + 1.0e-80)*CoFa.au_to_as
            _lines_exact_E_dir = ax3.semilogy(k_in_path, proj_connection, marker='',
                                              label="$n=$ "+str(i_band))
            d_min = max(d_min, np.amin(proj_connection))
        plot_it(P, unit['ephi_dot_dn'], "proj_dipole.tikz", ax3, k_in_path, d_min)
    else:
        for i_band in range(P.n):
            for j_band in range(P.n):
                if j_band >= i_band: continue
                # BUG FIX: a.u. -> angstrom conversion multiplies by
                # CoFa.au_to_as (was division), consistent with abs_dipole
                # and proj_connection above.
                proj_dipole = (np.abs( sys.dipole_path_x[:,i_band,j_band]*P.E_dir[0] + \
                    sys.dipole_path_y[:,i_band,j_band]*P.E_dir[1] ) + 1.0e-80)*CoFa.au_to_as
                _lines_exact_E_dir = ax3.semilogy(k_in_path, proj_dipole, marker='', \
                                                  label="$n=$ "+str(i_band)+", $m=$ "+str(j_band))
                d_min = max(d_min, np.amin(proj_dipole))
        plot_it(P, unit['ephi_dot_dnm'], "proj_dipole.tikz", ax3, k_in_path, d_min)
    plt.close(_fig)
def plot_it(P, ylabel, filename, ax1, k_in_path, y_min=None):
    """Decorate a high-symmetry-path axis and export it to a tikz file.

    Ticks are placed at the start, middle and end of the path and labeled
    with the BZ symmetry-point symbols for the given BZ type.
    """
    n_points = k_in_path.size
    k_lims = (k_in_path[0], k_in_path[-1])
    ax1.grid(True, axis='both', ls='--')
    ax1.set_ylabel(ylabel)
    ax1.legend(loc='upper left')
    ax1.set_xlim(k_lims)
    if y_min is not None:
        ax1.set_ylim(bottom=y_min)
    ax1.set_xticks([k_in_path[0], k_in_path[n_points//2], k_in_path[-1]])
    if P.BZ_type == 'hexagon':
        ax1.set_xticklabels([symb['K'], symb['G'], symb['M']])
    elif P.BZ_type == 'rectangle':
        ax1.set_xticklabels([symb['X'], symb['G'], symb['Y']])
    tikzplotlib.save(filename, axis_height='\\figureheight', axis_width ='\\figurewidth' )
def dipole_quiver_plots(K, P, sys):
    """Quiver plots of all dipole matrix elements over the BZ -> dipoles.pdf.

    The k-mesh is temporarily coarsened (and, for the rectangle BZ, the BZ
    window made square) so arrows stay readable; all modified attributes of
    P are restored before returning.
    """
    Nk1 = P.Nk1
    Nk2 = P.Nk2
    if P.BZ_type == 'rectangle':
        Nk_plot = 10
        P.Nk1 = Nk_plot
        P.Nk2 = Nk_plot
        # Save and square the BZ window; restored at the end.
        length_BZ_E_dir = P.length_BZ_E_dir
        length_BZ_ortho = P.length_BZ_ortho
        P.length_BZ_E_dir = max(length_BZ_E_dir, length_BZ_ortho)
        P.length_BZ_ortho = max(length_BZ_E_dir, length_BZ_ortho)
    # BUG FIX: this branch previously re-tested 'rectangle' and was
    # unreachable; the coarse 24x6 mesh is for the hexagonal BZ.
    elif P.BZ_type == 'hexagon':
        P.Nk1 = 24
        P.Nk2 = 6
    Nk_combined = P.Nk1*P.Nk2
    d_x = np.zeros([Nk_combined, P.n, P.n], dtype=np.complex128)
    d_y = np.zeros([Nk_combined, P.n, P.n], dtype=np.complex128)
    k_x = np.zeros( Nk_combined )
    k_y = np.zeros( Nk_combined )
    if P.BZ_type == 'hexagon':
        dk, kweight, printed_paths, printed_mesh = hex_mesh(P)
    elif P.BZ_type == 'rectangle':
        dk, kweight, printed_paths, printed_mesh = rect_mesh(P)
    # Collect dipoles (in angstrom) and k-points (in 1/angstrom) per path.
    for k_path, path in enumerate(printed_paths):
        sys.eigensystem_dipole_path(path, P)
        d_x[k_path*P.Nk1:(k_path+1)*P.Nk1, :, :] = sys.dipole_path_x[:,:,:]*CoFa.au_to_as
        d_y[k_path*P.Nk1:(k_path+1)*P.Nk1, :, :] = sys.dipole_path_y[:,:,:]*CoFa.au_to_as
        k_x[k_path*P.Nk1:(k_path+1)*P.Nk1] = path[:,0]/CoFa.au_to_as
        k_y[k_path*P.Nk1:(k_path+1)*P.Nk1] = path[:,1]/CoFa.au_to_as
    num_plots = P.n**2
    num_plots_vert = (num_plots+1)//2
    fig, ax = plt.subplots(num_plots_vert, 2, figsize=(15, 6.2*num_plots_vert))
    # Diagonal (intraband) elements are real; one panel per band.
    for i_band in range(P.n):
        plot_x_index = i_band//2
        plot_y_index = i_band%2
        title = r"$\mb{{d}}_{{{:d}{:d}}}(\mb{{k}})$ (diagonal dipole matrix elements are real)"\
            .format(i_band, i_band)
        colbar_title = r"$\log_{{10}}\; \lvert (\mb{{d}}_{{{:d}{:d}}}(\mb{{k}}))/\si{{\As}} \rvert$"\
            .format(i_band, i_band)
        plot_single_dipole(d_x.real, d_y.real, i_band, i_band, plot_x_index, plot_y_index,
                           K, k_x, k_y, fig, ax, title, colbar_title)
    # Off-diagonal elements: separate panels for real and imaginary parts.
    counter = 0
    for i_band in range(P.n):
        for j_band in range(P.n):
            if i_band >= j_band: continue
            plot_index = P.n + counter
            plot_x_index = plot_index//2
            plot_y_index = plot_index%2
            counter += 1
            title = r"$\rRe \mb{{d}}_{{{:d}{:d}}}(\mb{{k}})$"\
                .format(i_band, j_band)
            colbar_title = r"$\log_{{10}}\; \lvert \rRe(\mb{{d}}_{{{:d}{:d}}}(\mb{{k}}))/\si{{\As}} \rvert$"\
                .format(i_band, j_band)
            plot_single_dipole(d_x.real, d_y.real, i_band, j_band, plot_x_index, plot_y_index, \
                               K, k_x, k_y, fig, ax, title, colbar_title)
            plot_index = P.n + counter
            plot_x_index = plot_index//2
            plot_y_index = plot_index%2
            counter += 1
            title = r"$\rIm \mb{{d}}_{{{:d}{:d}}}(\mb{{k}})$"\
                .format(i_band, j_band)
            colbar_title = r"$\log_{{10}}\; \lvert \rIm (\mb{{d}}_{{{:d}{:d}}}(\mb{{k}}))/\si{{\As}} \rvert$"\
                .format(i_band, j_band)
            plot_single_dipole(d_x.imag, d_y.imag, i_band, j_band, plot_x_index, plot_y_index, \
                               K, k_x, k_y, fig, ax, title, colbar_title)
    filename = 'dipoles.pdf'
    plt.savefig(filename, bbox_inches='tight')
    plt.close(fig)
    # Restore everything that was temporarily changed on P.
    P.Nk1 = Nk1
    P.Nk2 = Nk2
    if P.BZ_type == 'rectangle':
        P.length_BZ_E_dir = length_BZ_E_dir
        P.length_BZ_ortho = length_BZ_ortho
def plot_single_dipole(d_x, d_y, i_band, j_band, x, y, K, k_x, k_y, fig, ax, \
                       title, colbar_title):
    """Quiver plot of the (i_band, j_band) dipole component on subplot ax[x, y].

    Arrows are unit-normalized; the color encodes log10 of the magnitude.
    """
    dx_ij = d_x[:, i_band, j_band]
    dy_ij = d_y[:, i_band, j_band]
    # Clamp the magnitude away from zero so normalization and log10 are safe.
    magnitude = np.maximum(np.sqrt(dx_ij**2 + dy_ij**2), 1.0e-10*np.ones(np.shape(dx_ij)))
    unit_dx = dx_ij / magnitude
    unit_dy = dy_ij / magnitude
    axis = ax[x,y]
    # Gray outline of the BZ boundary behind the arrows.
    axis.plot(K.kx_BZ, K.ky_BZ, color='gray' )
    quiv = axis.quiver(k_x, k_y, unit_dx, unit_dy, np.log10(magnitude),
                       angles='xy', cmap=whitedarkjet, width=0.007 )
    axis.set_title(title)
    axis.axis('equal')
    axis.set_xlabel(unit['kx'])
    axis.set_ylabel(unit['ky'])
    axis.set_xlim(-K.length_x, K.length_x)
    axis.set_ylim(-K.length_y, K.length_y)
    plt.colorbar(quiv, ax=axis, label=colbar_title)
def density_matrix_plot(P, T, K):
    """Write one PDF per density-matrix component: dm_nn.pdf for the (real)
    band occupations, Re/Im_dm_nm.pdf for the coherences, each containing
    all P.Nt_pdf_densmat snapshot times.
    """
    # Flatten the (Nk1, Nk2) mesh into one combined k index
    # (combined = i_k1 + j_k2*Nk1) so the plot routine can index flat arrays.
    # NOTE: the dead local assignment "i_band, j_band = 1, 1" was removed —
    # both names are rebound by the loops below before any use.
    reshaped_pdf_dm = np.zeros((P.Nk1*P.Nk2, P.Nt_pdf_densmat, P.n, P.n), dtype=P.type_complex_np)
    for i_k1 in range(P.Nk1):
        for j_k2 in range(P.Nk2):
            combined_k_index = i_k1 + j_k2*P.Nk1
            reshaped_pdf_dm[combined_k_index, :, :, :] = T.pdf_densmat[i_k1, j_k2, :, :, :]
    n_vert = (P.Nt_pdf_densmat+1)//2
    # Diagonal elements (occupations) are plotted from the real part only.
    for i_band in range(P.n):
        filename = 'dm_' + str(i_band) + str(i_band) + '.pdf'
        plot_dm_for_all_t(reshaped_pdf_dm.real, P, T, K, i_band, i_band, '', filename, n_vert)
    # Off-diagonal elements (coherences): real and imaginary parts separately.
    for i_band in range(P.n):
        for j_band in range(P.n):
            if i_band >= j_band: continue
            filename = 'Re_dm_' + str(i_band) + str(j_band) + '.pdf'
            plot_dm_for_all_t(reshaped_pdf_dm.real, P, T, K, i_band, j_band, 'Re', filename, n_vert)
            filename = 'Im_dm_' + str(i_band) + str(j_band) + '.pdf'
            plot_dm_for_all_t(reshaped_pdf_dm.imag, P, T, K, i_band, j_band, 'Im', filename, n_vert)
    # Patch the LaTeX template loop bound to the actual number of bands.
    replace("bandindex in {0,...,1}", "bandindex in {0,...," + str(P.n-1) + "}")
def plot_dm_for_all_t(reshaped_pdf_dm, P, T, K, i_band, j_band, prefix_title, \
                      filename, n_plots_vertical):
    """Plot the (i_band, j_band) density-matrix component over the BZ for all
    snapshot times T.t_pdf_densmat in a two-column grid and save to filename.

    reshaped_pdf_dm has layout (Nk, Nt, n, n) and is already real-valued;
    prefix_title ('', 'Re' or 'Im') is prepended to each subplot title.
    """
    fig, ax = plt.subplots(n_plots_vertical, 2, figsize=(15, 6.2*n_plots_vertical*K.length_y/K.length_x))
    # Common color range over ALL k-points and ALL times.  It does not depend
    # on the time index, so it is computed once (hoisted out of the loop).
    minval = np.amin(reshaped_pdf_dm[:, :, i_band, j_band].real)
    maxval = np.amax(reshaped_pdf_dm[:, :, i_band, j_band].real)
    if maxval - minval < 1E-6:
        # Widen a degenerate range so the contour levels stay strictly increasing.
        minval -= 1E-6
        maxval += 1E-6
    for t_i in range(P.Nt_pdf_densmat):
        i = t_i//2
        j = t_i%2
        if P.Nk2 > 1:
            # 2D BZ: filled contour plot over the k-mesh.
            im = ax[i, j].tricontourf(P.mesh[:, 0].astype('float64')/CoFa.au_to_as, P.mesh[:, 1].astype('float64')/CoFa.au_to_as,
                                      reshaped_pdf_dm[:, t_i, i_band, j_band].astype('float64'),
                                      np.linspace(minval, maxval, 100), cmap=whitedarkjet)
            # Aesthetics
            contourf_remove_white_lines(im)
            fig.colorbar(im, ax=ax[i, j])
            ax[i, j].plot(K.kx_BZ, K.ky_BZ, color='black')
            ax[i, j].set_ylim(-K.length_y, K.length_y)
            ax[i, j].set_ylabel(unit['ky'])
        else:
            # 1D k-path: simple line plot.
            im = ax[i,j].plot(P.mesh[:, 0]/CoFa.au_to_as, reshaped_pdf_dm[:, t_i, i_band, j_band])
        ax[i, j].set_xlabel(unit['kx'])
        ax[i, j].set_xlim(-K.length_x, K.length_x)
        ax[i, j].set_title(prefix_title +
                           r' $\rho_{{{:d},{:d}}}(\mb{{k}},t)$ at $t = {:.1f}\si{{\fs}}$'\
                           .format(i_band, j_band, T.t_pdf_densmat[t_i]*CoFa.au_to_fs))
    plt.savefig(filename, bbox_inches='tight')
    plt.close(fig)
def tikz_screening_one_color(S, num_points_for_plotting, title):
    '''
    Plot a screening (like CEP) plot in frequency range given by ff0 and screening_output

    All three harmonic windows share ONE normalization (the global maximum
    over the plotted range).  Side effect: S.I_max_in_plotting_range is set
    to a single formatted string holding that maximum.
    '''
    # Find global min and max between 0th and 30th harmonic
    # Save all relevant outputs to plot in 3 horizontal plots
    num_subplots = 3
    fidx = np.empty(num_subplots, dtype=slice)
    I_min, I_max = np.empty(num_subplots, dtype=np.float64), np.empty(num_subplots, dtype=np.float64)
    screening_output = []
    # Harmonic windows: [0,10], [10,20], [20,30].
    freq_min = np.array([0, 10, 20])
    freq_max = freq_min + 10
    for i in range(freq_min.size):
        fidx[i] = get_freq_indices_for_plotting(S.ff0, num_points_for_plotting, freq_min[i], freq_max[i])
        screening_output.append(S.screening_output[:, fidx[i]])
        I_min[i], I_max[i] = screening_output[i].min(), screening_output[i].max()
    # Collapse per-window extrema to one global range.
    I_min, I_max = I_min.min(), I_max.max()
    S.I_max_in_plotting_range = '{:.4e}'.format(I_max)
    screening_output_norm = np.array(screening_output)/I_max
    I_min_norm, I_max_norm = I_min/I_max, 1
    I_min_norm_log, I_max_norm_log = np.log10(I_min_norm), np.log10(I_max_norm)
    fig, ax = plt.subplots(num_subplots)
    # Logarithmic contour levels; one tick per decade on the colorbar.
    contourlevels = np.logspace(I_min_norm_log, I_max_norm_log, 1000)
    mintick, maxtick = int(I_min_norm_log), int(I_max_norm_log)
    tickposition = np.logspace(mintick, maxtick, num=np.abs(maxtick - mintick) + 1)
    cont = np.empty(num_subplots, dtype=object)
    for i, idx in enumerate(fidx):
        ff0 = S.ff0[idx]
        F, P = np.meshgrid(ff0, S.screening_parameter_values)
        cont[i] = ax[i].contourf(F, P, screening_output_norm[i], levels=contourlevels, locator=mpl.ticker.LogLocator(),
                                 cmap=whitedarkjet, norm=mpl.colors.LogNorm(vmin=I_min_norm, vmax=I_max_norm))
        # Aesthetics
        contourf_remove_white_lines(cont[i])
        ax[i].set_xticks(np.arange(freq_min[i], freq_max[i] + 1))
        ax[i].set_ylabel(S.screening_parameter_name_plot_label)
    ax[-1].set_xlabel(r'Harmonic order ' + unit['ff0'])
    # Colorbar for ax[0]
    divider = make_axes_locatable(ax[0])
    cax = divider.append_axes('top', '7%', pad='2%')
    cbar = fig.colorbar(cont[0], cax=cax, orientation='horizontal')
    cax.tick_params(axis='x', which='major', top=True, pad=0.05)
    cax.xaxis.set_ticks_position('top')
    cax.invert_xaxis()
    cax.set_ylabel(r'$I_\mr{hh}/I_\mr{hh}^\mr{max}$', rotation='horizontal')
    cax.yaxis.set_label_coords(-0.1, 1.00)
    cbar.set_ticks(tickposition)
    # Disable every second tick label
    for label in cbar.ax.xaxis.get_ticklabels()[0::2]:
        label.set_visible(False)
    plt.suptitle(title)
    plt.savefig(S.screening_filename_plot, bbox_inches='tight')
    plt.close(fig)
def tikz_screening_per_color(S, num_points_for_plotting, title):
    '''
    Plot a screening (like CEP) plot in frequency range given by ff0 and screening_output

    Unlike tikz_screening_one_color, each of the three harmonic windows is
    normalized to its OWN maximum.  Side effect: S.I_max_in_plotting_range
    becomes a list of three formatted strings (one per window).
    '''
    # Find global min and max between 0th and 30th harmonic
    # Save all relevant outputs to plot in 3 horizontal plots
    num_subplots = 3
    fidx = np.empty(num_subplots, dtype=slice)
    I_min, I_max = np.empty(num_subplots, dtype=np.float64), np.empty(num_subplots, dtype=np.float64)
    screening_output = []
    # Harmonic windows: [0,10], [10,20], [20,30].
    freq_min = np.array([0, 10, 20])
    freq_max = freq_min + 10
    for i in range(freq_min.size):
        fidx[i] = get_freq_indices_for_plotting(S.ff0, num_points_for_plotting, freq_min[i], freq_max[i])
        screening_output.append(S.screening_output[:, fidx[i]])
        I_min[i], I_max[i] = screening_output[i].min(), screening_output[i].max()
    S.I_max_in_plotting_range = ['{:.4e}'.format(I_buf) for I_buf in I_max]
    # Per-window normalization (arrays keep one entry per sub-plot).
    screening_output_norm = np.array([screening_output[i]/I_max[i] for i in range(num_subplots)])
    I_min_norm, I_max_norm = I_min/I_max, np.ones(num_subplots)
    I_min_norm_log, I_max_norm_log = np.log10(I_min_norm), np.log10(I_max_norm)
    fig, ax = plt.subplots(num_subplots)
    # One column of log-spaced contour levels / decade ticks per sub-plot.
    contourlevels = np.logspace(I_min_norm_log, I_max_norm_log, 1000)
    mintick, maxtick = I_min_norm_log.astype(int), I_max_norm_log.astype(int)
    tickposition = [np.logspace(mintick[i], maxtick[i], num=np.abs(maxtick[i] - mintick[i]) + 1)
                    for i in range(num_subplots)]
    for i, idx in enumerate(fidx):
        ff0 = S.ff0[idx]
        F, P = np.meshgrid(ff0, S.screening_parameter_values)
        cont = ax[i].contourf(F, P, screening_output_norm[i], levels=contourlevels[:, i], locator=mpl.ticker.LogLocator(),
                              cmap=whitedarkjet, norm=mpl.colors.LogNorm(vmin=I_min_norm[i], vmax=I_max_norm[i]))
        # Per plot remove white lines
        contourf_remove_white_lines(cont)
        ax[i].set_xticks(np.arange(freq_min[i], freq_max[i] + 1))
        ax[i].set_ylabel(S.screening_parameter_name_plot_label)
        # Per plot axis label
        label_inner(ax[i], idx=i)
        # Per plot colorbar
        divider = make_axes_locatable(ax[i])
        cax = divider.append_axes('right', '7%', pad='2%')
        cbar = fig.colorbar(cont, cax=cax)
        cax.tick_params(axis='y', which='major', top=False, pad=0.05)
        # Set label only for first colorbar
        if i == 0:
            cax.set_ylabel(r'$I_\mr{hh}/I_\mr{hh}^\mr{max}$', rotation='horizontal')
            cax.yaxis.set_label_coords(1, 1.19)
        cbar.set_ticks(tickposition[i])
    ax[-1].set_xlabel(r'Harmonic order ' + unit['ff0'])
    ax[0].set_title(title)
    # Adjust plot name.  NOTE(review): the figure is saved under
    # S.screening_filename_plot — presumably that attribute is derived from
    # screening_filename (otherwise this save would overwrite the one-color
    # plot file); confirm in the screening container class.
    S.screening_filename = S.screening_filename + 'split_'
    plt.savefig(S.screening_filename_plot, bbox_inches='tight')
    plt.close(fig)
def write_and_compile_screening_latex_PDF(S):
    """Create all screening summary plots, fill the LaTeX template
    placeholders and compile CUED_screening_summary.tex.

    S is a sequence of three screening containers:
    S[0] intensity parallel to the E-field, S[1] orthogonal, S[2] summed.
    The three template sections are identical up to their placeholder tag
    (EDIR/ORTHO/FULL) and title, so they are processed in one loop that
    preserves the original call order exactly.
    """
    num_points_for_plotting = 960

    cued_copy('plotting/tex_templates/CUED_screening_summary.tex', '.')
    cued_copy('plotting/tex_templates/CUED_aliases.tex', '.')
    cued_copy('branding/logo.pdf', '.')

    sections = [
        (0, 'EDIR', 'Intensity parallel to E-field direction'),
        (1, 'ORTHO', 'Intensity orthogonal to E-field direction'),
        (2, 'FULL', 'Summed Intensity'),
    ]
    for idx, tag, title in sections:
        # Sets I_max_in_plotting_range to a single number (string).
        tikz_screening_one_color(S[idx], num_points_for_plotting, title=title)
        replace('PH-' + tag + '-PLOT', S[idx].screening_filename_plot, filename="CUED_screening_summary.tex")
        replace('PH-' + tag + '-IMAX', S[idx].I_max_in_plotting_range, filename="CUED_screening_summary.tex")
        replace('PH-PARAMETER', S[idx].screening_parameter_name, filename="CUED_screening_summary.tex")
        # Sets I_max_in_plotting_range to a list with 3 entries.
        tikz_screening_per_color(S[idx], num_points_for_plotting, title=title)
        replace('PH-' + tag + '-S-PLOT', S[idx].screening_filename_plot, filename="CUED_screening_summary.tex")
        for sub_tag, I_max in zip(('A', 'B', 'C'), S[idx].I_max_in_plotting_range):
            replace('PH-' + tag + '-' + sub_tag + '-IMAX', I_max, filename="CUED_screening_summary.tex")

    conditional_pdflatex(S[0].screening_filename.replace('_E_dir_split_', '').replace('_', ' '),
                         'CUED_screening_summary.tex')
| ccmt-regensburg/CUED | cued/plotting/latex_output_pdf.py | latex_output_pdf.py | py | 32,799 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "cued.plotting.init_matplotlib_config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "shutil.which",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.system... |
5127076250 | # coding:utf-8
import numpy as np
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
import chainer.functions as F
import chainer.links as L
import sys
import argparse
import _pickle as pickle
import MeCab
from LSTM import LSTM
# Special vocabulary indices marking sentence start/end.
BOS_INDEX = 0
EOS_INDEX = 1
# Command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--unit_size', type=int, default=100)  # LSTM hidden-unit count
parser.add_argument('--seed', type=int, default=1)         # RNG seed for sampling
parser.add_argument('--gpu', type=int, default=-1)         # GPU id; -1 = CPU
args = parser.parse_args()
# Array module alias: cupy on GPU, numpy on CPU.
xp = cuda.cupy if args.gpu >= 0 else np
xp.random.seed(args.seed)
mecab = MeCab.Tagger ("-Ochasen")
# Vocabulary and training data produced by the training script.
vocab = pickle.load(open('data/vocab.bin','rb'))
train_data = pickle.load(open('data/train_data.bin', 'rb'))
rnn = LSTM(len(vocab),args.unit_size)
model = L.Classifier(rnn)
if args.gpu >= 0:
    print('use GPU!')
    cuda.get_device(args.gpu).use()
    model.to_gpu()
serializers.load_npz('data/latest.model',model)
# Inverse vocabulary: index -> word
ivocab = {}
for c, i in vocab.items():
    ivocab[i] = c
def get_index_a(_model):
    """Sample one sentence from the language model.

    Starting from <BOS>, repeatedly feeds the previous index into the LSTM
    and samples the next index from the softmax distribution until <EOS> is
    produced.  Returns the sampled indices (without <BOS>/<EOS>).
    """
    _model.predictor.reset_state()
    _sentence_index_a = []
    index = BOS_INDEX
    while index != EOS_INDEX:
        y = _model.predictor(xp.array([index], dtype=xp.int32))
        probability = F.softmax(y)
        # Renormalize to guard against floating-point drift before sampling.
        probability.data[0] /= sum(probability.data[0])
        try:
            # Randomly pick one word according to the probability distribution
            # (greedy alternative kept for reference):
            #index = np.argmax(probability.data[0])
            index = xp.random.choice(range(len(probability.data[0])), p=probability.data[0])
            if index!=EOS_INDEX:
                # Keep the word unless it is the <EOS> terminator.
                _sentence_index_a.append(index)
        except Exception as e:
            # Sampling can fail if the renormalized vector is still not a
            # valid distribution; abort this sentence in that case.
            print('probability error')
            break
    return _sentence_index_a
# Generate and print 10 sampled sentences, separated by divider lines.
print('\n-=-=-=-=-=-=-=-')
for i in range(10):
    sentence_index_a = get_index_a(model)
    for index in sentence_index_a:
        if index in ivocab:
            # Vocabulary entries look like "word::info"; print only the part
            # before the '::' separator.
            sys.stdout.write( ivocab[index].split("::")[0] )
    print('\n-=-=-=-=-=-=-=-')
print('generated!')
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "chainer.cuda.cupy",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "chainer.cuda",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "MeCab.Tag... |
16930492443 | import aiosqlite
from datetime import datetime
DB = 'db.sqlite'
async def search_db_entry(user_id, type, kind, location, distance):
    """Return all non-expired offers of OTHER users matching the filters.

    type/kind may be 'all' to skip that filter.  When location is given,
    results are restricted to a circle of `distance` kilometres around it.
    """
    curdate = str(datetime.now().strftime('%Y%m%d'))
    # SECURITY FIX: use bound parameters instead of f-string interpolation of
    # user-controlled values (SQL injection).
    query = "SELECT * FROM geteilt where expires_at > ? AND user_id <> ?"
    params = [curdate, user_id]
    if type != 'all':
        query += " AND type = ?"
        params.append(type)
    if kind != 'all':
        query += " AND kind = ?"
        params.append(kind)
    if location is not None:
        dist = int(distance) * 1000  # km -> m, as expected by PtDistWithin
        query += " AND (PtDistWithin(geteilt.latlng, PointFromText(?, 4326), ?)=TRUE)"
        params.append(f'POINT({location.longitude} {location.latitude})')
        params.append(dist)
    query += ";"
    res = []
    async with aiosqlite.connect(DB) as db:
        await db.enable_load_extension(True)
        await db.load_extension('mod_spatialite')
        async with db.execute(query, params) as cursor:
            async for row in cursor:
                res.append(row)
    return res
async def delete_db_entry(entry_uid):
    """Delete the shared offer with primary key entry_uid."""
    async with aiosqlite.connect(DB) as db:
        # Bound parameter instead of f-string interpolation (SQL injection safety).
        await db.execute("DELETE FROM geteilt WHERE id = ?;", (entry_uid,))
        await db.commit()
async def search_db_own_entry(user_id):
    """Return all offers created by the given user."""
    res = []
    async with aiosqlite.connect(DB) as db:
        # Bound parameter instead of f-string interpolation (SQL injection safety).
        async with db.execute("SELECT * FROM geteilt WHERE user_id = ?;", (user_id,)) as cursor:
            async for row in cursor:
                res.append(row)
    return res
async def add_db_entry(user_id, user_lang, type, kind, location, description, expires_at):
    """Insert a new shared offer and set its spatialite point geometry."""
    currentDateTime = datetime.now().strftime('%Y%m%d')
    async with aiosqlite.connect(DB) as db:
        await db.enable_load_extension(True)
        await db.execute("SELECT load_extension('mod_spatialite');")
        last_row = None
        async with db.execute("INSERT INTO geteilt(user_id, user_lang, type, kind, lat, lng, desc, inserted_at, expires_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);",
                              (user_id, user_lang, type, kind, location.latitude, location.longitude,
                               description, currentDateTime, str(expires_at.strftime('%Y%m%d')))) as cursor:
            last_row = cursor.lastrowid
        # SECURITY FIX: bind the WKT string and row id instead of building the
        # UPDATE via f-string interpolation.
        await db.execute("UPDATE geteilt SET latlng = PointFromText(?, 4326) WHERE id = ?;",
                         (f'POINT({location.longitude} {location.latitude})', last_row))
        await db.commit()
async def add_db_subscription(user_id, user_lang, type, kind, location, distance):
    """Insert a subscription.

    distance 'search_everywhere' means no radius limit (stored as NULL);
    otherwise it is converted from kilometres to metres.  location may be
    None, in which case no geometry is stored.
    """
    currentDateTime = datetime.now().strftime('%Y%m%d')
    async with aiosqlite.connect(DB) as db:
        await db.enable_load_extension(True)
        await db.execute("SELECT load_extension('mod_spatialite');")
        lat = None
        lng = None
        if location is not None:
            lat = location.latitude
            lng = location.longitude
        last_row = None
        if distance == 'search_everywhere':
            distance = None
        else:
            distance = int(distance) * 1000  # km -> m
        async with db.execute("INSERT INTO subscriptions(user_id, user_lang, type, kind, lat, lng, distance, inserted_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?);",
                              (user_id, user_lang, type, kind, lat, lng, distance, currentDateTime)) as cursor:
            last_row = cursor.lastrowid
        if location is not None:
            # SECURITY FIX: bind the WKT string and row id instead of
            # f-string interpolation.
            await db.execute("UPDATE subscriptions SET latlng = PointFromText(?, 4326) WHERE id = ?;",
                             (f'POINT({location.longitude} {location.latitude})', last_row))
        await db.commit()
async def delete_db_subscription(entry_uid):
    """Delete the subscription with primary key entry_uid."""
    async with aiosqlite.connect(DB) as db:
        # Bound parameter instead of f-string interpolation (SQL injection safety).
        await db.execute("DELETE FROM subscriptions WHERE id = ?;", (entry_uid,))
        await db.commit()
async def search_db_subscriptions(user_id, type, kind, location):
    """Return (user_id, user_lang, distance) of all OTHER users whose
    subscription matches type/kind (or subscribed to 'all') and whose search
    radius — if one is set — contains the given location.
    """
    # SECURITY FIX: bound parameters instead of f-string interpolation of
    # user-controlled values (SQL injection).
    query = """SELECT user_id, user_lang, distance FROM subscriptions WHERE
                user_id <> ?
                AND (type = ? OR type = 'all') AND (kind = ? or kind = 'all')
                AND (subscriptions.latlng is NULL OR
                (PtDistWithin(subscriptions.latlng, PointFromText(?, 4326), subscriptions.distance)=TRUE))"""
    params = (user_id, type, kind, f'POINT({location.longitude} {location.latitude})')
    res = []
    async with aiosqlite.connect(DB) as db:
        await db.enable_load_extension(True)
        await db.load_extension('mod_spatialite')
        async with db.execute(query, params) as cursor:
            async for row in cursor:
                res.append(row)
    return res
async def search_db_own_subscriptions(user_id):
    """Return all subscriptions created by the given user."""
    res = []
    async with aiosqlite.connect(DB) as db:
        # Bound parameter instead of f-string interpolation (SQL injection safety).
        async with db.execute("SELECT * FROM subscriptions WHERE user_id = ?;", (user_id,)) as cursor:
            async for row in cursor:
                res.append(row)
    return res
async def check_point_col_exists(db):
    """Return True when the 'geteilt' table already carries its spatialite
    POINT geometry column, i.e. the schema has been initialized."""
    exists = False
    schema_query = "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = 'geteilt';"
    async with db.execute(schema_query) as cursor:
        async for (create_sql,) in cursor:
            # Inspect the stored CREATE TABLE statement for the POINT column.
            exists = 'POINT' in create_sql
    return exists
async def init_db():
    """Create the database schema (tables, spatialite geometry columns and
    spatial indexes) if it does not exist yet.

    The presence of the POINT column in the 'geteilt' DDL is the
    "already initialized" marker — see check_point_col_exists().
    """
    async with aiosqlite.connect(DB) as db:
        point_col_exists = await check_point_col_exists(db)
        if not point_col_exists:
            # First run: load spatialite and create the full schema.
            await db.enable_load_extension(True)
            await db.execute("SELECT load_extension('mod_spatialite');")
            await db.execute("SELECT InitSpatialMetaData();")
            # Offers shared by users ("geteilt" = German for "shared").
            await db.execute("""CREATE TABLE IF NOT EXISTS geteilt (
                                id INTEGER PRIMARY KEY AUTOINCREMENT,
                                user_id INTEGER,
                                user_lang VARCHAR(2),
                                type VARCHAR(10),
                                kind VARCHAR(10),
                                lat FLOAT,
                                lng FLOAT,
                                desc TEXT,
                                inserted_at TEXT,
                                expires_at TEXT
                                );""")
            await db.execute("SELECT AddGeometryColumn('geteilt', 'latlng', 4326, 'POINT', 'XY');")
            await db.execute("SELECT CreateSpatialIndex('geteilt', 'latlng');")
            # Search subscriptions (optional location + radius in metres).
            await db.execute("""CREATE TABLE IF NOT EXISTS subscriptions (
                                id INTEGER PRIMARY KEY AUTOINCREMENT,
                                user_id INTEGER,
                                user_lang VARCHAR(2),
                                type VARCHAR(10),
                                kind VARCHAR(10),
                                lat FLOAT,
                                lng FLOAT,
                                distance INTEGER,
                                inserted_at TEXT
                                );""")
            await db.execute("SELECT AddGeometryColumn('subscriptions', 'latlng', 4326, 'POINT', 'XY');")
            await db.execute("SELECT CreateSpatialIndex('subscriptions', 'latlng');")
            await db.commit()
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "aiosqlite.connect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "aiosqlite.con... |
6848009196 | from django.shortcuts import render, redirect
from users.forms import CustomUserCreationForm, CustomUserChangeForm
from django.contrib.auth.decorators import login_required
from users.models import CustomUser
# Create your views here.
@login_required(login_url='/login/')
def home(request):
    """List all users and handle creation of a new one.

    GET renders the list plus an empty CustomUserCreationForm; POST validates
    and saves the form, redirecting back to the list on success.
    """
    users = CustomUser.objects.all()
    if request.method == 'POST':
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            # Unused locals removed: the cleaned_data snapshot and the saved
            # instance were never read.
            form.save()
            return redirect('/users/home/')
        else:
            # Re-render with the bound form so validation errors are shown.
            return render(request, 'users.html', {'form': form, 'users': users})
    else:
        form = CustomUserCreationForm()
        return render(request, 'users.html', {'form': form, 'users': users})
@login_required(login_url='/login/')
def edit_user(request, id_user):
    """Show and process the change form for an existing user."""
    user_obj = CustomUser.objects.get(pk=id_user)
    if request.method == 'GET':
        # GET fetches data: present the form pre-filled with current values.
        form = CustomUserChangeForm(instance=user_obj)
        return render(request, 'edit_user.html', {'form': form})
    # Otherwise the submitted data is bound to the instance and saved when valid.
    form = CustomUserChangeForm(request.POST, instance=user_obj)
    if form.is_valid():
        form.save()
        return redirect('/users/home/')
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'edit_user.html', {'form': form})
@login_required(login_url='/login/')
def view_user(request, id_user):
    """Display the details page of a single user."""
    requested_user = CustomUser.objects.get(pk=id_user)
    return render(request, 'view_user.html', {'user': requested_user})
@login_required(login_url='/login/')
def delete_user(request, id_user):
    """Remove a user and return to the user list."""
    # NOTE(review): deletion is triggered by a plain GET request — consider
    # requiring POST (with CSRF protection) so link prefetchers/crawlers
    # cannot delete users.
    CustomUser.objects.get(pk=id_user).delete()
    return redirect('/users/home/')
{
"api_name": "users.forms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "users.models.CustomUser.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "users.models.CustomUser.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
13880556989 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import sys
import os
from configparser import ConfigParser
import time
from ctypes import *
import logging
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
import calendar
import datetime
from datetime import date
from dateutil.parser import parse
from dateutil.rrule import rrule, DAILY
logging.basicConfig(level=logging.DEBUG)
# Weeks start on Monday (0) in the calendar grids.
calendar.setfirstweekday(firstweekday=0)
# Directory this script lives in; used to locate fonts and task files.
runningDir = os.path.dirname(os.path.realpath(__file__))
# Per-year day-status tables, passed to SetDayStatus()
# (presumably populated there — confirm in its definition).
festivalsDays={}
holiDays={}
workDays={}
# Panel layout config (pixels): three month calendars on the top row
# (zones 0-2), two task panels on the bottom (zones 3-4).
panel_width = 800
panel_hight = 600
zone0_x = 0
zone0_y = 10
zone1_x = panel_width/3
zone1_y = 10
zone2_x = panel_width/3*2
zone2_y = 10
zone3_x = 0
zone3_y = panel_hight/2.68 + 10
zone4_x = panel_width/2
zone4_y = panel_hight/2.68 + 10
def DrawScreen(draw):
    """Render the full e-paper screen: previous/current/next month calendars
    on top, pending tasks and today's due tasks in the two bottom panels.

    Note: the helper functions draw via the module-level `draw` object; the
    parameter here is used only for the separator lines.
    """
    today = datetime.date.today()
    # Unused locals removed: the formatted Year/Month strings were only ever
    # converted back to int, and Day was never read.
    currentYear = today.year
    currentMonth = today.month
    # Previous month (wraps to December of the previous year).
    position0Month = currentMonth - 1
    position0Year = currentYear
    if position0Month <= 0:
        position0Month = position0Month + 12
        position0Year = currentYear - 1
    # Next month (wraps to January of the next year).
    position2Month = currentMonth + 1
    position2Year = currentYear
    if position2Month > 12:
        position2Month = position2Month - 12
        position2Year = position2Year + 1
    # Load day-status tables for every year shown on screen.
    SetDayStatus(festivalsDays, holiDays, workDays, str(position0Year))
    SetDayStatus(festivalsDays, holiDays, workDays, str(currentYear))
    SetDayStatus(festivalsDays, holiDays, workDays, str(position2Year))
    # Separators: calendar area / task panels, and between the two panels.
    draw.line((0, zone3_y, panel_width, zone3_y), fill = 0)
    draw.line((zone4_x, zone4_y, zone4_x, panel_hight), fill = 0)
    DrawMonth(zone0_x, zone0_y, position0Year, position0Month)
    DrawMonth(zone1_x, zone1_y, currentYear, currentMonth)
    DrawMonth(zone2_x, zone2_y, position2Year, position2Month)
    DrawBeginTask(zone3_x, zone3_y)
    DrawDueTask(zone4_x, zone4_y)
def DrawBeginTask(original_x, original_y):
    """Draw the pending-task list panel anchored at (original_x, original_y).

    Reads tasks from tasks/task.ini section [BeginTasks]; each value looks
    like "title|begin|due" where a field may be the literal string "null".
    At most 11 rows are drawn, then an ellipsis line.  Draws via the
    module-level `draw` object.
    """
    cfg = ConfigParser()
    cfg.read(runningDir + '/tasks/task.ini')
    row = 0
    fontTitle = ImageFont.truetype(runningDir + '/pic/Font.ttc', 20)
    fontTask = ImageFont.truetype(runningDir + '/pic/Font.ttc', 19)
    fontTime = ImageFont.truetype(runningDir + '/pic/Font.ttc', 17)
    # Panel heading ("to-do items").
    draw.text((original_x + 162, original_y + 10), u'待办事项', font = fontTitle, fill = 0)
    for f in cfg['BeginTasks']:
        if row > 10:
            # Too many tasks for the panel: draw an ellipsis and stop.
            draw.text((original_x + 12, original_y + 40 + row*28), "......", font = fontTask, fill = 0)
            break
        task = cfg.get(u'BeginTasks',f)
        title = task.split('|')[0]
        beginTime = task.split('|')[1]
        dueTime = task.split('|')[2]
        # Clip long titles to 15 display columns (see TrimString).
        title = TrimString(title, 15)
        draw.text((original_x + 4, original_y + 48 + row*28), title, font = fontTask, fill = 0)
        if(beginTime != "null"):
            # Keep the first five characters of the third space-separated
            # token (presumably HH:MM — confirm the stored time format).
            beginTime = beginTime.split(' ')[2][0:5]
            draw.text((original_x + 305, original_y + 48 + row*28), beginTime, font = fontTime, fill = 0)
        if(dueTime != "null"):
            dueTime = dueTime.split(' ')[2][0:5]
            draw.text((original_x + 355, original_y + 48 + row*28), dueTime, font = fontTime, fill = 0)
        row = row + 1
def DrawDueTask(original_x, original_y):
    """Draw the "due today" task panel anchored at (original_x, original_y).

    Same layout and task format as DrawBeginTask, but reads section
    [DueTasks] of tasks/task.ini.  Draws via the module-level `draw` object.
    """
    cfg = ConfigParser()
    cfg.read(runningDir + '/tasks/task.ini')
    row = 0
    fontTitle = ImageFont.truetype(runningDir + '/pic/Font.ttc', 20)
    fontTask = ImageFont.truetype(runningDir + '/pic/Font.ttc', 19)
    fontTime = ImageFont.truetype(runningDir + '/pic/Font.ttc', 18)
    # Panel heading ("due today").
    draw.text((original_x + 162, original_y + 10), u'今日截止', font = fontTitle, fill = 0)
    for f in cfg['DueTasks']:
        if row > 10:
            # Too many tasks for the panel: draw an ellipsis and stop.
            draw.text((original_x + 12, original_y + 40 + row*28), "......", font = fontTask, fill = 0)
            break
        task = cfg.get(u'DueTasks',f)
        title = task.split('|')[0]
        beginTime = task.split('|')[1]
        dueTime = task.split('|')[2]
        # Clip long titles to 15 display columns (see TrimString).
        title = TrimString(title, 15)
        draw.text((original_x + 4, original_y + 48 + row*28), title, font = fontTask, fill = 0)
        if(beginTime != "null"):
            # Keep the first five characters of the third space-separated
            # token (presumably HH:MM — confirm the stored time format).
            beginTime = beginTime.split(' ')[2][0:5]
            draw.text((original_x + 305, original_y + 48 + row*28), beginTime, font = fontTime, fill = 0)
        if(dueTime != "null"):
            dueTime = dueTime.split(' ')[2][0:5]
            draw.text((original_x + 355, original_y + 48 + row*28), dueTime, font = fontTime, fill = 0)
        row = row + 1
def TrimString(srcString, width):
    """Clip srcString to roughly `width` display units, appending "...".

    Single-byte (ASCII) characters count as half a unit, multi-byte
    characters (e.g. CJK) as a full unit.  If the whole string fits within
    `width` units it is returned unchanged; otherwise characters are kept
    only while the running width stays strictly below `width`.
    """
    def _unit(ch):
        # ASCII chars encode to one UTF-8 byte and count as half a unit.
        return 0.5 if len(ch) == len(ch.encode('utf-8')) else 1

    total = 0
    for ch in srcString:
        total += _unit(ch)
    if total <= width:
        return srcString

    clipped = ""
    running = 0
    for ch in srcString:
        running += _unit(ch)
        if running >= width:
            # The character that reaches the limit is excluded.
            break
        clipped += ch
    return clipped + "..."
def DrawMonth(original_x, original_y,year, month):
    """Draw a month calendar grid (Monday-first) at the given origin.

    Prints the "YYYY年MM月" title, the weekday header row, then delegates
    every cell to DrawDate().  Uses the module-level ``draw`` canvas.
    """
    # monthcalendar() yields week rows; days outside the month are 0.
    dateMatrix = calendar.monthcalendar(year, month)
    fontTitle = ImageFont.truetype(runningDir + '/pic/Font.ttc', 18)
    draw.text((original_x + 100, original_y), str(year)+u'年'+str(month)+u'月', font = fontTitle, fill = 0)
    # Weekday initials Monday..Sunday.
    weekList = [u'一',u'二',u'三',u'四',u'五',u'六',u'日']
    for i in range(0,7):
        draw.text((original_x + 2 + i*36, original_y + 25), weekList[i], font = fontTitle, fill = 0)
    for row in range(len(dateMatrix)):
        for col in range(len(dateMatrix[row])):
            DrawDate(dateMatrix, row, col,original_x,original_y,year,month)
def DrawDate(dateMatrix, row, col,original_x,original_y,year,month):
    """Draw one calendar cell: day number plus festival/holiday/work marks.

    Today's cell gets a filled black box with inverted (white) digits.
    Festival names, holiday boxes and make-up workday circles come from the
    module-level festivalsDays / holiDays / workDays dicts keyed 'YYYYMMDD'.
    """
    fontDate = ImageFont.truetype(runningDir + '/pic/Font.ttc', 16)
    fontFestival = ImageFont.truetype(runningDir + '/pic/Font.ttc', 15)
    today = datetime.date.today()
    # currentDay doubles as the text fill colour: 0 = black (normal day),
    # 1 = white digits on the black "today" box.
    currentDay = 0
    rowSpan = 34
    colSpan = 36
    # Zero-pad month and day to build the 'YYYYMMDD' lookup key.
    monthValue = str(month)
    dayValue = str(dateMatrix[row][col])
    if month < 10:
        monthValue = '0' + str(month)
    if dateMatrix[row][col] < 10:
        dayValue = '0' + str(dateMatrix[row][col])
    keyValue = str(year) + monthValue + dayValue
    if today.strftime('%Y%m%d') == keyValue :
        currentDay = 1
        # Highlight today with a filled rectangle behind the digits.
        draw.rectangle((original_x + 0 + col*colSpan, original_y + 55 + row*rowSpan, original_x + col*colSpan + 19, original_y + 72 + row*rowSpan), fill = 0)
    else:
        currentDay = 0
    if dateMatrix[row][col] == 0:
        # 0 means the cell belongs to the previous/next month: leave blank.
        return
    if dateMatrix[row][col] < 10:
        # Single-digit days get a small extra offset to stay centred.
        draw.text((original_x + 5 + col*colSpan, original_y + 55 + row*rowSpan), str(dateMatrix[row][col]), font = fontDate, fill = currentDay)
    else:
        draw.text((original_x + 1 + col*colSpan, original_y + 55 + row*rowSpan), str(dateMatrix[row][col]), font = fontDate, fill = currentDay)
    # Draw the festival name.  NOTE(review): [0] and [1] index the first two
    # characters of the name, drawn on two lines — assumes 2-char names.
    if (keyValue in festivalsDays.keys()):
        draw.text((original_x + 20 + col*colSpan, original_y + 48 + row*rowSpan), festivalsDays[keyValue][0], font = fontFestival, fill = 0)
        draw.text((original_x + 20 + col*colSpan, original_y + 64 + row*rowSpan), festivalsDays[keyValue][1], font = fontFestival, fill = 0)
    # Draw the holiday marker (outlined box).
    if (keyValue in holiDays.keys()):
        draw.rectangle((original_x + 0 + col*colSpan, original_y + 55 + row*rowSpan, original_x + col*colSpan + 19, original_y + 72 + row*rowSpan), outline = 0)
    # Draw the make-up workday marker (circle).
    if (keyValue in workDays.keys()):
        draw.arc((original_x + 1 + col*colSpan, original_y + 55 + row*rowSpan, original_x + col*colSpan + 19, original_y + 72 + row*rowSpan), 0, 360, fill = 0)
def SetDayStatus(festivalsDays,holidays, workDays, year):
    """Populate the festival/holiday/workday dicts from days/<year>.ini.

    All three dicts are mutated in place and keyed by 'YYYYMMDD' strings.
    Section names in the ini file are Chinese: 节日日期 (festival dates),
    放假日期 (holiday dates, possibly 'MMDD-MMDD' ranges), 调休上班日期
    (make-up workdays, comma-separated 'MMDD' list).
    """
    cfg = ConfigParser()
    cfg.read(runningDir + '/days/'+year+'.ini')
    for f in cfg['节日日期']:
        dates = cfg.get(u'节日日期',f)
        # Key = year + MMDD, value = festival name (the option key).
        festivalsDays[year+cfg.get(u'节日日期',f)] = f
    for f in cfg['放假日期']:
        dates = cfg.get(u'放假日期',f)
        if '-' in dates:
            # Range 'MMDD-MMDD': expand every day via dateutil.rrule.
            start = int(dates.split('-')[0]) + int(year)*10000
            end = int(dates.split('-')[1]) + int(year)*10000
            for i in rrule(DAILY, dtstart=parse(str(start)), until=parse(str(end))):
                holidays[i.date().strftime('%Y%m%d')] = f
        else:
            holidays[year+cfg.get(u'放假日期',f)] = f
    for f in cfg['调休上班日期']:
        dates = cfg.get(u'调休上班日期',f)
        if dates != '':
            dateList = dates.split(',')
            for d in dateList:
                workDays[year + d] = f
def TaskFileUpdated(LastSyncTime):
    """Return 1 if tasks/task.ini has a newer 'Last sync time', else 0.

    LastSyncTime is a single-element list used as a mutable by-reference
    cell: element 0 is updated to the new timestamp when a change is seen.
    """
    Cfg = ConfigParser()
    Cfg.read(runningDir + '/tasks/task.ini')
    if(not Cfg.has_option('Info','Last sync time')):
        return 0
    CfgSyncTime = Cfg.get('Info','Last sync time')
    if(LastSyncTime[0] != CfgSyncTime):
        LastSyncTime[0] = CfgSyncTime
        return 1
    else:
        return 0
# Daemon main loop: poll task.ini once per second and, on change, redraw
# the 800x600 1-bit frame and push it to the 6-inch e-paper display via
# the bundled IT8951 shared library.
try:
    logging.info("The daemon starts...")
    LastSyncTime = ['']
    while 1:
        if(TaskFileUpdated(LastSyncTime)):
            logging.info("Start to update...")
            Himage = Image.new('1', (800, 600), 255)  # 255: clear the frame
            draw = ImageDraw.Draw(Himage)
            DrawScreen(draw)
            Himage.save(runningDir + '/pic/epd.bmp', 'bmp')
            epdLib = cdll.LoadLibrary(runningDir + "/IT8951/epd.so")
            epdLib.epd_6inch_init()
            epdLib.Display_epd_bmp()
            # NOTE(review): 'edp_6inch_deinit' vs 'epd_6inch_init' spelling —
            # presumably matches the symbol actually exported by epd.so; verify.
            epdLib.edp_6inch_deinit()
            logging.info("Finish...")
    time.sleep(1)
except IOError as e:
    logging.info(e)
except KeyboardInterrupt:
    logging.info("ctrl + c:")
    exit()
| f1ynng8/omnifocus-eink | raspberrypi/daemon.py | daemon.py | py | 10,089 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "calendar.setfirstweekday",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.pa... |
33161621406 | import csv
from typing import Dict, Tuple

from scipy.interpolate import interp1d
def app() -> None:
    """Entry point: load bond data from a CSV (path read from stdin) and
    print the benchmark and interpolated yield-spread reports.

    Government and corporate bonds are kept in separate dicts because the
    benchmark-yield-spread calculation checks each corporate bond against
    every government bond.
    """
    f_name = input("csv name (with.csv): ")
    g_bonds, c_bonds = load_csv(f_name)
    # Bug fix: validate the loaded data *before* reporting — the original
    # asserted only after both reports had already been printed.
    assert len(c_bonds) > 0 and len(g_bonds) > 0
    print_benchmark_yield_spread(c_bonds, g_bonds)
    print("\n")
    print_interpolated_yield_spread(c_bonds, g_bonds)
def load_csv(file_name: str) -> Tuple[Dict, Dict]:
    """Parse a bond CSV into (government, corporate) bond dictionaries.

    Expected columns: bond, term, yield, type.  ``term`` looks like
    "10.3 years" (leading number kept) and ``yield`` like "2.30%"
    (trailing two characters stripped) — TODO confirm the yield column
    always ends with exactly two non-numeric characters.

    Bug fix: the return annotation was ``(Dict, Dict)`` (a tuple literal,
    not a type); it is now a proper ``Tuple[Dict, Dict]``.

    Returns:
        (g_bonds, c_bonds): each maps bond id -> {'term': float, 'yield': float}.
    """
    g_bonds: Dict = {}
    c_bonds: Dict = {}
    with open(file_name, mode='r') as bond_file:
        csv_reader = csv.DictReader(bond_file)
        for row in csv_reader:
            bond_id, term, _yield, bond_type = row['bond'], row['term'], row['yield'], row['type']
            term = float(term.split(' ')[0])
            _yield = float(_yield[:-2])
            bond_info = {'term': term, 'yield': _yield}
            if bond_type == "government":
                g_bonds[bond_id] = bond_info
            else:
                c_bonds[bond_id] = bond_info
    return g_bonds, c_bonds
def print_benchmark_yield_spread(c_bonds, g_bonds) -> None:
    """Print each corporate bond's spread over its nearest-term benchmark.

    Args:
        c_bonds: Dict[str, Dict[str, float]] of corporate bonds.
        g_bonds: Dict[str, Dict[str, float]] of government bonds.
    """
    # Bug fix: print the CSV header once, not once per bond (the original
    # re-printed it inside the loop).  The unused per-item value was dropped.
    print("bond,benchmark,spread_to_benchmark")
    for cb_id in c_bonds:
        bm_bond_id, yield_spread = calc_benchmark_yield_spread(cb_id, c_bonds, g_bonds)
        print(f"{cb_id},{bm_bond_id}, {yield_spread:.2f}%")
def print_interpolated_yield_spread(c_bonds, g_bonds) -> None:
    """Print each corporate bond's spread over the interpolated govt curve.

    Args:
        c_bonds: Dict[str, Dict[str, float]] of corporate bonds.
        g_bonds: Dict[str, Dict[str, float]] of government bonds.
    """
    # Bug fix: print the CSV header once, not once per bond (the original
    # re-printed it inside the loop).
    print("bond,spread_to_curve")
    for cb_id in c_bonds:
        yield_spread_to_curve = calc_spread_to_curve(cb_id, c_bonds, g_bonds)
        print(f"{cb_id}, {yield_spread_to_curve:.2f}%")
def calc_benchmark_yield_spread(cb_id, c_bonds, g_bonds) -> (str, float):
    """Benchmark a corporate bond against the closest-term government bond.

    Args:
        cb_id: corporate bond id.
        c_bonds: Dict[str, Dict[str, float]]
        g_bonds: Dict[str, Dict[str, float]]
    Returns:
        (benchmark govt bond id, corporate yield minus benchmark yield).
    """
    target_term = c_bonds[cb_id]['term']
    # Nearest government term wins; ties keep the first bond encountered,
    # matching a strict less-than scan.
    bm_bond = min(g_bonds, key=lambda gb_id: abs(target_term - g_bonds[gb_id]['term']))
    # Corporate bonds yield more than their government benchmark.
    yield_spread = c_bonds[cb_id]['yield'] - g_bonds[bm_bond]['yield']
    return f"{bm_bond}", yield_spread
def calc_spread_to_curve(cb_id, c_bonds, g_bonds) -> float:
    """Spread of corporate bond *cb_id* over the interpolated govt curve.

    Args:
        cb_id: corporate bond id.
        c_bonds: Dict[str, Dict[str, float]]
        g_bonds: Dict[str, Dict[str, float]]
    prerequisite: interpolation is always possible (the corporate term lies
    within the span of government terms).
    Returns the interpolated yield spread.
    """
    terms = [bond['term'] for bond in g_bonds.values()]
    yields = [bond['yield'] for bond in g_bonds.values()]
    curve = interp1d(terms, yields)
    benchmark_yield = curve(c_bonds[cb_id]['term'])
    return c_bonds[cb_id]['yield'] - benchmark_yield
# Run the interactive report only when executed as a script.
if __name__ == "__main__":
    app()
| shermansjliu/overbond-dev-test-submission | app.py | app.py | py | 3,333 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.DictReader",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 97,
"usage_type": "call"
}
] |
9817545855 | from PyQt5 import QtGui
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget
import ResourceNavigator
from back.css.style import cssLoader
from back.osuLoader.AppBackend import AppBackendInit
from view.window.osuLoader.layout.OsuLoaderWindowLayout import OsuLoaderWindowLayout
class OsuLoaderWindow(QWidget):
    """Main 1280x720 application window: loads fonts and CSS, builds the
    layout, wires the backend, then shows itself."""

    # NOTE(review): these class attributes hold the *classes* as placeholders;
    # __init__/initUI rebind them to instances on self.
    osuLoaderWindowLayout = OsuLoaderWindowLayout
    appBackend = AppBackendInit
    def __init__(self):
        super(OsuLoaderWindow, self).__init__()
        print("Open OsuLoaderWindow")
        self.initUI()
    def initUI(self):
        """Set title/icon, init fonts and styles, build layout and backend."""
        print("Init UI...")
        self.setWindowTitle(ResourceNavigator.Variables.Strings.osuLoaderWindowName)
        self.setWindowIcon(QIcon(ResourceNavigator.MaterialNavigator.icoLogo))
        # Fonts/styles must be ready before the layout is constructed.
        self.initFonts()
        self.initStyles()
        self.osuLoaderWindowLayout = OsuLoaderWindowLayout()
        self.appBackend = AppBackendInit(self.osuLoaderWindowLayout)
        self.setLayout(self.osuLoaderWindowLayout)
        self.setMinimumSize(1280, 720)
        # Backend post-initialisation runs after the layout is attached.
        self.appBackend.postInit()
        self.show()
    def initStyles(self):
        """Apply the application-wide stylesheet loaded from CSS."""
        print("Init styles...")
        loader = cssLoader()
        css = loader.getStyleSheet()
        self.setStyleSheet(css)
    def initFonts(self):
        """Register the bundled Exo font variants with Qt."""
        print("Init fonts...")
        QtGui.QFontDatabase.addApplicationFont(ResourceNavigator.FontsNavigator.fontExoRegular)
        QtGui.QFontDatabase.addApplicationFont(ResourceNavigator.FontsNavigator.fontExoThin)
        QtGui.QFontDatabase.addApplicationFont(ResourceNavigator.FontsNavigator.fontExoBold)
| animousen4/osuLoader-2.0 | view/window/osuLoader/OsuLoaderWindow.py | OsuLoaderWindow.py | py | 1,590 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "view.window.osuLoader.layout.OsuLoaderWindowLayout.OsuLoaderWindowLayout",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "back.osuLoader.AppBackend.AppBackendInit",
"... |
20255004169 | from blog_api import api
from flask import json
def test_blog_post():
    """POST /api/post creates a blog post and echoes a success message."""
    # Send JSON post data through the Flask test client and check the reply.
    response = api.test_client().post(
        '/api/post',
        data=json.dumps({'title': '1', 'body': '2', 'author': '3'}),
        content_type='application/json',
    )
    assert response.status_code == 200
    assert response.data == b'1 posted successfully!'
def test_comment_post():
    """POST /api/comment/0 attaches a comment to post 0."""
    # Send JSON comment data through the Flask test client and check the reply.
    response = api.test_client().post(
        '/api/comment/0',
        data=json.dumps({'body': '2', 'author': '3'}),
        content_type='application/json',
    )
    assert response.status_code == 200
    assert response.data == b'comment posted successfully!'
def test_blog_get():
    """GET /api/post/0 returns the previously created blog post as JSON."""
    # Relies on test_blog_post having run first (module-level call order).
    response = api.test_client().get(
        '/api/post/0'
    )
    print(response.data)
    assert response.status_code == 200
    assert b'"title":"1"' in response.data
    assert b'"body":"2"' in response.data
    assert b'"author":"3"' in response.data
def test_comment_get():
    """GET /api/comment/0 returns the previously created comment as JSON."""
    # Relies on test_comment_post having run first (module-level call order).
    response = api.test_client().get(
        '/api/comment/0'
    )
    print(response.data)
    assert response.status_code == 200
    assert b'"body":"2"' in response.data
    assert b'"author":"3"' in response.data
if __name__ == '__main__':
test_blog_post()
test_comment_post()
test_blog_get()
test_comment_get() | hebertsonm/blog-assignment | pytest.py | pytest.py | py | 1,650 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "blog_api.api.test_client",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "blog_api.api",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "flask.json.dumps",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.json",
... |
11920060681 | import random, hashlib
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.x509.oid import NameOID
from playground.common import CipherUtil
from ..contants import PATH_PREFIX
class CertFactory(object):
    """Loads this node's certificate chain and private key from disk and
    derives random session pre-keys."""

    def __init__(self):
        super(CertFactory, self).__init__()

    @classmethod
    def getPrivateKeyForAddr(cls, addr):
        """Return the deserialized PEM private key (currently always r2d2's)."""
        pem_data = cls.getContent(PATH_PREFIX + 'r2d2.pem')
        return serialization.load_pem_private_key(pem_data, password=None, backend=default_backend())

    @classmethod
    def getCertsForAddr(cls, addr):
        """Return the certificate chain [leaf, intermediate, root] as bytes."""
        chain = []
        chain.append(cls.getContent(PATH_PREFIX + 'r2d2.crt'))
        chain.append(cls.getContent(PATH_PREFIX + 'bb8.crt'))
        chain.append(cls.getRootCert())
        return chain

    @classmethod
    def getRootCert(cls):
        """Return the root CA certificate bytes."""
        return cls.getContent(PATH_PREFIX + 'root.crt')

    @classmethod
    def getPreKey(cls):
        """Return a fresh 16-byte pre-key: SHA-1(random 64-bit seed)[:16].

        Bug fix: random.randint is inclusive on both ends, so the original
        upper bound of 2**64 could (rarely) yield a value that does not fit
        in 8 bytes, raising OverflowError in to_bytes().
        NOTE(review): ``random`` is not a CSPRNG — the ``secrets`` module
        would be preferable for key material.
        """
        seed = random.randint(0, 2 ** 64 - 1).to_bytes(8, byteorder='big')
        return hashlib.sha1(seed).digest()[:16]

    @classmethod
    def getPubkFromCert(cls, cert):
        """Extract the public-key object from PEM certificate bytes."""
        cert_object = x509.load_pem_x509_certificate(cert, default_backend())
        return cert_object.public_key()

    @classmethod
    def getContent(cls, path):
        """Read *path* as bytes; raise ValueError if the file is empty."""
        # 'with' guarantees the file handle is closed (the original leaked it).
        with open(path, 'rb') as fp:
            content = fp.read()
        if len(content) == 0:
            raise ValueError('No Content!')
        return content

    @classmethod
    def GetCommonName(cls, certBytes):
        """Return the certificate's Common Name, or None unless exactly one CN."""
        cert = CipherUtil.getCertFromBytes(certBytes)
        commonNameList = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
        if len(commonNameList) != 1:
            return None
        return commonNameList[0].value
| Pandafriendd/peep | src/factory/CertFactory.py | CertFactory.py | py | 1,996 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "contants.PATH_PREFIX",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_n... |
36531588851 | # The Purpose of the app: track what movies the user has watched and the order
# in which the user has watched them.
import sys
import os
home = os.path.expanduser("~")
desktop_rel_path = '/Desktop/MOOC_work/udemy/complete_python_and_postgres_dev_course/section6_movie_system'
if home + desktop_rel_path not in sys.path:
sys.path.insert(0, home+desktop_rel_path)
#if os.getcwd() != home + desktop_rel_path:
# os.chdir(home + desktop_rel_path)
from user import User
import json # for importing json-like objects from file.
def file_exists(filename):
    """Return True if *filename* refers to an existing regular file."""
    return os.path.isfile(filename)
def menu():
name = input("Enter your name: ")
filename = "{}.txt".format(name)
if file_exists(filename):
with open(filename, 'r') as f:
json_data = json.load(f)
user = User.from_json(json_data)
else:
user = User(name)
user_input = input("Enter 'a' to add a movie, 's' to see the list of movies,"
"'w' to set a movie as watched, 'd' to delete a movie, 'l' to see the list of watched movies,"
" 'sv' to save or 'q' to quit: ")
while user_input != 'q':
if user_input == 'a':
movie_name = input("Enter the movie name: ")
movie_genre = input("Enter the movie genre: ")
user.add_movie(movie_name, movie_genre)
elif user_input == 's':
for movie in user.movies:
print("Name: {} Genre: {} Watched: {}".format(movie.name, movie.genre, movie.watched))
elif user_input == 'w':
movie_name = input("Enter the movie name to set as watched: ")
user.set_watched(movie_name)
elif user_input == 'd':
movie_name = input("Enter the movie name to delete: ")
user.delete_movie(movie_name)
elif user_input == 'l':
for movie in user.watched_movies():
print("Name: {} Genre: {} Watched: {}".format(movie.name, movie.genre, movie.watched))
elif user_input == 'sv':
with open(filename, 'w') as f:
json.dump(user.json(), f)
user_input = input("Enter 'a' to add a movie, 's' to see the list of movies,"
"'w' to set a movie as watched, 'd' to delete a movie, 'l' to see the list of watched movies,"
" 'sv' to save or 'q' to quit: ") | BrandonHoeft/mooc-work | udemy/complete_python_and_postgres_dev_course/section6_movie_system/app.py | app.py | py | 2,405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.expanduser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"lin... |
17164740168 | from sqlite3 import Error as sqliteError
from sqlite3 import OperationalError as sqliteOperationalError
from loggingSystem import LoggingSystem
import sqlite3,sys,os,time
from typing import Union
class ProcessamentoSqlite:
    def __init__(self,sqlite_db="./initial_db.db",sql_file_pattern="scripts/sqlitePattern.sql", log_file="./processadorSQlite.log",level:int=10,logging_pattern='%(name)s - %(levelname)s - %(message)s',log_name:str="gerenciador sqlite",logstash_data:dict={},thread=False):
        """Manager for a SQLite database file.

        Creates the database from the SQL pattern file when it does not yet
        exist, then opens the connection kept on self.conn.

        :param sqlite_db: path of the SQLite database file
        :param sql_file_pattern: .sql script used to bootstrap a fresh DB
        :param log_file: log file for this instance (change only when
            creating multiple instances)
        :param level: logging level
        :param logging_pattern: logging format string
        :param log_name: logger name (change only for multiple instances)
        :param logstash_data: extra data forwarded to LoggingSystem
            (NOTE(review): mutable default dict is shared across calls)
        :param thread: when True, open the connection with
            check_same_thread=False so it can be shared between threads
        """
        self.logging = LoggingSystem(name=log_name, arquivo=log_file,level=level,formato=logging_pattern,logstash_data=logstash_data)
        self.create_temporary_DB(local=sqlite_db,pattern=sql_file_pattern)
        if thread is True:
            self.conn = sqlite3.connect(sqlite_db,check_same_thread=False)
        else:
            self.conn = sqlite3.connect(sqlite_db)
        # Max identical frames allowed in the call stack before the retry
        # recursion in insert_data_sqlite gives up.
        self.stack_overflow_max=5
    def create_temporary_DB(self,local,pattern):
        """Create and bootstrap the SQLite DB file if it does not exist.

        Args:
            local (path): where the SQLite DB file is (or will be) stored
            pattern (path): .sql script executed to create the schema
        """
        try:
            if not os.path.isfile(local):
                # Touch the file, then run the schema script against it.
                f = open(local, "a")
                f.write("")
                f.close()
                conn = sqlite3.connect(local)
                self.execute_sqlfile_sqlite(pattern,conn)
                conn.close()
                self.logging.info("bd gerado com sucesso")
            else:
                self.logging.info("bd já existe")
        except sqliteOperationalError as e:
            # Operational error while bootstrapping is fatal.
            # NOTE(review): quit() terminates the whole process here.
            self.logging.error(e)
            quit()
        except sqliteError as e:
            # Unknown sqlite error: logged but not re-raised.
            self.logging.error(e)
        except :
            self.logging.error("Unexpected error:", str(sys.exc_info()[0]))
    def insert_data_sqlite(self,data:dict,table:str=""):
        '''Insert one row built from *data* (column -> value) into *table*.

        WARNING(review): the INSERT statement is built by string
        concatenation, so column names and string values are vulnerable to
        SQL injection; parameterized queries would be safer.

        Example of the generated SQL:
        INSERT INTO "operacoes"( "tipoOperacao", "nomeBD","adicionais","dados")
        VALUES(1,"empregado","[(pessoas,pessoas_id,1),(lojas,loja_id)]","{salario:1200,contratado:30/12/20}")
        '''
        self.logging.info("insercao dado sqlite")
        insert_command="INSERT INTO "
        insert_command+="'"+table+"' ("
        self.logging.debug(data)
        # Column list: comma between all but the last key.
        for coluna in data.keys():
            insert_command+=str(coluna)
            if coluna != list(data.keys())[-1]:
                insert_command+=","
        insert_command+=") VALUES ("
        for coluna in data.keys():
            if type(data[coluna]) == type("") :
                # Strings: strip newlines and wrap in double quotes.
                insert_command+='"'+data[coluna].replace("\n","")+'"'
            elif type(data[coluna]) == type({}) or type(data[coluna]) == type([]):
                # NOTE(review): str.replace returns a new string — this loop
                # discards its result, so newlines are NOT actually stripped.
                for i in list(data[coluna]):
                    i.replace("\n","")
                insert_command+='"'+str(data[coluna])+'"'
            else:
                insert_command+=str(data[coluna])
            if coluna != list(data.keys())[-1]:
                insert_command+=","
        insert_command+=");"
        self.logging.debug(insert_command)
        try:
            cursor = self.conn.cursor()
            cursor.execute(insert_command)
            self.conn.commit()
        except sqliteOperationalError as e:
            # Typically "database is locked": wait briefly and retry by
            # recursing, bounded by stack_overflow_max identical frames.
            self.logging.error(e)
            time.sleep(0.001)
            try:
                chamadas=LoggingSystem.full_inspect_caller()
                if chamadas.count(chamadas[0])>self.stack_overflow_max:
                    return None
            except IndexError as e:
                pass
            except:
                raise
            self.insert_data_sqlite(data=data,table=table)
        except sqliteError as e:
            # Unknown sqlite error: logged but not re-raised.
            self.logging.error(e)
        except :
            self.logging.error("Unexpected error:", sys.exc_info()[0])
    def read_data_sqlite(self,table:str,filtro:Union[str,dict]="*",query:Union[str,dict]="*"):
        """SELECT rows from *table* and return cursor.fetchall().

        Args:
            table: table name.
            filtro: "*" for all columns, or an iterable of column names.
            query: "*" for no WHERE clause, or a dict column -> value
                joined with AND using "IS" comparisons.

        WARNING(review): the SELECT is built by string concatenation and is
        vulnerable to SQL injection; parameterized queries would be safer.
        """
        self.logging.info("lendo sqlite")
        read_command=""
        read_command+="SELECT "
        if filtro != "*":
            # Explicit column list wrapped in parentheses.
            read_command+="("
            for key in filtro:
                read_command+=key
                if len(filtro)>1:
                    if key != filtro[-1]:
                        read_command+=","
            read_command+=")"
        else:
            read_command+=filtro
        read_command+=" FROM "
        read_command+="'"+table+"'"
        if query != "*":
            read_command+=" WHERE "
            for coluna in query.keys():
                read_command+=str(coluna) + " IS "
                if type(query[coluna])==type(""):
                    read_command+="'"+query[coluna]+"'"
                else:
                    read_command+=str(query[coluna])
                if coluna != list(query.keys())[-1]:
                    read_command+=" AND "
        read_command+=";"
        try:
            cursor = self.conn.cursor()
            self.logging.info(read_command)
            cursor.execute(read_command)
            self.conn.commit()
            saida=cursor.fetchall()
            return saida
        except sqliteOperationalError as e:
            # Typically "database is locked": retry by recursing.
            # NOTE(review): unbounded recursion if the error persists.
            self.logging.error(e)
            return self.read_data_sqlite(table,filtro,query)
        except sqliteError as e:
            # Unknown sqlite error: logged; returns None implicitly.
            self.logging.error(e)
        except :
            self.logging.error("Unexpected error:", sys.exc_info()[0])
def execute_sqlfile_sqlite(self,pattern:dict,conn=None):
self.logging.info("executar arquivo sql em sqlite")
try:
if conn != None:
cursor = conn.cursor()
else:
cursor=self.conn.cursor()
sqlfile=open(pattern).read().split(";\n")
for sqlstatement in sqlfile:
if sqlstatement[-1] != ";":
sqlstatement+=";"
self.logging.debug(sqlstatement)
cursor.execute(sqlstatement)
conn.commit()
except sqliteOperationalError as e:
#print("erro operacional no sqlite")
self.logging.error(e)
return self.execute_sqlfile_sqlite(pattern,conn)
except sqliteError as e:
#print("erro desconhecido no sqlite")
self.logging.error(e)
except :
self.logging.error("Unexpected error:", sys.exc_info()[0])
def dict_all_string(self,entrada:dict)-> dict:
retorno={}
for i in entrada.keys():
if type(i)==type(""):
retorno[i]=entrada[i]
else:
retorno[i]=str(entrada[i]) | mzramna/algoritimo-de-testes-de-benchmark-de-bancos-de-dados | scripts/processamentoSqlite.py | processamentoSqlite.py | py | 7,284 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "loggingSystem.LoggingSystem",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.i... |
5234262149 | from .models import User
from django.conf import settings
from django.templatetags import static
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializer for User accounts: profile fields plus an absolute
    avatar URL (falling back to a static placeholder image)."""

    avatar = serializers.SerializerMethodField()

    class Meta:
        model = User
        fields = (
            'username',
            'email',
            'birthday',
            'country',
            'avatar',
            'first_name',
            'last_name',
            'reviews_number',
            'is_subscribed',
            'user_permissions_category'
        )

    def create(self, validated_data):
        """Create the user via the manager so the password is hashed."""
        return User.objects.create_user(**validated_data)

    def get_avatar(self, obj):
        """Return the avatar URL, absolute when a request is in context."""
        # Bug fix: the module-level ``from django.templatetags import static``
        # binds the *module*, so calling ``static(...)`` raised TypeError.
        # Import the actual helper function locally.
        from django.templatetags.static import static as static_url
        url = obj.avatar.url if obj.avatar else static_url(settings.BLANK_PHOTO)
        request = self.context.get("request", None)
        if request is not None:
            return request.build_absolute_uri(url)
        return url
| Zomba4okk/MyMovies | backend/apps/users/serializers.py | serializers.py | py | 930 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 9,... |
33214014538 | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
# Credict: https://github.com/kefirski/pytorch_Highway
class Highway(nn.Module):
    """Highway network (Srivastava et al., 2015).

    Each of ``num_layers`` layers computes
    ``y = g * f(W_h x) + (1 - g) * (W_l x)`` with gate
    ``g = sigmoid(W_g x)``, keeping the feature width ``size`` constant.
    """

    def __init__(self, size, num_layers, f):
        """
        Args:
            size: feature dimension, preserved through every layer.
            num_layers: number of stacked highway layers.
            f: elementwise nonlinearity applied on the transform path.
        """
        super(Highway, self).__init__()
        self.num_layers = num_layers
        self.nonlinear = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
        self.linear = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
        self.gate = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
        self.f = f

    def forward(self, x):
        for layer in range(self.num_layers):
            # torch.sigmoid replaces the long-deprecated F.sigmoid.
            gate = torch.sigmoid(self.gate[layer](x))
            nonlinear = self.f(self.nonlinear[layer](x))
            linear = self.linear[layer](x)
            # Gate blends the nonlinear transform with the linear path.
            x = gate * nonlinear + (1 - gate) * linear
        return x
class Model(nn.Module):
    def __init__(self, weeks, time_slots, features, drop_out, device=None):
        """Build the per-time-slot and per-week GRU stacks.

        Args:
            weeks: number of week groups in the input (batch-norm width).
            time_slots: time slots per week (batch-norm / GRU input width).
            features: feature dimension fed to the first GRU.
            drop_out: dropout probability used throughout.
            device: torch device used by forward() for intermediate tensors.

        NOTE(review): input is presumably shaped
        (batch, weeks, time_slots, features) given forward()'s permute —
        confirm against the caller.
        """
        super(Model, self).__init__()
        self.device = device
        self.weeks = weeks
        self.time_slots = time_slots
        # Bidirectional GRU over features within one time slot (hence 256*2
        # inputs to the following Linear layers).
        self.time_slot_GRU_1 = nn.GRU(features, 256, 1, batch_first=True, bidirectional=True)
        self.time_slot_Linear_1 = nn.Linear(256*2, 256)
        self.norm_1 = torch.nn.BatchNorm1d(time_slots)
        self.time_slot_Linear_3 = nn.Linear(256, 256)
        self.time_slot_MaxPool = nn.MaxPool1d(256)
        # Branch over the last two collected week vectors.
        self.last_two_GRU_1 = nn.GRU(time_slots, 256, 1, batch_first=True, bidirectional=True)
        self.last_two_Linear_1 = nn.Linear(256*2, 256)
        # Branch over all weeks.
        self.week_GRU_1 = nn.GRU(time_slots, 256, 1, batch_first=True, bidirectional=True)
        self.week_linear_1 = nn.Linear(256*2, 256)
        self.norm_3 = torch.nn.BatchNorm1d(weeks)
        # NOTE(review): week_GRU_2 is defined but not used in forward().
        self.week_GRU_2 = nn.GRU(256, 256, 1, batch_first=True, bidirectional=True)
        self.MaxPool = nn.MaxPool1d(256*2)
        self.norm_4 = torch.nn.BatchNorm1d(34)
        self.highway_1 = Highway(34, 3, f=torch.nn.functional.relu)
        # Final projection to the 28 output classes/values.
        self.linear_5 = torch.nn.Linear(34, 28)
        self.drop = nn.Dropout(drop_out)
def forward(self, x):
x = x.permute(1, 0, 2, 3)
collect = torch.Tensor().type(torch.FloatTensor).to(self.device)
for idx in x:
layer_1, _ = self.time_slot_GRU_1(idx)
layer_1 = self.time_slot_Linear_1(self.drop(layer_1.contiguous().view(-1, layer_1.size(2))))
layer_1 = F.selu(self.norm_1(layer_1.view(-1, idx.size(1), 256)))
layer_2 = self.time_slot_Linear_3(self.drop(layer_1))
layer_2 = self.time_slot_MaxPool(layer_2).squeeze(2)
collect = torch.cat((collect, layer_2.unsqueeze(0)), dim=0)
last_two = collect[collect.size(0)-2:]
collect = collect.permute(1, 0, 2)
last_two, _ = self.last_two_GRU_1(last_two)
last_two = self.last_two_Linear_1(last_two.contiguous().view(-1, last_two.size(2)))
last_two = last_two.view(-1, 2, 256)
layer_3, _ = self.week_GRU_1(collect)
layer_3 = self.week_linear_1(layer_3.contiguous().view(-1, layer_3.size(2)))
layer_3 = F.selu(self.norm_3(layer_3.view(-1, collect.size(1), 256)))
layer_4 = (torch.cat((layer_3, last_two), dim=1))
layer_4 = self.MaxPool(layer_4).squeeze(2)
x = self.norm_4(layer_4)
x = self.drop(x)
x = self.highway_1(x)
x = self.drop(x)
x = self.linear_5(x)
return x | dwaydwaydway/KKStream-Deep-Learning-Workshop | Model.py | Model.py | py | 3,619 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
15974646573 | # *-* coding: utf-8 *-*
"""
Created on dim 07 fév 2021 09:27:34 UTC
@author: vekemans
"""
import time
import math as mt
import numpy as np
import scipy.sparse as spr
import matplotlib.pyplot as plt
nfig = 1
pi = mt.pi
# -----------------------------------------------
# Original signal: u(x) = e^{sin x} on a periodic grid, with its exact
# derivative u'(x) = cos(x) e^{sin x} used as the reference below.
N = np.power(2,12)
h = 2*pi / N
x = -pi + np.arange(1,N+1)*h
u = np.exp(np.sin(x))
uprime = np.cos(x) * u
plt.figure(nfig)
plt.plot(x,u, 'k--', label=r"$u$")
plt.plot(x,uprime, 'k', label=r"$u'$")
plt.title("Function $e^{sin(x)}$ and its derivative")
plt.legend()
plt.grid()
nfig += 1
# -----------------------------------------------
# Convergence study: fourth-order centered finite differences on periodic
# grids of size N = 8..4096, measuring the max-norm error against uprime.
Nvec = np.power(2, np.arange(3,13))
timevec = np.zeros(Nvec.shape)
plt.figure(nfig)
for (i,N) in enumerate(Nvec):
    t = time.time()
    h = 2*pi / N
    x = -pi + np.arange(1,N+1)*h
    u = np.exp(np.sin(x))
    uprime = np.cos(x)*u
    # Construct the differentiation matrix: sparse circulant with
    # off-diagonals 2/3 and -1/12 (columns wrap around for periodicity);
    # antisymmetrizing via D - D^T yields the 4th-order centered stencil.
    e = np.ones(N)
    row = np.arange(0,N)
    col1 = np.append(np.arange(1,N), [0])
    col2 = np.append(np.arange(2,N), [0,1])
    D = spr.csr_matrix((2.*e/3., (row,col1)), shape=(N,N)) \
        - spr.csr_matrix((e/12., (row,col2)), shape=(N,N))
    D = (D - D.transpose())/h
    # Plot max(abs(D*u - uprime))
    error = max(np.abs(D*u - uprime))
    plt.loglog(N, error, 'ko')
    timevec[i] = time.time() - t
# Reference slope N^{-4}: the expected convergence rate of the scheme.
plt.loglog(Nvec, np.float64(Nvec)**(-4), 'k--')
plt.text(100, 1e-10, '$N^{-4}$', fontsize=14)
plt.title("Convergence of fourth-order finite differences")
plt.xlabel('$N$')
plt.ylabel('Error')
plt.grid(which='both', linestyle='--', linewidth=.5)
nfig += 1
# Optional timing plot, kept disabled:
# plt.figure(nfig)
# plt.loglog(Nvec, timevec, 'ko')
# plt.title(" ")
# plt.xlabel("$N$")
# plt.ylabel("Time [s]")
# plt.grid()
# nfig += 1
# -----------------------------------------------
plt.show()
| abbarn/lmeca2300 | homeworks/p1.py | p1.py | py | 1,733 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.pi",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.power",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": ... |
12522230634 | from hmac import new
import streamlit as st
import pandas as pd
from google.cloud import bigquery
import pandas_gbq
from datetime import datetime, timedelta
import yagmail
import os
st.set_page_config(layout="wide")

# English weekday name -> Italian, used when appending fresh rows.
days_mapping = {
    'Monday': 'lunedì',
    'Tuesday': 'martedì',
    'Wednesday': 'mercoledì',
    'Thursday': 'giovedì',
    'Friday': 'venerdì',
    'Saturday': 'sabato',
    'Sunday': 'domenica'
}

# BigQuery coordinates of the shifts table.
project_id = "bigquery-to-streamlit"
dataset_id = "turni_babbuz"
table_id = "df"

st.title("Turni Babbuz")
st.markdown("Cliccate due volte sulla cella in corrispondenza del giorno scelto e scrivete il vostro nome. \nUna volta terminato, cliccate su **'Salva'** e attendente qualche secondo per il messaggio di conferma del salvataggio, in verde.")

# SQL query to select data from the table and order by date
sql = f"""
SELECT *
FROM `{dataset_id}.{table_id}`
ORDER BY data
"""

# Load data into a DataFrame
df = pandas_gbq.read_gbq(sql, project_id=project_id)

# First, ensure the 'data' column is of datetime type for comparison
df['data'] = pd.to_datetime(df['data'], format='%d/%m/%Y')
df = df.sort_values(by='data').reset_index(drop=True)

# Get today's date
today = datetime.now().date()

# Initialize a counter for deleted rows
deleted_rows = 0

# Drop rows in the past (rolling window: the table always starts today).
while df['data'].iloc[0].date() < today:
    # Remove the first row
    df = df.iloc[1:].reset_index(drop=True)
    deleted_rows += 1

# Add new rows equal to the number of deleted rows
for i in range(deleted_rows):
    new_date = df['data'].iloc[-1].date() + timedelta(days=1)
    # If new_date is not already in the dataframe
    if not (df['data'].dt.date == new_date).any():
        new_day_name = days_mapping[new_date.strftime('%A')]
        new_row = {
            'data': new_date.strftime('%d/%m/%Y'),  # Convert date to string in 'dd/mm/yyyy' format
            'giorno': new_day_name,
            'notte': None,
            'informazioni': None
        }
        new_row_df = pd.DataFrame([new_row])
        # Append the new_row to the dataframe
        df = pd.concat([df, new_row_df], ignore_index=True)

# Normalize dates back to 'dd/mm/yyyy' strings and index on them for display.
df['data'] = pd.to_datetime(df['data'], format='%d/%m/%Y')
df['data'] = df['data'].dt.strftime('%d/%m/%Y')
df = df.set_index("data")
def color_rows(row):
    """Return a per-cell background style list for one schedule row.

    Rows with an empty/unassigned 'notte' shift are tinted light red
    (#ffcccc); assigned rows get no background.
    """
    highlight = '' if row['notte'] else '#ffcccc'
    return ['background-color: {}'.format(highlight) for _ in row]
# Apply row colouring and show the editable grid; 'data'/'giorno' are locked.
df = df.style.apply(color_rows, axis=1)
df = st.data_editor(df,use_container_width=True, disabled=("data","giorno"),key="data_editor")

# Disabled alternative: dropdown editors for 'notte' and 'casa'.
# df = st.data_editor(
#     df,
#     use_container_width=True,
#     disabled=("data","giorno"),
#     column_config={
#         "notte": st.column_config.SelectboxColumn(options=["...","Mamma","Nemi","Marta","Reby","Raky","Fili"," Sandi","Shad","Alex","DaniP","DaniF","DaniD"]),
#         "casa": st.column_config.SelectboxColumn(options=["...","nemi","rebi","sandi","marta"])
#     },
#     hide_index=True,
#     key="data_editor"
# )

# Raw editor state (edited cells), included in the notification email.
val = st.session_state["data_editor"]

# On save: overwrite the BigQuery table, confirm, and email the diff.
if st.button('Salva',type="primary"):
    df.to_gbq('{}.{}'.format(dataset_id, table_id), project_id, if_exists='replace')
    st.success("Salvataggio riuscito!")
    # SMTP credentials come from environment variables.
    sender_email = os.environ.get('SENDER_EMAIL')
    sender_password = os.environ.get('SENDER_PASSWORD')
    receiver_email = os.environ.get('RECEIVER_EMAIL')
    user = yagmail.SMTP(user=sender_email, password=sender_password)
    user.send(to=receiver_email, subject="Modifica Turni Babbuz", contents=f"Modifica effettuata e salvata. Ecco cosa: {val}")
| davins90/editable_table | prod/app.py | app.py | py | 3,761 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas_g... |
19090035035 | from datetime import datetime
from ueaglider.data.db_session import create_session
from ueaglider.data.db_classes import Pins, Audit, Missions, Targets, Gliders, Dives, ArgosTags
from ueaglider.services.argos_service import tag_info
from ueaglider.services.glider_service import glider_info
from ueaglider.services.user_service import find_user_by_id
def edit_waypoint_info(waypoint_id, info_txt):
if not waypoint_id:
return None
session = create_session()
waypoint = session.query(Pins).filter(Pins.WaypointsID == waypoint_id).first()
waypoint.Info = info_txt
session.commit()
session.close()
return
def create_waypoint(missionid, name, lat, lon, info):
session = create_session()
waypoint = Pins()
waypoint.MissionID = missionid
waypoint.Name = name
waypoint.Latitude = lat
waypoint.Longitude = lon
waypoint.Info = info
session.add(waypoint)
session.commit()
session.close()
return waypoint
def delete_pin(pin_id):
session = create_session()
pin = session.query(Pins) \
.filter(Pins.WaypointsID == pin_id) \
.first()
session.delete(pin)
session.commit()
session.close()
return pin
def create_target(missionid, name, lat, lon, radius, goto):
session = create_session()
target = Targets()
target.MissionID = missionid
target.Name = name
target.Latitude = lat
target.Longitude = lon
target.Radius = radius
target.Goto = goto
session.add(target)
session.commit()
session.close()
return target
def delete_target(target_id):
session = create_session()
target = session.query(Targets) \
.filter(Targets.TargetsID == target_id) \
.first()
session.delete(target)
session.commit()
session.close()
return target
def delete_glider(glider_num):
session = create_session()
target = session.query(Gliders) \
.filter(Gliders.Number == glider_num) \
.first()
session.delete(target)
session.commit()
session.close()
return
def create_mission(number, name, start, end, info):
mission = Missions()
mission.Number = number
mission.Name = name
mission.StartDate = start
mission.EndDate = end
mission.Info = info
session = create_session()
session.add(mission)
session.commit()
session.close()
return mission
def delete_mission(mission_id):
session = create_session()
mission = session.query(Missions) \
.filter(Missions.Number == mission_id) \
.first()
session.delete(mission)
session.commit()
return mission
def delete_dive(dive_id):
session = create_session()
dive = session.query(Dives) \
.filter(Dives.DiveInfoID == dive_id) \
.first()
session.delete(dive)
session.commit()
return dive
def delete_multiple_dives(glider_id):
dive = None
session = create_session()
dives = session.query(Dives) \
.filter(Dives.MissionID == 1)\
.filter(Dives.GliderID == glider_id) \
.all()
for dive in dives:
session.delete(dive)
session.commit()
return dive
def create_glider(number, name, info, mission_id, ueaglider):
glider = Gliders()
glider.Number = number
glider.Name = name
glider.Info = info
glider.MissionID = mission_id
glider.UEAGlider = ueaglider
session = create_session()
session.add(glider)
session.commit()
session.close()
return glider
def assign_glider(number, mission_id):
glider, __ = glider_info(number)
glider.MissionID = mission_id
session = create_session()
session.add(glider)
session.commit()
session.close()
return glider
def create_tag(number, mission_id, glider_id):
tag = ArgosTags()
tag.TagNumber = number
tag.MissionID = mission_id
tag.GliderID = glider_id
session = create_session()
session.add(tag)
session.commit()
session.close()
return tag
def assign_tag(number, mission_id, glider_id):
tag= tag_info(number)
tag.MissionID = mission_id
tag.GliderID = glider_id
session = create_session()
session.add(tag)
session.commit()
session.close()
return tag
def delete_tag(tag_num):
session = create_session()
target = session.query(ArgosTags) \
.filter(ArgosTags.TagNumber == tag_num) \
.first()
session.delete(target)
session.commit()
session.close()
return
def audit_entry(user_id: int, message: str):
user = find_user_by_id(user_id)
if not user:
return None
audit = Audit()
audit.UserID = user_id
audit.Date = datetime.now()
audit.Info = message
session = create_session()
session.add(audit)
session.commit()
session.close()
return audit
| ueaglider/ueaglider-web | ueaglider/services/db_edits.py | db_edits.py | py | 4,822 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ueaglider.data.db_session.create_session",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ueaglider.data.db_classes.Pins",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "ueaglider.data.db_classes.Pins.WaypointsID",
"line_number": 14,
... |
8000518173 | import numpy as np
from numpy import linalg
from scipy import sparse
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
def read_dataset(path: str):
with open(path, 'r') as f:
lines = f.readlines()
x = np.zeros([len(lines), 123])
y = np.zeros([len(lines)])
for (i, line) in enumerate(lines):
y[i] = 0.0 if line[0] == '-' else 1.0
features = line.strip().split()[1:]
for feature in features:
index = int(feature.split(':')[0]) - 1
assert 0 <= index < 123
x[i, index] = 1.0
return x.T, y
def predict(X, y, w):
mu = 1.0 / (np.exp(-w.dot(X)) + 1.0) - 0.5
labels = mu >= 0
return np.sum(labels == y) / y.shape[0]
def IRLS(X, y, tX, ty, lam=0.001, threshold=0.00001, max_iter=20):
train_acc, test_acc = [], []
# X: [d, n], y: [n]
d, n = X.shape
# w: [d]
w = np.ones(d)
count = 0
while True:
# mu: [n], > 0
mu = 1.0 / (np.exp(-w.dot(X)) + 1.0)
# A, A_inv: [n, n]
s = mu * (1 - mu) + 0.01
A = sparse.diags(s)
A_inv = sparse.diags(1 / s)
# z:
z = X.T.dot(w) - A_inv.dot(mu - y)
# XAX^\intercal + \lambda I
M = (A.dot(X.T)).T.dot(X.T) + lam * np.eye(d)
M_inv = linalg.inv(M)
# wt: [d]
wt = A.dot(M_inv.dot(X).T).T.dot(z)
# delta and update
delta = np.sum(np.abs(w - wt)) / np.sum(np.abs(w))
w = wt
train_acc.append(predict(X, y, w))
test_acc.append(predict(tX, ty, w))
count += 1
if delta < threshold or count == max_iter:
break
return w, train_acc, test_acc
# Read datasets
train_x, train_y = read_dataset('a9a/a9a')
test_x, test_y = read_dataset('a9a/a9a.t')
# Test different lambdas and draw
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_xticks(range(1, 21))
ax2.set_xticks(range(1, 21))
for lam in [0, 0.1, 1, 10, 100, 1000]:
print('Testing \\lambda={} ...'.format(lam))
_, train_acc, test_acc = IRLS(train_x, train_y, test_x, test_y, lam=lam)
print(train_acc, test_acc)
int_axis = [i for i in range(1, len(train_acc) + 1)]
ax1.plot(int_axis, train_acc, label='$\\lambda={}$'.format(lam))
ax2.plot(int_axis, test_acc)
fig.legend()
fig.savefig('train.pdf')
| LyricZhao/IRLS | main.py | main.py | py | 2,358 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 26,
... |
39366464180 | import numpy as np
import tensorflow as tf
import datetime
n = 10
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')
c1 = []
c2 = []
def matpow(M, n):
if n == 1:
return M
else:
return tf.matmul(M, matpow(M, n-1))
with tf.device('/gpu:0'):
a = tf.placeholder(tf.float32, [10000, 10000])
b = tf.placeholder(tf.float32, [10000, 10000])
c1.append(matpow(a, n))
c1.append(matpow(b, n))
with tf.device('/cpu:0'):
sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n
t1_1 = datetime.datetime.now()
#sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
"""
with tf.Session(config=tf.ConfigProto\
(log_device_placement=log_device_placement)) as sess:
sess.run(sum, {a:A, b:B})
"""
with tf.Session(config=tf.ConfigProto\
(allow_soft_placement=True,\
log_device_placement=True)) as sess:
sess.run(sum, {a:A, b:B})
t2_1 = datetime.datetime.now()
print("GPU computation time: " + str(t2_1-t1_1)) | PacktPublishing/Deep-Learning-with-TensorFlow-Second-Edition | Chapter07/gpu/gpu_example.py | gpu_example.py | py | 1,141 | python | en | code | 48 | github-code | 36 | [
{
"api_name": "numpy.random.rand",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
73049130343 | from collections import UserDict
from datetime import datetime
from datetime import timedelta
class CacheDict(UserDict):
times = {}
def __init__(self, dict={}, keytime=60, **kwargs):
super().__init__(dict, **kwargs)
self.keytime = keytime
def __getitem__(self, key):
if not self.times.get(key):
raise KeyError(key)
elapsed_time = datetime.now() - self.times.get(key)
if elapsed_time.seconds <= 60:
return super().__getitem__(key)
else:
self.data.pop(key)
raise KeyError(key)
def __setitem__(self, key, item):
self.times[key] = datetime.now()
return super().__setitem__(key, item)
| desk467/moto | moto/cache.py | cache.py | py | 718 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.UserDict",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "datetime.... |
33520209237 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' hipnuc protocol module '
import threading
import struct
import datetime
import time
class HipnucFrame_Exception(Exception):
def __init__(self,err='HI221GW Frame Error'):
Exception.__init__(self,err)
class HipnucFrame_NoValid_Exception(HipnucFrame_Exception):
def __init__(self,err='No valid frame received'):
Exception.__init__(self,err)
class HipnucFrame_NotCompleted_Exception(HipnucFrame_Exception):
def __init__(self,err='No full frame received'):
Exception.__init__(self,err)
class HipnucFrame_ErrorFrame_Exception(HipnucFrame_Exception):
def __init__(self,err='Error frame'):
Exception.__init__(self,err)
def _parse_data_packet_0x90(data_section:list,node_num = None):
module_id = {
"id": data_section[0],
}
return module_id
def _parse_data_packet_0xD1(data_section:list,node_num = None):
quaternion_list = []
# for pos in range(node_num):
pos = 0
t_pos = pos * 16
W = float(struct.unpack("<f", bytes(data_section[t_pos:t_pos + 4]))[0])
t_pos += 4
X = float(struct.unpack("<f", bytes(data_section[t_pos:t_pos + 4]))[0])
t_pos += 4
Y = float(struct.unpack("<f", bytes(data_section[t_pos:t_pos + 4]))[0])
t_pos += 4
Z = float(struct.unpack("<f", bytes(data_section[t_pos:t_pos + 4]))[0])
temp_dic = {
"W":round(W,3),
"X":round(X,3),
"Y":round(Y,3),
"Z":round(Z,3)
}
quaternion_list.append(temp_dic)
quaternion = {
"quat":quaternion_list
}
return quaternion
def _parse_data_packet_0xA0(data_section:list,node_num = None):
acc_list = []
# for pos in range(node_num):
pos = 0
t_pos = pos * 6
X = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
t_pos += 2
Y = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
t_pos += 2
Z = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
temp_dic = {
"X":round(X,3),
"Y":round(Y,3),
"Z":round(Z,3)
}
acc_list.append(temp_dic)
acc = {
"acc":acc_list
}
return acc
def _parse_data_packet_0xB0(data_section:list,node_num = None):
gyr_list = []
# for pos in range(node_num):
pos = 0
t_pos = pos * 6
X = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
t_pos += 2
Y = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
t_pos += 2
Z = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
temp_dic = {
"X":round(X,3),
"Y":round(Y,3),
"Z":round(Z,3)
}
gyr_list.append(temp_dic)
gyr = {
"gyr":gyr_list
}
return gyr
def _parse_data_packet_0xC0(data_section:list,node_num = None):
mag_list = []
# for pos in range(node_num):
pos = 0
t_pos = pos * 6
X = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
t_pos += 2
Y = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
t_pos += 2
Z = int(struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0])
temp_dic = {
"X":round(X,3),
"Y":round(Y,3),
"Z":round(Z,3)
}
mag_list.append(temp_dic)
mag = {
"mag":mag_list
}
return mag
def _parse_data_packet_0xD0(data_section:list,node_num = None):
eul_list = []
# for pos in range(node_num):
pos = 0
t_pos = pos * 6
Pitch = struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0]
Pitch = Pitch/100
t_pos += 2
Roll = struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0]
Roll = Roll/100
t_pos += 2
Yaw = struct.unpack("<h", bytes(data_section[t_pos:t_pos + 2]))[0]
Yaw = Yaw/10
temp_dic = {
"Pitch":round(Pitch,2),
"Roll":round(Roll,2),
"Yaw":round(Yaw,2)
}
eul_list.append(temp_dic)
euler = {
"euler":eul_list
}
return euler
def _parse_data_packet_0x91(data_section:list,node_num = None):
pos = 0
id_temp_list = []
timestamp_temp_list = []
acc_temp_list = []
gyr_temp_list = []
mag_temp_list = []
eul_temp_list = []
quat_temp_list = []
# id
id = data_section[pos]
id_dic = {
"":id
}
id_temp_list.append(id_dic)
pos += 1
#reserved
pos += 6
#timestamp
timestamp = int(struct.unpack("<I", bytes(data_section[pos:pos + 4]))[0])
timestamp_dic = {
"(s)":round(timestamp/1000,3)
}
timestamp_temp_list.append(timestamp_dic)
pos += 4
#acc
acc_X = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
acc_Y = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
acc_Z = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
acc_dic = {
"X":round(acc_X,3),
"Y":round(acc_Y,3),
"Z":round(acc_Z,3)
}
acc_temp_list.append(acc_dic)
#gyr
gyr_X = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
gyr_Y = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
gyr_Z = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
gyr_dic = {
"X": round(gyr_X,3),
"Y": round(gyr_Y,3),
"Z": round(gyr_Z,3)
}
gyr_temp_list.append(gyr_dic)
#mag
mag_X = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
mag_Y = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
mag_Z = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
mag_dic = {
"X": round(mag_X),
"Y": round(mag_Y),
"Z": round(mag_Z)
}
mag_temp_list.append(mag_dic)
#eul
eul_Roll = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
eul_Pitch = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
eul_Yaw = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
eul_dic = {
"Roll": round(eul_Roll, 2),
"Pitch": round(eul_Pitch, 2),
"Yaw": round(eul_Yaw, 2)
}
eul_temp_list.append(eul_dic)
#quat
quat_W = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
quat_X = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
quat_Y = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
quat_Z = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
quat_dic = {
"W": round(quat_W,3),
"X": round(quat_X,3),
"Y": round(quat_Y,3),
"Z": round(quat_Z,3),
}
quat_temp_list.append(quat_dic)
temp_dic_list = {
"id":id_temp_list,
"timestamp":timestamp_temp_list,
"acc":acc_temp_list,
"gyr":gyr_temp_list,
"mag":mag_temp_list,
"euler":eul_temp_list,
"quat":quat_temp_list
}
return temp_dic_list
def _parse_data_packet_0x93(data_section:list,node_num = None):
pos = 0
id_temp_list = []
timestamp_temp_list = []
acc_temp_list = []
gyr_temp_list = []
# id
id = data_section[pos]
id_temp_list.append(id)
pos += 1
#reserved
pos += 6
#timestamp
timestamp = int(struct.unpack("<I", bytes(data_section[pos:pos + 4]))[0])
timestamp_temp_list.append(timestamp)
pos += 4
#acc
acc_X = int(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])
pos += 2
acc_Y = int(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])
pos += 2
acc_Z = int(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])
pos += 2
acc_dic = {
"X":acc_X,
"Y":acc_Y,
"Z":acc_Z
}
acc_temp_list.append(acc_dic)
#gyr
gyr_X = int(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])
pos += 2
gyr_Y = int(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])
pos += 2
gyr_Z = int(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])
pos += 2
gyr_dic = {
"X": gyr_X,
"Y": gyr_Y,
"Z": gyr_Z
}
gyr_temp_list.append(gyr_dic)
temp_dic = {
"id":id_temp_list,
"timestamp":timestamp_temp_list,
"acc":acc_temp_list,
"gyr":gyr_temp_list,
}
return temp_dic
rel_node_num = 0
module_node_num = 0
def _parse_data_packet_0x62(data_section:list,node_num = None):
global rel_node_num
global module_node_num
global data_packet_properties
id_temp_list = []
gwid_temp_list = []
timestamp_temp_list = []
acc_temp_list = []
gyr_temp_list = []
mag_temp_list = []
eul_temp_list = []
quat_temp_list = []
pos = 0
gwid = data_section[0]
id_dic = {
"":gwid
}
gwid_temp_list.append(id_dic)
cnt = data_section[1]
rel_node_num = cnt
module_node_num = cnt
data_packet_properties[0x62]["data_len"] = 5 + (76 * cnt)
pos += 2
#reserved
pos += 5
#0x91 packet
for node in range(cnt):
#packet id
pos += 1
# id
id = data_section[pos]
id_dic = {
"":id
}
id_temp_list.append(id_dic)
pos += 1
#reserved
pos += 6
#timestamp
timestamp = int(struct.unpack("<I", bytes(data_section[pos:pos + 4]))[0])
timestamp_dic = {
"(s)":round(timestamp/1000,3)
}
timestamp_temp_list.append(timestamp_dic)
pos += 4
#acc
acc_X = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
acc_Y = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
acc_Z = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
acc_dic = {
"X":round(acc_X,3),
"Y":round(acc_Y,3),
"Z":round(acc_Z,3)
}
acc_temp_list.append(acc_dic)
#gyr
gyr_X = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
gyr_Y = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
gyr_Z = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
gyr_dic = {
"X": round(gyr_X,3),
"Y": round(gyr_Y,3),
"Z": round(gyr_Z,3)
}
gyr_temp_list.append(gyr_dic)
#mag
mag_X = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
mag_Y = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
mag_Z = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
mag_dic = {
"X": round(mag_X),
"Y": round(mag_Y),
"Z": round(mag_Z)
}
mag_temp_list.append(mag_dic)
#eul
eul_Roll = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
eul_Pitch = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
eul_Yaw = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
eul_dic = {
"Roll": round(eul_Roll, 2),
"Pitch": round(eul_Pitch, 2),
"Yaw": round(eul_Yaw, 2)
}
eul_temp_list.append(eul_dic)
#quat
quat_W = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
quat_X = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
quat_Y = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
quat_Z = float(struct.unpack("<f", bytes(data_section[pos:pos + 4]))[0])
pos += 4
quat_dic = {
"W": round(quat_W,3),
"X": round(quat_X,3),
"Y": round(quat_Y,3),
"Z": round(quat_Z,3),
}
quat_temp_list.append(quat_dic)
temp_dic_list = {
"id":id_temp_list,
"timestamp":timestamp_temp_list,
"acc":acc_temp_list,
"gyr":gyr_temp_list,
"mag":mag_temp_list,
"euler":eul_temp_list,
"quat":quat_temp_list
}
temp_dic = {
"GWD":gwid_temp_list,
"id":id_temp_list,
"timestamp":timestamp_temp_list,
"acc":acc_temp_list,
"gyr":gyr_temp_list,
"mag":mag_temp_list,
"euler":eul_temp_list,
"quat":quat_temp_list
}
return temp_dic
rel_node_num = 0
module_node_num = 0
#only for 400hz acc and gyro
def _parse_data_packet_0x63(data_section:list,node_num = None):
global rel_node_num
global module_node_num
global data_packet_properties
id_temp_list = []
timestamp_temp_list = []
acc_temp_list = []
gyr_temp_list = []
pos = 0
gwid = data_section[0]
cnt = data_section[1]
rel_node_num = cnt
module_node_num = cnt
data_packet_properties[0x63]["data_len"] = 5 + (24 * cnt)
pos += 2
#reserved
pos += 5
#0x93 packet
for node in range(cnt):
#packet id
pos += 1
# id
id = data_section[pos]
id_dic = {
"":id
}
id_temp_list.append(id_dic)
pos += 1
#reserved
pos += 6
#timestamp
timestamp = int(struct.unpack("<I", bytes(data_section[pos:pos + 4]))[0])
timestamp_dic = {
"(s)":round(timestamp/1000,3)
}
timestamp_temp_list.append(timestamp_dic)
pos += 4
#acc
acc_X = float(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])/1000
pos += 2
acc_Y = float(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])/1000
pos += 2
acc_Z = float(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])/1000
pos += 2
acc_dic = {
"X":round(acc_X,3),
"Y":round(acc_Y,3),
"Z":round(acc_Z,3)
}
acc_temp_list.append(acc_dic)
#gyr
gyr_X = float(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])/10
pos += 2
gyr_Y = float(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])/10
pos += 2
gyr_Z = float(struct.unpack("<h", bytes(data_section[pos:pos + 2]))[0])/10
pos += 2
gyr_dic = {
"X": round(gyr_X,3),
"Y": round(gyr_Y,3),
"Z": round(gyr_Z,3)
}
gyr_temp_list.append(gyr_dic)
temp_dic = {
"GWD":gwid_temp_list,
"id":id_temp_list,
"timestamp":timestamp_temp_list,
"acc":acc_temp_list,
"gyr":gyr_temp_list,
}
return temp_dic
data_packet_properties = {
# id
0x90: {
"type": "id",
"id_len": 1,
"data_len": 1,
"parse method": _parse_data_packet_0x90,
"gw_data":False
},
# acc
0xA0: {
"type": "acc",
"id_len": 1,
"data_len": 6,
"parse method": _parse_data_packet_0xA0,
"gw_data": False
},
# gyr
0xB0: {
"type": "gyr",
"id_len": 1,
"data_len": 6,
"parse method": _parse_data_packet_0xB0,
"gw_data": False
},
# mag
0xC0: {
"type": "mag",
"id_len": 1,
"data_len": 6,
"parse method": _parse_data_packet_0xC0,
"gw_data": False
},
# float_eul
0xD0: {
"type": "euler",
"id_len": 1,
"data_len": 6,
"parse method": _parse_data_packet_0xD0,
"gw_data": False
},
# quat
0xD1: {
"type": "quat",
"id_len": 1,
"data_len": 16,
"parse method": _parse_data_packet_0xD1,
"gw_data":False
},
# imusol
0x91: {
"type": "imusol",
"id_len": 1,
"data_len": 76,
"parse method": _parse_data_packet_0x91,
"gw_data": False
},
# gwimusol
0x62: {
"type": "gwsol",
"id_len": 1,
"data_len": 76 * 1,
"parse method": _parse_data_packet_0x62,
"gw_data": True
},
# imusol only raw
0x93: {
"type": "imusol_raw",
"id_len": 1,
"data_len": 24,
"parse method": _parse_data_packet_0x93,
"gw_data": False
},
# gwsol only raw
0x63: {
"type": "gwsol_raw",
"id_len": 1,
"data_len": 24 * 1,
"parse method": _parse_data_packet_0x63,
"gw_data": True
}
}
def crc16_update(buffer_list, cal_len, cal_pos, crc=0):
for temp_j in range(cal_len):
byte = buffer_list[temp_j + cal_pos]
crc ^= byte << 8
crc &= 0xffffffff
for temp_i in range(8):
temp = crc << 1
temp &= 0xffffffff
if (crc & 0x8000):
temp ^= 0x1021
temp &= 0xffffffff
crc = temp
return (crc & 0xffff)
SampleRate = 0
SamplesReceived = 0
prevSamplesReceived = 0
sample_rate_alive_flag = True
def sample_rate_timer_cb(sample_timer):
global SampleRate,SamplesReceived,prevSamplesReceived,sample_rate_alive_flag
SampleRate = SamplesReceived - prevSamplesReceived
prevSamplesReceived = SamplesReceived
print("每秒幀率:",SampleRate,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
if sample_rate_alive_flag == True:
sample_timer = threading.Timer(1.00, sample_rate_timer_cb,args=(sample_timer,))
sample_timer.start()
def sample_rate_timer_close():
global sample_rate_alive_flag
sample_rate_alive_flag = False
# 找到幀頭
def find_frameheader(buffer_list:list):
# 循環查找,直至拋出異常
while True:
# 查找幀頭的第一個標識符0x5a,若未找到,將會拋出ValueError異常
try:
header_ind = buffer_list.index(0x5a)
except ValueError:
raise HipnucFrame_NotCompleted_Exception
if header_ind + 1 > len(buffer_list) - 1:
raise HipnucFrame_NotCompleted_Exception
if buffer_list[header_ind + 1] == 0xa5:
# 找到幀頭標識符0x5aa5,返回幀頭位置
return header_ind
else:
# 未找到幀頭標識符,切片繼續查找
buffer_list = buffer_list[header_ind + 1:]
# 驗證獲取長度
def _get_frame_length(buffer_list, header_pos):
return int(struct.unpack("<h", bytes(buffer_list[header_pos + 2:header_pos + 4]))[0])
# 驗證長度是否合法
def _verify_frame_length(buffer_list:list, header_pos):
# 獲取到幀長度
frame_len = int(struct.unpack("<h", bytes(buffer_list[header_pos + 2:header_pos + 4]))[0])
# 判斷幀長度是否合法
if frame_len >= 1024:
raise HipnucFrame_ErrorFrame_Exception
elif frame_len + header_pos + 6 > len(buffer_list) :
raise HipnucFrame_NotCompleted_Exception
# 驗證crc是否正確
def _verify_frame_crc(buffer_list, header_pos=0):
# 獲取到幀長度
frame_len = int(struct.unpack("<h", bytes(buffer_list[header_pos + 2:header_pos + 2 + 2]))[0])
# 獲取幀內的crc
f_crc = int(struct.unpack("<H", bytes(buffer_list[header_pos + 4:header_pos + 4 + 2]))[0])
# 計算幀的crc
cal_crc = crc16_update(buffer_list, 4, header_pos, 0)
cal_crc = crc16_update(buffer_list, frame_len, header_pos + 6, cal_crc)
if cal_crc != f_crc:
raise HipnucFrame_ErrorFrame_Exception
# 截取一條完整且合法的幀,並將幀頭幀尾返回
def intercept_one_complete_frame(buffer_list):
# 找幀頭
header_pos = find_frameheader(buffer_list)
try:
frame_len = int(struct.unpack("<H", bytes(buffer_list[header_pos + 2:header_pos + 2 + 2]))[0])
except struct.error:
raise HipnucFrame_NotCompleted_Exception
end_pos = header_pos + 5 + frame_len
# 驗證幀長度
_verify_frame_length(buffer_list, header_pos)
# 驗證crc
_verify_frame_crc(buffer_list, header_pos)
return header_pos, end_pos
# 從完整幀中獲取資訊
def extraction_information_from_frame(frame_list:list, inf_fifo,report_datatype: dict = None):
# 幀率統計
global SamplesReceived
global rel_node_num
global module_node_num
SamplesReceived = SamplesReceived + 1
# 處理數據幀
data_dic = {}
pos = 0
data_frame_list = frame_list[6:]
#遍歷解析數據段內包含的數據
while len(data_frame_list) > 0:
if data_frame_list[0] in data_packet_properties:
temp_dic = data_packet_properties[data_frame_list[0]]["parse method"](data_frame_list[1:],module_node_num)
try:
if report_datatype[data_packet_properties[data_frame_list[0]]["type"]] == True:
data_dic.update(temp_dic)
else:
pass
except KeyError:
pass
if data_packet_properties[data_frame_list[0]]["gw_data"] == True:
rel_node_num = module_node_num
else:
rel_node_num = 1
id_len = data_packet_properties[data_frame_list[0]]["id_len"]
if data_frame_list[0] == 0x62:
data_len = 76 * rel_node_num + 8
elif data_frame_list[0] == 0x63:
data_len = 24 * rel_node_num + 8
else:
data_len = data_packet_properties[data_frame_list[0]]["data_len"] * rel_node_num
data_frame_list = data_frame_list[id_len + data_len:]
else:
# raise HipnucFrame_ErrorFrame_Exception
data_frame_list = data_frame_list[1:]
inf_fifo.put(data_dic)
| hipnuc/products | examples/Python/hipnuc_protocol.py | hipnuc_protocol.py | py | 21,962 | python | en | code | 56 | github-code | 36 | [
{
"api_name": "struct.unpack",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_n... |
17751518002 | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import copy
import json
import os
SPLITS = ["train", "dev", "devtest", "teststd"]
def get_image_name(scene_ids, turn_ind):
"""Given scene ids and turn index, get the image name.
"""
sorted_scene_ids = sorted(
((int(key), val) for key, val in scene_ids.items()),
key=lambda x: x[0],
reverse=True
)
# NOTE: Hardcoded to only two scenes.
if turn_ind >= sorted_scene_ids[0][0]:
scene_label = sorted_scene_ids[0][1]
else:
scene_label = sorted_scene_ids[1][1]
image_label = scene_label
if "m_" in scene_label:
image_label = image_label.replace("m_", "")
return f"{image_label}.png", scene_label
def get_object_mapping(scene_label, args):
"""Get the object mapping for a given scene.
"""
scene_json_path = os.path.join(
args["scene_json_folder"], f"{scene_label}_scene.json"
)
with open(scene_json_path, "r") as file_id:
scene_objects = json.load(file_id)["scenes"][0]["objects"]
object_map = [ii["index"] for ii in scene_objects]
return object_map
def main(args):
for split in SPLITS:
read_path = args[f"simmc_{split}_json"]
print(f"Reading: {read_path}")
with open(read_path, "r") as file_id:
dialogs = json.load(file_id)
# Reformat into simple strings with positive and negative labels.
# (dialog string, label)
ambiguous_candidates_data = []
for dialog_id, dialog_datum in enumerate(dialogs["dialogue_data"]):
history = []
for turn_ind, turn_datum in enumerate(dialog_datum["dialogue"]):
history.append(turn_datum["transcript"])
annotations = turn_datum["transcript_annotated"]
if annotations.get("disambiguation_label", False):
label = annotations["disambiguation_candidates"]
image_name, scene_label = get_image_name(
dialog_datum["scene_ids"], turn_ind
)
# If dialog contains multiple scenes, map it accordingly.
object_map = get_object_mapping(scene_label, args)
new_datum = {
"dialog_id": dialog_datum["dialogue_idx"],
"turn_id": turn_ind,
"input_text": copy.deepcopy(history),
"ambiguous_candidates": label,
"image_name": image_name,
"object_map": object_map,
}
ambiguous_candidates_data.append(new_datum)
# Ignore if system_transcript is not found (last round teststd).
if turn_datum.get("system_transcript", None):
history.append(turn_datum["system_transcript"])
print(f"# instances [{split}]: {len(ambiguous_candidates_data)}")
save_path = os.path.join(
args["ambiguous_candidates_save_path"],
f"simmc2.1_ambiguous_candidates_dstc11_{split}.json"
)
print(f"Saving: {save_path}")
with open(save_path, "w") as file_id:
json.dump(
{
"source_path": read_path,
"split": split,
"data": ambiguous_candidates_data,
},
file_id
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--simmc_train_json", default=None, help="Path to SIMMC 2.1 train"
)
parser.add_argument(
"--simmc_dev_json", default=None, help="Path to SIMMC 2.1 dev"
)
parser.add_argument(
"--simmc_devtest_json", default=None, help="Path to SIMMC 2.1 devtest"
)
parser.add_argument(
"--simmc_teststd_json", default=None, help="Path to SIMMC 2.1 teststd (public)"
)
parser.add_argument(
"--scene_json_folder", default=None, help="Path to SIMMC scene jsons"
)
parser.add_argument(
"--ambiguous_candidates_save_path",
required=True,
help="Path to save SIMMC disambiguate JSONs",
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
main(parsed_args)
| facebookresearch/simmc2 | model/ambiguous_candidates/format_ambiguous_candidates_data.py | format_ambiguous_candidates_data.py | py | 4,414 | python | en | code | 98 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 48... |
23280705402 | '''
Description:
Author: weihuang
Date: 2021-11-18 15:47:44
LastEditors: weihuang
LastEditTime: 2021-11-22 22:38:26
'''
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
class CrossEntropy2d(nn.Module):
def __init__(self, reduction="mean", ignore_label=255):
super(CrossEntropy2d, self).__init__()
self.reduction = reduction
self.ignore_label = ignore_label
def forward(self, predict, target, weight=None):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
assert not target.requires_grad
assert predict.dim() == 4
assert target.dim() == 3
assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(3))
n, c, h, w = predict.size()
target_mask = (target >= 0) * (target != self.ignore_label)
target = target[target_mask]
if not target.data.dim():
return Variable(torch.zeros(1))
predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
loss = F.cross_entropy(predict, target, weight=weight, reduction=self.reduction)
return loss
class FocalLoss(nn.Module):
    """Focal loss built on top of softmax cross-entropy.

    With ``gamma=0`` and ``alpha=1`` (the defaults) this reduces to plain
    cross-entropy; larger ``gamma`` down-weights easy examples.
    """

    def __init__(self, alpha=1, gamma=0, size_average=True):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.size_average = size_average

    def forward(self, inputs, targets):
        # Per-sample cross-entropy, then scale by alpha * (1 - p_t)^gamma
        # where p_t = exp(-CE) is the probability of the true class.
        per_sample_ce = F.cross_entropy(inputs, targets, reduction='none')
        prob_true = torch.exp(-per_sample_ce)
        scaled = self.alpha * (1 - prob_true) ** self.gamma * per_sample_ce
        return scaled.mean() if self.size_average else scaled.sum()
class MSELoss(nn.Module):
    """Mean squared error: mean of element-wise (input - target)^2."""

    def forward(self, input, target):
        diff = input - target
        return (diff * diff).mean()
class BCELoss(nn.Module):
    """Binary cross-entropy (with logits) against a constant scalar label.

    ``y_label`` is a scalar (e.g. 0.0 or 1.0) broadcast to ``y_pred``'s shape,
    the pattern used for GAN-style real/fake discriminator targets.
    """

    def forward(self, y_pred, y_label):
        y_truth_tensor = torch.FloatTensor(y_pred.size())
        y_truth_tensor.fill_(y_label)
        # BUG FIX: get_device() returns -1 for CPU tensors, which made
        # ``.to(-1)`` fail; ``.to(y_pred.device)`` works on CPU and GPU alike.
        y_truth_tensor = y_truth_tensor.to(y_pred.device)
        return nn.BCEWithLogitsLoss()(y_pred, y_truth_tensor)
class WeightedBCELoss(nn.Module):
    """Element-wise weighted binary cross-entropy.

    Expects ``input_y`` to already be probabilities (not logits); ``weight``
    rescales each element's contribution before averaging.
    """

    def forward(self, input_y, target, weight):
        return F.binary_cross_entropy(input_y, target, weight=weight)
class L1Loss_weighted(nn.Module):
    """Mean of the element-wise weighted absolute error."""

    def forward(self, input, target, weights):
        return (weights * (input - target).abs()).mean()
def weighted_l1_loss(input, target, weights):
    """Functional twin of ``L1Loss_weighted``: mean weighted absolute error."""
    return torch.mean(weights * torch.abs(input - target))
def bce_loss(y_pred, y_label):
    # Functional twin of BCELoss: binary cross-entropy (with logits) of
    # ``y_pred`` against the constant scalar ``y_label`` broadcast to its shape.
    y_truth_tensor = torch.FloatTensor(y_pred.size())
    y_truth_tensor.fill_(y_label)
    # NOTE(review): get_device() returns -1 for CPU tensors, so this line
    # fails off-GPU; ``.to(y_pred.device)`` would work everywhere — confirm.
    y_truth_tensor = y_truth_tensor.to(y_pred.get_device())
    return nn.BCEWithLogitsLoss()(y_pred, y_truth_tensor) | weih527/DA-ISC | scripts/loss/loss.py | loss.py | py | 3,347 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
... |
13421938791 | import os
import io
import re
import sys
import pandas as pd
import ujson as json
from argparse import ArgumentParser, FileType
from rasm import rasm
# Mapping from logical rule-group names to the individual tajweed rule ids
# that make them up; used to aggregate per-rule counts when --groups is set.
RULE_GROUPS = {
    'ASSIM-M': ['M1', 'M2'],
    'ASSIM-N': ['N2.1.1.A', 'N2.1.1.B', 'N2.1.1.C', 'N2.1.1.D', 'N2.1.2.A', 'N2.1.2.B', 'N2.1.2.C', 'N2.1.2.D', 'N2.2.A', 'N2.2.B', 'N2.2.C',
                'N2.2.D', 'N3.A', 'N3.B', 'N3.C', 'N3.D', 'N4.A', 'N4.B', 'N4.C', 'N4.D'],
    'ASSIM': ['MITHL-bb', 'MITHL-dd', 'MITHL-kk', 'MITHL-ll', 'MITHL-yy', 'MITHL-hh', 'MITHL-ww', 'MITHL-tt', 'MITHL-rr', 'MITHL-ðð', 'MITHL-ff', 'MITHL-33',
              'MTJNS-dt', 'MTJNS-td', 'MTJNS-tT', 'MTJNS-ṯð', 'MTJNS-lr', 'MTJNS-ðṮ', 'MTJNS-qk', 'MTJNS-bm', 't-assim'],
    'MADDA': ['MADD-hmz', 'MADD-hmz-A-sil', 'MADD-hmz-sp-1', 'MADD-hmz-sp-2', 'MADD-hmz-sp-3', 'MADD-hmz-sp-4', 'MADD-lzm', 'MADD-shdd-skn', 'MADD-sp-1', 'MADD-sp-2',
              'MADD-sp-3', 'MADD-sp-4', 'MADD-sp-5', 'MADD-sp-6', 'MADD-sp-7', 'MADD-sp-8', 'MADD-sp-9', 'MADD-sp-A', 'MADD-sp-B', 'MADD-sp-C', 'MADD-sp-D'],
    'SHAMS': ['SHMS'],
    'CLIT-H': ['HU', 'HI'],
    'MIN-W': ['min-u-1', 'min-u-2', 'min-u-3', 'min-u-4', 'min-u-5', 'min-u-6', 'min-u-7', 'min-u-8'],
    'MIN-Y': ['min-y-1', 'min-y-2', 'min-y-3', 'min-y-4', 'min-y-5', 'min-y-6', 'min-y-7', 'min-y-8', 'min-y-9', 'min-y-A'],
    'P_SIL': ['P-sil-1', 'P-sil-2'],
    'SIL': ['Sil-1', 'Sil-2', 'Sil-3', 'Sil-4', 'Sil-5', 'Sil-6', 'Sil-7', 'Sil-8', 'Sil-9', 'Sil-A', 'Sil-B', 'Sil-C', 'Sil-D', 'Sil-E', 'Sil-F', 'Sil-G'],
    'SAKT': ['sakt-1', 'sakt-2', 'sakt-3'],
    'HAPAX': ['hapax-1', 'hapax-2', 'hapax-3'],
}

#FIXME add reference
# According to Nicolai Sinai the following list seems to be additions:
# Q. 52:21, 53:23, 53:26–32, 69:7, 73:20, 74:31, 74:56, 78:37–40, 81:29, 84:25, 85:7–11, 87:7, 89:15–16, 89:23–24, 89:27–30, 90:17–20, 95:6, 97:4, and 103:3
# (sura, verse) pairs excluded/selected by --exclude / --restrict.
EXCLUDE = [(52,21), (53,23), (53,26), (53,27), (53,28), (53,29), (53,30), (53,31), (53,32),
           (69,7),
           (73,20), (74,31), (74,56),
           (78,37), (78,38), (78,39), (78,40),
           (81,29),
           (84,25), (85,7), (85,8), (85,9), (85,10), (85,11),
           (87,7),
           (89,15), (89,16), (89,23), (89,24), (89,27), (89,28), (89,29), (89,30), (90,17), (90,18), (90,19), (90,20),
           (95,6),
           (97,4),
           (103,3)]


if __name__ == '__main__':

    parser = ArgumentParser(description='add or remove tajweed phonetic layer to orthography in Quranic text')
    parser.add_argument('infile', nargs='?', type=FileType('r'), default=sys.stdin, help='counts json file')
    parser.add_argument('outfile', help='csv outfile') #DEBUG
    parser.add_argument('--groups', action='store_true', help='aggregate the rules in logical groups and sum the counts')
    parser.add_argument('--rm_hapax', action='store_true', help='do not include hapax rule')
    parser.add_argument('--exclude', action='store_true', help='exclude verses that have been identified as possible later additions')
    parser.add_argument('--restrict', action='store_true', help='include ONLY verses have been identified as possible later additions')
    parser.add_argument('--chunks', metavar='SIZE', type=int, help='aggregate counts in chunks of words instead of by verses')
    parser.add_argument('--debug', action='store_true', help='show debugging info')
    args = parser.parse_args()

    # counts: JSON mapping rule id -> list of token records; each record is
    # presumably {'ind': quranic index tuple, 'bound': bool, 'cnt': int} —
    # inferred from the accesses below, confirm against the producer script.
    counts = json.load(args.infile)

    # Inverse mapping: individual rule id -> its group name.
    RULE_MAPPER = {rule:gr for gr,rule_li in RULE_GROUPS.items() for rule in rule_li}

    # Template of zeroed counters, keyed by group or by individual rule.
    if args.groups:
        _cnt = dict(zip(RULE_GROUPS, [0]*len(RULE_GROUPS)))
    else:
        _cnt = dict(zip(RULE_MAPPER, [0]*len(RULE_MAPPER)))

    cnt_inner = dict(_cnt)
    cnt_bound = dict(_cnt)

    # One output row per quranic word index; rule counts are split into
    # word-internal ('_I') and word-boundary ('_B') columns.
    rows = []
    for qara, _, _, qpal, qind in rasm(((1,1,1,1), (114,6,3,4)), paleo=True):
        if args.restrict and (qind[0], qind[1]) not in EXCLUDE:
            continue
        if args.exclude and (qind[0], qind[1]) in EXCLUDE:
            continue
        for rule, tokens in counts.items():
            if args.rm_hapax and rule in RULE_GROUPS['HAPAX']:
                continue
            if args.groups:
                rule = RULE_MAPPER[rule]
            for token in tokens:
                if tuple(token['ind']) == qind:
                    if token['bound']:
                        cnt_bound[rule] += token['cnt']
                    else:
                        cnt_inner[rule] += token['cnt']
        rows.append({**{'qindex': ':'.join(map(str, qind))},
                     **{k+'_I':v for k,v in cnt_inner.items()},
                     **{k+'_B':v for k,v in cnt_bound.items()}})
        # Reset the counters for the next word.
        cnt_inner = dict(_cnt)
        cnt_bound = dict(_cnt)

    # Optionally merge consecutive word rows into fixed-size chunks, summing
    # the counts; the chunk keeps the qindex of its first word.
    if args.chunks:
        aux = []
        for i in range(0, len(rows), args.chunks):
            row_group = rows[i:i+args.chunks]
            qindex = row_group[0]['qindex']
            for r in row_group:
                r.pop('qindex')
            new_row = dict(zip(row_group[0].keys(), [0]*len(row_group[0])))
            for row in row_group:
                for k in row:
                    new_row[k] += row[k]
            new_row['qindex'] = qindex
            aux.append(new_row)
        rows = aux

    df = pd.DataFrame([r.values() for r in rows])
    df.columns = rows[0].keys()
    # move qindex from last column position to first position
    cols = df.columns.tolist()
    df = df[cols[-1:]+cols[:-1]]
    # remove columns that only contain zeros
    df = df.loc[:, (df!=0).any(axis=0)]
    df.to_csv(args.outfile, index=False)
| kabikaj/tajweed | src/tajweed2df.py | tajweed2df.py | py | 5,564 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "ujson.load",... |
24222891293 | import numpy as np
from collections import namedtuple
from abc import ABCMeta, abstractmethod, abstractstaticmethod
# Decision tree node.
# Parameters
# ----------
# feature : index of the feature this node tests (None on leaves)
# val : feature value; for discrete features, samples whose value equals
#       ``val`` go to the left subtree, all others to the right subtree
# left : left subtree
# right : right subtree
# label : predicted class (set on leaf nodes only, otherwise None)
TreeNode = namedtuple("TreeNode", 'feature val left right label')


class DecisionTreeClassifier(object):
    """
    Decision-tree classifier trained with the CART algorithm
    (binary splits chosen by *minimum* conditional Gini impurity).
    """

    def __init__(self, max_depth=10, min_num_leaf=2):
        """
        :param max_depth: maximum depth of the tree
        :param min_num_leaf: minimum number of samples required to split a node
        """
        self.max_depth = max_depth
        self.min_num_leaf = min_num_leaf

    @staticmethod
    def devide_X(X_, feature, val):
        """Split a sample set on one feature value.

        :param X_: samples to split, shape=[ni_samples, n_features + 1]
        :param feature: index of the split feature
        :param val: split value (equal -> left, not equal -> right)
        :return: (left set, right set)
        """
        return X_[X_[:, feature] == val], X_[X_[:, feature] != val]

    @staticmethod
    def gini(D):
        """Gini impurity Gini(D) of a label vector.

        :param D: labels, shape = [ni_samples]
        :return: Gini(D)
        """
        _, cls_counts = np.unique(D, return_counts=True)
        probability = cls_counts / cls_counts.sum()
        return 1 - (probability ** 2).sum()

    @staticmethod
    def congini(D_, val):
        """Conditional Gini index Gini(D, A) for a binary split on ``val``.

        :param D_: two columns [feature value, label], shape=[ni_samples, 2]
        :param val: candidate split value of the feature
        :return: Gini(D, A), the size-weighted impurity of the two halves
        """
        left, right = D_[D_[:, 0] == val], D_[D_[:, 0] != val]
        return (DecisionTreeClassifier.gini(left[:, -1]) * left.shape[0] / D_.shape[0] +
                DecisionTreeClassifier.gini(right[:, -1]) * right.shape[0] / D_.shape[0])

    @staticmethod
    def get_fea_best_val(D_):
        """Best split value for one feature column.

        BUG FIX: CART selects the split that *minimises* the conditional Gini
        index; the original code used argmax/max, which picked the worst split.

        :param D_: two columns [feature value, label], shape=[ni_samples, 2]
        :return: (best split value, its conditional Gini index)
        """
        vals = np.unique(D_[:, :-1])
        tmp = np.array([DecisionTreeClassifier.congini(D_, val) for val in vals])
        return vals[np.argmin(tmp)], tmp.min()

    @staticmethod
    def get_best_index(X_, features):
        """Best (feature, value) split across the candidate features.

        :param X_: candidate samples, shape=[ni_samples, n_features + 1]
        :param features: candidate feature indices
        :return: (best feature index, best split value) — minimum Gini overall
        """
        ginis = [
            DecisionTreeClassifier.get_fea_best_val(
                np.c_[X_[:, i], X_[:, -1]]) for i in features]
        ginis = np.array(ginis)
        # Column 1 holds the conditional Gini of each feature's best split.
        i = np.argmin(ginis[:, 1])
        return features[i], ginis[i, 0]

    def build(self, X_, features, depth=0):
        """Recursively build the tree.

        :param X_: candidate samples, shape=[ni_samples, n_features + 1]
            (last column holds the labels)
        :param features: candidate feature indices
        :param depth: current depth (root = 0)
        :return: the (sub)tree's root TreeNode
        """
        # Pure node: all samples share one label -> leaf.
        if np.unique(X_[:, -1]).shape[0] == 1:
            return TreeNode(None, None, None, None, X_[0, -1])
        # Stop if no features remain, too few samples, or max depth reached;
        # predict the majority class.  BUG FIX: depth previously defaulted to
        # None, so the max_depth limit was never enforced.
        if features.shape[0] == 0 or X_.shape[0] <= self.min_num_leaf or depth >= self.max_depth:
            classes, classes_count = np.unique(X_[:, -1], return_counts=True)
            return TreeNode(None, None, None, None, classes[np.argmax(classes_count)])
        feature_index, val = DecisionTreeClassifier.get_best_index(X_, features)
        # Each feature is used at most once per path.
        new_features = features[features != feature_index]
        left, right = DecisionTreeClassifier.devide_X(X_, feature_index, val)
        left_branch = self.build(left, new_features, depth + 1)
        right_branch = self.build(right, new_features, depth + 1)
        return TreeNode(feature_index, val, left_branch, right_branch, None)

    def fit(self, X, y):
        """
        :param X: shape = [n_samples, n_features]
        :param y: shape = [n_samples]
        :return: self
        """
        features = np.arange(X.shape[1])
        X_ = np.c_[X, y]
        self.root = self.build(X_, features)
        return self

    def predict_one(self, x):
        """Predict the class of a single sample by walking the tree."""
        p = self.root
        while p.label is None:
            p = p.left if x[p.feature] == p.val else p.right
        return p.label

    def predict(self, X):
        """
        :param X: shape = [n_samples, n_features]
        :return: predicted labels, shape = [n_samples]
        """
        return np.array([self.predict_one(x) for x in X])
if __name__ == '__main__':
    # Demo/self-test: train on a small hand-made dataset (last column is the
    # label) and classify one sample.  Guarded so importing this module no
    # longer trains and prints as an import-time side effect.
    cluster = np.array([
        [1, 2, 3, 0],
        [2, 2, 3, 1],
        [2, 3, 4, 0],
        [1, 2, 3, 0],
        [2, 2, 3, 1],
        [2, 3, 4, 0],
        [1, 2, 2, 0],
        [2, 2, 3, 1],
        [1, 2, 3, 0],
        [2, 2, 3, 1],
        [2, 3, 4, 0],
        [1, 2, 3, 0],
        [2, 2, 3, 1],
        [2, 3, 4, 0],
        [1, 2, 2, 0],
        [2, 2, 3, 1],
        [2, 2, 3, 1],
        [2, 3, 4, 0],
        [1, 2, 2, 0],
        [2, 2, 3, 1],
        [1, 2, 3, 0],
        [2, 2, 3, 1],
        [2, 3, 4, 0],
        [1, 2, 3, 0],
        [1, 2, 2, 0],
        [2, 2, 3, 1],
        [2, 2, 3, 1],
        [2, 3, 4, 0],
        [1, 2, 2, 0],
    ])
    train_x = cluster[:, 0:-1]
    train_y = cluster[:, -1]
    iv_clf = DecisionTreeClassifier()
    iv_clf.fit(train_x, train_y)
    print(iv_clf.predict_one([2, 2, 4]))
| samzzyy/RootCauseAnalysisOfProductionLineFailure | PathAna/DT_IV2.py | DT_IV2.py | py | 5,356 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"l... |
25099906773 | import ast
import re
import tokenize
import unicodedata
from sympy.parsing import sympy_parser
from sympy.core.basic import Basic
#####
# Process Unicode characters into equivalent allowed characters:
#####
# Unicode number and fraction name information:
_NUMBERS = {"ZERO": 0, "ONE": 1, "TWO": 2, "THREE": 3, "FOUR": 4, "FIVE": 5, "SIX": 6, "SEVEN": 7, "EIGHT": 8, "NINE": 9}
_FRACTIONS = {"HALF": 2, "THIRD": 3, "QUARTER": 4, "FIFTH": 5, "SIXTH": 6, "SEVENTH": 7, "EIGHTH": 8, "NINTH": 9, "TENTH": 10}
# Also accept plural names ("THIRDS", ...); "HALF" pluralises irregularly so is skipped.
_FRACTIONS.update({"{}S".format(key): value for key, value in _FRACTIONS.items() if key != "HALF"})

# Major uppercase and lowercase Greek letters, excluding 'GREEK SMALL LETTER FINAL SIGMA' (\u03C2):
_GREEK_LETTERS_REGEX = r"[\u0391-\u03A9\u03B1-\u03C1\u03C3-\u03C9]"


def process_unicode_chars(match_object):
    """Clean a string of Unicode characters into allowed characters if possible.

    ``match_object`` is a regex match; every character of its group(0) is
    mapped via its Unicode character name: superscript digits become ``**n``
    (runs continue an existing exponent), subscript digits ``_n``, vulgar
    fractions ``(a/b)``, typographic operator signs their ASCII forms, and
    Greek letters a parenthesised letter name.  Characters with no mapping
    pass through unchanged.
    """
    result = ""
    prev_name = None  # Unicode name of the previous character, for run detection.
    for char in match_object.group(0):
        name = unicodedata.name(char, None)
        if name is None:
            result += char
        elif name.startswith("SUPERSCRIPT") and name.split()[1] in _NUMBERS:
            number = name.split()[1]
            # Check if this is a continuation of a exponent, or a new one.
            if prev_name is not None and prev_name.startswith("SUPERSCRIPT"):
                result += "{0:d}".format(_NUMBERS[number])
            else:
                result += "**{0:d}".format(_NUMBERS[number])
        elif name.startswith("SUBSCRIPT") and name.split()[1] in _NUMBERS:
            number = name.split()[1]
            # Check if this is a continuation of a subscript, or a new one.
            if prev_name is not None and prev_name.startswith("SUBSCRIPT"):
                result += "{0:d}".format(_NUMBERS[number])
            else:
                result += "_{0:d}".format(_NUMBERS[number])
        elif name.startswith("VULGAR FRACTION"):
            # Names look like "VULGAR FRACTION ONE HALF" -> "(1/2)".
            numerator_name = name.split()[2]
            denominator_name = name.split()[3]
            if numerator_name in _NUMBERS and denominator_name in _FRACTIONS:
                result += "({0:d}/{1:d})".format(_NUMBERS[numerator_name], _FRACTIONS[denominator_name])
            else:
                result += char
        elif name in ["MULTIPLICATION SIGN", "ASTERISK OPERATOR"]:
            result += "*"
        elif name in ["DIVISION SIGN", "DIVISION SLASH"]:
            result += "/"
        elif name in ["LESS-THAN OR EQUAL TO", "LESS-THAN OR SLANTED EQUAL TO"]:
            result += "<="
        elif name in ["GREATER-THAN OR EQUAL TO", "GREATER-THAN OR SLANTED EQUAL TO"]:
            result += ">="
        elif name in ["LOGICAL AND", "N-ARY LOGICAL AND"]:
            result += "&"
        elif name in ["LOGICAL OR", "N-ARY LOGICAL OR"]:
            result += "|"
        elif name in ["XOR", "CIRCLED PLUS"]:
            result += "^"
        elif name == "NOT SIGN":
            result += "~"
        elif re.match(_GREEK_LETTERS_REGEX, char):
            # There are more Greek letters with names like the below than usually
            # supported by maths systems. The regex is a quick way to filter by Unicode
            # codepoint.
            if name.startswith("GREEK CAPITAL LETTER"):
                result += "({})".format(name.replace("GREEK CAPITAL LETTER ", "").title())
            elif name.startswith("GREEK SMALL LETTER"):
                result += "({})".format(name.replace("GREEK SMALL LETTER ", "").lower())
            else:
                # Something is wrong, skip character:
                result += char
        else:
            result += char
        prev_name = name
    return result
#####
# Customised SymPy Internals:
#####
# What constitutes a relation?
# Maps Python AST comparison operator classes to the relation strings
# understood by SymPy's Rel(...).
RELATIONS = {ast.Lt: "<", ast.LtE: "<=", ast.Gt: ">", ast.GtE: ">="}


def evaluateFalse(s):
    """Replaces operators with the SymPy equivalents and set evaluate=False.

    Unlike the built-in evaluateFalse(...), we want to use a slightly more
    sophisticated EvaluateFalseTransformer and make operators AND functions
    evaluate=False.
     - 's' should be a string of Python code for the maths abstract syntax tree.
    """
    node = ast.parse(s)
    node = _EvaluateFalseTransformer().visit(node)
    # node is a Module, we want an Expression
    node = ast.Expression(node.body[0].value)
    # Fill in line/col info for nodes the transformer created, so the tree
    # can be compiled.
    return ast.fix_missing_locations(node)
class _EvaluateFalseTransformer(sympy_parser.EvaluateFalseTransformer):
    """Extend default SymPy EvaluateFalseTransformer to affect functions too.

    The SymPy version does not force function calls to be 'evaluate=False',
    which means expressions like "log(x, 10)" get simplified to "log(x)/log(10)"
    or "cos(-x)" becomes "cos(x)". For our purposes, this is unhelpful and so
    we also prevent this from occuring.

    Currently there is a list of functions not to transform, because some do
    not support the "evaluate=False" argument. This isn't particularly nice or
    future proof!
    """
    # Reusable AST fragments: the evaluate=False keyword argument, Integer(1),
    # and the unary-minus form of it used to express subtraction/division.
    _evaluate_false_keyword = ast.keyword(arg='evaluate', value=ast.NameConstant(value=False, ctx=ast.Load()))
    _one = ast.Call(func=ast.Name(id='Integer', ctx=ast.Load()), args=[ast.Num(n=1)], keywords=[])
    _minus_one = ast.UnaryOp(op=ast.USub(), operand=_one)

    # Boolean AST operator classes -> the SymPy function names that replace them.
    _bool_ops = {
        ast.And: "And",
        ast.Or: "Or"
    }
    _bool_values = {
        # CPython 3.8 and later no longer allow overridding of "True" and "False" as names,
        # so we must use "true" and "false" instead.
        # https://github.com/python/cpython/blob/3.8/Python/ast.c#L28-L39
        True: ast.Name(id="true", ctx=ast.Load()),
        False: ast.Name(id="false", ctx=ast.Load())
    }

    def visit_Call(self, node):
        """Ensure all function calls are 'evaluate=False'."""
        # Since we have overridden the visit method, we are now responsible for
        # ensuring all child nodes are visited too. This is done most simply by
        # calling generic_visit(...) on ourself:
        self.generic_visit(node)
        # FIXME: Some functions cannot accept "evaluate=False" as an argument
        # without their __new__() method raising a TypeError. There is probably
        # some underlying reason which we could take into account of.
        # For now, ignore those known to be problematic:
        _ignore_functions = ["Integer", "Float", "Symbol", "factorial"]
        if node.func.id in _ignore_functions:
            # print("\tIgnoring function: {}".format(node.func.id))
            pass
        else:
            # print("\tModifying function: {}".format(node.func.id))
            node.keywords.append(self._evaluate_false_keyword)
        # We must return the node, modified or not:
        return node

    def visit_Constant(self, node):
        """Ensure boolean constants can be sympy classes if needed."""
        # As above, must ensure child nodes are visited:
        self.generic_visit(node)
        # Replace the built-in True/False with names we can override if needed:
        if node.value in self._bool_values:
            return self._bool_values[node.value]
        else:
            return node

    def visit_Compare(self, node):
        """Ensure all comparisons use sympy classes with 'evaluate=False'."""
        # Can't cope with comparing multiple inequalities:
        if len(node.comparators) > 1:
            raise TypeError("Cannot parse nested inequalities!")
        # As above, must ensure child nodes are visited:
        self.generic_visit(node)
        # Use the custom Equals class if equality, otherwise swap with a known relation:
        operator_class = node.ops[0].__class__
        if isinstance(node.ops[0], ast.Eq):
            return ast.Call(func=ast.Name(id='Eq', ctx=ast.Load()), args=[node.left, node.comparators[0]], keywords=[self._evaluate_false_keyword])
        elif operator_class in RELATIONS:
            return ast.Call(func=ast.Name(id='Rel', ctx=ast.Load()), args=[node.left, node.comparators[0], ast.Str(RELATIONS[operator_class])], keywords=[self._evaluate_false_keyword])
        else:
            # An unknown type of relation. Leave alone:
            return node

    def sympy_visit_BinOp(self, node):
        """Convert some operators to SymPy equivalents.

        This method is mostly copied from SymPy directly, but there is a fix
        to the nesting of Mul not yet in the upstream!
        """
        if node.op.__class__ in self.operators:
            sympy_class = self.operators[node.op.__class__]
            right = self.visit(node.right)
            left = self.visit(node.left)
            # Keep the negated operand on the right of a Mul so that the
            # minus-one factor introduced below stays leftmost:
            if isinstance(node.left, ast.UnaryOp) and not isinstance(node.right, ast.UnaryOp) and sympy_class in ('Mul',):
                left, right = right, left
            if isinstance(node.op, ast.Sub):
                # a - b  ==>  Add(a, Mul(-1, b))
                right = ast.Call(
                    func=ast.Name(id='Mul', ctx=ast.Load()),
                    # Ensure Mul objects don't end up nested:
                    args=self.flatten([self._minus_one, right], 'Mul'),
                    keywords=[self._evaluate_false_keyword]
                )
            if isinstance(node.op, ast.Div):
                # a / b  ==>  Mul(a, Pow(b, -1))
                if isinstance(node.left, ast.UnaryOp):
                    if isinstance(node.right, ast.UnaryOp):
                        left, right = right, left
                    left = ast.Call(
                        func=ast.Name(id='Pow', ctx=ast.Load()),
                        args=[left, self._minus_one],
                        keywords=[self._evaluate_false_keyword]
                    )
                else:
                    right = ast.Call(
                        func=ast.Name(id='Pow', ctx=ast.Load()),
                        args=[right, self._minus_one],
                        keywords=[self._evaluate_false_keyword]
                    )
            new_node = ast.Call(
                func=ast.Name(id=sympy_class, ctx=ast.Load()),
                args=[left, right],
                keywords=[self._evaluate_false_keyword]
            )
            if sympy_class in ('Add', 'Mul'):
                # Denest Add or Mul as appropriate
                new_node.args = self.flatten(new_node.args, sympy_class)
            return new_node
        return node

    def visit_BinOp(self, node):
        """Ensure bitwise operations are modified into SymPy operations.

        This function also calls a SymPy function to convert Add, Sub, Mul
        and Div into SymPy classes.
        """
        node_class = node.op.__class__
        # "Implies", which overloads bit-shifting, mustn't get simplified:
        if node_class in [ast.LShift, ast.RShift]:
            # As above, must ensure child nodes are visited:
            right = self.generic_visit(node.right)
            left = self.generic_visit(node.left)
            # "a >> b" means "a implies b"; "a << b" means "b implies a":
            if node_class is ast.LShift:
                left, right = right, left
            return ast.Call(func=ast.Name(id="Implies", ctx=ast.Load()), args=[left, right], keywords=[self._evaluate_false_keyword])
        # "Xor" must be transformed from Bitwise to Boolean, and not simplified either:
        elif node_class == ast.BitXor:
            right = self.generic_visit(node.right)
            left = self.generic_visit(node.left)
            return ast.Call(func=ast.Name(id="Xor", ctx=ast.Load()), args=[left, right], keywords=[self._evaluate_false_keyword])
        else:
            # Otherwise we want ensure the parent SymPy method runs on this node,
            # to save re-writing that code here:
            return self.sympy_visit_BinOp(node)

    def visit_BoolOp(self, node):
        """Ensure And and Or are not simplified."""
        # As above, must ensure child nodes are visited:
        self.generic_visit(node)
        # Fix the boolean operations to SymPy versions to ensure no simplification:
        node_class = node.op.__class__
        if node_class in self._bool_ops:
            return ast.Call(func=ast.Name(id=self._bool_ops[node_class], ctx=ast.Load()), args=node.values, keywords=[self._evaluate_false_keyword])
        else:
            # An unknown type of boolean operation. Leave alone:
            return node

    def visit_UnaryOp(self, node):
        """Ensure boolean Not is not simplified and unary minus is consistent."""
        node_class = node.op.__class__
        # As above, must ensure child nodes are visited:
        self.generic_visit(node)
        # Fix the boolean Not to the SymPy version to ensure no simplification:
        if node_class in [ast.Not, ast.Invert]:
            return ast.Call(func=ast.Name(id="Not", ctx=ast.Load()), args=[node.operand], keywords=[self._evaluate_false_keyword])
        # Replace all uses of unary minus with multiplication by minus one for
        # consistency reasons:
        elif node_class in [ast.USub]:
            return ast.Call(func=ast.Name(id='Mul', ctx=ast.Load()),
                            # Ensure Mul objects don't end up nested:
                            args=self.flatten([self._minus_one, node.operand], 'Mul'),
                            keywords=[self._evaluate_false_keyword])
        else:
            # Only interested these; leave everything else alone:
            return node

#    def visit(self, node):
#        """Visit every node in the tree."""
#        before = ast.dump(node)
#        # MUST call super method to ensure tree is iterated over correctly!
#        node = super().visit(node)
#        after = ast.dump(node)
#        print(node.__class__)
#        print("{}\n==>\n{}\n\n".format(before, after))
#        return node
#####
# Custom SymPy Parser Transformations:
#####
def auto_symbol(tokens, local_dict, global_dict):
    """Replace the sympy builtin auto_symbol with a much more aggressive version.

    We have to replace this, because SymPy attempts to be too accepting of
    what it considers to be valid input and allows Pythonic behaviour.
    Any NAME token not found in ``local_dict``/``global_dict`` (or found in
    ``global_dict`` but not a SymPy object, type or callable) is rewritten
    as the token stream for ``Symbol('<name>')``, so only pure mathematics
    notation survives.
    """
    output = []
    for tok in tokens:
        tok_type, tok_string = tok
        if tok_type != tokenize.NAME:
            # Non-name tokens pass straight through.
            output.append(tok)
            continue
        if tok_string in local_dict:
            output.append((tokenize.NAME, tok_string))
            continue
        if tok_string in global_dict:
            known_object = global_dict[tok_string]
            # Functions and SymPy classes must stay callable, not become Symbols:
            if isinstance(known_object, (Basic, type)) or callable(known_object):
                output.append((tokenize.NAME, tok_string))
                continue
        # Unknown name: emit Symbol('<name>') in token form.
        output.extend([
            (tokenize.NAME, 'Symbol'),
            (tokenize.OP, '('),
            (tokenize.NAME, repr(str(tok_string))),
            (tokenize.OP, ')'),
        ])
    return output
def fix_booleans(tokens, local_dict, global_dict):
    """
    Combines several fixes to the tokenising of boolean-like inputs, for efficiency.

    - Rewrite the keywords "or"/"and"/"not" (case-insensitively) as the
      bitwise operators "|"/"&"/"~", deferring precedence repair to the
      post-parsing stage — the boolean keywords bind more loosely than
      "==", which broke inputs like "A == B or C".
    - Tokenise the extra keyword "xor" as bitwise "^" so it is treated the
      same way.
    - Replace the numbers 0 and 1 with the names "false" and "true".
    """
    keyword_to_op = {"xor": "^", "or": "|", "and": "&", "not": "~"}
    digit_to_name = {"0": "false", "1": "true"}
    output = []
    for tok_type, tok_string in tokens:
        lowered = tok_string.lower()
        if tok_type == tokenize.NAME and lowered in keyword_to_op:
            output.append((tokenize.OP, keyword_to_op[lowered]))
        elif tok_type == tokenize.NUMBER and lowered in digit_to_name:
            output.append((tokenize.NAME, digit_to_name[lowered]))
        else:
            output.append((tok_type, tok_string))
    return output
# This transformation is left for reference, and excluded from coverage reports:
def split_symbols_implicit_precedence(tokens, local_dict, global_dict):  # pragma: no cover
    """Replace the sympy builtin split_symbols with a version respecting implicit multiplcation.

    By replacing this we can better cope with expressions like 1/xyz being
    equivalent to 1/(x*y*z) rather than (y*z)/x as is the default. However it
    cannot address issues like 1/2x becoming (1/2)*x rather than 1/(2*x), because
    Python's tokeniser does not respect whitespace and so cannot distinguish
    between '1/2 x' and '1/2x'.

    This transformation is unlikely to be used, but is provided as proof of concept.
    """
    result = []
    split = False           # inside a Symbol('...') call whose name may be split
    split_previous = False  # swallow the ')' of a Symbol call that was split
    for tok in tokens:
        if split_previous:
            # throw out closing parenthesis of Symbol that was split
            split_previous = False
            continue
        split_previous = False
        if tok[0] == tokenize.NAME and tok[1] == 'Symbol':
            split = True
        elif split and tok[0] == tokenize.NAME:
            symbol = tok[1][1:-1]  # strip the quotes around the symbol name
            if sympy_parser._token_splittable(symbol):
                # If we're splitting this symbol, wrap it in brackets by adding
                # them before the call to Symbol:
                result = result[:-2] + [(tokenize.OP, '(')] + result[-2:]
                for char in symbol:
                    if char in local_dict or char in global_dict:
                        # Get rid of the call to Symbol
                        del result[-2:]
                        result.extend([(tokenize.NAME, "{}".format(char)),
                                       (tokenize.NAME, 'Symbol'), (tokenize.OP, '(')])
                    else:
                        result.extend([(tokenize.NAME, "'{}'".format(char)), (tokenize.OP, ')'),
                                       (tokenize.NAME, 'Symbol'), (tokenize.OP, '(')])
                # Delete the last two tokens: get rid of the extraneous
                # Symbol( we just added
                # Also, set split_previous=True so will skip
                # the closing parenthesis of the original Symbol
                del result[-2:]
                split = False
                split_previous = True
                # Then close the extra brackets we added:
                result.append((tokenize.OP, ')'))
                continue
            else:
                split = False
        result.append(tok)
    return result
| isaacphysics/equality-checker | checker/parsing/utils.py | utils.py | py | 18,953 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "unicodedata.name",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "ast.Lt",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "ast.LtE",
"line_number": 91... |
41076588706 | import os
import zipfile
def unzip(filename: str, extract_to: str) -> None:
    """
    Extract a zip archive into the given destination folder.

    Relative ``filename`` values are resolved against this module's own
    directory; absolute paths are used as-is (``os.path.join`` semantics).

    :param filename: Path/filename of zip file
    :param extract_to: Path with name of output folder
    :return: None
    """
    module_dir = os.path.dirname(os.path.abspath(__file__))
    archive_path = os.path.join(module_dir, filename)
    with zipfile.ZipFile(archive_path, "r") as archive:
        archive.extractall(extract_to)
| samuelterra22/Analysis-of-antenna-coverage | src/main/python/support/extract_zip_file.py | extract_zip_file.py | py | 466 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
15478667002 | import numpy as np
import glog
import pandas as pd
import os
import gzip
def export_roi_gene(file_path: str, roi: list):
    """Crop a gene-expression matrix (GEM) file to a rectangular ROI.

    Counts the '#' header lines of the (possibly gzip-compressed) input,
    loads the matrix with pandas, keeps only records whose (x, y) fall inside
    the ROI, shifts coordinates to start at zero, and writes the result as
    ``SS2000.gem`` next to the input file.

    Args:
        file_path: path of gene file (plain text or .gz)
        roi: [x0, y0, w, h]
    Returns: None
    """
    if file_path.endswith('.gz'):
        f = gzip.open(file_path, 'rb')
    else:
        f = open(file_path, 'rb')
    glog.info('Start parse head info of file <{}>'.format(file_path))
    header = ''
    num_of_header_lines = 0
    eoh = 0
    # BUG FIX: the handle was never closed (resource leak); pandas re-opens
    # the file by path below, so we only need it for the header scan.
    try:
        for i, l in enumerate(f):
            l = l.decode("utf-8")  # read in as binary, decode first
            if l.startswith('#'):  # header lines always start with '#'
                header += l
                num_of_header_lines += 1
                eoh = f.tell()  # get end-of-header position
            else:
                break
        # find start of expression matrix
        f.seek(eoh)
    finally:
        f.close()
    x0, y0, w, h = roi
    # Column dtypes; historical files use different count-column names.
    typeColumn = {
        "geneID": 'str',
        "x": np.uint32,
        "y": np.uint32,
        "values": np.uint32,
        "UMICount": np.uint32,
        "MIDCount": np.uint32,
        "MIDCounts": np.uint32
    }
    glog.info("Loading matrix data...")
    df = pd.read_csv(file_path, header=num_of_header_lines, sep='\t', dtype=typeColumn)
    # Normalise legacy column names to MIDCount.
    if "UMICount" in df.columns:
        df = df.rename(columns={'UMICount': 'MIDCount'})
    if "MIDCounts" in df.columns:
        df = df.rename(columns={'MIDCounts': 'MIDCount'})
    # Drop records outside the ROI rectangle, then shift coordinates to the origin.
    df = df.drop(df[(df['x'] < x0) | (df['x'] > (x0 + w))].index)
    df = df.drop(df[(df['y'] < y0) | (df['y'] > (y0 + h))].index)
    df['x'] -= df['x'].min()
    df['y'] -= df['y'].min()
    output = os.path.join(os.path.dirname(file_path), 'SS2000.gem')
    glog.info('Write ROI gene file to {}'.format(output))
    df.to_csv(output, sep='\t', index=False)
def main():
    # Demo entry point: hard-coded input file and ROI ([x0, y0, w, h]).
    file_path = r'D:\code\mine\github\SS200000213BR_C5.gem.gz'
    roi = [12030, 16167, 5820, 4380]
    export_roi_gene(file_path, roi)


if __name__ == '__main__': main()
| BGIResearch/StereoCell | scripts/utils.py | utils.py | py | 2,150 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "gzip.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "glog.info",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.uint32",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint32",
"line_number... |
7661874071 | from django.template import Library
from hx_lti_initializer.models import LTIProfile
register = Library()
@register.filter_function
def list_of_possible_admins(already_in_course):
    """Template filter: pair each candidate username with its admin status.

    Args:
        already_in_course: collection of LTIProfile ids that are already
            admins in the course.

    Returns:
        List of ``(is_already_admin, username)`` tuples, one per unique
        username (LTI "preview:" placeholder accounts excluded), in
        first-seen order.
    """
    # Use sets for O(1) membership tests instead of repeated list scans.
    usernames_in_course = set()
    seen_usernames = set()
    unique_usernames = []
    for profile in LTIProfile.objects.all():
        username = profile.user.username
        if profile.id in already_in_course:
            usernames_in_course.add(username)
        if username not in seen_usernames and "preview:" not in username:
            seen_usernames.add(username)
            unique_usernames.append(username)
    return [(name in usernames_in_course, name) for name in unique_usernames]
| lduarte1991/hxat | hx_lti_initializer/templatetags/possible_admins.py | possible_admins.py | py | 780 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "hx_lti_initializer.models.LTIProfile.objects.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "hx_lti_initializer.models.LTIProfile.objects",
"line_number": 13,
... |
15943063220 | import numpy as np
from scipy.special import factorial
ident = 'dick6D'
d = 6
title_plot = r'$f_6(u)=\left(\prod_{i=2}^6 u_i^{i-1} \right) \exp\left\{\prod_{i=1}^6 u_i\right\}$'
mat_folder = 'simuDick/d6'
true_val = np.exp(1.) - np.sum(1. / factorial(np.arange(6)))
orders = [1, 2, 4, 6, 8]
min_neval = 100
max_neval = 2 * 10**7
nks = 20
nreps = 50


def phi(u):
    """Integrand f_6(u) = (prod_{i=2}^{d} u_i^{i-1}) * exp(prod_i u_i),
    vectorised over the rows of ``u`` (expected shape (n, d))."""
    monomial = np.ones(u.shape[0])
    for col in range(1, d):
        monomial *= u[:, col] ** col
    return monomial * np.exp(np.prod(u, axis=-1))
| nchopin/cubic_strat | nonvanish_xp/dick6D.py | dick6D.py | py | 475 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.exp",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.special.factorial",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_num... |
1889257914 | '''
Created on 08.02.2016.
@author: Lazar
'''
from concepts.row import Row
from textx.exceptions import TextXSemanticError
class View(object):
    """A user-interface view parsed from the DSL.

    The constructor partitions the parsed ``views`` selectors into
    :class:`Row` objects (split on ``RowSeparator`` elements) and records
    any nested sub-views.
    """

    # Names of all primitive widget types recognised by the generator.
    basic_type_names = ['text', 'number', 'checkbox', 'link',
                        'email', 'password', 'menuitem', 'menu',
                        'button', 'radio', 'form', 'label', 'image',
                        'date', 'combobox', 'list', 'table',
                        'thumbnail', 'row', 'multilist']

    def __init__(self, parent, name, views, object=None, query=None):
        self.name = name
        self.views = views
        self.parent = parent
        self.object = object
        self.query = query
        self.rows = []
        self.path = {}
        self.subviews = []
        seen = set()  # row numbers already used; reuse is a semantic error
        row = Row(self, 1)  # row currently being filled
        last_row_number = 1
        for view_selector in views:
            if view_selector.__class__.__name__ == 'RowSeparator':
                if view_selector.number < 0:
                    # Unnumbered separator: continue with the next implicit number.
                    row_number = last_row_number
                    last_row_number += 1
                else:
                    # Explicit row number: must be nonzero and not seen before.
                    if view_selector.number and view_selector.number not in seen:
                        seen.add(view_selector.number)
                    else:
                        # NOTE(review): _tx_metamodel/_tx_position look like
                        # attributes injected by textX at parse time; used here
                        # only to report the error location -- confirm.
                        line, col = self._tx_metamodel.parser.pos_to_linecol(self._tx_position)
                        raise TextXSemanticError("ERROR: (at %d, %d) More than one row at position %d." %
                                                 (line, col, view_selector.number))
                    last_row_number = view_selector.number
                    row_number = view_selector.number
                # Close the current row and start the next one.
                self.rows.append(row)
                row = Row(self, row_number)
            else:
                row.selectors.append(view_selector)
                if view_selector.__class__.__name__ == 'ViewInView':
                    self.subviews.append(view_selector.selector)
        if row not in self.rows:
            # Flush the trailing row when the selector list did not end
            # with a RowSeparator.
            self.rows.append(row)

    def __str__(self):
        return self.name

    def accept(self, visitor):
        """Visitor-pattern hook: dispatch to ``visitor.visit_view``."""
        return visitor.visit_view(self)
| lazer-nikolic/GenAn | src/concepts/view.py | view.py | py | 2,115 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "concepts.row.Row",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "textx.exceptions.TextXSemanticError",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "concepts.row.Row",
"line_number": 46,
"usage_type": "call"
}
] |
4107020337 | import sys
from collections import deque
input = sys.stdin.readline
def check(a, b, size):
    # True iff the size x size square with top-left corner (a, b) contains
    # no 'X' cell -- a 2-D prefix-sum (inclusion/exclusion) query over psa.
    return 0 == psa[a + size - 1][b + size - 1] - psa[a - 1][b + size - 1] - psa[a + size - 1][b - 1] + psa[a - 1][b - 1]


# The four orthogonal moves used by the BFS.
moves = ((1, 0), (0, 1), (-1, 0), (0, -1))
n, m = [int(x) for x in input().split()]
# psa[i][j] = number of 'X' cells in the 1-indexed grid prefix (1..i, 1..j).
psa = [[0] * (m + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
    line = input()[:-1]  # strip the trailing newline
    for j in range(1, m + 1):
        v = line[j - 1] == "X"
        psa[i][j] = psa[i - 1][j] + psa[i][j - 1] - psa[i - 1][j - 1] + v
# Binary search on the square size. First grow `high` to the largest size
# that at least fits (obstacle-free) at the start cell (1, 1).
low, high = 1, 0
while high < min(n, m) and check(1, 1, high + 1):
    high += 1
while low <= high:
    size = (low + high) // 2
    queue = deque([[1, 1]])
    # visited is a flattened (n+1) x (m+1) grid of top-left positions.
    visited = [0] * ((n + 1) * (m + 1))
    visited[m + 2] = 1  # index of the start cell: 1 * (m + 1) + 1
    bottom, right = n - size + 1, m - size + 1
    while queue:
        a, b = queue.popleft()
        if a == bottom and b == right:
            low = size + 1  # a path exists for this size -> try a bigger square
            break
        for x, y in moves:
            x = x + a
            y = y + b
            if 1 <= x <= bottom and 1 <= y <= right and not visited[x * (m + 1) + y] and check(x, y, size):
                visited[x * (m + 1) + y] = 1
                queue.append([x, y])
    else:
        high = size - 1  # BFS exhausted without reaching the goal -> smaller square
print(high)
| AAZZAZRON/DMOJ-Solutions | aac2p3.py | aac2p3.py | py | 1,238 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 25,
"usage_type": "call"
}
] |
73360145384 | # django imports
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
# lfs imports
from lfs.catalog.models import Product
from lfs.catalog.models import ProductPropertyValue
from lfs.catalog.models import Property
from lfs.catalog.models import PropertyGroup
from lfs.core.signals import product_removed_property_group
@permission_required("manage_shop", login_url="/login/")
def manage_properties(request, product_id, template_name="manage/product/properties.html"):
"""
"""
product = get_object_or_404(Product, pk=product_id)
# Generate list of product property groups; used for enter value
product_property_groups = []
for property_group in product.property_groups.all():
properties = []
for property in property_group.properties.order_by("groupspropertiesrelation"):
# Try to get the value, if it already exists.
try:
ppv = ProductPropertyValue.objects.get(property = property, product=product)
except ProductPropertyValue.DoesNotExist:
value = ""
else:
value = ppv.value
# mark selected options "selected"
options = []
for option in property.options.all():
options.append({
"id" : option.id,
"name" : option.name,
"selected" : str(option.id) == value
})
properties.append({
"id" : property.id,
"name" : property.name,
"type" : property.type,
"options" : options,
"value" : value,
})
product_property_groups.append({
"id" : property_group.id,
"name" : property_group.name,
"properties" : properties,
})
# Generate list of all property groups; used for group selection
product_property_group_ids = [p["id"] for p in product_property_groups]
shop_property_groups = []
for property_group in PropertyGroup.objects.all():
shop_property_groups.append({
"id" : property_group.id,
"name" : property_group.name,
"selected" : property_group.id in product_property_group_ids,
})
return render_to_string(template_name, RequestContext(request, {
"product" : product,
"product_property_groups" : product_property_groups,
"shop_property_groups" : shop_property_groups,
}))
@permission_required("manage_shop", login_url="/login/")
def update_property_groups(request, product_id):
"""Updates property groups for the product with passed id.
"""
selected_group_ids = request.POST.getlist("selected-property-groups")
for property_group in PropertyGroup.objects.all():
# if the group is within selected groups we try to add it to the product
# otherwise we try do delete it
if str(property_group.id) in selected_group_ids:
try:
property_group.products.get(pk=product_id)
except ObjectDoesNotExist:
property_group.products.add(product_id)
else:
property_group.products.remove(product_id)
product = Product.objects.get(pk=product_id)
product_removed_property_group.send([property_group, product])
url = reverse("lfs_manage_product", kwargs={"product_id" : product_id})
return HttpResponseRedirect(url)
@permission_required("manage_shop", login_url="/login/")
def update_properties(request, product_id):
    """Updates property values for the product with the passed id.

    Reads POSTed fields named ``property-<id>`` and creates or updates the
    corresponding ProductPropertyValue; invalid values are stored as 0.
    Redirects back to the product management page.
    """
    # The product is the same for every posted key; fetch it once instead
    # of once per property field.
    product = get_object_or_404(Product, pk=product_id)

    for key, value in request.POST.items():
        if not key.startswith("property"):
            continue
        property_id = key.split("-")[1]
        prop = get_object_or_404(Property, pk=property_id)

        # Validation is identical on both the create and update paths.
        if not prop.is_valid_value(value):
            value = 0

        try:
            ppv = ProductPropertyValue.objects.get(product=product_id, property=property_id)
        except ProductPropertyValue.DoesNotExist:
            ProductPropertyValue.objects.create(product=product, property=prop, value=value)
        else:
            ppv.value = value
            ppv.save()

    url = reverse("lfs_manage_product", kwargs={"product_id": product_id})
    return HttpResponseRedirect(url)
| django-lfs/lfs | manage/views/product/properties.py | properties.py | py | 5,016 | python | en | code | 23 | github-code | 36 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lfs.catalog.models.Product",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "lfs.catalog.models.ProductPropertyValue.objects.get",
"line_number": 31,
... |
370424566 | from selenium import webdriver
from amazon_pages.home_page import HomePage
def test__amazon():
    """Go to the book category, add the first item to the cart, change its quantity to 2."""
    driver = webdriver.Chrome()
    home = HomePage(driver)
    # Page-object chain: accept cookies -> open the books section -> pick
    # the first "nouveautes" title -> add to cart -> open cart -> set quantity.
    cart_page = home.accept_cookie()\
        .open_all_book()\
        .select_first_book_nouveautes()\
        .add_to_cart().open_cart()\
        .set_quantity(2)
    # The cart widget reports the quantity as a string.
    assert cart_page.get_quantity() == "2"
| ClemiDouce/TP-pageobject | test_amazon.py | test_amazon.py | py | 450 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "amazon_pages.home_page.HomePage",
"line_number": 9,
"usage_type": "call"
}
] |
32879521112 | from matplotlib import pyplot as plt
def visualize(flight):
    """Plot altitude and vertical velocity of a flight, one sample per time step."""
    ticks = list(range(len(flight)))
    heights = [sample.position.y for sample in flight]
    speeds = [sample.velocity.y for sample in flight]

    plt.figure()
    # Top panel: altitude over time.
    plt.subplot(211)
    plt.xlabel("Time (secs)")
    plt.ylabel("Altitude(m)")
    plt.plot(ticks, heights)
    # Bottom panel: vertical velocity over time.
    plt.subplot(212)
    plt.xlabel("Time (secs)")
    plt.ylabel("Velocity(m/s)")
    plt.plot(ticks, speeds)
    plt.show()
| mirman-school/hoc-rocketflight | visualizer.py | visualizer.py | py | 442 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "mat... |
6791731710 | """
Class for acting as a server inside of a simpy simulation. This server is nothing
more than a resource with some additional patches.
@author Tycho Atsma <tycho.atsma@gmail.com>
@file lib/Server.py
"""
# dependencies
from simpy import PreemptiveResource
from numpy.random import exponential, uniform
class Server(PreemptiveResource):
def __init__(self, *args, **kwargs):
"""
Constructor.
Parameters
----------
@see simpy.Resource
Keyworded arguments
-------------------
uuid: string
UUID as identifier for this server.
kind: string
Kind of the server (e.g. balance, regular, database).
Optiomal Keyworded arguments
memmax: integer:
Set scalar how many times the max capacity fits in memory
Default = 10
"""
# call the parent constructor
super().__init__(*args)
# reference to the environment and capacity
self._env = args[0]
# Set memory max capacity
if 'memmax' in kwargs:
self.memmax = kwargs['memmax']
else:
# Default is 10 times the capacity
self.memmax = 10
# Set latencyscaler capacity
if 'latencyscaler' in kwargs:
self.latencyscaler = kwargs['latencyscaler']
else:
# Default is 1 times the capacity
self.latencyscaler = 1
# setup the initial state of this server
self._state = {
'name': "%s#%s" % (kwargs['kind'], kwargs['uuid']),
'kind': kwargs['kind'],
'queue': len(self.queue),
'users': self.count,
'cpu': 0,
'memory': 0,
'latency': 0,
}
def environment(self):
"""
Getter to expose the environment.
Returns
-------
Environment
"""
return self._env
def get_capacity(self):
"""
Getter to expose the server capacity.
Returns
-------
int
"""
return self.capacity
def request(self, *args, **kwargs):
"""
Method override to request a context in which this resource can be accessed.
@see https://simpy.readthedocs.io/en/latest/api_reference/simpy.resources.html#simpy.resources.resource.Request
Keyworded parameters
----------
priority: int
See simpy.PreemptiveResource.request.
"""
# parse parameters for the super class method
priority = kwargs['priority'] if 'priority' in kwargs else 1
# call the parent class for the original method
return super().request(priority=priority)
def state(self):
"""
Method to expose the current state of a server.
Returns
-------
dict
"""
self._state.update(queue=len(self.queue),
users=self.count,
cpu=self.cpu(),
memory=self.memory(),
latency=self.latency()
)
return self._state
def latency(self):
"""
Method to expose the server latency.
Returns
-------
float
"""
# state = self.state()
# Temporalily update the amount of users by adding one to reflect the
# incoming transaction
# expose a random value based on an exponential distribution, scaled
# with the cpu usage
# return exponential(self.cpu())
latency = exponential(self.cpu()) * self.latencyscaler
return latency
def memory(self):
"""
Method to expose the server's memory usage.
Returns
-------
float
"""
# # get the current state
# state = self.state()
# expose the calculated memory usage based on the queue, users, and
# scaled capacity
return (self.count + len(self.queue)) / (self.capacity * self.memmax)
def cpu(self):
"""
Method to expose the server's cpu usage.
Optional parameters:
addition (defaul = 0), possibility to add user to keep values non-zero
Returns
-------
float
"""
# # get the current state
# state = self.state()
# expose the cpu load
return self.count / self.capacity
def faulty_patch(self, state):
# error function to increase the latency scaler tenfold when true
if state:
self.latencyscaler = 10
if not state:
self.latencyscaler = 1
| miloshdrago/discrete-event-simulation-ing | app/lib/Server.py | Server.py | py | 4,703 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "simpy.PreemptiveResource",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.random.exponential",
"line_number": 139,
"usage_type": "call"
}
] |
37005905790 | from pathlib import Path
import pytest
from helpers.regression import verify_output
RINOH_PATH = Path(__file__).parent / 'rinoh'
OUTPUT_PATH = Path(__file__).parent / 'output'
def test_version(script_runner):
ret = script_runner.run('rinoh', '--version')
assert ret.success
assert ret.stderr == ''
def collect_tests():
    """Yield the stem of every .rst regression input, in sorted order."""
    rst_files = sorted(RINOH_PATH.glob('*.rst'))
    yield from (rst_file.stem for rst_file in rst_files)
@pytest.mark.parametrize('test_name', collect_tests())
def test_rinoh(script_runner, test_name):
rst_path = Path(test_name + '.rst')
args = []
templconf_path = rst_path.with_suffix('.rtt')
if (RINOH_PATH / templconf_path).exists():
args += ['--template', str(templconf_path)]
stylesheet_path = rst_path.with_suffix('.rts')
if (RINOH_PATH / stylesheet_path).exists():
args += ['--stylesheet', str(stylesheet_path)]
output_dir = OUTPUT_PATH / ('rinoh_' + test_name)
output_dir.mkdir(parents=True, exist_ok=True)
ret = script_runner.run('rinoh', *args, str(rst_path),
'--output', str(output_dir), cwd=RINOH_PATH)
assert ret.success
verify_output(test_name, output_dir, RINOH_PATH)
| Chris-Jr-Williams/rinohtype | tests_regression/test_rinoh.py | test_rinoh.py | py | 1,189 | python | en | code | null | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "helpers.regression.verify_output... |
27662925156 | """"
MeGaNeKo 2022 - https://github.com/MeGaNeKoS/Discord-Bot-Template
Description:
This is a template to create your own discord bot in python.
Version: 1.0
"""
import logging
class STDERRLogger:
    """File-like object that forwards stderr writes to a logger.

    Assign an instance to ``sys.stderr``; each newline-terminated chunk
    is emitted as a single ERROR record to both ``stderr.log`` and the
    console.
    """

    def __init__(self):
        self.logger = logging.getLogger("STDERR")
        # getLogger returns a process-wide singleton: attach the handlers
        # only once, otherwise every new STDERRLogger instance would add
        # another pair and duplicate each record.
        if not self.logger.handlers:
            formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
            file_handler = logging.FileHandler('stderr.log')
            file_handler.setFormatter(formatter)
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)
            self.logger.addHandler(stream_handler)
        # Partial writes are buffered until a newline completes the line.
        self.buf = []

    def write(self, msg):
        """Buffer *msg*; on a trailing newline, flush one record to the logger."""
        if msg.endswith('\n'):
            self.buf.append(msg.removesuffix('\n'))
            self.logger.error(''.join(self.buf))
            self.buf = []
        else:
            self.buf.append(msg)

    def flush(self):
        """No-op: required by the file protocol (e.g. print() calls it)."""
        pass
| MeGaNeKoS/Discord-Bot-Template | utils/sys_logger.py | sys_logger.py | py | 933 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.Strea... |
30502653408 | from collections import defaultdict
f = open('/Users/moishe/src/aoc-21/day-10/input/input.txt')
xlat_closer = {
'>': '<',
']': '[',
'}': '{',
')': '('
}
score_lookup = {
')': 3,
']': 57,
'}': 1197,
'>': 25137
}
counters = defaultdict(int)
score = 0
for l in f:
l = l.rstrip()
current_open = []
for (idx, c) in enumerate(l):
if c in ['(', '[', '<', '{']:
current_open.append(c)
else:
last_open = current_open.pop()
if last_open != xlat_closer[c]:
score += score_lookup[c]
print("corrupt closer: expected %c to be closed but found %c instead" % (last_open, c))
print(score) | Moishe/aoc-21 | day-10/part-1.py | part-1.py | py | 640 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 19,
"usage_type": "call"
}
] |
72178607465 |
import falcon
from math import sqrt
from wsgiref.simple_server import make_server
class Calc():
    """Falcon resource that solves quadratic equations a*x^2 + b*x + c = 0."""

    def on_get(self, req, resp):
        """GET: placeholder response while waiting for coefficients."""
        qs = req.params
        resp.body = "Waiting for input\n"
        resp.status = falcon.HTTP_200

    def on_post(self, req, resp):
        """POST: parse 'a=..&b=..&c=..' from the raw body and return the roots."""
        raw = req.stream.read(4096)
        fields = raw.split(b'&')
        a = int(fields[0].split(b'=')[1])
        b = int(fields[1].split(b'=')[1])
        c = int(fields[2].split(b'=')[1])
        resp.body = self.calc_x(a, b, c)
        resp.status = falcon.HTTP_200

    def calc_determinant(self, a, b, c):
        """Return the discriminant b^2 - 4ac."""
        return b * b - 4 * a * c

    def calc_x(self, a, b, c):
        """Return a human-readable description of the equation's roots."""
        disc = self.calc_determinant(a, b, c)
        denom = 2 * a
        if disc > 0:
            root = sqrt(disc)
            x1 = (-b + root) / denom
            x2 = (-b - root) / denom
            return "Roots are real and different.\nx1 = {}\nx2 = {}".format(x1, x2)
        if disc == 0:
            x1 = (-b + sqrt(disc)) / denom
            return "Roots are real and same.\nx1 = x2 = {}".format(x1)
        re_part = -b / denom
        im_part = sqrt(-disc) / denom
        return "Roots are complex and different.\nx1 = {0}+{1}i\nx2 = {0}-{1}i\n".format(re_part, im_part)
api = falcon.API()
r = Calc()
api.add_route('/',r)
ser = make_server('', 8080, api)
ser.serve_forever()
| Lovelykira/Microservices | DBServer/db_server.py | db_server.py | py | 1,388 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "falcon.HTTP_200",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "falcon.HTTP_200",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"... |
32752187332 | from multiprocessing import Pool
from argparse import ArgumentParser
from tbselenium.tbdriver import TorBrowserDriver
from tbselenium.utils import launch_tbb_tor_with_stem
from tbselenium.common import STEM_SOCKS_PORT, USE_RUNNING_TOR,\
STEM_CONTROL_PORT
JOBS_IN_PARALLEL = 3
def run_in_parallel(inputs, worker, no_of_processes=JOBS_IN_PARALLEL):
    """Run *worker* over *inputs* in a pool of *no_of_processes* processes.

    Blocks until all jobs finish. Results are discarded: the workers are
    run for their side effects.
    """
    # The context manager terminates and reclaims the pool on exit;
    # the original created the Pool and never closed or joined it.
    with Pool(no_of_processes) as pool:
        pool.map(worker, inputs)
def visit_check_tpo_with_stem(tbb_dir):
url = "https://check.torproject.org"
with TorBrowserDriver(tbb_dir,
socks_port=STEM_SOCKS_PORT,
control_port=STEM_CONTROL_PORT,
tor_cfg=USE_RUNNING_TOR) as driver:
driver.load_url(url, wait_on_page=3)
print(driver.find_element_by("h1.on").text)
print(driver.find_element_by(".content > p").text)
def launch_browsers_in_parallel(tbb_path):
tor_process = launch_tbb_tor_with_stem(tbb_path=tbb_path)
run_in_parallel(JOBS_IN_PARALLEL * [tbb_path],
visit_check_tpo_with_stem)
tor_process.kill()
def main():
desc = "Visit check.torproject.org website running 3 browsers in parallel"
parser = ArgumentParser(description=desc)
parser.add_argument('tbb_path')
args = parser.parse_args()
launch_browsers_in_parallel(args.tbb_path)
if __name__ == '__main__':
main()
| webfp/tor-browser-selenium | examples/parallel.py | parallel.py | py | 1,379 | python | en | code | 483 | github-code | 36 | [
{
"api_name": "multiprocessing.Pool",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tbselenium.tbdriver.TorBrowserDriver",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tbselenium.common.STEM_SOCKS_PORT",
"line_number": 19,
"usage_type": "name"
}... |
18913990683 | from collections import deque
def neighbors(position: tuple[int, int, int]) -> list[tuple[int, int, int]]:
    """Return the four orthogonal neighbours of (x, y), each carrying step + 1."""
    row, col, step = position
    # Same order as the original literal list: down, up, right, left.
    deltas = ((1, 0), (-1, 0), (0, 1), (0, -1))
    return [(row + dr, col + dc, step + 1) for dr, dc in deltas]
class Solution:
    """Shortest path (in steps) from the maze entrance to any border exit."""

    def nearestExit(self, maze: list[list[str]], entrance: list[int]) -> int:
        """BFS over empty ('.') cells; returns the fewest steps to an exit, or -1."""
        assert maze
        # Maximum valid row/column indices (grid is 0-indexed).
        height = len(maze) - 1
        width = len(maze[0]) - 1

        def in_maze(x: int, y: int) -> bool:
            return 0 <= x <= height and 0 <= y <= width

        def is_exit(x: int, y: int) -> bool:
            # Any border cell counts as an exit; the entrance itself is
            # never re-tested because it is pre-added to `seen`.
            return x == 0 or y == 0 or x == height or y == width

        seen = set()
        positions: deque[tuple[int, int, int]] = deque()
        start_x, start_y = entrance
        seen.add((start_x, start_y))
        positions.append((start_x, start_y, 0))
        while positions:
            position = positions.popleft()
            for neighbor in neighbors(position):
                x, y, step = neighbor
                if (x, y) in seen or not in_maze(x, y):
                    continue
                seen.add((x, y))
                if maze[x][y] == ".":
                    if is_exit(x, y):
                        # BFS explores in step order, so the first exit
                        # reached is the nearest one.
                        return step
                    positions.append(neighbor)
        return -1
| lancelote/leetcode | src/nearest_exit_from_entrance_in_maze.py | nearest_exit_from_entrance_in_maze.py | py | 1,325 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 28,
"usage_type": "name"
}
] |
35866822598 | import sys,os
import gmsh
import numpy as np
from mpi4py import MPI
import dolfinx
dir = os.path.dirname(__file__)
gmsh.initialize()
lc = 0.2
num_airfoil_refinement = 100
L = 4
H = 1
gdim = 2
"""
Defining the shape of the airfoil using Bézier Curves
Geometrical parameters of the airfoil according to Xiaoqiang et al
"""
c_1, c_2 = 0.5, 0.5 # Coefficients of camber-line-abscissa parameter equation
c_3, c_4 = 0.1, 0.05 # Coefficients of camber-line.ordinate parameter equation
coeff_thick = [0.2969,-0.126,-0.3516,0.2843,-0.1036] # Coefficients of the thickness equation taken from the NACA 2412 Airfoil
# Bézier curves with control parameter k \in [0,1]
x_c = lambda k: 3 * c_1 * k * (1 - k)**2 + 3 * c_2 * (1-k) * k**2 + k**3 # Camber-line-abscissa
y_c = lambda k: 3 * c_3 * k * (1 - k)**2 + 3 * c_4 * (1-k) * k**2 # Camber-line-abscissa
# Thickness equation for a 6% thick airfoil
thickness = lambda x: 0.06 / 0.1 * (coeff_thick[0] * np.sqrt(x) + coeff_thick[1] * x + coeff_thick[2] * x**2 + coeff_thick[3] * x**3 + coeff_thick[4] * x**4)
# Position of the airfoil in the computational domain defined by the coordinates of the leading edge
leading_edge_x = np.min([L/8,0.5])
leading_edge_y = H/2
# Upper and lower surface of the airfoil
x_u = x_l = lambda k: leading_edge_x + x_c(k)
y_u = lambda k: leading_edge_y + y_c(k) + thickness(x_c(k))
y_l = lambda k: leading_edge_y + y_c(k) - thickness(x_c(k))
# Calculate maximal thickness of the airfoil
thickness_max = np.max([thickness(x) for x in np.linspace(0,1,num_airfoil_refinement)])
"""
Meshing the airfoil using gmsh
"""
rectangle = gmsh.model.occ.addRectangle(0,0,0, L, H, tag=3)
# Define lower curve of the airfoil using the BSplines and given points
points_lower_curve=[]
for k in np.linspace(0,1,num_airfoil_refinement):
points_lower_curve.append(gmsh.model.occ.addPoint(x_l(k), y_l(k), 0.0, lc))
# Define upper curve of the airfoil using the BSplines and given points
points_upper_curve=[points_lower_curve[0]]
for k in np.linspace(0,1,num_airfoil_refinement)[1:-1]:
points_upper_curve.append(gmsh.model.occ.addPoint(x_u(k), y_u(k), 0.0, lc))
points_upper_curve.append(points_lower_curve[-1])
C1 = gmsh.model.occ.addBSpline(points_lower_curve, degree=3)
C2 = gmsh.model.occ.addBSpline(points_upper_curve, degree=3)
# Create airfoil and cut out of computational domain
W = gmsh.model.occ.addWire([C1,C2])
obstacle=gmsh.model.occ.addPlaneSurface([W])
# Remove points of the airfoil
for i in list(dict.fromkeys(points_lower_curve + points_upper_curve)):
gmsh.model.occ.remove([(0, i)])
# Cut out airfoil from computational domain
fluid = gmsh.model.occ.cut([(gdim, rectangle)], [(gdim, obstacle)])
gmsh.model.occ.synchronize()
# Create a distance field to the airfoil
distance_field = gmsh.model.mesh.field.add("Distance")
gmsh.model.mesh.field.setNumbers(distance_field, "CurvesList", [C1, C2])
gmsh.model.mesh.field.setNumbers(distance_field,"PointsList", [points_lower_curve[0],points_lower_curve[-1]])
gmsh.model.mesh.field.setNumber(distance_field,"Sampling", num_airfoil_refinement*2)
# Create refined mesh using a threshold field
refinement= gmsh.model.mesh.field.add("Threshold")
gmsh.model.mesh.field.setNumber(refinement, "IField", distance_field)
# Set the refinement levels (LcMin for the mesh size in the refined region, LcMax for the mesh size far from the refined region)
gmsh.model.mesh.field.setNumber(refinement, "LcMin", lc/5)
gmsh.model.mesh.field.setNumber(refinement, "LcMax", lc*2)
# Set the threshold value where which refinement should be applied
gmsh.model.mesh.field.setNumber(refinement, "DistMin", thickness_max/2)
gmsh.model.mesh.field.setNumber(refinement, "DistMax", thickness_max)
# Set the field as background mesh
gmsh.model.mesh.field.setAsBackgroundMesh(refinement)
# 8=Frontal-Delaunay for Quads
gmsh.option.setNumber("Mesh.Algorithm", 8)
# 2=simple full-quad
gmsh.option.setNumber("Mesh.RecombinationAlgorithm", 2)
# Apply recombination algorithm
gmsh.option.setNumber("Mesh.RecombineAll", 1)
# Mesh subdivision algorithm
gmsh.option.setNumber("Mesh.SubdivisionAlgorithm", 1)
# Mesh generation
gmsh.model.mesh.generate(gdim)
# Mesh order
gmsh.model.mesh.setOrder(1)
# Mesh optimisation
gmsh.model.mesh.optimize("Netgen")
"""
Defining boundary markers for the mesh
"""
fluid_marker, wall_marker, obstacle_marker = 1, 1, 2
wall, obstacle = [], []
surfaces = gmsh.model.getEntities(dim=gdim)
boundaries = gmsh.model.getBoundary(surfaces, oriented=False)
gmsh.model.addPhysicalGroup(surfaces[0][0], [surfaces[0][1]], fluid_marker)
gmsh.model.setPhysicalName(surfaces[0][0], fluid_marker, "Fluid")
for boundary in boundaries:
center_of_mass = gmsh.model.occ.getCenterOfMass(boundary[0], boundary[1])
if np.allclose(center_of_mass, [0, H/2, 0]):
wall.append(boundary[1])
elif np.allclose(center_of_mass, [L, H/2, 0]):
wall.append(boundary[1])
elif np.allclose(center_of_mass, [L/2, H, 0]):
wall.append(boundary[1])
elif np.allclose(center_of_mass, [L/2, 0, 0]):
wall.append(boundary[1])
else:
obstacle.append(boundary[1])
# Set physical markers for the boundaries
gmsh.model.addPhysicalGroup(gdim-1, wall, wall_marker)
gmsh.model.setPhysicalName(gdim-1, wall_marker, "wall")
gmsh.model.addPhysicalGroup(gdim-1, obstacle, obstacle_marker)
gmsh.model.setPhysicalName(gdim-1, obstacle_marker, "obstacle")
gmsh.model.occ.remove(gmsh.model.getEntities(dim=gdim-1))
gmsh.model.occ.remove(gmsh.model.getEntities(dim=gdim))
gmsh.model.occ.remove(gmsh.model.getEntities(dim=0))
gmsh.write(os.path.join(dir,"mesh.msh"))
gmsh.fltk.run()
# Import mesh in dolfinx
gmsh_model_rank = 0
mesh_comm = MPI.COMM_WORLD
mesh, cell_tags, facet_tags = dolfinx.io.gmshio.read_from_msh(os.path.join(dir,"mesh.msh"), mesh_comm, gmsh_model_rank, gdim=gdim)
with dolfinx.io.XDMFFile(MPI.COMM_WORLD, "mesh.xdmf","w") as mesh_file_xdmf:
mesh_file_xdmf.write_mesh(mesh)
mesh_file_xdmf.write_meshtags(cell_tags, mesh.geometry)
mesh_file_xdmf.write_meshtags(facet_tags, mesh.geometry) | niravshah241/MDFEniCSx | demo/3_airfoil_displacement/mesh_data/mesh.py | mesh.py | py | 6,178 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "gmsh.initialize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_n... |
11473308179 | from typing import Optional, Union
import numpy as np
import pandas as pd
from lightfm import LightFM
from lightfm.evaluation import precision_at_k
from scipy import sparse as sps
from .base import BaseRecommender
class FMRecommender(BaseRecommender):
"""FM recommender based on `LightFM`.
Args:
interaction (pd.DataFrame): user-item interaction matrix.
items (Optional[pd.DataFrame]): item side information.
no_components (int): the dimensions of user/item embeddings.
item_alpha (float): L2 penalty on item features.
user_alpha (float): L2 penalty on user features.
"""
def __init__(
self,
interaction: pd.DataFrame,
items: Optional[pd.DataFrame],
test_size: Union[float, int] = 0.3,
random_split: bool = True,
no_components: int = 10,
item_alpha: float = 0,
user_alpha: float = 0,
loss: str = "bpr",
) -> None:
super().__init__(interaction, items, test_size, random_split)
self.fm = LightFM(
no_components=no_components,
item_alpha=item_alpha,
user_alpha=user_alpha,
loss=loss,
random_state=42,
)
def _fit(self) -> None:
self.fm.fit(self.train_mat, epochs=30)
def predict(
self,
user_ids: np.ndarray,
item_ids: np.ndarray,
user_features: Optional[sps.csr_matrix] = None,
item_features: Optional[sps.csr_matrix] = None,
) -> np.ndarray:
prediction: np.ndarray = self.fm.predict(
user_ids, item_ids, user_features, item_features
)
return prediction
def precision_at_top_k(self, top_k: int = 5) -> float:
precision: float = precision_at_k(self.fm, self.test_mat, k=top_k).mean()
return precision
| smartnews/rsdiv | src/rsdiv/recommenders/fm.py | fm.py | py | 1,837 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "base.BaseRecommender",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pandas.Dat... |
8670554139 | import logging
from cterasdk import CTERAException
def unsuspend_filer_sync(self=None, device_name=None, tenant_name=None):
    """Unsuspend cloud sync on a single filer device.

    :param self: portal session object exposing ``.devices`` -- presumably
        a CTERA GlobalAdmin/ServicesPortal handle; TODO confirm.
    :param device_name: name of the device to act on.
    :param tenant_name: tenant the device belongs to.

    Best-effort task: errors are logged and swallowed.
    """
    logging.info("Starting unsuspend sync task.")
    try:
        device = self.devices.device(device_name, tenant_name)
        device.sync.unsuspend()
        logging.info("Unsuspended sync on %s", device.name)
    except Exception as e:
        # Deliberate broad catch so a failing device does not abort a batch run.
        logging.warning(e)
        logging.error("Error unsuspending sync")
| ctera/ctools | unsuspend_sync.py | unsuspend_sync.py | py | 480 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_nu... |
32860540539 | from django.http import HttpResponse
from django.shortcuts import render,redirect
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
from django.contrib.auth import authenticate,login
from .forms import SignUpForm
# Create your views here.
def signup_view(request):
if request.method == 'POST':
f = SignUpForm(request.POST)
if f.is_valid():
user=f.save(commit=False)
user.is_active = False
user.save()
current_site = get_current_site(request)
mail_subject = 'Activate your blog account.'
message = render_to_string('accounts/acc_active_email.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
'token': account_activation_token.make_token(user),
})
to_email = f.cleaned_data.get('email')
email = EmailMessage(
mail_subject, message, to=[to_email]
)
email.send()
return HttpResponse('Please confirm your email address to complete the registration')
"""username=f.cleaned_data.get('username')
raw_pswd=f.cleaned_data.get('password1')
email=f.cleaned_data.get('email')
user=authenticate(username=username, password=raw_pswd)
login(request,user)
print(f)
return render(request,'accounts/layout.html',{'user':username,'email':email})"""
#return redirect('list')
else:
f = SignUpForm()
return render(request, 'accounts/signup.html', {'form': f})
def activate(request, uidb64, token):
    """Activate a user account from the emailed confirmation link.

    Decodes the base64 user pk, verifies the one-time token, and on
    success marks the user active and logs them in.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except(TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    # Guard clause: anything short of a valid user + valid token is rejected.
    if user is None or not account_activation_token.check_token(user, token):
        return HttpResponse('Activation link is invalid!')
    user.is_active = True
    user.save()
    login(request, user)
    return HttpResponse('Thank you for your email confirmation. Now you can login your account.')
{
"api_name": "forms.SignUpForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.contrib.sites.shortcuts.get_current_site",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 22,
"usage_type... |
35022463878 | # -*- coding: utf-8 -*-
"""
Authors: Ioanna Kandi & Konstantinos Mavrogiorgos
"""
# code
import numpy as np
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import json
from flask import *
from flask_cors import CORS, cross_origin
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# App config.
app = Flask(__name__, static_url_path='',
static_folder='templates',
template_folder='templates')
DEBUG = True
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
CORS(app)
@app.route("/")
@cross_origin()
def home():
    """Render the landing page of the recommender UI."""
    return render_template("index.html")
ratings = pd.read_csv("DATA_S2/q1-dataset/user_shows_new1.txt")
#ratings.head()
#ratings.insert(0, 'userId', range(1, 1 + len(ratings)))
#ratings.to_csv('user_shows_new.txt', index=False)
#transform the given dataset
shows = pd.read_csv("DATA_S2/q1-dataset/shows_new1.txt")
#shows.head()
#conversion of dataset columns to rows
new_ratings=pd.melt(ratings,id_vars=["userId"],
var_name="show",
value_name="Watched")
#userBasedApproach = []
#new_ratings.to_csv('user_shows_new1.txt', index=False)
n_ratings = len(ratings)
#n_shows = len(shows)
n_shows = len(ratings['Show'].unique())
n_users = len(ratings['userId'].unique())
#print(f"Number of ratings: {n_ratings}")
#print(f"Number of unique Show's: {n_shows}")
#print(f"Number of unique users: {n_users}")
#print(f"Average shows watched per user: {round(n_ratings/n_users, 2)}")
#print(f"Average watched status per show: {round(n_ratings/n_shows, 2)}")
# =============================================================================
# user_freq = ratings[['userId', 'Show']].groupby('userId').count().reset_index()
# user_freq.columns = ['userId', 'n_ratings']
# user_freq.head()
# =============================================================================
# =============================================================================
# # Find Lowest and Highest rated shows:
# mean_rating = ratings.groupby('Show')[['rating']].mean()
# # Lowest rated shows
# lowest_rated = mean_rating['rating'].idxmin()
# shows.loc[shows['Show'] == lowest_rated]
# # Highest rated shows
# highest_rated = mean_rating['rating'].idxmax()
# shows.loc[shows['Show'] == highest_rated]
# # show number of people who rated shows rated show highest
# ratings[ratings['Show']==highest_rated]
# # show number of people who rated shows rated show lowest
# ratings[ratings['Show']==lowest_rated]
#
# ## the above shows has very low dataset. We will use bayesian average
# show_stats = ratings.groupby('Show')[['rating']].agg(['count', 'mean'])
# show_stats.columns = show_stats.columns.droplevel()
# =============================================================================
# Now, we create user-item matrix using scipy csr matrix
from scipy.sparse import csr_matrix
def create_matrix(df):
    """Build a sparse show-by-user watch matrix from the ratings frame.

    Expects columns ``userId``, ``Show`` and ``Watched``.  Returns a
    5-tuple: the CSR matrix ``X`` (rows = shows, columns = users) plus
    four dicts mapping ids to matrix indices and back.
    """
    user_ids = np.unique(df["userId"])
    show_ids = np.unique(df["Show"])
    n_users = len(df['userId'].unique())
    n_shows = len(df['Show'].unique())
    # id -> index and index -> id lookups for both axes
    user_mapper = {uid: idx for idx, uid in enumerate(user_ids)}
    show_mapper = {sid: idx for idx, sid in enumerate(show_ids)}
    user_inv_mapper = dict(enumerate(user_ids))
    show_inv_mapper = dict(enumerate(show_ids))
    user_index = [user_mapper[uid] for uid in df['userId']]
    show_index = [show_mapper[sid] for sid in df['Show']]
    X = csr_matrix((df["Watched"], (show_index, user_index)),
                   shape=(n_shows, n_users))
    return X, user_mapper, show_mapper, user_inv_mapper, show_inv_mapper
X, user_mapper, show_mapper, user_inv_mapper, show_inv_mapper = create_matrix(ratings)
from sklearn.neighbors import NearestNeighbors
"""
Find similar shows using KNN
"""
def find_similar_shows(show_title, X, k, metric='cosine', show_distance=False):
    """Return the titles of the ``k`` shows most similar to ``show_title``.

    Fits a brute-force KNN over the show/user matrix ``X`` and looks up
    the nearest rows to the row of ``show_title``.  The query show (its
    own nearest neighbour) is dropped from the result.

    :param show_title: title used as the query (must be in show_mapper)
    :param X: sparse show-by-user matrix
    :param k: number of similar shows to return
    :param metric: distance metric for the KNN model
    :param show_distance: if True, also request distances from kneighbors
    """
    show_ind = show_mapper[show_title]
    show_vec = X[show_ind].reshape(1, -1)
    # Ask for one extra neighbour: the closest match is the show itself.
    kNN = NearestNeighbors(n_neighbors=k + 1, algorithm="brute", metric=metric)
    kNN.fit(X)
    neighbour = kNN.kneighbors(show_vec, return_distance=show_distance)
    # Bug fix: with return_distance=True, kneighbors returns a
    # (distances, indices) tuple; calling .item(i) on it crashed.
    indices = neighbour[1] if show_distance else neighbour
    neighbour_ids = [show_inv_mapper[indices.item(i)] for i in range(k + 1)]
    # Drop the query show itself.
    neighbour_ids.pop(0)
    return neighbour_ids
"""
Find similar users using KNN
"""
def find_similar_users(userId, X, k, metric='cosine', show_distance=False):
    """Return the ids of the ``k`` users most similar to ``userId``.

    Mirrors :func:`find_similar_shows` but queries by user row.  The
    query user (their own nearest neighbour) is dropped from the result.

    :param userId: id of the query user (must be in user_mapper)
    :param X: sparse matrix whose rows are indexed by user_mapper
    :param k: number of similar users to return
    :param metric: distance metric for the KNN model
    :param show_distance: if True, also request distances from kneighbors
    """
    user_ind = user_mapper[userId]
    user_vec = X[user_ind].reshape(1, -1)
    # Ask for one extra neighbour: the closest match is the user itself.
    kNN = NearestNeighbors(n_neighbors=k + 1, algorithm="brute", metric=metric)
    kNN.fit(X)
    neighbour = kNN.kneighbors(user_vec, return_distance=show_distance)
    # Bug fix: with return_distance=True, kneighbors returns a
    # (distances, indices) tuple; calling .item(i) on it crashed.
    indices = neighbour[1] if show_distance else neighbour
    neighbour_ids = [user_inv_mapper[indices.item(i)] for i in range(k + 1)]
    # Drop the query user itself.
    neighbour_ids.pop(0)
    return neighbour_ids
shows_titles = shows['Show']
show_title = 'The Situation Room with Wolf Blitzer'
userId = 1
#these are for testing
'''
similar_shows = find_similar_shows(show_title, X, k=10)
similar_users = find_similar_users(userId, X, k=3)
print(f"Since you watched {show_title}")
print(similar_shows)
print(similar_users)
watched_shows = ratings.loc[ratings['Watched'] == 1]
watched_shows = watched_shows.drop_duplicates('Show', keep='first')
for userId in similar_users:
userBasedApproach.append(watched_shows.iloc[userId]['Show'])
#convert list to JSON
userBasedApproachJson = json.dumps(userBasedApproach)
print(userBasedApproachJson)
'''
#this is the endpoint retrieving similar shows based on watched shows
@app.route("/getShows",methods=['GET', 'POST'])
@cross_origin()
def getShows():
    """Recommendation endpoint.

    Query params: ``search_parameter`` (a show title or a user id) and
    ``search_method`` (``'item'`` or ``'user'``).  Responds with a JSON
    list of show titles, or a 500 JSON error for unknown methods and
    non-GET requests.
    """
    if request.method == 'GET':
        search_parameter = request.args.get('search_parameter')
        search_method = request.args.get('search_method')
        #check the search parameter to perform an item-based or user-based search
        if search_method == "item":
            similar_shows = find_similar_shows(search_parameter, X, k=10)
            similar_shows_JSON = json.dumps(similar_shows)
            return Response(similar_shows_JSON, status=200, mimetype="application/json")
        elif search_method == "user":
            #this is a list to store all the found shows
            userBasedApproach = []
            similar_users = find_similar_users(int(search_parameter), X, k=10)
            watched_shows = ratings.loc[ratings['Watched'] == 1]
            watched_shows = watched_shows.drop_duplicates('Show', keep='first')
            # NOTE(review): .iloc[userId] selects by *position* in the
            # deduplicated frame, not by user-id label -- verify intent.
            for userId in similar_users:
                userBasedApproach.append(watched_shows.iloc[userId]['Show'])
            #convert list to JSON
            similar_shows_JSON = json.dumps(userBasedApproach)
            return Response(similar_shows_JSON, status=200, mimetype="application/json")
    # Fallthrough: unsupported method/parameters.
    return Response('{"status":"error"}', status=500, mimetype="application/json")
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=5000) | ioannakandi/recSys | main.py | main.py | py | 6,875 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "warnings.simplefilter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask_cors.cross_origin",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.... |
24495985471 | from __future__ import print_function, unicode_literals
from ._util import compact_json_dumps, TERMINALS, NONTERMINALS
from ._util import follow_path, in_array, in_object, range, compat_kwargs
import copy
import bisect
import sys
import json
def diff(left_struc, right_struc,
         array_align=True, compare_lengths=True, common_key_threshold=0.0,
         verbose=True, key=None):
    '''Compose a sequence of diff stanzas sufficient to convert the
    structure ``left_struc`` into the structure ``right_struc``.

    Optional parameters:

    ``array_align``: Use :py:func:`needle_diff` to compute deltas
    between arrays.  Computationally expensive, but likely to produce
    shorter diffs.  If set to the string ``'udiff'``,
    :py:func:`needle_diff` will optimize for the shortest udiff
    instead of the shortest JSON-format diff.

    ``compare_lengths``: If ``[[key, right_struc]]`` can be encoded as
    a shorter JSON-string, return it instead of the recursive diff.

    ``common_key_threshold``: Skip recursion when the fraction of keys
    the structures share (see :func:`commonality`) is below this float
    in ``[0.0, 1.0]``.

    ``verbose``: Print compression statistics to stderr (top-level
    call only).

    ``key`` is a keypath prefix used by the mutual recursion with
    :py:func:`needle_diff` and :py:func:`keyset_diff`.
    '''
    if key is None:
        key = []
    # Options forwarded through the mutual recursion (verbose is
    # intentionally excluded: stats are only printed at the top level).
    options = {'common_key_threshold': common_key_threshold,
               'array_align': array_align,
               'compare_lengths': compare_lengths}
    # Skip the commonality() scan entirely when the threshold is disabled.
    common = (0.0 if common_key_threshold == 0.0 else
              commonality(left_struc, right_struc))
    if (common < common_key_threshold
        or not structure_comparable(left_struc, right_struc)):
        my_diff = this_level_diff(left_struc, right_struc, key, common)
    elif array_align:
        my_diff = needle_diff(left_struc, right_struc, key, options)
    else:
        my_diff = keyset_diff(left_struc, right_struc, key, options)
    # Prefer a wholesale replacement stanza when it serializes shorter.
    if (compare_lengths and
        (len(compact_json_dumps([[key[:], copy.copy(right_struc)]])) <
         len(compact_json_dumps(my_diff)))):
        my_diff = [[key[:], copy.copy(right_struc)]]
    if key == []:
        # Only the top-level call sorts stanzas and reports statistics.
        my_diff = sort_stanzas(my_diff)
        if verbose:
            msg = ('Size of delta {:.3f}% size of original '
                   '(original: {} chars, delta: {} chars)')
            # Bug fix: ``file=sys.stderr`` was previously passed to
            # str.format() -- which silently ignores unused kwargs -- so
            # the stats went to stdout.  Pass it to print() as intended.
            print(msg.format(*compute_diff_stats(right_struc, my_diff, True)),
                  file=sys.stderr)
    return my_diff
def compute_diff_stats(target, diff, percent=True):
    '''Measure how well ``diff`` compresses relative to ``target``.

    Both values are serialized with :func:`compact_json_dumps`.  The
    return value is ``(ratio, target_size, diff_size)``, where ``ratio``
    is the diff size over the target size, scaled to a percentage when
    ``percent`` is true in a Boolean context.

    >>> compute_diff_stats([{}, 'foo', 'bar'], [], False)
    (0.125, 16, 2)
    >>> compute_diff_stats([{}, 'foo', 'bar'], [[0], {}])
    (50.0, 16, 8)
    '''
    diff_size = len(compact_json_dumps(diff))
    target_size = len(compact_json_dumps(target))
    ratio = float(diff_size) / target_size
    if percent:
        ratio = ratio * 100
    return ratio, target_size, diff_size
def needle_diff(left_struc, right_struc, key, options={}):
    '''Returns a diff between ``left_struc`` and ``right_struc``.

    If ``left_struc`` and ``right_struc`` are both serializable as
    arrays, this function will use a Needleman-Wunsch sequence
    alignment to find a minimal diff between them.  Otherwise, the
    inputs are passed on to :func:`keyset_diff`.

    This function probably shouldn't be called directly.  Instead, use
    :func:`diff`, which is mutually recursive with this function and
    :func:`keyset_diff` anyway.

    NOTE(review): if ``right_struc`` is empty, ``winner`` at the end is
    unbound (NameError).  :func:`diff` only routes non-empty structures
    here (via :func:`structure_comparable`), so this path appears
    unreachable in normal use -- confirm before calling directly.
    '''
    if type(left_struc) not in (list, tuple):
        return keyset_diff(left_struc, right_struc, key, options)
    assert type(right_struc) in (list, tuple)
    # Column index at which "append" (rather than insert-and-shift)
    # candidates are generated.
    down_col = 0
    # Seed row of the dynamic program: lastrow[c] is the diff that deletes
    # the trailing elements of left_struc, one stanza per deleted index.
    lastrow = [
        [[key + [sub_i]] for sub_i in range(i)]
        for i in range(len(left_struc), -1, -1)
    ]
    # The four candidate builders below are closures over the loop
    # variables col_i / left_i / right_i / left_elem / right_elem of the
    # DP loops at the bottom; each returns None when not applicable.
    def modify_cand():
        '''Build the candidate diff that involves (potentially) modifying an
        element.'''
        if col_i + 1 < len(lastrow):
            basis = lastrow[col_i+1]
            mutand_key = key + [left_i]
            return (basis +
                    diff(left_elem, right_elem, key=mutand_key, **options))
    def delete_cand():
        '''Build the candidate diff that involves deleting an element.'''
        if row:
            basis = row[0]
            delend_key = key + [left_i]
            return (basis + [[delend_key]])
    def append_cand():
        '''Build the candidate diff that involves appending an element.'''
        if col_i == down_col:
            basis = lastrow[col_i]
            append_at_key = key + [append_key(lastrow[col_i], left_struc, key)]
            return (basis + [[append_at_key, right_elem]])
    def insert_cand():
        '''Build the candidate diff that involves an insert-and-shift.'''
        if col_i != down_col:
            basis = lastrow[col_i]
            # del_offset = len([s for s in basis if len(s) == 1])
            insertion_key = key + [right_i]
            return (basis + [[insertion_key, right_elem, 'i']])
    # NOTE: the parameter below shadows the module-level diff() function
    # within this helper's body.
    def estimate_udiff_length(diff):
        '''Estimate the length of a udiff based on ``diff``.'''
        out = 0
        for stanza in diff:
            try:
                key_matter = json.dumps(
                    follow_path(left_struc, stanza[0][len(key):]), indent=1
                )
                out += len(key_matter) + key_matter.count('\n')
            except (KeyError, IndexError):
                pass
            if len(stanza) > 1:
                assert 2 <= len(stanza) <= 3, stanza
                repl_matter = json.dumps(stanza[1], indent=1)
                out += len(repl_matter) + repl_matter.count('\n')
        return out
    # Main Needleman-Wunsch sweep: one row per right-hand element, one
    # column per left-hand element, keeping only the previous row.
    for right_i, right_elem in enumerate(right_struc):
        # first_left_i = min(right_i, len(left_struc) - 1)
        # left_elems = left_struc[first_left_i:]
        col_i = len(left_struc)
        row = [insert_cand()]
        for left_i, left_elem in enumerate(left_struc):
            col_i = len(left_struc) - left_i - 1
            # Pick the cheapest applicable candidate for this cell,
            # measured either in udiff length or compact-JSON length.
            cands = (c for c in (modify_cand(), delete_cand(),
                                 append_cand(), insert_cand())
                     if c is not None)
            if options['array_align'] == 'udiff':
                winner = min(cands, key=estimate_udiff_length)
            else:
                winner = min(cands, key=lambda d: len(compact_json_dumps(d)))
            row.insert(0, winner)
        lastrow = row
    return winner
def append_key(stanzas, left_struc, keypath=()):
    '''Return the index at which an append to ``left_struc`` should land.

    Scans ``stanzas`` for stanzas that directly address children of
    ``keypath``; if any writes at or past the end of ``left_struc``,
    the append index follows the largest such index.  Otherwise it is
    simply ``len(left_struc)``.

    >>> append_key([], [])
    0
    >>> append_key([[[2], 'Baz']], ['Foo', 'Bar'])
    3
    >>> append_key([[[2], 'Baz'], [['Quux', 0], 'Foo']], [], ['Quux'])
    1
    '''
    prefix = list(keypath)
    # Keys of stanzas that address a direct child of keypath.
    child_keys = (s[0] for s in stanzas if s[0] == prefix + s[0][-1:])
    return max([len(left_struc)] + [k[-1] + 1 for k in child_keys])
def compute_keysets(left_seq, right_seq):
    '''Partition the keys of two same-typed containers.

    For dicts the keys themselves are compared; for lists/tuples the
    integer indices are.  Returns a 3-tuple of sets
    ``({overlap}, {left_only}, {right_only})``.

    Raises:
        AssertionError if ``left_seq`` is not an instance of
        ``type(right_seq)``, or if they are not of a non-terminal type.

    >>> (compute_keysets({'foo': None}, {'bar': None})
    ...  == (set([]), {'foo'}, {'bar'}))
    True
    >>> (compute_keysets({'foo': None, 'baz': None},
    ...                  {'bar': None, 'baz': None})
    ...  == ({'baz'}, {'foo'}, {'bar'}))
    True
    >>> compute_keysets(['foo'], ['bar', 'baz']) == ({0}, set([]), {1})
    True
    >>> compute_keysets([], ['bar', 'baz']) == (set([]), set([]), {0, 1})
    True
    '''
    assert isinstance(left_seq, type(right_seq)), (left_seq, right_seq)
    assert type(left_seq) in NONTERMINALS, left_seq
    if type(left_seq) is dict:
        left_keys = set(left_seq)
        right_keys = set(right_seq)
    else:
        left_keys = set(range(len(left_seq)))
        right_keys = set(range(len(right_seq)))
    return (left_keys & right_keys,
            left_keys - right_keys,
            right_keys - left_keys)
def keyset_diff(left_struc, right_struc, key, options={}):
    '''Diff two containers key-by-key (no sequence alignment).

    Deletions for keys only in ``left_struc`` come first, then
    additions for keys only in ``right_struc``, then recursive diffs of
    the shared keys via :func:`diff`.  Sequences are handled just like
    mappings, so the result is correct but not necessarily minimal;
    for minimal sequence diffs use :func:`needle_diff`.  Normally both
    are reached through :func:`diff`.
    '''
    (overlap, left_only, right_only) = compute_keysets(left_struc, right_struc)
    stanzas = [[key + [k]] for k in left_only]
    stanzas.extend([key + [k], right_struc[k]] for k in right_only)
    for k in overlap:
        stanzas.extend(diff(left_struc[k], right_struc[k],
                            key=key + [k], **compat_kwargs(options)))
    return stanzas
def this_level_diff(left_struc, right_struc, key=None, common=None):
    '''Diff ``left_struc`` against ``right_struc`` without recursing.

    Both structures are assumed to sit at keypath ``key`` within the
    overall document.  ``common`` is the precomputed
    :func:`commonality` score (computed here when ``None``).

    >>> (this_level_diff({'foo': 'bar', 'baz': 'quux'},
    ...                  {'foo': 'bar'})
    ...  == [[['baz']]])
    True
    >>> (this_level_diff({'foo': 'bar', 'baz': 'quux'},
    ...                  {'foo': 'bar'}, ['quordle'])
    ...  == [[['quordle', 'baz']]])
    True
    '''
    key = [] if key is None else key
    if common is None:
        common = commonality(left_struc, right_struc)
    if not common:
        # Nothing shared: replace wholesale, or emit nothing when equal.
        return [] if left_struc == right_struc else [[key[:], right_struc]]
    (overlap, left, right) = compute_keysets(left_struc, right_struc)
    # Modifications of shared keys, then deletions, then additions.
    stanzas = [[key[:] + [okey], right_struc[okey]]
               for okey in overlap
               if left_struc[okey] != right_struc[okey]]
    stanzas.extend([key[:] + [okey]] for okey in left)
    stanzas.extend([key[:] + [okey], right_struc[okey]] for okey in right)
    return stanzas
def structure_comparable(left_struc, right_struc):
    '''Test if ``left_struc`` and ``right_struc`` can be efficiently diffed.

    True only when both are non-empty containers of exactly the same type.
    '''
    return (type(left_struc) is type(right_struc)
            and type(left_struc) not in TERMINALS
            and len(left_struc) != 0
            and len(right_struc) != 0)
def commonality(left_struc, right_struc):
    '''Score how much ``left_struc`` and ``right_struc`` have in common.

    Returns a float in ``[0.0, 1.0]`` computed as
    (elements in common) / (total elements); ``0.0`` when the
    structures are not comparable at all.
    '''
    if not structure_comparable(left_struc, right_struc):
        return 0.0
    if type(left_struc) is dict:
        (overlap, left, right) = compute_keysets(left_struc, right_struc)
        return float(len(overlap)) / len(overlap.union(left, right))
    assert type(left_struc) in (list, tuple), left_struc
    # Count left-hand elements (with multiplicity) that also occur on
    # the right, relative to the longer of the two sequences.
    shared = sum(1.0 for elem in left_struc if elem in right_struc)
    return shared / max(len(left_struc), len(right_struc))
def split_diff(stanzas):
    '''Split a diff into object edits, array mods, deletions and inserts.

    Returns ``(objs, mods, dels, ins)``: stanzas that modify JSON
    objects; array stanzas of length 2 (add/change); of length 1
    (delete); and of length 3 (insert-and-shift, ending in ``"i"``).
    '''
    objs = [s for s in stanzas if in_object(s[0])]
    # Array stanzas, ordered by stanza length so bisect can slice them.
    seqs = sorted((s for s in stanzas if in_array(s[0])), key=len)
    assert len(objs) + len(seqs) == len(stanzas), stanzas
    seq_lengths = [len(s) for s in seqs]
    mod_point = bisect.bisect_left(seq_lengths, 2)
    ins_point = bisect.bisect_left(seq_lengths, 3)
    return (objs, seqs[mod_point:ins_point],
            seqs[:mod_point], seqs[ins_point:])
def sort_stanzas(stanzas):
    '''Order the stanzas of a diff so they apply cleanly.

    Object changes may occur in any order, but array edits are
    position-sensitive: deletions must run last-index-first, additions
    leftmost-first, and insert-and-shift stanzas last of all.  Finally
    everything is ordered by descending keypath *length*, so deeply
    nested structures are altered before edits that could move their
    keypaths (this also floats object edits to the front).
    '''
    if len(stanzas) <= 1:
        return stanzas
    (objs, mods, dels, ins) = split_diff(stanzas)
    # Additions/changes ascend (appends cannot be distinguished from
    # in-place mods with the information available here).
    mods.sort(key=lambda s: s[0])
    # Deletions descend so earlier indices stay valid.
    dels.sort(key=lambda s: s[0], reverse=True)
    # Insert-and-shifts ascend.
    ins.sort(key=lambda s: s[0])
    ordered = (objs + mods + dels + ins)
    ordered.sort(key=lambda s: len(s[0]), reverse=True)
    return ordered
| opensvc/igw_envoy | src/json_delta/_diff.py | _diff.py | py | 16,258 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "_util.compact_json_dumps",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "_util.compact_json_dumps",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "copy.copy... |
39095371429 |
# coding: utf-8
# In[1]:
#define a listener which listens to tweets in real time
import tweepy
# to install tweepy, use: pip install tweepy
# import twitter authentication module
from tweepy import OAuthHandler
# import tweepy steam module
from tweepy import Stream
# import stream listener
from tweepy.streaming import StreamListener
# import the python package to handle datetime
import datetime
# set your keys to access tweets
# you can find your keys in Twitter.
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
# Customize a tweet event listener
# inherited from StreamListener provided by tweepy
# This listener reacts when a tweet arrives or an error happens
class MyListener(StreamListener):
    """Tweet stream listener that appends raw tweet JSON to a file.

    Listening stops once ``time_limit`` minutes have elapsed since
    construction.
    """
    def __init__(self, output_file, time_limit):
        """
        :param output_file: path of the file tweets are appended to
        :param time_limit: listening duration in minutes
        """
        # listener start time, used to enforce the time limit
        self.start_time = datetime.datetime.now()
        self.time_limit = time_limit
        self.output_file = output_file
        # initiate superclass's constructor
        StreamListener.__init__(self)

    def on_data(self, data):
        """Handle one incoming tweet (raw JSON string ``data``).

        Returns True to keep listening, False to stop the stream.
        """
        running_time = datetime.datetime.now() - self.start_time
        print(running_time)
        # Fix: use total_seconds() -- timedelta.seconds wraps around at
        # 24 hours, which would silently reset the elapsed time.
        if running_time.total_seconds() / 60.0 < self.time_limit:
            try:
                # append the tweet string (JSON format) to the file
                with open(self.output_file, 'a') as f:
                    f.write(data)
                return True
            except Exception as e:
                # Fix: catch Exception, not BaseException, so that
                # KeyboardInterrupt / SystemExit still stop the listener.
                print("Error on_data:" , str(e))
            # keep listening after a write failure
            return True
        else:  # timeout, return False to stop the listener
            print("time out")
            return False

    def on_error(self, status):
        """Invoked by tweepy on stream errors; log and keep listening."""
        print(status)
        return True
# In[ ]:
# Collect tweets with specific topics within 2 minute
# initiate an instance of MyListener
tweet_listener=MyListener(output_file="srksalman.txt",time_limit=10)
# start a staeam instance using authentication and the listener
twitter_stream = Stream(auth, tweet_listener)
# filtering tweets by topics
twitter_stream.filter(track=['#SlapAFilm', '#ISurviveTwitterBy','Kylie Jenner'])
# In[ ]:
tweet_listener=MyListener(output_file="newsrksalman.txt",time_limit=10)
twitter_stream = Stream(auth, tweet_listener)
twitter_stream.filter(track=['#SlapAFilm', '#ISurviveTwitterBy','Kylie Jenner'])
#twitter_stream.sample()
# In[14]:
#Read/write JSON
import json
tweets=[]
with open('newsrksalman.txt', 'r') as f:
# each line is one tweet string in JSON format
for line in f:
# load a string in JSON format as Python dictionary
tweet = json.loads(line)
tweets.append(tweet)
# write the whole list back to JSON
json.dump(tweets, open("all_tweets.json",'w'))
# to load the whole list
# pay attention to json.load and json.loads
tweets=json.load(open("all_tweets.json",'r'))
# In[42]:
# A tweet is a dictionary
# Some values are dictionaries too!
# for details, check https://dev.twitter.com/overview/api/tweets
print("# of tweets:", len(tweets))
first_tweet=tweets[0]
print("\nprint out first tweet nicely:")
print(json.dumps(first_tweet, indent=4))
print (tweets[0]["text"])
# In[51]:
print(len(tweets))
text = ""
for i in range(0,400):
text = text + tweets[i]["text"]
print (text)
# In[55]:
noUnicode = text.encode('utf8')
print(type(noUnicode))
print(noUnicode)
# In[71]:
text = text.replace(",","").lower()
a = text.split(" ")
print(a)
# In[72]:
for i,t in enumerate(a):
a[i]=a[i].encode('utf8')
print(type(a[i]))
# In[73]:
count_per_topic={}
for word in a:
if word in count_per_topic:
count_per_topic[word]+=1
else:
count_per_topic[word]=1
print(count_per_topic)
# In[74]:
sorted_topics = sorted(count_per_topic.items(),key=lambda item:-item[1])
print(sorted_topics)
# In[75]:
top_50_topics=sorted_topics[0:50]
print(top_50_topics)
# In[76]:
topics,count = zip(*top_50_topics)
print(topics,count)
# In[77]:
import pandas as pd
import brunel
df = pd.DataFrame(top_50_topics,columns=["topic","count"])
get_ipython().magic(u"brunel data('df') label(topic) size(count) color(topic) bubble sort(count)tooltip(count)")
# In[ ]:
| vigneshsriram/Python-Tutorials | WebScrapping2/WebScraping2.py | WebScraping2.py | py | 5,546 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tweepy.streaming.StreamListener",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 44,
"usage_type": "call"
},
{
"api_nam... |
1386058348 | import random
from typing import Set
import numpy as np
def random_rotate(x: np.ndarray,
                  rotation_directions: Set[int],
                  mirror_directions: Set[int] = None,
                  mirror_first=True) -> np.ndarray:
    """Randomly rotate (by axis permutation via rot90) and mirror ``x``.

    ``rotation_directions`` are the axes to shuffle among; mirroring
    defaults to the same axes when ``mirror_directions`` is None.
    ``mirror_first`` controls whether flips happen before the rotation.
    Uses the global ``random`` module state for all randomness.
    """
    if mirror_directions is None:
        mirror_directions = rotation_directions
    rot_axes = list(rotation_directions)
    mirror_axes = list(mirror_directions)

    def _apply_random_rotation(volume: np.ndarray) -> np.ndarray:
        # Shuffle the axis list and rotate each axis towards its new slot.
        source_axes = rot_axes.copy()
        random.shuffle(rot_axes)
        for src, dst in zip(source_axes, rot_axes):
            if src != dst:
                volume = np.rot90(volume, k=1, axes=(src, dst))
        return volume

    def _apply_random_mirroring(volume: np.ndarray) -> np.ndarray:
        # Flip each mirror axis independently with probability 1/2.
        for axis in mirror_axes:
            if random.random() < 0.5:
                volume = np.flip(volume, axis=axis)
        return volume

    if mirror_first:
        return _apply_random_rotation(_apply_random_mirroring(x))
    x = _apply_random_rotation(x)
    return _apply_random_mirroring(x)
| veeramallirajesh/CT-Vertebrae-Detection | load_data/random_rotate.py | random_rotate.py | py | 1,232 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "typing.Set",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_numbe... |
1734227672 | # Mongodb 测试
from pymongo import MongoClient
# Connect to MongoDB and obtain a client object.
client = MongoClient('mongodb://localhost:27017')
# Get the database object named scrapy_db.
db = client.scrapy_db
# Get the collection object named person.
collection = db.person
# Sample document to insert (the name value is Chinese for "Liu Shuo").
doc = {
    'name':'刘硕',
    'age':34,
    'sex':'M'
}
# Insert the document into the collection.
collection.insert_one(doc)
# Close the client.
client.close()
{
"api_name": "pymongo.MongoClient",
"line_number": 5,
"usage_type": "call"
}
] |
9037012840 | import os
import math
import json
import asyncio
from operator import itemgetter
import aiohttp
from aiohttp import ClientConnectorError, ServerTimeoutError, TooManyRedirects
from aiolimiter import AsyncLimiter
from fastapi import FastAPI, Path
app = FastAPI()
# allow for 10 concurrent entries within a 2 second window
rate_limit = AsyncLimiter(10, 2)
default_currency = os.getenv('DEFAULT_CURRENCY','USD')
def parse_data(datum):
    """Flatten one raw CryptoCompare ranking entry.

    Returns ``{"symbol": ..., "CC_Price": ...}``; either value is None
    when the corresponding nested key is missing.
    """
    coin_info = datum.get("CoinInfo", {})
    usd_quote = datum.get("RAW", {}).get("USD", {})
    return {
        "symbol": coin_info.get("Name"),
        "CC_Price": usd_quote.get("PRICE")
    }
async def get_numbered_ranking_page(limit:int = 100, page:int = 0):
    """Fetch one page of rankings under the shared rate limiter.

    Returns ``(page, parsed_rows)``; ``parsed_rows`` is ``[]`` when the
    upstream call failed or returned no data.
    """
    async with rate_limit:
        response = await call_cryptocompare(limit, page)
        # Bug fix: call_cryptocompare returns {} on HTTP errors, so a
        # plain response['Data'] subscript raised KeyError here.
        data = response.get('Data')
        if data:
            return (page, [parse_data(datum) for datum in data])
        print(f"nothing found at page {page}: {response}")
        return (page, [])
async def call_cryptocompare(limit:int = 100, page:int = 0):
    """Call the CryptoCompare top-volume ranking endpoint for one page.

    Returns the decoded JSON payload on HTTP 200, or an empty dict on
    any other status -- callers must tolerate a missing 'Data' key.
    """
    url = 'https://min-api.cryptocompare.com/data/top/totalvolfull'
    parameters = {
        'limit': str(limit),
        'page': str(page),
        'tsym': default_currency
    }
    headers = {
        'Accepts': 'application/json',
        # NOTE(review): the header name is CoinMarketCap's, yet the env
        # var holds a CryptoCompare key -- confirm which is intended.
        'X-CMC_PRO_API_KEY': os.getenv('CRYPTOCOMPARE_KEY'),
    }
    async with aiohttp.ClientSession() as session:
        session.headers.update(headers)
        async with session.get(url, params=parameters) as response:
            if response.status == 200:
                return json.loads(await response.text())
            else:
                # Log the failure and fall back to an empty payload.
                print(response.status, await response.text())
                return {}
@app.get("/")
async def get_rankings(limit: int = 1000, single_page_limit: int = 100):
    """Return the top ``limit`` coins ordered by global rank.

    Fetches pages of ``single_page_limit`` entries concurrently, stamps
    each coin with a 1-based rank derived from its page offset and
    position, and returns the merged rank-sorted list.
    """
    if limit <= single_page_limit:
        # Single page: no fan-out needed.
        async_results = [await get_numbered_ranking_page(limit)]
    else:
        # Fan out one task per page; the rate limiter inside
        # get_numbered_ranking_page bounds the request rate.
        # (Removed an unused asyncio.Semaphore that was never acquired.)
        rank_tasks = [
            asyncio.create_task(get_numbered_ranking_page(single_page_limit, page))
            for page in range(math.ceil(limit / single_page_limit))
        ]
        async_results = await asyncio.gather(*rank_tasks)
    # Stamp a global rank on each coin based on page and position.
    rank_data = []
    for page, page_rankings in async_results:
        for rank, coin in enumerate(page_rankings):
            coin['rank'] = page * single_page_limit + (rank + 1)
            rank_data.append(coin)
    return sorted(rank_data, key=itemgetter('rank'))
| treybrooks/TopCryptosAPI | ranking/app/main.py | main.py | py | 2,688 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aiolimiter.AsyncLimiter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"li... |
29284640142 | import boto3
import json
def create_aws_resource(name, region, KEY, SECRET):
    """
    Create an AWS resource handle, e.g., ec2, s3.

    :param str name - the name of the resource
    :param str region - the name of the AWS region that will contain the resource.
    :param str KEY - the aws access key
    :param str SECRET - the aws access secret key
    :return - the AWS resource
    :raises UnknownServiceError - if the name is an invalid AWS resource name
    """
    # Bundle the credentials/region once so the call site stays compact.
    session_kwargs = {
        'region_name': region,
        'aws_access_key_id': KEY,
        'aws_secret_access_key': SECRET,
    }
    try:
        return boto3.resource(name, **session_kwargs)
    except Exception as e:
        # Log a short marker before re-raising so the failure is visible
        # even when the caller swallows the exception.
        print("Error: Issues creating the aws resource")
        raise(e)
def create_aws_client(name, region, KEY, SECRET):
    """
    Create an AWS client, e.g., iam, redshift.

    :param str name - the name of the client
    :param str region - the name of the AWS region that will contain the client.
    :param str KEY - the aws access key
    :param str SECRET - the aws access secret key
    :return - the AWS client
    """
    # Bundle the credentials/region once so the call site stays compact.
    session_kwargs = {
        'region_name': region,
        'aws_access_key_id': KEY,
        'aws_secret_access_key': SECRET,
    }
    try:
        return boto3.client(name, **session_kwargs)
    except Exception as e:
        # Log a short marker before re-raising so the failure is visible
        # even when the caller swallows the exception.
        print("Error: Issues creating the aws client")
        raise(e)
def create_iam_role(iam, DWH_IAM_ROLE_NAME):
    """
    Create an IAM role that lets Redshift assume AWS services (S3 read-only)
    and return the role's ARN.

    Creation errors (e.g. the role already exists) are printed and ignored
    on purpose so the attach/lookup steps below can still run.

    :param iam: the boto3 IAM client
    :param DWH_IAM_ROLE_NAME: the name for the IAM role
    :return str: the ARN of the IAM role (not the role object itself)
    """
    # 1.1 Create the role; best-effort -- tolerate "role already exists".
    try:
        print("1.1 Creating a new IAM Role")
        iam.create_role(
            Path='/',
            RoleName=DWH_IAM_ROLE_NAME,
            Description="Allows Redshift clusters to call AWS services on your behalf.",
            # Trust policy: only the Redshift service may assume this role.
            AssumeRolePolicyDocument=json.dumps(
                {'Statement': [{'Action': 'sts:AssumeRole',
                                'Effect': 'Allow',
                                'Principal': {'Service': 'redshift.amazonaws.com'}}],
                 'Version': '2012-10-17'})
        )
    except Exception as e:
        print("Error: Issues creating iam role")
        print(e)

    # 1.2 Attach the managed read-only S3 policy.  (The old code indexed
    # ['ResponseMetadata']['HTTPStatusCode'] and discarded the result.)
    print("1.2 Attaching Policy")
    iam.attach_role_policy(
        RoleName=DWH_IAM_ROLE_NAME,
        PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
    )

    # 1.3 Look up and return the role's ARN for use in cluster creation.
    print("1.3 Get the IAM role ARN")
    roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']
    print(roleArn)
    return roleArn
| Hyacinth-Ali/data-warehouse-S3-to-Redshift-ETL | provision_resource_helper.py | provision_resource_helper.py | py | 2,716 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.resource",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 69,
"usage_type": "call"
}
] |
# SARSA (on-policy TD control) on the MiniGrid Empty-8x8 environment.
# The tabular state is the agent's (x, y) grid position combined with its
# facing direction (0..3); only action indices 0..2 are used.
import gym
import numpy as np
import random
#from gym_minigrid.wrappers import *
#import gym_minigrid
#from gym_minigrid import Window
import matplotlib
#env = gym.make('MiniGrid-Empty-6x6-v0',render_mode='human')
env = gym.make('MiniGrid-Empty-8x8-v0')
#env = gym.make('MiniGrid-Empty-8x8-v0')
#window = Window("minigrid")
env.reset()
#x =env.agent_pos
#y= env.agent_pos
#print(x,y)
# Per-episode logs: step counts, total rewards, episode numbers.
steps = []
rew = []
ep = []
# Q[(x, y)][direction][action] -> estimated action value; tables are grown
# lazily as new (position, direction) states are encountered.
Q = {(env.agent_pos[0],env.agent_pos[1]):{dire:{a:0 for a in range(3)}for dire in range (4) }}
# policy[(x, y)][direction] -> action currently preferred in that state.
policy= {(env.agent_pos[0],env.agent_pos[1]):{dire:0 for dire in range (4)}}
alpha = 0.4  # learning rate
epsilon =1  # exploration probability; divided by decay_rate every episode
decay_rate = 1.1
total_episodes = 150
actions = [0,1,2]  # presumably MiniGrid turn-left/turn-right/forward -- confirm
gamma=0.9  # discount factor
for episodes in range (total_episodes):
    env.reset()
    #env.render()
   # state1 = (env.agent_pos[0],env.agent_pos[1])
    #d1 = env.agent_dir
    print(episodes+1)
    ep.append(episodes+1)
    epsilon = epsilon/decay_rate  # decay exploration before each episode
    # Initial state: agent position plus facing direction.
    x1,y1 = env.agent_pos
    state1 =x1,y1
    d1 = env.agent_dir
    # Lazily create Q/policy entries for a state seen for the first time.
    if Q.get(state1) is None:
        Q[state1]={dire:{a:0 for a in range (3)}for dire in range(4)}
    if policy.get(state1) is None:
        policy[state1]={dire:0 for dire in range (4)}
    #if(np.random.uniform(0,1)<epsilon):
    #    a1 = random.choice(actions)
    #else:
    #    a1 = policy[state1][d1]
    R =0  # cumulative reward this episode
    S = 0  # step count this episode
    done = False
    while (not done):
        # Epsilon-greedy choice of the action taken from state1.
        if(np.random.uniform(0,1)<epsilon):
            a1 = random.choice(actions)
            policy[state1][d1]=a1
        else:
            a1 = policy[state1][d1]
        # NOTE(review): gymnasium's step() returns
        # (obs, reward, terminated, truncated, info); the names below look
        # shifted, so 'done' is terminated only and a truncated episode
        # would never end this loop -- confirm against the installed gym.
        obs,reward,done,info,_=env.step(policy[state1][d1])
        x2,y2 = env.agent_pos
        state2 = x2,y2
        d2=env.agent_dir
        R = R+reward
        S = S+1
        if Q.get(state2) is None:
            Q[state2]={dire:{a:0 for a in range(3)}for dire in range(4)}
        if policy.get(state2) is None:
            policy[state2]={dire:0 for dire in range (4)}
        a2=0
        # Epsilon-greedy choice of the successor action; SARSA is
        # on-policy, so the update below uses this actually-chosen a2.
        if np.random.uniform(0,1)<epsilon:
            a2 = random.choice(actions)
            policy[state2][d2]=a2
        else:
            a2 = policy[state2][d2]
        #print(policy,d2,state2)
        # SARSA update: Q(s,a) += alpha * (r + gamma*Q(s',a') - Q(s,a)).
        Q[state1][d1][a1]= Q[state1][d1][a1]+ (alpha*(reward+ (gamma*Q[state2][d2][a2])-Q[state1][d1][a1]))
        state1 =state2
        d1=d2
        a1=a2
    rew.append(R)
    steps.append(S)
    #img = env.get_frame()
   # window.show_img(img)
#env.close()
print(" reward list : ")
print(rew)
print(" steps ")
print( steps)
#print(" episode no. ")
#print(ep) | VaibhavMishra02001/Implementation-of-RL-algorithms | sarsa1.py | sarsa1.py | py | 2,633 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gym.make",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"l... |
25947248538 | # idea: do dfs, put current coordinate and node,
# but put left node first and pop most left element firts from stack
# than add coordinate and node to dict
# than sort dict and return values
from collections import defaultdict, deque
from typing import List, Optional
class TreeNode:
    """A binary-tree node: a payload value plus optional child links."""

    def __init__(self, val=0, left=None, right=None):
        # Leaves simply keep both child links as None.
        self.right = right
        self.left = left
        self.val = val
class Solution:
    def verticalOrder(self, root: Optional['TreeNode']) -> List[List[int]]:
        """Return node values grouped by vertical column, left to right.

        BFS with an explicit (node, column) queue guarantees that within a
        column, values appear top-to-bottom (and left-to-right in a row).

        Fixes vs. the original:
        - an empty tree (root is None) returned [] instead of crashing
          with AttributeError on ``node.val``;
        - ``list.pop(0)`` (O(n) per dequeue, misleadingly named ``stack``)
          replaced by ``collections.deque.popleft`` (O(1)).
        """
        if root is None:
            return []
        columns = defaultdict(list)  # column x -> values in visit order
        queue = deque([(root, 0)])   # BFS frontier of (node, column)
        while queue:
            node, x = queue.popleft()
            columns[x].append(node.val)
            if node.left:
                queue.append((node.left, x - 1))
            if node.right:
                queue.append((node.right, x + 1))
        # Emit columns from leftmost to rightmost.
        return [columns[x] for x in sorted(columns)]
| dzaytsev91/leetcode-algorithms | medium/314_binary_tree_vertical_order_traversal.py | 314_binary_tree_vertical_order_traversal.py | py | 857 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 19,
"usage_type": "name"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.