blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c98d2aa767bafa8ef8cf224690564287b4da0e3 | c6abd5db21dbe5799754c8fd501e145da1d6e74f | /utils/utils_image.py | 896aed2ff991f0f7d64e0020ee3a2fb46092c92f | [] | no_license | tomtirer/IDBP-python | 1beb7dcdda7afa8b64c3f1c4e8b88ab554e732da | 471062551264d2504acb1c489d8de303169c1814 | refs/heads/main | 2023-06-10T04:20:05.606928 | 2021-07-04T21:26:18 | 2021-07-04T21:26:18 | 382,945,112 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,049 | py | import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
from datetime import datetime
# import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
'''
modified by Kai Zhang (github: https://github.com/cszn)
03/03/2019
https://github.com/twhui/SRGAN-pyTorch
https://github.com/xinntao/BasicSR
'''
# File extensions recognised as images by is_image_file (case-sensitive endswith match).
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
def is_image_file(filename):
    """Return True if *filename* ends with one of the known image extensions."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def get_timestamp():
    """Return the current local time formatted as 'YYMMDD-HHMMSS'."""
    now = datetime.now()
    return now.strftime('%y%m%d-%H%M%S')
def imshow(x, title=None, cbar=False, figsize=None):
    """Display array *x* in grayscale with matplotlib.

    title: optional figure title; cbar: add a colorbar; figsize: forwarded to
    plt.figure. Blocks until the window is closed (plt.show).
    """
    plt.figure(figsize=figsize)
    # squeeze drops singleton channel dims so HxWx1 renders as grayscale
    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    plt.show()
def surf(Z):
    """Plot Z as a 3-D surface over a fixed 0..24 x 0..24 grid.

    NOTE(review): X and Y are hard-coded to arange(0, 25); this assumes Z is
    25x25 — confirm before use with other shapes.
    """
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = Axes3D(fig)
    X = np.arange(0, 25, 1)
    Y = np.arange(0, 25, 1)
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='rainbow')
    # ax3.contour(X, Y, Z, zdim='z', offset=-2, cmap='rainbow)
    # ax.view_init(elev=45, azim=45)
    # ax.set_xlabel("x")
    # plt.title(" ")
    # NOTE(review): positional pad argument to tight_layout is deprecated in
    # newer matplotlib — confirm the installed version accepts it.
    plt.tight_layout(0.9)
    plt.show()
'''
# =======================================
# get image pathes of files
# =======================================
'''
def get_image_paths(dataroot):
    """Return a sorted list of image paths under *dataroot*, or None if dataroot is None."""
    if dataroot is None:
        return None
    return sorted(_get_paths_from_images(dataroot))
def _get_paths_from_images(path):
    """Recursively collect every image file path under *path* (sorted walk order)."""
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = [
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in sorted(os.walk(path))
        for fname in sorted(fnames)
        if is_image_file(fname)
    ]
    assert images, '{:s} has no valid image file'.format(path)
    return images
'''
# =======================================
# makedir
# =======================================
'''
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses exist_ok=True instead of an exists()-then-makedirs() pair, so a
    concurrent creation between check and call cannot raise FileExistsError.
    """
    os.makedirs(path, exist_ok=True)
def mkdirs(paths):
    """Create a single directory (str) or every directory in an iterable of paths."""
    if isinstance(paths, str):
        paths = [paths]
    for p in paths:
        mkdir(p)
def mkdir_and_rename(path):
    """Create *path*; if it already exists, first rename the old directory.

    The existing directory is archived (renamed, not deleted) to
    '<path>_archived_<timestamp>' and a notice is printed.
    """
    if os.path.exists(path):
        new_name = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(new_name))
        os.rename(path, new_name)
    os.makedirs(path)
'''
# =======================================
# read image from path
# Note: opencv is fast
# but read BGR numpy image
# =======================================
'''
def todevice(x_list, device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
    """Move every tensor in *x_list* to *device* (default: CUDA when available, else CPU)."""
    moved = []
    for item in x_list:
        moved.append(item.to(device))
    return moved
# ----------------------------------------
# get single image of size HxWxn_channles (BGR)
# ----------------------------------------
def read_img(path):
    # read image by cv2
    # return: Numpy float32, HWC, BGR, [0,1]
    # IMREAD_UNCHANGED keeps the file's channel count (gray, BGR or BGRA).
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
    # NOTE(review): cv2.imread returns None for unreadable paths, which would
    # make astype raise AttributeError — confirm callers pass valid paths.
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)  # promote gray HxW to HxWx1
    # some images have 4 channels
    if img.shape[2] > 3:
        img = img[:, :, :3]  # drop alpha channel
    return img
# ----------------------------------------
# get uint8 image of size HxWxn_channles (RGB)
# ----------------------------------------
def imread_uint(path, n_channels=3):
    """Read an image file as a uint8 array.

    input: path
    output: HxWx3 (RGB, gray replicated to GGG) when n_channels == 3,
            HxWx1 (G) when n_channels == 1.

    Bug fix: previously any other n_channels fell through both branches and
    raised UnboundLocalError on the undefined `img`; it now raises ValueError.
    """
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    else:
        raise ValueError('n_channels must be 1 or 3, got {}'.format(n_channels))
    return img
def imsave(img, img_path):
    """Save *img* (uint8, HxWxC RGB or HxW gray) to *img_path* via OpenCV."""
    img = np.squeeze(img)
    if img.ndim == 3:
        img = img[:, :, [2, 1, 0]]  # RGB -> BGR: cv2.imwrite expects BGR order
    cv2.imwrite(img_path, img)
'''
# =======================================
# numpy(single) <---> numpy(unit)
# numpy(single) <---> tensor
# numpy(unit) <---> tensor
# =======================================
'''
# --------------------------------
# numpy(single) <---> numpy(unit)
# --------------------------------
def uint2single(img):
    """Convert a uint8 image in [0, 255] to float32 in [0, 1]."""
    scaled = img / 255.
    return np.float32(scaled)
def single2uint(img):
    """Convert a float image in [0, 1] to uint8 in [0, 255] (clipped, rounded)."""
    clipped = img.clip(0, 1) * 255.
    return np.uint8(clipped.round())
def uint162single(img):
    """Convert a uint16 image in [0, 65535] to float32 in [0, 1]."""
    scaled = img / 65535.
    return np.float32(scaled)
def single2uint16(img):
    """Convert a float image in [0, 1] to uint16 in [0, 65535] (clipped, rounded).

    Bug fix: the result was previously cast with np.uint8, which wrapped the
    16-bit range modulo 256 (e.g. 1.0 -> 65535 -> 255). It now returns uint16.
    """
    return np.uint16((img.clip(0, 1) * 65535.).round())
# --------------------------------
# numpy(unit) <---> tensor
# uint (HxWxn_channels (RGB) or G)
# --------------------------------
# convert uint (HxWxn_channels) to 4-dimensional torch tensor
def uint2tensor4(img):
    """uint8 HxWxC (or HxW) ndarray -> float tensor 1xCxHxW scaled to [0, 1]."""
    if img.ndim == 2:
        img = img[:, :, None]
    tensor = torch.from_numpy(np.ascontiguousarray(img))
    return tensor.permute(2, 0, 1).float().div(255.).unsqueeze(0)
# convert uint (HxWxn_channels) to 3-dimensional torch tensor
def uint2tensor3(img):
    """uint8 HxWxC (or HxW) ndarray -> float tensor CxHxW scaled to [0, 1]."""
    if img.ndim == 2:
        img = img[:, :, None]
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1).float().div(255.)
# convert torch tensor to uint
def tensor2uint(img):
    """Float tensor (values in ~[0,1]) -> uint8 ndarray, HxWxC (or HxW) after squeeze."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = arr.transpose(1, 2, 0)
    return np.uint8((arr * 255.0).round())
# --------------------------------
# numpy(single) <---> tensor
# single (HxWxn_channels (RGB) or G)
# --------------------------------
# convert single (HxWxn_channels) to 4-dimensional torch tensor
def single2tensor4(img):
    """float HxWxC ndarray -> float tensor 1xCxHxW."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1).unsqueeze(0).float()
def single2tensor5(img):
    """float HxWxCxB ndarray -> float tensor 1xCxHxWxB."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1, 3).float().unsqueeze(0)
def single32tensor5(img):
    """float ndarray -> float tensor with two leading singleton dimensions added."""
    tensor = torch.from_numpy(np.ascontiguousarray(img)).float()
    return tensor.unsqueeze(0).unsqueeze(0)
def single42tensor4(img):
    """float HxWxCxB ndarray -> float tensor CxHxWxB."""
    arr = np.ascontiguousarray(img)
    out = torch.from_numpy(arr)
    return out.permute(2, 0, 1, 3).float()
# convert single (HxWxn_channels) to 3-dimensional torch tensor
def single2tensor3(img):
    """float HxWxC ndarray -> float tensor CxHxW."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1).float()
# convert single (HxWx1, HxW) to 2-dimensional torch tensor
def single2tensor2(img):
    """float HxWx1 (or HxW) ndarray -> 2-D float tensor (singleton dims dropped)."""
    tensor = torch.from_numpy(np.ascontiguousarray(img))
    return tensor.squeeze().float()
# convert torch tensor to single
def tensor2single(img):
    """Float tensor -> float32 ndarray in [0, 1], HxWxC (or HxW), values clamped."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    return arr.transpose(1, 2, 0) if arr.ndim == 3 else arr
def tensor2single3(img):
    """Like tensor2single but guarantees a 3-D result (HxWx1 for grayscale)."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = arr.transpose(1, 2, 0)
    elif arr.ndim == 2:
        arr = arr[:, :, np.newaxis]
    return arr
# from skimage.io import imread, imsave
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
    n_dim = tensor.dim()
    if n_dim == 4:
        # batch of images: tile them into one grid image, ~sqrt(B) per row
        n_img = len(tensor)
        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = tensor.numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        # grayscale: no channel reordering needed
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        img_np = (img_np * 255.0).round()
        # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.
    return img_np.astype(out_type)
'''
# =======================================
# Augmentation
# The following two functions are enough.
# (1) augmet_img: numpy image of wxhxc or wxh
# (2) augment_img_tensor4: tensor image 1xcxwxh
# =======================================
'''
def augment_img(img, mode=0):
    """Apply one of 8 flip/rotate augmentations to a numpy image.

    mode 0 is identity; modes 1-7 are combinations of rot90 and flipud.
    An unknown mode yields None (matching the original fall-through).
    """
    ops = {
        0: lambda a: a,
        1: lambda a: np.flipud(np.rot90(a)),
        2: lambda a: np.flipud(a),
        3: lambda a: np.rot90(a, k=3),
        4: lambda a: np.flipud(np.rot90(a, k=2)),
        5: lambda a: np.rot90(a),
        6: lambda a: np.rot90(a, k=2),
        7: lambda a: np.flipud(np.rot90(a, k=3)),
    }
    op = ops.get(mode)
    return op(img) if op is not None else None
def augment_img_tensor4(img, mode=0):
    """Apply one of 8 flip/rotate augmentations to a 4-D tensor (N, C, H, W).

    Each mode maps to (number of 90° rotations in the HxW plane, flip-H flag).
    An unknown mode yields None (matching the original fall-through).
    """
    spec = {
        0: (0, False), 1: (1, True), 2: (0, True), 3: (3, False),
        4: (2, True), 5: (1, False), 6: (2, False), 7: (3, True),
    }
    if mode not in spec:
        return None
    k, flip = spec[mode]
    out = img.rot90(k, [2, 3]) if k else img
    return out.flip([2]) if flip else out
def augment_img_np3(img, mode=0):
    """Apply one of 8 flip/transpose augmentations to an HxWxC numpy image.

    Returns views (negative-stride slices / transposes), not copies.
    NOTE(review): modes outside 0..7 fall through and return None.
    """
    if mode == 0:
        return img
    elif mode == 1:
        return img.transpose(1, 0, 2)  # swap H and W axes
    elif mode == 2:
        return img[::-1, :, :]  # vertical flip
    elif mode == 3:
        img = img[::-1, :, :]
        img = img.transpose(1, 0, 2)
        return img
    elif mode == 4:
        return img[:, ::-1, :]  # horizontal flip
    elif mode == 5:
        img = img[:, ::-1, :]
        img = img.transpose(1, 0, 2)
        return img
    elif mode == 6:
        # horizontal + vertical flip == 180° rotation
        img = img[:, ::-1, :]
        img = img[::-1, :, :]
        return img
    elif mode == 7:
        img = img[:, ::-1, :]
        img = img[::-1, :, :]
        img = img.transpose(1, 0, 2)
        return img
def augment_img_tensor(img, mode=0):
    """Augment a 3-D (CxHxW) or 4-D (NxCxHxW) tensor by round-tripping via numpy.

    Converts to HxWxC(-xN) layout, applies augment_img, converts back, and
    preserves the input's dtype/device via type_as.
    """
    img_size = img.size()
    img_np = img.data.cpu().numpy()
    if len(img_size) == 3:
        img_np = np.transpose(img_np, (1, 2, 0))  # CHW -> HWC
    elif len(img_size) == 4:
        img_np = np.transpose(img_np, (2, 3, 1, 0))  # NCHW -> HWCN
    img_np = augment_img(img_np, mode=mode)
    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
    if len(img_size) == 3:
        img_tensor = img_tensor.permute(2, 0, 1)  # back to CHW
    elif len(img_size) == 4:
        img_tensor = img_tensor.permute(3, 2, 0, 1)  # back to NCHW
    return img_tensor.type_as(img)
def augment_imgs(img_list, hflip=True, rot=True):
    """Randomly augment every image in *img_list* with the SAME transforms.

    Each of hflip / vflip / rot90 is drawn once (50% chance, gated by the
    hflip/rot flags) and applied to all images, so paired images (e.g. LR/HR)
    stay aligned. Images are HxWxC numpy arrays.
    """
    # horizontal flip OR rotate
    hflip = hflip and random.random() < 0.5
    vflip = rot and random.random() < 0.5
    rot90 = rot and random.random() < 0.5
    def _augment(img):
        if hflip:
            img = img[:, ::-1, :]
        if vflip:
            img = img[::-1, :, :]
        if rot90:
            img = img.transpose(1, 0, 2)
        return img
    return [_augment(img) for img in img_list]
'''
# =======================================
# image processing process on numpy image
# channel_convert(in_c, tar_type, img_list):
# rgb2ycbcr(img, only_y=True):
# bgr2ycbcr(img, only_y=True):
# ycbcr2rgb(img):
# modcrop(img_in, scale):
# =======================================
'''
def rgb2ycbcr(in_img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_type = in_img.dtype
    img = np.float32(in_img)  # copy; the caller's array is not touched
    if in_type != np.uint8:
        img *= 255.  # float input arrives in [0,1]; work in the [0,255] domain
    if only_y:
        out = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        coeffs = [[65.481, -37.797, 112.0],
                  [128.553, -74.203, -93.786],
                  [24.966, 112.0, -18.214]]
        out = np.matmul(img, coeffs) / 255.0 + [16, 128, 128]
    out = out.round() if in_type == np.uint8 else out / 255.
    return out.astype(in_type)
def ycbcr2rgb(in_img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_type = in_img.dtype
    img = np.float32(in_img)  # copy; the caller's array is not touched
    if in_type != np.uint8:
        img *= 255.  # float input arrives in [0,1]; work in the [0,255] domain
    mat = [[0.00456621, 0.00456621, 0.00456621],
           [0, -0.00153632, 0.00791071],
           [0.00625893, -0.00318811, 0]]
    out = np.matmul(img, mat) * 255.0 + [-222.921, 135.576, -276.836]
    out = np.clip(out, 0, 255)
    out = out.round() if in_type == np.uint8 else out / 255.
    return out.astype(in_type)
def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]

    Bug fix: the result of img.astype(np.float32) was discarded, so for float
    input `img *= 255.` mutated the CALLER's array in place and the math ran
    on the original dtype. The conversion now works on a float32 copy.
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)  # explicit copy; never mutate the caller's array
    if in_img_type != np.uint8:
        img *= 255.
    # convert (note BGR channel order: coefficients are reversed vs rgb2ycbcr)
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
def modcrop(img_in, scale):
    """Crop bottom/right edges so H and W are divisible by *scale* (HW or HWC input).

    Returns a copy; raises ValueError for other dimensionalities.
    """
    img = np.copy(img_in)
    if img.ndim not in (2, 3):
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    H, W = img.shape[0], img.shape[1]
    H_new, W_new = H - H % scale, W - W % scale
    return img[:H_new, :W_new] if img.ndim == 2 else img[:H_new, :W_new, :]
def shave(img_in, border=0):
    """Remove *border* pixels from every side of a numpy HWC or HW image (copy)."""
    img = np.copy(img_in)
    h, w = img.shape[:2]
    return img[border:h - border, border:w - border]
def channel_convert(in_c, tar_type, img_list):
    # conversion among BGR, gray and y
    """Convert every image in *img_list* between BGR / gray / Y channel spaces.

    in_c: current channel count (1 or 3); tar_type: 'gray', 'y' or 'RGB'.
    Unmatched combinations return the list unchanged.
    NOTE(review): the 'RGB' branch produces BGR via cv2 — the name is misleading.
    """
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in gray_list]  # keep HxWx1 shape
    elif in_c == 3 and tar_type == 'y':  # BGR to y
        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in y_list]
    elif in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    else:
        return img_list
'''
# =======================================
# metric, PSNR and SSIM
# =======================================
'''
# ----------
# PSNR
# ----------
def calculate_psnr(img1, img2, border=0):
    """PSNR (dB) between two images in [0, 255]; *border* pixels per side ignored.

    Returns inf for identical inputs; raises ValueError on shape mismatch.
    """
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    a = img1[border:h - border, border:w - border].astype(np.float64)
    b = img2[border:h - border, border:w - border].astype(np.float64)
    mse = np.mean((a - b) ** 2)
    return float('inf') if mse == 0 else 20 * math.log10(255.0 / math.sqrt(mse))
# ----------
# SSIM
# ----------
def calculate_ssim(img1, img2, border=0):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]

    Bug fix: for 3-channel images the per-channel loop previously passed the
    FULL images to ssim() three times and averaged three identical values;
    it now averages the SSIM of each individual channel, as intended.
    '''
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h - border, border:w - border]
    img2 = img2[border:h - border, border:w - border]
    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            # mean SSIM over the three channels, one 2-D map each
            ssims = [ssim(img1[:, :, i], img2[:, :, i]) for i in range(3)]
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')
def ssim(img1, img2):
    """Single-channel SSIM matching MATLAB: 11x11 Gaussian window, sigma 1.5.

    img1, img2: 2-D arrays in [0, 255]. The [5:-5] crops keep only the 'valid'
    filter region (no border effects). Returns the mean of the SSIM map.
    """
    C1 = (0.01 * 255)**2  # stabilizer for the luminance term
    C2 = (0.03 * 255)**2  # stabilizer for the contrast/structure term
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())  # separable Gaussian -> 2-D window
    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    # local variances/covariance via E[x^2] - E[x]^2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
'''
# =======================================
# pytorch version of matlab imresize
# =======================================
'''
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    """Bicubic interpolation kernel (a = -0.5), matching MATLAB's imresize.

    Piecewise cubic: one polynomial on |x| <= 1, another on 1 < |x| <= 2,
    zero elsewhere.
    """
    ax = torch.abs(x)
    ax2 = ax ** 2
    ax3 = ax ** 3
    near = (1.5 * ax3 - 2.5 * ax2 + 1) * (ax <= 1).type_as(ax)
    far = (-0.5 * ax3 + 2.5 * ax2 - 4 * ax + 2) * ((ax > 1) * (ax <= 2)).type_as(ax)
    return near + far
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """Compute per-output-pixel input indices and bicubic weights for imresize.

    Returns (weights, indices, sym_len_s, sym_len_e), where sym_len_s/e are the
    number of symmetrically-mirrored pixels needed at the start/end of the axis
    and indices are shifted to address the padded axis.
    NOTE(review): `kernel` is accepted for API symmetry but unused — 'cubic'
    is assumed.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)
    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)
    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)
    # What is the maximum number of pixels that can be involved in the
    # computation? Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # amount of symmetric padding needed so all indices stay in range
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------
# imresize for tensor image
# --------------------------------
def imresize(img, scale, antialiasing=True):
    # Now the scale should be the same for H and W
    # input: img: pytorch tensor, CHW or HW [0,1]
    # output: CHW or HW [0,1] w/o round
    """MATLAB-equivalent bicubic resize of a tensor image (H first, then W).

    NOTE(review): for 2-D input, img.unsqueeze_(0) modifies the caller's
    tensor in place — confirm callers do not rely on the input staying 2-D.
    """
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying: mirror rows into the padded top/bottom margins
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            # each output row is a weighted combination of kernel_width input rows
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying: mirror columns into the padded left/right margins
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2
# --------------------------------
# imresize for numpy image
# --------------------------------
def imresize_np(img, scale, antialiasing=True):
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    """Numpy front-end of the MATLAB-style bicubic resize (HWC layout).

    Wraps the input in a tensor, resizes H then W, and returns a numpy array.
    """
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(2)  # promote HxW to HxWx1
    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying: mirror rows into the padded top/bottom margins
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            # each output row is a weighted combination of kernel_width input rows
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying: mirror columns into the padded left/right margins
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2.numpy()
if __name__ == '__main__':
    # smoke test: read a local test image as RGB uint8
    img = imread_uint('test.bmp',3)
| [
"noreply@github.com"
] | noreply@github.com |
2146132029e154c9162b74995f0cf34f0ef3342e | 60654caf2633613021470d0285817343f76223e5 | /daily_catch/public_update/config.py | 566a37dc4796f6f4c390e00778aea0555a926b77 | [] | no_license | whoiskx/com_code | 79460ccee973d1dfe770af3780c273e4a0f466c9 | 388b5a055393ee7768cc8525c0484f19c3f97193 | refs/heads/master | 2020-04-09T23:14:28.228729 | 2018-12-06T07:10:25 | 2018-12-06T07:10:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # -*- coding: utf-8 -*-
import os
read_ver_url = 'http://dispatch.yunrunyuqing.com:38082/resources/sourceVersion/weixin/version.txt'
download_url = 'http://dispatch.yunrunyuqing.com:38082/resources/sourceVersion/weixin/public_update.zip'
base_path = os.path.dirname(os.path.abspath(__file__))
core_spider_path = os.path.join(base_path, 'public_update')
core_zip_path = os.path.join(core_spider_path, 'public_update.zip')
version_txt_path = os.path.join(core_spider_path, 'version.txt')
spider_path = os.path.join(core_spider_path, 'daily_collect')
run_path = os.path.join(spider_path, 'daily_collect.py')
kill_path = 'daily_collect.py'
| [
"574613576@qq.com"
] | 574613576@qq.com |
009c2f80f9980fd30b33b1b39d0b96a77e5c484a | 2d0b009f8b8560a1348a9feca2fb78d1a3616b45 | /pandair_application_final.py | 8ffa629c568038f91a23360560e2b819d7c5f41a | [] | no_license | EvaChitul/pandair_app | 03f4a66bb34ebf82d3cfd220f8ad9dbb81e86244 | e37d4919ea38ae9d24b17307f25b2ede97fa2586 | refs/heads/main | 2023-03-10T15:47:46.195624 | 2021-02-21T14:55:52 | 2021-02-21T14:55:52 | 320,321,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,193 | py | import itertools
import logging
import time
import datetime
# Global registry of every aircraft currently stationed anywhere (prevents duplicates).
fleet_database_check = set()
# Chronological log of completed flights: 'Entry N' -> human-readable description.
flights_log_database = {}
# Placeholder for regional fleet groupings (populated elsewhere, if at all).
regional_fleet = {}
# One DEBUG-level log file per day, e.g. '2021-02-21_pandair_logging'.
logging.basicConfig(level=logging.DEBUG, filename=f'{datetime.date.today()}_pandair_logging')
log = logging.getLogger('Pandair Airline')
class Aircraft:
    """Base class for all Pandair aircraft.

    Holds the static characteristics plus the count of flights flown since the
    last maintenance, which drives due_for_maintenance().
    """

    def __init__(self, manufacturer, weight, speed, consumption, identifier, number_flights_maintenance):
        (self.manufacturer, self.weight, self.speed,
         self.consumption, self.identifier, self.number_flights_maintenance) = (
            manufacturer, weight, speed, consumption, identifier, number_flights_maintenance)

    def __str__(self):
        return f'Aircraft type {type(self).__name__}, identifier {self.identifier}, manufacturer {self.manufacturer}'

    def __repr__(self):
        return self.identifier

    def due_for_maintenance(self):
        """True once the aircraft has flown 30 or more flights since maintenance."""
        log.info(f' Checking if Aircraft {self.identifier} is due for maintenance')
        return self.number_flights_maintenance >= 30
class QuickMaintenanceMixin:
    """Mixin granting a fast partial maintenance that removes 10 flights from the counter."""

    def quick_maintenance(self):
        """Reduce number_flights_maintenance by 10 (floored at 0) and report the result."""
        if self.number_flights_maintenance - 10 < 0:
            self.number_flights_maintenance = 0
        else:
            self.number_flights_maintenance -= 10
        log.debug(f'{time.asctime(time.localtime(time.time()))} {self} completed quick maintenance. Flights number is now {self.number_flights_maintenance}')
        return f'{self} has gone through quick maintenance. Flights number is now: {self.number_flights_maintenance}'
class PassengerAircraft(Aircraft):
    """Aircraft that carries passengers; adds number_passengers to the base attributes."""

    def __init__(self, manufacturer, weight, speed, consumption, identifier, number_flights_maintenance, number_passengers):
        self.number_passengers = number_passengers
        super().__init__(manufacturer, weight, speed, consumption, identifier, number_flights_maintenance)
class CargoAircraft(Aircraft, QuickMaintenanceMixin):
    """Freight aircraft; eligible for quick maintenance, higher maintenance interval.

    NOTE(review): __init__ takes load_weight BEFORE number_flights_maintenance,
    unlike the passenger classes — positional callers must mind the order.
    """

    def __init__(self, manufacturer, weight, speed, consumption, identifier, load_weight, number_flights_maintenance):
        self.load_weight = load_weight
        super().__init__(manufacturer, weight, speed, consumption, identifier, number_flights_maintenance)

    def due_for_maintenance(self):
        """Cargo aircraft tolerate 50 flights between maintenance (base class: 30).

        Unlike the base implementation, this override does not log the check.
        """
        if self.number_flights_maintenance >= 50:
            return True
        else:
            return False
class PrivateAircraft(PassengerAircraft, QuickMaintenanceMixin):
    """Small passenger aircraft that is also eligible for quick maintenance."""

    def __init__(self, manufacturer, weight, speed, consumption, identifier, number_passengers, number_flights_maintenance):
        # Note: this signature puts number_passengers before
        # number_flights_maintenance; the super call re-orders them for
        # PassengerAircraft.__init__.
        super().__init__(manufacturer, weight, speed, consumption, identifier, number_flights_maintenance, number_passengers)
class CommercialAircraft(PassengerAircraft):
    """Scheduled passenger aircraft; currently identical in behavior to PassengerAircraft.

    The pass-through __init__ adds nothing but keeps the type distinct for isinstance checks.
    """

    def __init__(self, manufacturer, weight, speed, consumption, identifier, number_flights_maintenance, number_passengers):
        super().__init__(manufacturer, weight, speed, consumption, identifier, number_flights_maintenance, number_passengers)
class Airport:
    """A collection of aircraft parked at one location.

    Membership is mirrored in the module-level fleet_database_check set so the
    same aircraft object cannot be stationed at two airports at once.
    """

    def __init__(self):
        # aircraft currently stationed at this airport
        self.airport_list = []

    def __len__(self):
        return len(self.airport_list)

    def __getitem__(self, index):
        return self.airport_list[index]

    def __str__(self):
        return f'Airport: {self.airport_list}'

    def __repr__(self):
        return f'{self.airport_list}'

    def __add__(self, second_airport):
        """Merge two airports into a new 'regional' Airport.

        The duplicate check is skipped because these aircraft are already
        registered in the global fleet set.
        """
        region_airport = Airport()
        all_aircraft = self.airport_list + second_airport.airport_list
        for aircraft in all_aircraft:
            region_airport.add_aircraft(aircraft, check_duplicates=False)
        log.debug(f'{time.asctime(time.localtime(time.time()))} Created Regional Airport from {self} and {second_airport}')
        return region_airport

    def add_aircraft(self, aircraft, check_duplicates=True):
        """Park *aircraft* here unless it is already registered anywhere in the fleet."""
        if aircraft in fleet_database_check and check_duplicates:
            print(f'{aircraft} already in Fleet. Cannot duplicate \n')
            log.info(f' {aircraft} already in Fleet.')
        else:
            self.airport_list.append(aircraft)
            fleet_database_check.add(aircraft)
            log.debug(f'{time.asctime(time.localtime(time.time()))} {aircraft} added in Airport and Fleet. Airplanes in fleet overview: {fleet_database_check}')

    def remove_aircraft(self, aircraft):
        """Remove *aircraft* from this airport AND from the global fleet registry."""
        if aircraft in self.airport_list:
            position = self.airport_list.index(aircraft)
            del self.airport_list[position]
            fleet_database_check.remove(aircraft)
            log.debug(f'{time.asctime(time.localtime(time.time()))} {aircraft} removed from Airport and Fleet. Airplanes in fleet overview: {fleet_database_check}')
        else:
            print(f'{aircraft} not found at Airport. Unable to remove')
            log.info(f' {aircraft} not found at Airport.')
class FleetDatabase:
    """Maps airport name (title-cased str) -> Airport.

    Unknown keys accessed via [] are auto-created as empty Airports.
    """

    def __init__(self):
        self.fleet = {}

    def __getitem__(self, key):
        # Auto-vivify: looking up a missing airport creates an empty one.
        if key not in self.fleet:
            print(f'Airport {key} not in Fleet Database. Airport will be added')
            log.info(f' Airport {key} not in Fleet Database.')
            self.fleet[key] = Airport()
            log.debug(f'{time.asctime(time.localtime(time.time()))} Airport {key} not found. Airport was added to Fleet Database. Fleet Overview: {self.fleet}')
        return self.fleet[key]

    def __delitem__(self, key):
        del self.fleet[key]

    def __setitem__(self, key, value):
        self.fleet[key] = value

    def __len__(self):
        return len(self.fleet)

    def __iter__(self):
        return iter(self.fleet)

    def __str__(self):
        return f'Fleet and Location Overview: {self.fleet}'

    def __repr__(self):
        return f'{self.fleet}'

    def add_airport(self, airport_name, airport_list):
        """Register *airport_list* under *airport_name*, replacing any existing fleet there.

        NOTE(review): airport_list is stored as-is and merely iterated — it may
        be an Airport or a plain list of aircraft; confirm callers' type.
        """
        if airport_name.title() in self.fleet.keys():
            print('Airport already in Fleet Database. The new aircrafts will replace the old ones')
            log.info(f' {airport_name.title()} already in Fleet Database.')
            # deregister the replaced aircraft from the global fleet set
            for aircraft in self.fleet[airport_name.title()]:
                fleet_database_check.remove(aircraft)
                log.debug(f'{time.asctime(time.localtime(time.time()))} {airport_name} removed from Fleet Database. Fleet Overview {fleet_database_check}')
        # register the incoming aircraft in the global fleet set
        for new_aircraft in airport_list:
            fleet_database_check.add(new_aircraft)
            log.debug(f'{time.asctime(time.localtime(time.time()))} {new_aircraft} added to Fleet Database. Fleet Overview {fleet_database_check}')
        self.fleet[airport_name.title()] = airport_list
        log.debug(f'{time.asctime(time.localtime(time.time()))} {airport_name.title()} fleet replaced. New {airport_name.title()} {airport_list} ')

    def remove_airport(self, airport_name):
        """Delete an airport and deregister all of its aircraft from the fleet set."""
        if airport_name.title() in self.fleet.keys():
            for aircraft in self.fleet[airport_name.title()]:
                fleet_database_check.remove(aircraft)
                log.debug(f'{time.asctime(time.localtime(time.time()))} {aircraft} removed from Fleet Database. Fleet overview {fleet_database_check}')
            del self.fleet[airport_name.title()]
            log.debug(f'{time.asctime(time.localtime(time.time()))} {airport_name.title()} removed from Fleet Database. Overview of airports {self.fleet}')
        else:
            print(f'{airport_name.title()} Airport not found in Fleet Database. Unable to remove')
            log.info(f' {airport_name.title()} not found in Fleet. ')
def flights_log(flight):
def track_flight(*args):
results = flight(*args)
if results:
aircraft, city, destination = results[0], results[1], results[2]
flights_log_database[f'Entry {len(flights_log_database) + 1}'] = f' Aircraft {aircraft.identifier}: {city} to {destination}'
log.info(f' New flight added to flight log: {aircraft.identifier}: {city} to {destination}')
if aircraft.due_for_maintenance:
print(f'Alert: Aircraft {aircraft.identifier} is due for maintenance!')
log.info(f' Aircraft Alert: {aircraft.identifier} is due for maintenance!')
else:
return None
return flight
return track_flight
@flights_log
def operate_flight(fleet_data, city, destination, aircraft):
if aircraft in fleet_data[city.title()]:
fleet_data[city.title()].remove_aircraft(aircraft)
log.debug(f'{time.asctime(time.localtime(time.time()))} {aircraft} removed from {city.title()} airport. {city.title()} airport overview: {fleet_data[city.title()]}')
fleet_data[destination.title()].add_aircraft(aircraft)
log.debug(f'{time.asctime(time.localtime(time.time()))} {aircraft} added to {destination} airport. {destination.title()} airport overview: {fleet_data[destination.title()]}')
aircraft.number_flights_maintenance += 1
log.debug(f'{time.asctime(time.localtime(time.time()))} {aircraft} number of flights increased by 1. Number of flights operated now at {aircraft.number_flights_maintenance}')
else:
print(f'{aircraft} not in {city}. Cannot perform flight ')
log.info(f' {aircraft} not found in {city} airport.')
return None
return aircraft, city, destination
def generate_pairs(fleet_database):
list_origin = set([origin for origin, fleet in fleet_database.fleet.items() for aircraft in fleet
if isinstance(aircraft, PassengerAircraft) and aircraft.number_passengers > 100])
list_destination = set([destination for destination, planes in fleet_database.fleet.items() if len(planes) <= 3])
print(list_origin)
print(list_destination)
for origin, destination in itertools.product(list_origin, list_destination):
if origin != destination:
log.debug(f'{time.asctime(time.localtime(time.time()))} New origin-destination pair was generated: {origin} - {destination}')
yield f'Possible origin destination pair: {origin} - {destination}'
class AlterAircraft:
def __init__(self, plane):
self.plane = plane
def __enter__(self):
self.pandair_status = open('pandair_status.txt', 'w')
self.pandair_status.write('Altering behaviour of due for maintenance method. Be careful with the flights! \n')
self.original_maintenance_method = self.plane.due_for_maintenance
self.plane.due_for_maintenance = lambda: False
log.info(f'Behaviour of {self.plane} has been changed')
log.debug(f'{time.asctime(time.localtime(time.time()))} Due for maintenance method for {self.plane} now returning {self.plane.due_for_maintenance}')
return self.plane
def __exit__(self, exc_type, exc_value, traceback):
self.pandair_status.write(f'Due for maintenance method of aircraft returned to original state\n')
self.pandair_status.write('Closing down Pandair App. Travel safe!\n')
self.plane.due_for_maintenance = self.original_maintenance_method
log.info(f'Behaviour of {self.plane} has returned to original')
log.debug(f'{time.asctime(time.localtime(time.time()))} Due for maintenance method for {self.plane} now back to original form')
| [
"noreply@github.com"
] | noreply@github.com |
0b313513a4e40c31df181c98f2e15203095458e5 | 9bfd93b93531c7d66335fffded2d00db0c1f8935 | /blog_censurfridns_dk/blog/translation.py | 9e8157edd7537f26fe16f55c391113b0d9039730 | [] | no_license | mortensteenrasmussen/blog.censurfridns.dk | 7d5da3961b6abf4124fddba7b1fdf5a4fc014c2c | 53939dee90ad5028256aace4c876d38695ec9e07 | refs/heads/master | 2021-01-14T14:23:17.443442 | 2016-08-29T20:11:22 | 2016-08-29T20:11:22 | 65,412,684 | 0 | 0 | null | 2016-08-10T20:03:31 | 2016-08-10T20:03:31 | null | UTF-8 | Python | false | false | 412 | py | from modeltranslation.translator import register, TranslationOptions
from .models import BlogPost
from taggit.models import Tag
@register(BlogPost)
class BlogPostTranslationOptions(TranslationOptions):
fields = ('title', 'body', 'slug')
required_languages = ('en', 'da')
@register(Tag)
class TaggitTranslations(TranslationOptions):
fields = ('name','slug')
required_languages = ('en', 'da')
| [
"thomas@gibfest.dk"
] | thomas@gibfest.dk |
fe7908aeabd98e2aefcd834864ebcb28ee36506e | a1bbb55b0be9aa69f456256b2107dbf5b35b640b | /Machine Learning Engineer Nanodegree/Core Curricula/Unsupervised Learning/PCA Mini-Project/pca/eigenfaces.py | a75c577b9d31ce3cf689740de1b8892fd21f3fae | [] | no_license | theodoreguo/Udacity | bc7599ed3f6b7e3d7bc03263443fb4b419cc03f0 | 8312d5aa35126cc0f222c6b4ae26b2bde6d9ae22 | refs/heads/master | 2021-07-05T12:17:51.412698 | 2018-01-27T08:07:12 | 2018-01-27T08:07:12 | 96,605,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,237 | py | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
original source: http://scikit-learn.org/stable/auto_examples/applications/face_recognition.html
"""
print __doc__
from time import time
import logging
import pylab as pl
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
np.random.seed(42)
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
###############################################################################
# Split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print "Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print "done in %0.3fs" % (time() - t0)
eigenfaces = pca.components_.reshape((n_components, h, w))
print "Projecting the input data on the eigenfaces orthonormal basis"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print "done in %0.3fs" % (time() - t0)
###############################################################################
# Train a SVM classification model
print "Fitting the classifier to the training set"
t0 = time()
param_grid = {
'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
# for sklearn version 0.16 or prior, the class_weight parameter value is 'auto'
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print "done in %0.3fs" % (time() - t0)
print "Best estimator found by grid search:"
print clf.best_estimator_
###############################################################################
# Quantitative evaluation of the model quality on the test set
print "Predicting the people names on the testing set"
t0 = time()
y_pred = clf.predict(X_test_pca)
print "done in %0.3fs" % (time() - t0)
print classification_report(y_test, y_pred, target_names=target_names)
print confusion_matrix(y_test, y_pred, labels=range(n_classes))
###############################################################################
# How much of the variance is explained by the first principal component? The second?
print(pca.explained_variance_ratio_[0])
print(pca.explained_variance_ratio_[1])
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
pl.title(titles[i], size=12)
pl.xticks(())
pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
pl.show()
| [
"jsguozitong@gmail.com"
] | jsguozitong@gmail.com |
e43dc1c4687a848c69bf66702ed69a995ab3c08f | 1291b1974904918accf29f87c2d303e679297e03 | /038comp2dates.py | 8f57b80d77fd8950587bb9796dea50f4a02bffb6 | [] | no_license | utkarsh192000/PythonAdvance | 853a594ed678c462d9604a8bd6300aa0449dd610 | e2e5f3059050b94f2e89ba681ddd6e3f01091b97 | refs/heads/master | 2023-04-05T09:42:34.942443 | 2021-04-04T19:07:18 | 2021-04-04T19:07:18 | 354,625,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py |
from datetime import *
d1=date(2021,3,23)
d2=date(2010,3,23)
print(d1<d2)
print(d1>d2)
print(d1==d2) | [
"iutkarshyadav192000@gmail.com"
] | iutkarshyadav192000@gmail.com |
7314a4d9d557e3691daec59e8b9e46b80d2baa04 | 3b000b60a40f40764714b1ef0165ef551c4e4ba2 | /tests/test_django_get_forms.py | cffed78d9d5fd8958cbba7c82584cddc56f590e3 | [
"BSD-3-Clause"
] | permissive | estebistec/django-get-forms | 7d765830faac297b07484f6a7cab9c08ac6e8d38 | dd97951766e2275c8b2b3155973192cbf465e968 | refs/heads/master | 2021-01-25T07:34:47.957983 | 2014-04-07T03:11:32 | 2014-04-07T03:11:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django_get_forms
----------------------------------
Tests for `django_get_forms` module.
"""
import unittest
from django_get_forms import django_get_forms
class TestDjango_get_forms(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main() | [
"estebistec@gmail.com"
] | estebistec@gmail.com |
8cb138171fb97a6c77d5430f0173327daaf88d9e | 67f1620411d959781c1e9e349a00a885cc3031c6 | /Pandas_session (2).txt | 8b08d7d6ef81c768c3009f4690462db533121980 | [] | no_license | Jyoti-27/Python_Study_Materials | 9e9bef5b589ea2875834a9c4fb50816ec2413f6e | 364d5b66540e12d9787cfd106e4c00b2ff7c84d8 | refs/heads/main | 2023-02-25T16:10:50.982517 | 2021-02-04T19:08:17 | 2021-02-04T19:08:17 | 336,055,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,494 | txt | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import numpy as np
import pandas as pd
# In[3]:
# Print the installed pandas version (useful when reproducing this notebook's output).
print(pd.__version__)
# IPython magic: opens the help pane for the pandas module.
# NOTE(review): get_ipython() only exists inside IPython/Jupyter; this line
# raises NameError when the file is run as a plain Python script.
get_ipython().run_line_magic('pinfo', 'pd')
# # Creation of Pandas Series
# In[4]:
# Build a Series from a plain list; pandas assigns the default integer index 0..3.
series = pd.Series([0.2, 0.5, 0.75, 1.6]) #call the constructor and send the values as a list
print("Pandas Series:\n" , series)
# Attributes of Pandas Series
# In[5]:
# unique() returns the distinct values as a numpy array (all values are distinct here).
series.unique()
# In[6]:
print("Series.values: ",series.values) #to find the values in a series
print("Index of Series: ", series.index)
print("Data type of Series.values: ",series.values.dtype)
print("Data type of Series", type(series.values))
print("Type of Series", type(series))
# In[7]:
s = pd.Series([5,4,3], index=[100, 200, 300]) #creating a series with a given index, index has to be given as 2nd parameter
print("Series is : \n", s, '\n Indices are : ', s.index)
# NOTE(review): this print reuses `series`, not `s` — presumably a copy/paste slip.
print("Data type of Series", type(series.values))
# ## Creating Series from a List
# In[8]:
# Create a pandas Series from a plain Python list (default / implicit integer index).
# BUG FIX: the original cell printed `ser_list` without ever defining it
# (NameError) — the pd.Series(...) construction line was lost in export.
List=[20, 15, 42, 33, 94, 8, 5] #Default indexing or Implicit Indexing
print("List is: " , List)
ser_list = pd.Series(List)  # implicit integer index 0..6
print("Series from List\n", ser_list)
print("Data type of Series", type(ser_list.values))  # numpy.ndarray backing store
print("Type of Series", type(ser_list))
# In[9]:
# Explicit (user-supplied) index labels; length must match the data.
# NOTE(review): depends on `List` defined in the previous cell.
print("Explicit Indexing: \n",
      pd.Series(List, index = ['i','ii','iii','iv','v','vi','vii']))
# In[10]:
#Update the whole index of a series
s1= pd.Series([0,1,2,3,4])
print(s1)
# Index labels may repeat ('E' appears twice).
s1.index=['A','B','C','E','E']
print(s1['E'])  # duplicate label -> returns a 2-element Series, not a scalar
# ## Creating Series from numpy array
# ### Numpy 1D array vs Series
# Array contains implicit indexing, series has explicit indexing along with some additional capabilities
# In[11]:
# A Series can wrap an existing numpy array directly.
arr = np.array([10, 20, 30, 40, 50]) #creating a numpy array
ser_arr = pd.Series(arr) #creating Series from a numpy array
print("Pandas Series:\n" , ser_arr)
print("Data type of Series", type(ser_arr.values))
print("Type of Series",type (ser_arr)) #Observe difference between dtype between List and array
#dtype tells memory allocated to one item or element of an array. it is an array method
#type() is like type(str); dtype tells memory allocated, like int32 / float64
# In[12]:
np_arr= np.random.random(5)  # 5 uniform random floats in [0, 1)
index= ['a','b', 'c','d', 'e'] #index
ser_arr=pd.Series(np_arr, index)  # index may be passed positionally as the 2nd argument
print("Series \n", ser_arr) #show that repetition is allowed in index labels
# ## Creating series from a dictionary
#
# In[21]:
# Build Series from dictionaries: the keys become the index, the values the data.
# FIX: renamed the variable `dict` -> `num_dict`; shadowing the builtin `dict`
# type breaks any later use of dict(...) in the same session. The printed
# output is unchanged (print(num_dict) shows the same dictionary repr).
num_dict = {'a':10, 'b':20, 'c':30, 'd':40, 'e':50} #creating a Dictionary
print(num_dict)
ser_dict = pd.Series(num_dict) # creating a Series from a Dictionary
print("Series is \n", ser_dict)
print("b" in ser_dict)  # membership tests against the index labels, like a dict key
print('Indices are : ', ser_dict.index,'\n Elements of the series are : ', ser_dict.values)
# In[22]:
d={'monkey':153 ,'rat':212 ,'cotton':334 ,'fan':98}
print("Dictionary is: ", d)
ser_d=pd.Series(d)
print("Series from Dictionary:\n", ser_d)
print('Indices are : ', ser_d.index,'\n Elements of the series are : ', ser_d.values)
# ## Indexing and Slicing
# In[23]:
# Accessing, Indexing and Slicing of Values in a series
#Since a series is a Numpy array we can access elements using the default numeric index like array
#array or list type of slicing
ser_arr = pd.Series([10, 20, 30, 40, 50,60])
print(ser_arr[3])  # single element by position
print(ser_arr[1:4]) #array or list type of slicing
print(ser_arr[:4])
print(ser_arr[3:])
print(ser_arr[1:6:2])  # slicing with a step of 2
print(ser_arr[: : 2])
ser_arr[3]=100 #update of a series this means series values are mutable
#print(ser_arr)
# In[24]:
# familiar attributes from NumPy arrays
print("\n ser_arr.size: ",ser_arr.size ,
      '\n ser_arr.shape: ',ser_arr.shape,
      '\n ser_arr.ndim: ',ser_arr.ndim,
      '\n ser_arr.dtype: ',ser_arr.dtype)
# In[25]:
#Another way to slice a series is to select elements by specifying the index
#Fancy Indexing
ser_slice=pd.Series(ser_arr, index=[3,2]) #select rows with the index
print(ser_slice)
print(ser_slice)  # NOTE(review): duplicate print — likely a copy/paste slip
# In[26]:
#Accessing series elements in a dictionary way. this is with explicit index or key
# NOTE(review): the name `dict` shadows the builtin dict type for the rest of
# the session — consider renaming.
dict = {'a':10, 'b':20, 'c':30, 'd':40, 'e':50} #creating a Dictionary
print("dictinary is ", dict)
ser_dict = pd.Series(dict) # creating a Series from a Dictionary
print("ser_dict['b']:\n", ser_dict['b']) #Accessing one element
print("ser_dict['b':'e']:\n ", ser_dict['b': 'e'])  # explicit-label slice: end label is INCLUSIVE
print("ser_dict[: 'd']:\n", ser_dict[:'d'])
print("ser_dict[['b', 'e']]:\n", ser_dict[['b', 'e']]) #Fancy Indexing
# In[27]:
s1= pd.Series([0,1,2,3,4], index=['A','B','C','D','E'])
print(s1)
#Series operations similar to sets and dictionary
print('A' in s1)  # membership checks the index labels, like `key in dict`
print(s1.keys()) #similarity to dictionary
print(list(s1.items())) #similarity to dictionary
print(s1.values) #access to dictionary values
# extending the series like dictionaries
s1['F'] = 5  # assigning to a new label appends a row
print("\n After updation : \n",s1)
# In[28]:
# masking on the values to extract subsets of data
s1= pd.Series([10,20,30,40,50], index=['A','B','C','D','E'])
print(s1)
print('Masking')
# boolean mask: use & / | on parenthesised comparisons (not `and` / `or`)
print("s1[(s1>10) & (s1<40)] \n", s1[(s1>10) & (s1<40)])
#print('Fancy indexing')
#print("s1[['A', 'C']] \n" , s1[['A', 'C']])
# Slicing may be the source of the most confusion. Notice that when slicing with an explicit index (i.e. data['a':'c']), the final index is included in the slice, while when slicing with an implicit index (i.e. data[0:4]), the final index is excluded from the slice.
# In[29]:
#Problem that may arise with implicit and explicit indexing
#Consider an Example where the explicit index is also a number
s = pd.Series([5,4,3,2], index=[100, 200, 300,400]) # index has to be given as 2nd parameter
print("Series is : \n", s, '\n Indices are : ', s.index)
#print(s['100':'300'])
print(s[1:3])
# Because of this potential confusion in the case of integer indexes, Pandas provides some special indexer attributes
# loc() - explicit indexing and iloc() always refers to the implicit Python-style index:
# In[30]:
#the loc() - indexing and slicing with explicit index
#the iloc() - indexing and slicing with implicit index
s = pd.Series([5,4,3,2], index=[100, 200, 300,400]) # index has to be given as 2nd parameter
print("Series is : \n", s, '\n Indices are : ', s.index)
print("Series with explicit index :s.loc[100:300]\n", s.loc[100:300]) #it will take the end value too
print("Series with implicit index: s.iloc[1:3] \n" , s.iloc[1:3])
print('Implicit access : s.iloc[2] \n' , s.iloc[2])
print('Explicit access : s.loc[200] \n' , s.loc[200])
# # Series Operations
# In[31]:
# Combine two Series and then delete a labeled row.
# FIXES:
#  * Series.append was deprecated and removed in pandas 2.0 -> use pd.concat
#    (behavior is identical: the two series are stacked, indexes preserved).
#  * drop() returns a NEW Series (it is not in-place); the original printed
#    the unchanged s4 after "dropping" 'c'. Assign the result back.
s1=pd.Series([6,7,8,9,5])
s3 = pd.Series([1,2,3,4], index = ['a','b','c','d'])
print('s1: \n',s1,'s3: \n',s3)
s4 = pd.concat([s1, s3])  # a new Series; s1 and s3 are left untouched
print('Appended series: \n',s4 )
# Delete a row with a particular label
s4 = s4.drop(['c'])
print("Series s4 after dropping 'c':\n", s4)
# ## Aritmetic Functions
# ### Elementwise Addition, Subtraction, Multiplication and Division
# In[32]:
# Element-wise arithmetic between two Series of different lengths: pandas
# aligns on the index, so positions 5 and 6 (present only in s2) yield NaN.
import pandas as pd
#Create two series
s1=pd.Series([6,7,8,9,5])
s2=pd.Series([0,1,2,3,4,5,7])
print('Series are : \n',s1, '\n', s2)
# In[33]:
# Series methods
# FIX: the addition example had been fused into the "# Series methods"
# comment during notebook export; restored as an executable statement.
print('Addition of series: \n', s1.add(s2)) #Elementwise addition
print('\n Subtraction of series: \n', s1.sub(s2)) #Elementwise Subtraction
print('\n Multiplication of series: \n', s1.mul(s2))
print('\n Division of series: \n', s1.div(s2))
print('Series are : \n',s1, '\n', s2) #Series remains unchanged
# ## Aggregate Functions - Which reduce the series to a single number
# In[34]:
# Aggregation methods reduce a Series to a single scalar.
# NOTE(review): these use `s2` defined in the previous cell.
print("\nMedian of series s2 is", s2.median())
print("\n Mean of series s2 is " , s2.mean())
print("\n Maximum of series s2 is", s2.max())
print("\n Minimum of series s2 is", s2.min())
# In[35]:
#Series with char/ string elements
string=pd.Series(['a','b','c','S','e','J','g','B','P','o'])
print('A Series wih String values: \n ', string)
# the .str accessor applies string methods element-wise
print('string.str.upper(): \n',string.str.upper())
print('string.str.lower(): \n',string.str.lower())
# In[36]:
#Avoid this
# Just as we can do slicing like an array on a series index we can also do set operations on an index but here
#index should not have repitition
# Index as ordered set
indA = pd.Index([1, 3, 5, 7, 9]) #we can just create a Index object
indB = pd.Index([2, 3, 5, 7, 11])
print(indA & indB) # intersection
print(indA | indB) # union
print(indA ^ indB) # symmetric difference
# In[37]:
#Dont do this #Index as an immutable array
#Acessing, Indexing and Slicing of Indices in a series
ser = pd.Series([10, 20, 30, 40, 50,60])
#Index is like an ordered set
print(ser.index)
print(ser.index[3])
print(ser.index[1:4]) #array or list type of slicing
print(ser.index[:4])
ser.index[3]=10 #index array cannot be updated
# # Pandas DataFrames
# * Table with indexed rows and columns
# * can be seen as a sequence of aligned Series object, i.e., share same index
# * generalization of NumPy 2D Arrays
# * with heterogenous and/or missing data
# ## Creation of DataFrames
# In[2]:
#Dataframe as a stack of Series. we create two columns using series and then make a DataFrame
population_d= {'California': 3833, 'Texas': 8193,
               'New York': 6511, 'Florida': 5560, 'Ohio': 1135} #Statewise population
print(population_d, type(population_d))
population = pd.Series(population_d)
print(population)
# In[3]:
area_d = {'California': 423967, 'Texas': 695662, 'New York': 141297,
          'Florida': 170312, 'Ohio': 149995}
area = pd.Series(area_d)
print(area)
# In[4]:
# Two Series sharing the same index become aligned columns of a DataFrame.
states = pd.DataFrame({'Population': population, 'Area': area}) #two series with same index
print("Data Frame of States: \n", states)
# ## DataFrame Attributes
# In[51]:
# Explore DataFrame attributes and the loc/iloc accessors.
# FIX: the original rebuilt `states` with ONLY the 'Area' column, which makes
# the later positional access states.iloc[3,1] raise IndexError (there is no
# column 1). Rebuild with both columns, matching the earlier In[4] cell.
states = pd.DataFrame({'Population': population, 'Area': area})
#print("Data Frame of States: \n", states)
states
print('\n', states.index) #row indices
print('\n', states.columns) #column names
print('\n', states.values)  # 2-D numpy array of the cell values
print('\n', states['Area']) #access a column on a DataFrame like a key value pair
print('\n',states.Area) #Columns can also be accessed as an Attribute
#print('\n',states.Area is states['Area'])
print('\n',states.loc['California']) #accessing row of a dataframe with explicit index
print('\n', states.iloc[3])  # same kind of access, by row position
print('\n', states.loc['California','Area'])  # scalar: explicit row & column labels
print('\n', states.iloc[3,1])  # scalar: positional row 3, column 1 ('Area')
# using Numpy Arrays
# In[52]:
import numpy as np
# 6x4 matrix of draws from the standard normal distribution
num_arr=np.random.randn(6,4) #random selection of numbers following a standard normal distribution
print("Array is : \n", num_arr)
cols=['A','B','C','D'] #arrays will not have index and columns
# Supply both column labels and row labels when wrapping a bare ndarray.
df1=pd.DataFrame(num_arr, columns=cols, index = ['i', 'ii', 'iii', 'iv', 'v', 'vi'])
#array of values, index, column
print('\n Data Frame from numpy array is : \n')
df1  # bare expression: displayed only inside a notebook; a no-op in a script
# ### DataFrame as a Specialized Dictionary
# * DataFrame maps a column name to a Series of column data.
# * key is a column name and value is a series
# In[53]:
# create a dataframe using a dictionary of Lists, values are lists and column names are keys
data= {'city' : ['Bombay', 'Chennai', 'Chennai', 'Delhi', 'Mysore' ], 'year' : [2001, 2005, 2003, 2001, 2000],
       'pop' : [25, 35, 20, 40, 15]}
df2= pd.DataFrame(data)
print(df2)
#observe index is assigned automatically
# In[54]:
# create a dataframe using a dictionary of Lists, values are lists and column names are keys
data= {'city' : ['Bombay', 'Chennai', 'Chennai', 'Delhi', 'Mysore' ], 'year' : [2001, 2005, 2003, 2001, 2000],
       'pop' : [25, 35, 20, 40, 15]} #this will have only columns no index
labels=['a', 'b', 'c', 'd', 'e']
df2= pd.DataFrame(data, index=labels)
print(df2)
# here the explicit labels are used as the row index
# In[55]:
#Exercise
#create a dataframe from a list of dictionaries
# Each dict is one row; missing keys (no 'd' in the second dict) become NaN.
df3=pd.DataFrame([{'a': 1, 'b': 2, 'c':3, 'd':4}, {'a': 10, 'b': 20, 'c': 30}, {'a': 11, 'b': 21, 'c': 41, 'd': 51}])
print(df3) # creating a dataframe from a list of dictionaries
# ## Visualizing DataFrames
# In[56]:
#First Create a DataFrame
data={'Animals': ['cat','cat','turtle','dog','dog','cat','turtle','cat','dog','dog'],
'Age': [2.5,3,0.5,np.nan,5,2,4.5,np.nan,7,3],
'Visits' : [1,3,2,3,2,3,1,1,2,1],
'Priority' : ['y','y','n','y','n','n','n','y','n','n']}
labels=['a','b','c','d','e','f','g','h','i','j']
animals_data=pd.DataFrame(data,index=labels)
print(animals_data)
print(type(animals_data)) #type of the dataframe
# ### DataFrame Attributes - index, cols, values, datatype of values
# In[57]:
print("\n animals_data.index:\n ", animals_data.index)
print("\n animals_data.columns:\n", animals_data.columns)
print("\n animals_data.values:\n", animals_data.values) #will show only values without index and column names
print("\n animals_data.dtypes:\n", animals_data.dtypes) #will show the datatype of each column
# In[ ]:
### Visualizing DataFrames
# In[58]:
print(animals_data) #Visualizing complete may not be feasible in real data
# In[59]:
print(animals_data.head()) # will display top 5 lines of the dataFrame
print(animals_data.tail()) # will display bottom 5 lines of the dataframe
# ### Details about the DataFrame
# In[60]:
# Information about the whole dataframe
# NOTE(review): info() prints its report itself and returns None, so the
# surrounding print also emits a trailing "None".
print( animals_data.info()) #nrows, ncols, index, datatype of each column, number of nonnull values
#statistical data of dataframe
print('\n Statistical Description : \n',animals_data.describe())
#mean std max min quartiles for columns with numeric type
print('\n Description for object values: \n',animals_data.describe(include = ['object']))
#count, unique values, mode , freq
# ## DataFrame Operations
# * Accessing/Slcing Data in a DataFrame
# * Indexing into a DataFrame is for retrieving one or more columns either with a single value or sequence
# In[61]:
#Exercise
print("\n animals_data.index:\n ", animals_data.index) #accessing index
print("\n animals_data.columns:\n", animals_data.columns) #accessing column names
#Accessing columns of a DataFrame 2 ways
print("\n animals_data['Animals']:\n",animals_data['Animals'] )
print("\n animals_data['Age'] :\n", animals_data['Age'])
print("\n animals_data.Animal:\n",animals_data.Animals)
animals_data[['Age','Visits']] #Displaying particular Columns
print("\n animals_data.loc['b', 'Age']:\n",animals_data.loc['b', 'Age']) #accessing by row and column
animals_data.loc['b', 'Age'] =50  # in-place update of one cell
print("\n animals_data.loc['b', 'Age']:\n",animals_data.loc['b', 'Age']) #updating a value in a Dataframe
# In[62]:
#Accessing rows of a DataFrame by implicit and explicit index
print("\n animals_data.loc['a', :] :\n", animals_data.loc['a', :]) #values of a row are given as columns
print("\ Rows 1 to 3 using slicing:\n",animals_data.iloc[1:3, 2:3] ) #iloc for implicit indexing
# In[63]:
# Exercise
#Accessing individual elements in the table by row and column
print(animals_data)
print("\n animals_data.loc['b', 'Age']:\n",animals_data.loc['b', 'Age']) #accessing by row and column
animals_data.loc['b', 'Age'] =50
print("\n animals_data.loc['b', 'Age']:\n",animals_data.loc['b', 'Age']) #updating a value in a Dataframe
print("\n animals_data.loc['b', 'Age']:\n",animals_data.iloc[2,2]) #accessing by row and column position
print(animals_data.iloc[:5, 2:4] )
print(animals_data.loc['b':'e', 'Animals':'Visits'] )  # label slices: ends inclusive
# In[64]:
print("Transpose of the Data Frame :")
animals_data.T  # transpose; bare expression displays only in a notebook
# ### Sorting DataFrames
# * By default is ascending order
# * Mandatory to provide (by = ' '), Sort by one column
# * Can also combine sorting with slicing
# In[65]:
#Methods in DataFrame object Sort By Values
print(animals_data)
print("\n Sorting the Data Agewise:\n", animals_data.sort_values(by = 'Age', ascending = False)) #sort by which column
#Any missing value is sorted at end by default
animals_data.sort_values(by='Age')[1:4]  # result not stored; displays only in a notebook
#Sort by index
# axis=1 sorts the COLUMN labels alphabetically (Age, Animals, Priority, Visits)
print("\n Sorting the Data by Index:\n", animals_data.sort_index(axis=1))
# ## ReIndexing DataFrames
# * Reindexing allows you to change/add/delete the index on a specified axis. This returns a copy of the data.
# In[66]:
#. Create a new index Reindexing
print(animals_data)
# reindex returns a NEW DataFrame with the rows rearranged to the given label
# order ('h' is omitted here, so that row is absent from the copy).
animals_data_reindex = animals_data.reindex(['d', 'e', 'g', 'f', 'a', 'b', 'c', 'i', 'j'])
# FIX: the original printed `animals_data.reindex` — the bound method object,
# not a DataFrame (the parentheses/argument were missing). Print the copy.
print("\n ReIndexed Data: \n", animals_data_reindex) #will not modify original data
print("\n Sorted by row index:\n", animals_data_reindex.sort_index(axis=0))
print("\n Sorted by Column Index:\n", animals_data_reindex.sort_index(axis=1))
# ### Creating a copy of the DataFrame
# In[67]:
# .copy() makes an independent copy: later edits to it do not touch the original.
animals_data_c=animals_data.copy()
print("\n Copy of animals_data:\n", animals_data_c)
# ### Deleting a row or Column of a DataFrame
# * The drop() function modifies the size or shape of a Series or DataFrame,
# * can manipulate an object in-place without returning a new object
# * Be careful with inplace as it destroys any data that is dropped
#
# In[68]:
# Demonstrate drop() on rows and on columns.
print(animals_data)
# drop() returns a modified COPY; animals_data itself is unchanged.
# (also fixed the "'a;" typo in the message)
print("Drop rows with names 'a' and 'b':\n", animals_data.drop(['a', 'b'])) #dropping rows
#print(animals_data)
#to drop the rows permanently use inplace = True
# FIX: with inplace=True drop() mutates in place and returns None, so the
# original print(..., animals_data_c.drop(..., inplace=True)) just printed
# "None". Do the in-place drop first, then print the mutated frame.
animals_data_c.drop(['a', 'b'], inplace=True)
print(" Animals_data _c with inplace = true: \n", animals_data_c)
print(animals_data.drop('Visits', axis=1))
#dropping column columns are axis=1 for drop() default is row
#So if we dont mention axis = 1 it will search for a row with name 'Visits'
print(animals_data.drop('Visits', axis='columns'))
# ### Aggregate Functions
# * All aggregation functions discussed in Series can be performed on columns of a DataFrame as each column is like a Series
# In[69]:
#Why doing an Aggregation on a Row doesnt make sense
print("Mean of the Dataframe is: \n",animals_data.mean()) #mean of values in columns containing numeric data
print("\nMean of 'Age' is: ",animals_data[['Age']].mean())
print("\nTotal visits :",animals_data[['Visits']].sum())
print("\nMax visits: ",animals_data[['Visits']].max())
print("\nMin visits: ",animals_data[['Visits']].min())
print("\n Index of Max visits: ",animals_data[['Visits']].idxmax())
print("\n Index of Min visits: ",animals_data[['Visits']].idxmin())
print("\nSum: \n",animals_data.sum()) #for strings sum is string concatenation
# ### Handling Missing Values
# * Difference between None and np.nan
# *For Series and DataFrame both None and np.nan are handled as np.nan
# * To detect missing values the isnull() and notnull() functions in Pandas are used
# *Filling of Missing Values
# In[70]:
#Trouble with missing data
#Why we need to drop missing values
#Trouble with missing data
#Why we need to drop missing values
import numpy as np
arr1 = np.array([1, None, 3, 4]) #observe None is a NoneType -> array dtype becomes object
print(arr1, arr1.dtype)
# Intentional classroom error: summing an object array containing None
# raises TypeError (int + NoneType), stopping this cell here.
print(arr1.sum()) #unsupported operand type(s) for +: 'int' and 'NoneType
print(arr1.mean())
arr2 = np.array([1, np.nan, 3,4]) #np.nan is a float type
print(arr2, arr2.dtype)
print(arr2.sum()) #so np.nan is handled by numpy but not None (result is nan, no error)
print(arr2.mean())
# In[71]:
print(pd.Series([1, np.nan, 2, None]))  # pandas converts None to NaN (float dtype)
ser_null = pd.Series(range(5,8), dtype=int)
print('\n',ser_null)
ser_null[0] = None  # assigning None upcasts the int series to float with NaN
print('\n',ser_null)
print('\n',ser_null.sum())  # pandas aggregations skip NaN by default
#casting the integer array to floating point, Pandas automatically converts the None to a NaN value.
#Series datatype converts a None also to a nan and it can do the aggregation even with the nan values .
#it ignores the nan values
# In[72]:
#Dataframe aggregation methods ignore nan values and find the sum
data = pd.DataFrame([[1, np.nan, 2],
                     [2, 3, 5],
                     [np.nan, 4, 6]])
print(data)
print(data.sum()) #sum by default is column sum axis =0
print(data.sum(axis=1)) #sum across columns
# In[ ]:
# ### Detecting Null Values
# * To detect missing values the isnull() and notnull() functions in Pandas are used
# In[73]:
# isnull()/notnull() produce boolean masks with the same shape as the frame.
print(pd.isnull(animals_data)) #isnull() function in pandas library
#print(animals_data.isnull()) #isnull() in DataFrame object
#Observe that Age has two missing values
print(pd.notnull(animals_data))
# In[74]:
# we do this with a simpler example
data = pd.DataFrame([[1, np.nan, 2],
                     [2, 3, 5],
                     [np.nan, 4, 6]])
print('\n data.isnull(): \n',data.isnull())
print('\n data.notnull(): \n',data.notnull())
data[data.notnull()]  # boolean-mask indexing; NaN cells remain NaN
# ## Dropping Null values
# * dropna() drops all Null values - might drop good data
# * We specify how or thresh parameters
# * DataFrame.dropna(axis=0, how='any', thresh=None, subset=None, inplace=False)
# * how(any,all)
# * ‘any’ : If any NA values are present, drop that row or column.
# * ‘all’ : If all values are NA, drop that row or column. thresh - 3 means requires that many nonNA values
# * inplace is True or False
# * For finer-grained control, the thresh parameter specifies a min no. of non-null values for the row/column to be kept
# In[75]:
#Dropping null values
print(data.dropna())
data.dropna(axis='rows', thresh=2) #axis =0 means drop rows which have missing values, 1 cols which have missing values
data.dropna(axis='columns', thresh = 3)
# ### Filling Missing values
# * We may choose to fill in different data according to the data type of the column
# * Both numpy.nan and None can be filled in using pandas.fillna().
# * For categorical columns (string columns), we want to fill in the missing values with mode.
# * For numerical columns, we want to fill in the missing values with mean
# * DataFrame.fillna(value=None, method=None, axis=None, inplace=False
# In[76]:
data = pd.DataFrame([[1, np.nan, 2],
[2, 3, 5],
[np.nan, 4, 6]])
print(data)
#print(data.fillna(0)) we can fill with column mean or mode for categorical data
#print(data.fillna(method='ffill'))
print(data.fillna(method='bfill'))
print(data) # original data will not change, to change we need to set inplace = True
#find mean of each column and fill each individually
# ### Reading a csv and excel file into a DataFrame
# In[81]:
#First Create a DataFrame
data={'Animals': ['cat','cat','turtle','dog','dog','cat','turtle','cat','dog','dog'],
'Age': [2.5,3,0.5,np.nan,5,2,4.5,np.nan,7,3],
'Visits' : [1,3,2,3,2,3,1,1,2,1],
'Priority' : ['y','y','n','y','n','n','n','y','n','n']}
labels=['a','b','c','d','e','f','g','h','i','j']
animals_data=pd.DataFrame(data,index=labels)
print(animals_data)
#print(animals_data.fillna(0))
print("\n\n",animals_data.fillna(animals_data['Age'].mean()))
#observe the data type of each column
# In[6]:
#First write dataframe to csv then read it back
data={'Animals': ['cat','cat','turtle','dog','dog','cat','turtle','cat','dog','dog'],
'Age': [2.5,3,0.5,np.nan,5,2,4.5,np.nan,7,3],
'Visits' : [1,3,2,3,2,3,1,1,2,1],
'Priority' : ['y','y','n','y','n','n','n','y','n','n']}
labels=['a','b','c','d','e','f','g','h','i','j']
animals_data=pd.DataFrame(data,index=labels)
animals_data
#data.to_csv('animal.csv')
# In[8]:
animals_data.to_csv('animal.csv')
# In[84]:
df_animal=pd.read_csv('animal.csv')
df_animal.head(3)
# In[90]:
animals_data.to_excel('animals.xlsx',sheet_name='sheet1')
#animals_data.to_excel('animals.xlsx',sheet_name='sheet1')
df_animal2=pd.read_excel('animals.xlsx','sheet1', index_col=None)
df_animal2
# In[92]:
animals_data.to_excel('animal.xlsx',sheet_name='sheet1')
df_animal2=pd.read_excel('animal.xlsx', 'sheet1',
index_col=None, na_values=['NA'])
df_animal2
# ### Combining DataSets
# * Pandas concatenation preserves indices, even if it results in duplicate indices.
# * Series Concatenation
# * DataFrame Concatenation : Concatenation one below another (axis=0) , Concatenation side by side (axis=1)
# * Ignore Index while concatenation
# In[15]:
# Series concatenation: pd.concat keeps the original indices of both inputs.
ser1 = pd.Series(['A', 'B', 'C'], index=[1, 2, 3])
ser2 = pd.Series(['D', 'E', 'F'], index=[4, 5, 6])  # test with the same index
print("Series 1 : \n", ser1, "\nSeries 2 : \n", ser2)
print("Concatenating series: \n", pd.concat([ser1, ser2]))
# In[ ]:
# * DataFrame Concatenation
# (fix: the line above had lost its leading '#', turning this markdown
# bullet into a bare `*` expression -- a SyntaxError for the whole script)
# In[17]:
df1 = pd.DataFrame({'A': ['axe', 'art', 'ant'], 'B': ['bat', 'bar', 'bin'], 'C': ['cap', 'cat', 'car']},
                   index=[1, 2, 3])
df2 = pd.DataFrame({'D': ['dam', 'den', 'dot'], 'E': ['ear', 'eat', 'egg'], 'F': ['fan', 'fog', 'fat']},
                   index=[2, 3, 6])
print("Data frame 1 : \n", df1, '\n Data Frame 2: \n', df2)
print("Concatenating Data Frames: \n", pd.concat([df1, df2], axis=0))  # axis=0 stacks one below the other
print("Concatenating Data Frames along axis 1: \n", pd.concat([df1, df2], axis=1))
# axis=1 aligns on the index, so only common indices (2 and 3) get values from both frames
# ##### Ignoring the index
# In[93]:
df_concat = pd.concat([df1, df2], ignore_index = True)
print("Concatenation of dataframes while ignoring the index: \n", df_concat)
# ### Joining DataFrames
# * Inner Join - Concatenation of common columns ie intersection of two dataframes
# * concat is like outer join
# * Using append() Function
# In[20]:
print(" Inner Join on dataframes : \n", pd.concat([df1, df2], join = 'inner')) #no overlapping columns
# In[ ]:
#exercise
df3 = pd.DataFrame({'B' : ['ball', 'box' , 'band'], 'C': ['cat', 'calendar', 'cone'],'G' : ['grain', 'grape', 'goat']} ,
index =[ 1, 4, 2])
print("Data Frame 1 : \n", df1, "Data Frame 3 : \n", df3)
print(" Joining Data frmes: \n" , pd.concat([df1, df3])) #stacking one below another
print(" Joining Data frmes along axis = 1: \n" , pd.concat([df1, df3], axis = 1))
# In[22]:
print(" Inner Join on dataframes : \n", pd.concat([df1, df3], join = 'inner'))
# #### The append()
# * the append() method in Pandas does not modify the original object—instead, it creates a new object with the combined data
# * not very efficient method as a new index and data buffer is created
# In[95]:
print(df1)
print(df2)
print(df1.append(df2)) # append is same as concat stocks dataframes one below another
print(df1) # Original DataFrames are not update
print(df2) # a new ccatenated dataframe is created
# ### Merge Operations
# * Pandas has join operations identical to SQL
# * pd.merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=True)
# * left, right- dataframes, One of 'left', 'right', 'outer', 'inner
# * on - Column to join, default is common column, left_on- column in left dataframe to use as keys
# * left_index- True means use left dataframe index as join key, sort - True Sort result by joining Keys
# In[96]:
df_stud = pd.DataFrame({'St_id': [101,102,103,104,105],'Branch': ['IT','CS','ECE','CS','Mech']})
df_fac = pd.DataFrame({'F_id' : [110,120,130,140,150 ],'F_name' : ['A', 'B', 'C', 'D', 'E'],'Branch': ['ECE','Mech', 'EEE', "IT", 'CS'] })
print("Student dataframe: \n", df_stud,'\nFaculty Dataframe :\n', df_fac)
df_merge = pd.merge(df_stud, df_fac)
print("Merged dataframe : \n ", df_merge) #Merge on a common column
#works only if both dataframes have the specified column Default is inner
#print("Merged dataframe : \n ", pd.merge(df_stud, df_fac, on = 'Branch') )
# * When similar columns have different names in different dataframes
# In[5]:
df_fac1 = pd.DataFrame({'F_name' : ['A', 'B', 'C', 'D', 'E'],'Stream': ['ECE','Mech', 'EEE', "IT", 'CS'] })
print("Student Details : \n", df_stud, 'Faculty Details: \n', df_fac)
print("Merged Dataframes : \n", pd.merge(df_stud, df_fac1, left_on = 'Branch', right_on = 'Stream'))
print("Student Details : \n", df_stud, 'Faculty Details: \n', df_fac)
# In[105]:
# the redundant column can also be dropped
pd.merge(df_stud, df_fac, left_on = 'Branch', right_on = 'Stream').drop('Stream', axis = 1)
# # Merge over indices
# * left_index and right_index flags can be used to perform merge over the similar index of the dataframes.
# * Also, join( ) method performs the merge by default on indices
# In[6]:
#print('\n Using merge on indices: \n',pd.merge(df_stud, df_fac, left_index=True, right_index=True))
#print('\n Using join( ): \n', df_stud.join(df_fac))
#If we use default index branch is repeated. better way is to set the common column as index
df1 = df_stud.set_index('Branch')
df2 = df_fac1.set_index('Stream')
print("Student Details : \n", df1, 'Faculty Details: \n', df2)
print('\nUsing merge on indices: \n',pd.merge(df1, df2, left_index=True, right_index=True))
print('\nUsing join( ): \n', df1.join(df2))
#DataFrame has a convenient join method for merging by index
# In[ ]:
#### Different types of joins can also be specified like 'inner' , 'outer', 'left' and 'right' using how keyword
# In[98]:
# to see the effect of outer Join which is like Union we need to add different elements to Branch
df_stud = pd.DataFrame({'St_id': [101,102,103,104,105,106],'Branch': ['IT','CS','ECE','CS','Mech', ' EEE']})
df_fac = pd.DataFrame({'F_id' : [120,130,140,150 ],'F_name' : ['B', 'C', 'D', 'E'],'Branch': ['Mech', 'EEE', "IT", 'CS'] })
print("Student dataframe: \n", df_stud,'\nFaculty Dataframe :\n', df_fac)
df_merge = pd.merge(df_stud, df_fac, on = 'Branch', how='right')
print("Merged dataframe : \n ", df_merge) #Merge on a common column
# #### One-to-one join
# * uses a common column as the key to join the dataframe.
# * the order of values in each column in not necessarily maintained
# In[99]:
df1 = pd. DataFrame({'key' :['b', 'a', 'd','e'], 'data1': range(4)}) #has unique rows labels
df2 =pd. DataFrame({'key' :['a', 'b', 'd'], 'data2': range(3)}) # has unique row labels
print("DataFrame1 : \n", df1, '\nDataFrame2 :\n', df2)
#Example of one to one merge situation
#print("Inner Join:\n", pd.merge(df1, df2, on ='key', how = 'inner', sort=True)) # intersection of keys
#print("Outer Join:\n", pd.merge(df1, df2, on ='key', how = 'outer', sort=True)) # union of keys
#print("Left Join:\n", pd.merge(df1, df2, on ='key', how = 'left', sort=True)) # keys from left dataframe
#print("Right Join:\n", pd.merge(df1, df2, on ='key', how = 'right', sort=True)) #
#Here left and outer is same and Right and Inner is same
# ##### Many-to-one joins
# - one of the two key columns contains duplicate entries.
# - the merged DataFrame preserves the duplicate entries.
# In[100]:
df1 =pd. DataFrame({'key' :['b', 'b', 'a', 'c', 'a', 'a'], 'data1': range(6)}) #has multiple rows labelled a and b
df2 = pd. DataFrame({'key' :['a', 'b', 'd'], 'data2': range(3)})
print("DataFrame1 : \n", df1, '\nDataFrame2 :\n', df2)
#Example of many to one merge situation
#No of rows will be 5X2
#print("Inner Join:\n", pd.merge(df1, df2, on ='key', how = 'inner', sort=True)) # intersection of keys
#print("Outer Join:\n", pd.merge(df1, df2, on ='key', how = 'outer', sort=True)) # union of keys
#print("Left Join:\n", pd.merge(df1, df2, on ='key', how = 'left', sort=True)) # keys from left dataframe
#print("Right Join:\n", pd.merge(df1, df2, on ='key', how = 'right', sort=True)) #
#Here left and outer is same and Right and Inner is same
# ##### Many-to-many join
# * when the key column in both the left and right array contins duplicates
# In[101]:
df1 =pd. DataFrame({'key' : ['b', 'b', 'a','c', 'a', 'a', 'b'], 'data1': range(7)}) #has multiple rows labelled a and b
df2 = pd.DataFrame({'key' : ['a', 'b', 'a', 'b', 'd'], 'data2': range(5)})
print("DataFrame1 : \n", df1, '\nDataFrame2 :\n', df2)
#Example of many to one merge situation
#No of rows in the dataframe = 7x4 for inner
#No of rows will be 5X2
print("Inner Join:\n", pd.merge(df1, df2, on ='key', how = 'inner', sort=True)) # intersection of keys
#print("Outer Join:\n", pd.merge(df1, df2, on ='key', how = 'outer', sort=True)) # union of keys
#print("Left Join:\n", pd.merge(df1, df2, on ='key', how = 'left', sort=True)) # keys from left dataframe
#print("Right Join:\n", pd.merge(df1, df2, on ='key', how = 'right', sort=True)) #
#Here left and outer is same and Right and Inner is same
# In[5]:
df1 =pd. DataFrame({'key' :['b', 'b', 'a', 'c', 'a', 'a'], 'data1': range(6)}) #has multiple rows labelled a and b
df2 = pd. DataFrame({'key' :['a', 'b', 'd'], 'data2': range(3)})
print("DataFrame1 : \n", df1, '\nDataFrame2 :\n', df2)
#Example of many to one merge situation
#No of rows will be 5X2
#print("Inner Join:\n", pd.merge(df1, df2, on ='key', how = 'inner', sort=True)) # intersection of keys
print("Outer Join:\n", pd.merge(df1, df2, on ='key', how = 'outer', sort=True)) # union of keys
#print("Left Join:\n", pd.merge(df1, df2, on ='key', how = 'left', sort=True)) # keys from left dataframe
#print("Right Join:\n", pd.merge(df1, df2, on ='key', how = 'right', sort=True)) #
#Here left and outer is same and Right and Inner is same
# In[6]:
# fix: both cells below assigned to `f1` but then printed/merged `df1`,
# silently reusing a df1 left over from an earlier cell; the intended
# variable is df1 (the comments describe the frame being built here).
df1 = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'a'], 'data1': range(6)})  # has multiple rows labelled a and b
df2 = pd.DataFrame({'key': ['a', 'b', 'd'], 'data2': range(3)})
print("DataFrame1 : \n", df1, '\nDataFrame2 :\n', df2)
# Example of many to one merge situation
# print("Inner Join:\n", pd.merge(df1, df2, on='key', how='inner', sort=True))  # intersection of keys
# print("Outer Join:\n", pd.merge(df1, df2, on='key', how='outer', sort=True))  # union of keys
print("Left Join:\n", pd.merge(df1, df2, on='key', how='left', sort=True))  # keys from left dataframe
# print("Right Join:\n", pd.merge(df1, df2, on='key', how='right', sort=True))
# Here left and outer is same and Right and Inner is same
# In[7]:
df1 = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'a'], 'data1': range(6)})  # has multiple rows labelled a and b
df2 = pd.DataFrame({'key': ['a', 'b', 'd'], 'data2': range(3)})
print("DataFrame1 : \n", df1, '\nDataFrame2 :\n', df2)
# Example of many to one merge situation
# print("Inner Join:\n", pd.merge(df1, df2, on='key', how='inner', sort=True))  # intersection of keys
# print("Outer Join:\n", pd.merge(df1, df2, on='key', how='outer', sort=True))  # union of keys
# print("Left Join:\n", pd.merge(df1, df2, on='key', how='left', sort=True))  # keys from left dataframe
print("Right Join:\n", pd.merge(df1, df2, on='key', how='right', sort=True))
# Here left and outer is same and Right and Inner is same
# In[102]:
series = pd.Series([2,3,4,5])
print(series[2])
series[2]=7.8
print(series[2])
print(series)
# ### pandas.merge connects rows in DataFrames based on one or more keys. This
# * will be familiar to users of SQL or other relational databases, as it implements database join operations
# * left LEFT OUTER JOIN Use keys from left object
# * right RIGHT OUTER JOIN Use keys from right object
# * outer FULL OUTER JOIN Use union of keys
# * inner INNER JOIN Use intersection of keys
#
# • pandas.concat concatenates or "stacks" together objects along an axis.
# The merge() method is equivalent to the SQL join.
# fix: `dir( String)` raised NameError -- Python has no `String`; the
# built-in string type is `str`.
dir(str)
# In[9]:
# fix: `Series` is not imported as a bare name in this script; it is
# available as pd.Series.
dir(pd.Series)
# In[104]:
### Difference between axis=0 and axis=1
#Dataframe aggregation methods ignore nan values and find the sum
data = pd.DataFrame([[1, 4, 2],
[2, 3, 5],
[7, 4, 6]])
print(data)
print(data.sum()) #sum by default is column sum axis =0 columnwise sum
print(data.sum(axis=1)) #roqwwise sum
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
ad595cf02acfda9585e683c5ef2199d598f30910 | c7d354658353e20f46432b9c1a40744224365f4c | /test_moby.py | 3d5c95b8f6eefd32217f42923f9c7042db653033 | [] | no_license | c11z/or-the-whale | baf35db79621d06e1da5ee15f4a64ba2701cff90 | d76036aaa03ea5497a6fa3d61efb58de096b43ef | refs/heads/master | 2022-09-14T10:30:28.942779 | 2020-03-23T01:26:19 | 2020-03-23T01:26:19 | 152,700,060 | 0 | 0 | null | 2022-08-22T23:42:27 | 2018-10-12T05:48:18 | HTML | UTF-8 | Python | false | false | 397 | py | from moby import Moby
TEST_PATH = "/script/data/test.txt"
def test_init_no_limit():
    # With no chapter limit, the two-chapter test fixture should produce
    # both chapter entries, each with a 12-element ch_doc entry
    # (presumably tokens -- confirm against the Moby class).
    t = Moby("testbook", TEST_PATH)
    assert len(t.ch_text.keys()) == 2
    assert len(t.ch_doc["chapter_1"]) == 12
    assert len(t.ch_doc["chapter_2"]) == 12
def test_init_w_limit():
    # The third positional argument (1) limits parsing to a single chapter
    # of the same fixture.
    t = Moby("testbook", TEST_PATH, 1)
    assert len(t.ch_text.keys()) == 1
    assert len(t.ch_doc["chapter_1"]) == 12
| [
"corydominguez@gmail.com"
] | corydominguez@gmail.com |
5a8c7b455587292892a64bea88f4dad7baa836f6 | 271e44ac22fc2778bcbf1d25f6e593864cac1a28 | /10_1/textread.py | 7803e9638d3b72e78b3e684a60a520be8d04f59f | [] | no_license | yinhao5969/Python-study | d018bbc639922f6259bcb999dbee991adc4687bc | 2316f487d817f3474a43bcafd2c23195d0bb39d3 | refs/heads/master | 2020-09-17T01:34:16.018193 | 2019-12-14T05:47:23 | 2019-12-14T05:47:23 | 223,948,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | #10-1
file_name = 'python do what.txt'
with open(file_name) as file_object:
file = file_object.read()
print(file)
with open(file_name) as file_object:
lines = file_object.readlines()
for line in lines:
print(line)
with open(file_name) as file_object:
for item in file_object:
print(item)
#10-2
file_name = 'python do what.txt'
with open(file_name) as file_object:
for line in file_object:
print('befor',line)
line = line.replace('python', 'c')
print('after',line)
#10-3 10-4
while True:
visitor = input('Please input your name\n')
if visitor == 'q':
break
file_name = 'visitors.txt'
with open(file_name, 'a') as file_object:
file_object.write(visitor+'\n')
#10-5
while True:
reason = input('why you like to program?\n')
if reason == 'q':
break
file_name = 'reason.txt'
with open(file_name, 'a') as file_object:
file_object.write(reason+'\n')
| [
"yinhao5969@icloud.com"
] | yinhao5969@icloud.com |
0282baa646e2c48a49c37b3d4c7c57ce6221be3e | 8712509880af7544115733099861ac3bb9e818cf | /DeepLearning_AndrewNg_coursera/1neural-network-deep-learning/assignment1/test2.1.py | 5cbecc7f8c8daf2293ddfc4a919e4cde7fc62f55 | [] | no_license | most-corner/deeplearning | 94152b111ccefeaa4317093f62e2616cad90d4af | aefe5542cc46f285b4ff34e90212a0e29cc2f5c5 | refs/heads/master | 2020-06-11T22:30:44.216540 | 2019-07-01T14:21:10 | 2019-07-01T14:21:10 | 194,107,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | # L1
import numpy as np
def L1(yhat, y):
    """Compute the L1 loss between predictions and true labels.

    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the sum of absolute differences |yhat - y|
    """
    return np.abs(yhat - y).sum()


yhat = np.array([.9, 0.2, 0.1, .4, .9])
y = np.array([1, 0, 0, 1, 1])
print("L1 = " + str(L1(yhat, y)))
def L2(yhat, y):
    """Compute the L2 loss between predictions and true labels.

    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the dot product of (yhat - y) with itself, i.e. the sum of
            squared differences
    """
    diff = yhat - y
    return np.dot(diff, diff)
print("L2 = " + str(L2(yhat, y)))
| [
"750315795@qq.com"
] | 750315795@qq.com |
53cc948a777e59d5aae918f523f52b94ac11c8e3 | edab237d44998a3671beac415bedd97f870bafd9 | /scribeui_pyramid/static/lib/proj4js/tools/pjjs.py | 5b79d585fd0ab9aa110d1cf09e1d59ba904e193f | [
"MIT"
] | permissive | mapgears/scribeui | ede35bfe4f2e41d9eb42f88242a9839c30fa53d8 | f7e917236c088cad265a94210a0031f7d7be4a71 | refs/heads/master | 2023-08-07T03:03:34.017894 | 2020-03-26T20:27:33 | 2020-03-26T20:27:33 | 10,671,138 | 12 | 7 | MIT | 2023-02-02T07:14:59 | 2013-06-13T17:04:40 | JavaScript | UTF-8 | Python | false | false | 3,061 | py | #!/usr/bin/env python
#
# TODO explain
#
# -- Copyright 2007 IGN France / Geoportail project --
#
import sys
import os
import re
SUFFIX_JAVASCRIPT = ".js"
def _pjcat2js_remove(rezDirectory,catName,targetDirectory):
    # Delete the per-SRS .js files previously generated into targetDirectory
    # from the PROJ.4 catalogue file <rezDirectory>/<catName>.
    pjCatFilename = os.path.join(rezDirectory, catName)
    pjCat = open(pjCatFilename,'r')
    # lines starting with '#' are catalogue comments
    comment_re = re.compile("^#")
    # only the SRS code between <...> is needed here (to build the filename),
    # so the definition part is not captured, unlike in _pjcat2js_make
    srsdef_re = re.compile("^<([^>]*)>.* <>$")
    l = pjCat.readline()
    while len(l) != 0:
        if comment_re.search(l) is None:
            srsdef_mo = srsdef_re.match(l)
            # assumes every non-comment line matches the pattern; a stray
            # line would make srsdef_mo None and raise AttributeError --
            # TODO confirm the catalogue files guarantee this
            srsdef_fn = os.path.join(targetDirectory, catName+srsdef_mo.group(1)+".js")
            if os.path.exists(srsdef_fn):
                os.remove(srsdef_fn)
        l = pjCat.readline()
    pjCat.close()
def _pjcat2js_make(rezDirectory,catName,targetDirectory):
pjCatFilename = os.path.join(rezDirectory, catName)
pjCat = open(pjCatFilename,'r')
comment_re = re.compile("^#")
srsdef_re = re.compile("^<([^>]*)> *(.*) <>$")
l = pjCat.readline()
while len(l) != 0:
if comment_re.search(l) is None:
srsdef_mo = srsdef_re.match(l)
srsdef_fn = os.path.join(targetDirectory, catName+srsdef_mo.group(1)+".js")
srsdef = 'Proj4js.defs["'+catName+':'+srsdef_mo.group(1)+'"]="'+srsdef_mo.group(2)+'";'
file(srsdef_fn,'w').write(srsdef)
l = pjCat.readline()
pjCat.close()
def pjcat2js_clean(rezDirectory,targetDirectory):
    # Remove every generated .js SRS definition in targetDirectory for all
    # catalogue files found (recursively) under rezDirectory.
    # Guard clauses: both directories must exist and neither may be the
    # filesystem root (refuses to operate on '/').
    if not os.path.isdir(rezDirectory):
        return
    if not os.path.isdir(targetDirectory):
        return
    if os.path.abspath(rezDirectory) == '/':
        return
    if os.path.abspath(targetDirectory) == '/':
        return
    rezDirectory_name_len = len(rezDirectory)
    for root, dirs, filenames in os.walk(rezDirectory):
        # prune version-control bookkeeping directories from the walk
        if 'CVS' in dirs:
            dirs.remove('CVS')
        if '.svn' in dirs:
            dirs.remove('.svn')
        for filename in filenames:
            # catalogue files are the non-.js, non-hidden files
            if not filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
                # catalogue path relative to rezDirectory, with Windows
                # backslashes normalized to forward slashes
                filepath = os.path.join(root, filename)[rezDirectory_name_len+1:]
                filepath = filepath.replace("\\", "/")
                _pjcat2js_remove(rezDirectory,filepath,targetDirectory)
def pjcat2js_run(rezDirectory,targetDirectory):
    """Generate Proj4js .js definition files into targetDirectory from every
    PROJ.4 catalogue file found (recursively) under rezDirectory.

    Hidden files and files already ending in .js are ignored, and CVS/.svn
    directories are pruned from the walk.  Both directories must exist and
    neither may be the filesystem root, otherwise nothing is done.
    """
    for directory in (rezDirectory, targetDirectory):
        if not os.path.isdir(directory) or os.path.abspath(directory) == '/':
            return
    prefix_len = len(rezDirectory)
    for root, dirs, names in os.walk(rezDirectory):
        # prune version-control bookkeeping directories in place so the
        # walk never descends into them
        for vcs_dir in ('CVS', '.svn'):
            if vcs_dir in dirs:
                dirs.remove(vcs_dir)
        for name in names:
            if name.endswith(SUFFIX_JAVASCRIPT) or name.startswith("."):
                continue
            # catalogue path relative to rezDirectory, slash-normalized
            rel_path = os.path.join(root, name)[prefix_len + 1:].replace("\\", "/")
            _pjcat2js_make(rezDirectory, rel_path, targetDirectory)
| [
"cbourget@mapgears.com"
] | cbourget@mapgears.com |
6d776f7423961968bb562001dd0866a7c64ecf22 | d0e94f036e780ed8b1d34871dd6956f7f25dcfca | /game/migrations/0001_initial.py | b899b4c5bd81562a3c33abce797aa05f4350646f | [] | no_license | kikiyuyu/datavisual_django2 | 59b689ee1e6c7cacc56fabe4174a7824131a74d5 | a840530d1416a4aa8ff75523b9145b0d08a61166 | refs/heads/master | 2020-04-04T23:21:41.290294 | 2018-11-08T09:00:43 | 2018-11-08T09:00:43 | 156,356,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | # Generated by Django 2.1.3 on 2018-11-08 01:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.3: initial migration for this app.

    # No prior migrations exist for this app.
    initial = True

    dependencies = [
    ]

    operations = [
        # Creates the Game model with an auto primary key and a name column.
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # verbose_name is Chinese for "game name"
                ('game_name', models.CharField(max_length=20, verbose_name='游戏名字')),
            ],
        ),
    ]
| [
"yu.wang@fanyoy.com"
] | yu.wang@fanyoy.com |
56355f3875195391f7293db4127fc87bb679e048 | 55aa8fd35ffb28da8d834e8f0ac1470ffa9bcd4d | /arithmetic.py | 22dc261b12820a3345bacd318540903c108692bc | [] | no_license | paszczureKk/Elliptic-Curve-Digital-Signature-Algorithm | 430800476720763a0c5aa185a5c1194723894c4c | 3cec29591b8f89f49399b07032b79c2f7825898e | refs/heads/master | 2020-05-30T07:35:00.902771 | 2019-05-31T13:49:40 | 2019-05-31T13:49:40 | 189,601,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | #!/usr/bin/python
# Command-line hexadecimal calculator:  arithmetic.py <op> <hex1> <hex2>
# Parses both operands as base-16 integers and prints the result in
# lower-case hex.
# (fix: indentation had been flattened, making the script invalid Python;
# structure reconstructed.)
import sys

result = 0
var1 = int(sys.argv[2], 16)
var2 = int(sys.argv[3], 16)

if sys.argv[1] == "sum":
    result = var1 + var2
elif sys.argv[1] == "sub":
    result = var1 - var2
elif sys.argv[1] == "div":
    # NOTE(review): on Python 2 this is floor division for ints; on Python 3
    # it is true division, and '{:x}' would then fail on a float.  The
    # original script targets Python 2 -- confirm before running under 3.
    result = var1 / var2
elif sys.argv[1] == "fdiv":
    result = var1 // var2
elif sys.argv[1] == "mul":
    result = var1 * var2
elif sys.argv[1] == "pow":
    result = var1 ** var2
elif sys.argv[1] == "mod":
    result = var1 % var2
elif sys.argv[1] == "con":
    # bitwise AND ("conjunction")
    result = var1 & var2
# fix: `print '...'` is Python-2-only syntax; calling print with a single
# argument behaves identically on Python 2 and Python 3.
print('{:x}'.format(result))
| [
"dszynszecki@wp.pl"
] | dszynszecki@wp.pl |
bf1ede23da3e02e00b2cf1c77c17765bc71ab71a | b39d72ba5de9d4683041e6b4413f8483c817f821 | /GeneVisualization/ass1/Lib/site-packages/itk/itkAggregateLabelMapFilterPython.py | 0dbdcf699057bdee2b4e73071db281a38799bb72 | [] | no_license | ssalmaan/DataVisualization | d93a0afe1290e4ea46c3be5718d503c71a6f99a7 | eff072f11337f124681ce08742e1a092033680cc | refs/heads/master | 2021-03-13T05:40:23.679095 | 2020-03-11T21:37:45 | 2020-03-11T21:37:45 | 246,642,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,395 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _itkAggregateLabelMapFilterPython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_itkAggregateLabelMapFilterPython', [dirname(__file__)])
except ImportError:
import _itkAggregateLabelMapFilterPython
return _itkAggregateLabelMapFilterPython
if fp is not None:
try:
_mod = imp.load_module('_itkAggregateLabelMapFilterPython', fp, pathname, description)
finally:
fp.close()
return _mod
_itkAggregateLabelMapFilterPython = swig_import_helper()
del swig_import_helper
else:
import _itkAggregateLabelMapFilterPython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    # SWIG boilerplate: repr that embeds the underlying SwigPyObject's repr
    # when the wrapper has been initialized, and an empty placeholder
    # otherwise (e.g. before `this` is set).
    try:
        strthis = "proxy of " + self.this.__repr__()
    except Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import itkInPlaceLabelMapFilterPython
import itkLabelMapFilterPython
import ITKLabelMapBasePython
import itkStatisticsLabelObjectPython
import itkPointPython
import itkFixedArrayPython
import pyBasePython
import vnl_vector_refPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import itkVectorPython
import itkIndexPython
import itkOffsetPython
import itkSizePython
import itkMatrixPython
import itkCovariantVectorPython
import vnl_matrix_fixedPython
import itkAffineTransformPython
import itkMatrixOffsetTransformBasePython
import itkArray2DPython
import itkOptimizerParametersPython
import itkArrayPython
import ITKCommonBasePython
import itkVariableLengthVectorPython
import itkDiffusionTensor3DPython
import itkSymmetricSecondRankTensorPython
import itkTransformBasePython
import itkShapeLabelObjectPython
import itkImageRegionPython
import itkLabelObjectPython
import itkLabelObjectLinePython
import itkHistogramPython
import itkSamplePython
import itkImageSourcePython
import itkImageSourceCommonPython
import itkVectorImagePython
import itkImagePython
import itkRGBAPixelPython
import itkRGBPixelPython
import itkImageToImageFilterCommonPython
def itkAggregateLabelMapFilterLM3_New():
    # SWIG-generated convenience constructor: equivalent to
    # itkAggregateLabelMapFilterLM3.New().
    return itkAggregateLabelMapFilterLM3.New()

def itkAggregateLabelMapFilterLM2_New():
    # SWIG-generated convenience constructor: equivalent to
    # itkAggregateLabelMapFilterLM2.New().
    return itkAggregateLabelMapFilterLM2.New()
class itkAggregateLabelMapFilterLM2(itkInPlaceLabelMapFilterPython.itkInPlaceLabelMapFilterLM2):
"""Proxy of C++ itkAggregateLabelMapFilterLM2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkAggregateLabelMapFilterLM2_Pointer":
"""__New_orig__() -> itkAggregateLabelMapFilterLM2_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkAggregateLabelMapFilterLM2_Pointer":
"""Clone(itkAggregateLabelMapFilterLM2 self) -> itkAggregateLabelMapFilterLM2_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_Clone(self)
__swig_destroy__ = _itkAggregateLabelMapFilterPython.delete_itkAggregateLabelMapFilterLM2
def cast(obj: 'itkLightObject') -> "itkAggregateLabelMapFilterLM2 *":
"""cast(itkLightObject obj) -> itkAggregateLabelMapFilterLM2"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkAggregateLabelMapFilterLM2
Create a new object of the class itkAggregateLabelMapFilterLM2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkAggregateLabelMapFilterLM2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkAggregateLabelMapFilterLM2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkAggregateLabelMapFilterLM2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkAggregateLabelMapFilterLM2.Clone = new_instancemethod(_itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_Clone, None, itkAggregateLabelMapFilterLM2)
itkAggregateLabelMapFilterLM2_swigregister = _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_swigregister
itkAggregateLabelMapFilterLM2_swigregister(itkAggregateLabelMapFilterLM2)
def itkAggregateLabelMapFilterLM2___New_orig__() -> "itkAggregateLabelMapFilterLM2_Pointer":
"""itkAggregateLabelMapFilterLM2___New_orig__() -> itkAggregateLabelMapFilterLM2_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2___New_orig__()
def itkAggregateLabelMapFilterLM2_cast(obj: 'itkLightObject') -> "itkAggregateLabelMapFilterLM2 *":
"""itkAggregateLabelMapFilterLM2_cast(itkLightObject obj) -> itkAggregateLabelMapFilterLM2"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_cast(obj)
class itkAggregateLabelMapFilterLM3(itkInPlaceLabelMapFilterPython.itkInPlaceLabelMapFilterLM3):
    """Proxy of C++ itkAggregateLabelMapFilterLM3 class.

    SWIG-generated wrapper for the 3-D AggregateLabelMapFilter; all calls
    delegate to the compiled _itkAggregateLabelMapFilterPython module.
    """

    # SWIG ownership flag: whether Python owns (and may delete) the C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkAggregateLabelMapFilterLM3_Pointer":
        """__New_orig__() -> itkAggregateLabelMapFilterLM3_Pointer"""
        return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkAggregateLabelMapFilterLM3_Pointer":
        """Clone(itkAggregateLabelMapFilterLM3 self) -> itkAggregateLabelMapFilterLM3_Pointer"""
        return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_Clone(self)

    # Destructor for the underlying C++ object, invoked by SWIG.
    __swig_destroy__ = _itkAggregateLabelMapFilterPython.delete_itkAggregateLabelMapFilterLM3

    def cast(obj: 'itkLightObject') -> "itkAggregateLabelMapFilterLM3 *":
        """cast(itkLightObject obj) -> itkAggregateLabelMapFilterLM3"""
        return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkAggregateLabelMapFilterLM3

        Create a new object of the class itkAggregateLabelMapFilterLM3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkAggregateLabelMapFilterLM3.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkAggregateLabelMapFilterLM3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkAggregateLabelMapFilterLM3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)
# SWIG wiring for the 3-D proxy class: bind Clone as an instance method and
# register the proxy with SWIG's runtime type system.
itkAggregateLabelMapFilterLM3.Clone = new_instancemethod(_itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_Clone, None, itkAggregateLabelMapFilterLM3)
itkAggregateLabelMapFilterLM3_swigregister = _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_swigregister
itkAggregateLabelMapFilterLM3_swigregister(itkAggregateLabelMapFilterLM3)


def itkAggregateLabelMapFilterLM3___New_orig__() -> "itkAggregateLabelMapFilterLM3_Pointer":
    """itkAggregateLabelMapFilterLM3___New_orig__() -> itkAggregateLabelMapFilterLM3_Pointer

    Module-level alias for the static factory; returns a smart pointer to a
    new filter instance.
    """
    return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3___New_orig__()


def itkAggregateLabelMapFilterLM3_cast(obj: 'itkLightObject') -> "itkAggregateLabelMapFilterLM3 *":
    """itkAggregateLabelMapFilterLM3_cast(itkLightObject obj) -> itkAggregateLabelMapFilterLM3

    Module-level alias for the static down-cast from itkLightObject.
    """
    return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_cast(obj)
def aggregate_label_map_filter(*args, **kwargs):
    """Procedural (functional) interface for AggregateLabelMapFilter.

    Builds a filter instance from the given arguments and immediately
    executes it, returning the result of the internal call.
    """
    import itk
    filter_object = itk.AggregateLabelMapFilter.New(*args, **kwargs)
    return filter_object.__internal_call__()
def aggregate_label_map_filter_init_docstring():
    """Copy the wrapped class's docstring onto the procedural wrapper.

    If AggregateLabelMapFilter is a template, the docstring of its first
    instantiation is used; otherwise the class docstring is used directly.
    """
    import itk
    import itkTemplate
    is_template = isinstance(itk.AggregateLabelMapFilter, itkTemplate.itkTemplate)
    if is_template:
        doc_source = itk.AggregateLabelMapFilter.values()[0]
    else:
        doc_source = itk.AggregateLabelMapFilter
    aggregate_label_map_filter.__doc__ = doc_source.__doc__
| [
"44883043+ssalmaan@users.noreply.github.com"
] | 44883043+ssalmaan@users.noreply.github.com |
d7c0d7693181b79f9f44abbeaedd2d8e7988f5ff | caa14cf78fe15affc96acc3de6f4fb1b54bcdf70 | /sap/sap/saplib/tests/test_saputils.py | 6eec437d874842f6cbed599b9adb923e141e3f69 | [] | no_license | jesstherobot/Sycamore_FPGA | 2e3f0dea21482de87ea444506ae2af3f58b5a344 | d1096e15f07b17a8dcb2276e312c5ba3e0006632 | refs/heads/master | 2021-01-18T07:57:14.268157 | 2011-10-19T22:46:28 | 2011-10-19T22:46:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,689 | py | import unittest
import sys
import os
class Test(unittest.TestCase):
    """Unit tests for the saputils helper module.

    Fixes over the previous revision: Python-2-only ``print`` statements are
    now function calls (valid on 2 and 3 for a single argument), vacuous
    ``assertEqual(True, True)`` assertions are replaced with assertions on
    the actual results, the ``assertEqual(x == y, True)`` anti-pattern and a
    bare ``except:`` are removed, and the DRT keyword list that was
    duplicated in three tests is now a shared class constant.
    """

    # DRT keywords shared by the slave-tag extraction tests.
    DRT_KEYWORDS = ["DRT_ID", "DRT_FLAGS", "DRT_SIZE"]

    def setUp(self):
        # saputils resolves RTL/HDL resources relative to SAPLIB_BASE.
        os.environ["SAPLIB_BASE"] = sys.path[0] + "/saplib"

    def test_create_dir(self):
        """create_dir reports success for a creatable directory."""
        import saputils
        result = saputils.create_dir("~/sandbox/projects")
        self.assertTrue(result)

    def test_remove_comments(self):
        """remove_comments strips // and /* */ comments but keeps code text."""
        import saputils
        bufin = "not comment /*comment\n\n*/\n\n//comment\n\n/*\nabc\n*/soemthing//comment"
        output_buffer = saputils.remove_comments(bufin)
        # Non-comment text must survive the stripping pass.
        self.assertGreater(len(output_buffer), 0)

    def test_find_rtl_file_location(self):
        """find_rtl_file_location resolves a known RTL file to a real path."""
        import saputils
        location = saputils.find_rtl_file_location("simple_gpio.v")
        # The returned path must point at an existing, openable file
        # (previously checked by open() inside a bare try/except).
        self.assertTrue(os.path.isfile(location))

    def test_resolve_linux_path(self):
        """resolve_linux_path expands '~' and leaves absolute paths alone."""
        import saputils
        # Absolute paths pass through unchanged.
        self.assertEqual(saputils.resolve_linux_path("/filename1"), "/filename1")
        # '~' is expanded to the current user's home directory.
        expected = os.path.expanduser("~") + "/filename2"
        self.assertEqual(saputils.resolve_linux_path("~/filename2"), expected)

    def test_read_slave_tags(self):
        """get_module_tags extracts tag data from a simple slave core."""
        import saputils
        base_dir = os.getenv("SAPLIB_BASE")
        filename = base_dir + "/hdl/rtl/wishbone/slave/simple_gpio/simple_gpio.v"
        tags = saputils.get_module_tags(filename, keywords=self.DRT_KEYWORDS, debug=False)
        self.assertIsNotNone(tags)

    def test_read_slave_tags_with_params(self):
        """get_module_tags handles verilog files that declare a parameter list."""
        import saputils
        base_dir = os.getenv("SAPLIB_BASE")
        filename = base_dir + "/hdl/rtl/wishbone/slave/ddr/wb_ddr.v"
        tags = saputils.get_module_tags(filename, keywords=self.DRT_KEYWORDS, debug=True)
        # A parameterized module must still yield its module name.
        self.assertIn("module", tags)
        print("module name: " + tags["module"])

    def test_read_hard_slave_tags(self):
        """get_module_tags extracts tag data from a harder slave core."""
        import saputils
        base_dir = os.getenv("SAPLIB_BASE")
        filename = base_dir + "/hdl/rtl/wishbone/slave/ddr/wb_ddr.v"
        tags = saputils.get_module_tags(filename, keywords=self.DRT_KEYWORDS, debug=True)
        self.assertIsNotNone(tags)
if __name__ == "__main__":
    # Make the parent directory importable so 'saputils' resolves when this
    # test file is executed directly.
    sys.path.append(sys.path[0] + "/../")
    import saputils
    unittest.main()
| [
"cospan@gmail.com"
] | cospan@gmail.com |
b6ab8185b53278fef56ffa3e54f21c192f66c2c4 | 9071ca80e487ab7084059ea92a8a0ded2b70a2cd | /api/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudidentity/v1alpha1/cloudidentity_v1alpha1_messages.py | a815beaf99d8e57d32b09f5d804022b8f4b070a9 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | anishnuni/Hearo-WebApp | d1065be46e5736bb565f738f789afa099601231c | ead2faa234e876e717bedd727537e9666acb8b79 | refs/heads/master | 2022-07-09T12:59:57.823491 | 2019-11-22T04:18:05 | 2019-11-22T04:18:05 | 218,857,650 | 0 | 1 | null | 2022-06-21T23:20:04 | 2019-10-31T20:38:24 | Python | UTF-8 | Python | false | false | 29,832 | py | """Generated message classes for cloudidentity version v1alpha1.
API for provisioning and managing identity resources.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'cloudidentity'
class CloudidentityGroupsCreateRequest(_messages.Message):
  r"""A CloudidentityGroupsCreateRequest object.

  Enums:
    InitialGroupConfigValueValuesEnum: Initial configuration for creating the
      Group.

  Fields:
    group: A Group resource to be passed as the request body.
    initialGroupConfig: Initial configuration for creating the Group.
  """

  class InitialGroupConfigValueValuesEnum(_messages.Enum):
    r"""Initial configuration for creating the Group.

    Values:
      INITIAL_GROUP_CONFIG_UNSPECIFIED: <no description>
      WITH_INITIAL_OWNER: <no description>
      EMPTY: <no description>
    """
    INITIAL_GROUP_CONFIG_UNSPECIFIED = 0
    WITH_INITIAL_OWNER = 1
    EMPTY = 2

  # Positional numbers are protorpclite field ordinals; do not renumber.
  group = _messages.MessageField('Group', 1)
  initialGroupConfig = _messages.EnumField('InitialGroupConfigValueValuesEnum', 2)
class CloudidentityGroupsDeleteRequest(_messages.Message):
  r"""A CloudidentityGroupsDeleteRequest object.

  Fields:
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Group in the format: `groups/{group_id}`, where `group_id` is the
      unique ID assigned to the Group.
  """

  name = _messages.StringField(1, required=True)
class CloudidentityGroupsGetRequest(_messages.Message):
  r"""A CloudidentityGroupsGetRequest object.

  Fields:
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Group in the format: `groups/{group_id}`, where `group_id` is the
      unique ID assigned to the Group.
  """

  name = _messages.StringField(1, required=True)
class CloudidentityGroupsListRequest(_messages.Message):
  r"""A CloudidentityGroupsListRequest object.

  Enums:
    ViewValueValuesEnum: Group resource view to be returned. Defaults to
      [View.BASIC]().

  Fields:
    pageSize: The default page size is 200 (max 1000) for the BASIC view, and
      50 (max 500) for the FULL view.
    pageToken: The next_page_token value returned from a previous list
      request, if any.
    parent: `Required`. May be made Optional in the future. Customer ID to
      list all groups from.
    view: Group resource view to be returned. Defaults to [View.BASIC]().
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""Group resource view to be returned. Defaults to [View.BASIC]().

    Values:
      VIEW_UNSPECIFIED: <no description>
      BASIC: <no description>
      FULL: <no description>
    """
    VIEW_UNSPECIFIED = 0
    BASIC = 1
    FULL = 2

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class CloudidentityGroupsLookupRequest(_messages.Message):
  r"""A CloudidentityGroupsLookupRequest object.

  Fields:
    groupKey_id: The ID of the entity within the given namespace. The ID must
      be unique within its namespace.
    groupKey_namespace: Namespaces provide isolation for IDs, so an ID only
      needs to be unique within its namespace. Namespaces are currently only
      created as part of IdentitySource creation from Admin Console. A
      namespace `"identitysources/{identity_source_id}"` is created
      corresponding to every Identity Source `identity_source_id`.
  """

  # Flattened EntityKey sub-fields of the query parameter `groupKey`.
  groupKey_id = _messages.StringField(1)
  groupKey_namespace = _messages.StringField(2)
class CloudidentityGroupsMembershipsCreateRequest(_messages.Message):
  r"""A CloudidentityGroupsMembershipsCreateRequest object.

  Fields:
    membership: A Membership resource to be passed as the request body.
    parent: [Resource
      name](https://cloud.google.com/apis/design/resource_names) of the Group
      to create Membership within. Format: `groups/{group_id}`, where
      `group_id` is the unique ID assigned to the Group.
  """

  membership = _messages.MessageField('Membership', 1)
  parent = _messages.StringField(2, required=True)
class CloudidentityGroupsMembershipsDeleteRequest(_messages.Message):
  r"""A CloudidentityGroupsMembershipsDeleteRequest object.

  Fields:
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Membership to be deleted. Format:
      `groups/{group_id}/memberships/{member_id}`, where `group_id` is the
      unique ID assigned to the Group to which Membership belongs to, and
      member_id is the unique ID assigned to the member.
  """

  name = _messages.StringField(1, required=True)
class CloudidentityGroupsMembershipsGetRequest(_messages.Message):
  r"""A CloudidentityGroupsMembershipsGetRequest object.

  Fields:
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Membership to be retrieved. Format:
      `groups/{group_id}/memberships/{member_id}`, where `group_id` is the
      unique id assigned to the Group to which Membership belongs to, and
      `member_id` is the unique ID assigned to the member.
  """

  name = _messages.StringField(1, required=True)
class CloudidentityGroupsMembershipsListRequest(_messages.Message):
  r"""A CloudidentityGroupsMembershipsListRequest object.

  Enums:
    ViewValueValuesEnum: Membership resource view to be returned. Defaults to
      View.BASIC.

  Fields:
    pageSize: The default page size is 200 (max 1000) for the BASIC view, and
      50 (max 500) for the FULL view.
    pageToken: The next_page_token value returned from a previous list
      request, if any.
    parent: [Resource
      name](https://cloud.google.com/apis/design/resource_names) of the Group
      to list Memberships within. Format: `groups/{group_id}`, where
      `group_id` is the unique ID assigned to the Group.
    view: Membership resource view to be returned. Defaults to View.BASIC.
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""Membership resource view to be returned. Defaults to View.BASIC.

    Values:
      VIEW_UNSPECIFIED: <no description>
      BASIC: <no description>
      FULL: <no description>
    """
    VIEW_UNSPECIFIED = 0
    BASIC = 1
    FULL = 2

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class CloudidentityGroupsMembershipsLookupRequest(_messages.Message):
  r"""A CloudidentityGroupsMembershipsLookupRequest object.

  Fields:
    memberKey_id: The ID of the entity within the given namespace. The ID must
      be unique within its namespace.
    memberKey_namespace: Namespaces provide isolation for IDs, so an ID only
      needs to be unique within its namespace. Namespaces are currently only
      created as part of IdentitySource creation from Admin Console. A
      namespace `"identitysources/{identity_source_id}"` is created
      corresponding to every Identity Source `identity_source_id`.
    parent: [Resource
      name](https://cloud.google.com/apis/design/resource_names) of the Group
      to lookup Membership within. Format: `groups/{group_id}`, where
      `group_id` is the unique ID assigned to the Group.
  """

  # Flattened EntityKey sub-fields of the query parameter `memberKey`.
  memberKey_id = _messages.StringField(1)
  memberKey_namespace = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
class CloudidentityGroupsMembershipsPatchRequest(_messages.Message):
  r"""A CloudidentityGroupsMembershipsPatchRequest object.

  Fields:
    membership: A Membership resource to be passed as the request body.
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Membership in the format:
      `groups/{group_id}/memberships/{member_id}`, where group_id is the
      unique ID assigned to the Group to which Membership belongs to, and
      member_id is the unique ID assigned to the member Must be left blank
      while creating a Membership.
    updateMask: A string attribute.
  """

  membership = _messages.MessageField('Membership', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class CloudidentityGroupsPatchRequest(_messages.Message):
  r"""A CloudidentityGroupsPatchRequest object.

  Fields:
    group: A Group resource to be passed as the request body.
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Group in the format: `groups/{group_id}`, where group_id is the
      unique ID assigned to the Group. Must be left blank while creating a
      Group.
    updateMask: Editable fields: `display_name`, `description`
  """

  group = _messages.MessageField('Group', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class CloudidentityGroupsSearchRequest(_messages.Message):
  r"""A CloudidentityGroupsSearchRequest object.

  Enums:
    ViewValueValuesEnum: Group resource view to be returned. Defaults to
      [View.BASIC]().

  Fields:
    pageSize: The default page size is 200 (max 1000) for the BASIC view, and
      50 (max 500) for the FULL view.
    pageToken: The next_page_token value returned from a previous search
      request, if any.
    query: `Required`. Query string for performing search on groups. Users can
      search on parent and label attributes of groups. EXACT match ('==') is
      supported on parent, and CONTAINS match ('in') is supported on labels.
    view: Group resource view to be returned. Defaults to [View.BASIC]().
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""Group resource view to be returned. Defaults to [View.BASIC]().

    Values:
      VIEW_UNSPECIFIED: <no description>
      BASIC: <no description>
      FULL: <no description>
    """
    VIEW_UNSPECIFIED = 0
    BASIC = 1
    FULL = 2

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  query = _messages.StringField(3)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class DynamicGroupMetadata(_messages.Message):
  r"""Dynamic group metadata like queries and status.

  Fields:
    queries: Only one entry is supported for now. Memberships will be the
      union of all queries.
    status: Status of the dynamic group. Output only.
  """

  queries = _messages.MessageField('DynamicGroupQuery', 1, repeated=True)
  status = _messages.MessageField('DynamicGroupStatus', 2)
class DynamicGroupQuery(_messages.Message):
  r"""Defines a query on a resource.

  Enums:
    ResourceTypeValueValuesEnum: Type of resource the query targets.

  Fields:
    query: Query that determines the memberships of the dynamic group.
    resourceType: A ResourceTypeValueValuesEnum attribute.
  """

  class ResourceTypeValueValuesEnum(_messages.Enum):
    r"""Type of resource the query targets.

    Values:
      RESOURCE_TYPE_UNSPECIFIED: <no description>
      USER: <no description>
    """
    RESOURCE_TYPE_UNSPECIFIED = 0
    USER = 1

  query = _messages.StringField(1)
  resourceType = _messages.EnumField('ResourceTypeValueValuesEnum', 2)
class DynamicGroupStatus(_messages.Message):
  r"""The current status of a dynamic group along with timestamp.

  Enums:
    StatusValueValuesEnum: Status of the dynamic group.

  Fields:
    status: Status of the dynamic group.
    statusTime: The latest time at which the dynamic group is guaranteed to be
      in the given status. For example, if status is: UP_TO_DATE - The latest
      time at which this dynamic group was confirmed to be up to date.
      UPDATING_MEMBERSHIPS - The time at which dynamic group was created.
  """

  class StatusValueValuesEnum(_messages.Enum):
    r"""Status of the dynamic group.

    Values:
      STATUS_UNSPECIFIED: Default.
      UP_TO_DATE: The dynamic group is up-to-date.
      UPDATING_MEMBERSHIPS: The dynamic group has just been created and
        memberships are being updated.
    """
    STATUS_UNSPECIFIED = 0
    UP_TO_DATE = 1
    UPDATING_MEMBERSHIPS = 2

  status = _messages.EnumField('StatusValueValuesEnum', 1)
  statusTime = _messages.StringField(2)
class EntityKey(_messages.Message):
  r"""An EntityKey uniquely identifies an Entity. Namespaces are used to
  provide isolation for IDs. A single ID can be reused across namespaces but
  the combination of a namespace and an ID must be unique.

  Fields:
    id: The ID of the entity within the given namespace. The ID must be unique
      within its namespace.
    namespace: Namespaces provide isolation for IDs, so an ID only needs to be
      unique within its namespace. Namespaces are currently only created as
      part of IdentitySource creation from Admin Console. A namespace
      `"identitysources/{identity_source_id}"` is created corresponding to
      every Identity Source `identity_source_id`.
  """

  id = _messages.StringField(1)
  namespace = _messages.StringField(2)
class Group(_messages.Message):
  r"""Resource representing a Group.

  Messages:
    LabelsValue: `Required` while Group creation. Labels for Group resource.
      Use values ('system/groups/external','') and
      ('system/groups/discussion_forum', '') for creating an external or
      discussion forum Group respectively.

  Fields:
    createTime: The time when the Group was created. Output only.
    description: An extended description to help users determine the purpose
      of a Group. Maximum length is 4,096 characters.
    displayName: The Group's display name.
    dynamicGroupMetadata: Dynamic group metadata like queries and status.
    groupKey: EntityKey of the Group. Must be set when creating a Group,
      read-only afterwards.
    labels: `Required` while Group creation. Labels for Group resource.
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Group in the format: `groups/{group_id}`. Must be left blank
      while creating a Group.
    parent: The entity under which this Group resides in Cloud Identity
      resource hierarchy. Must be set when creating a Group, read-only
      afterwards. Currently allowed types: `identitysources` and `customers`.
    updateTime: The time when the Group was last updated. Output only.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""`Required` while Group creation. Labels for Group resource. Use values
    ('system/groups/external','') and ('system/groups/discussion_forum', '')
    for creating an external or discussion forum Group respectively.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  createTime = _messages.StringField(1)
  description = _messages.StringField(2)
  displayName = _messages.StringField(3)
  dynamicGroupMetadata = _messages.MessageField('DynamicGroupMetadata', 4)
  groupKey = _messages.MessageField('EntityKey', 5)
  labels = _messages.MessageField('LabelsValue', 6)
  name = _messages.StringField(7)
  parent = _messages.StringField(8)
  updateTime = _messages.StringField(9)
class ListGroupsResponse(_messages.Message):
  r"""Response message for ListGroups operation.

  Fields:
    groups: Groups returned in response to list request. The results are not
      sorted.
    nextPageToken: Token to retrieve the next page of results, or empty if
      there are no more results available for listing.
  """

  groups = _messages.MessageField('Group', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class ListMembershipsResponse(_messages.Message):
  r"""A ListMembershipsResponse object.

  Fields:
    memberships: List of Memberships.
    nextPageToken: Token to retrieve the next page of results, or empty if
      there are no more results available for listing.
  """

  memberships = _messages.MessageField('Membership', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class LookupGroupNameResponse(_messages.Message):
  r"""A LookupGroupNameResponse object.

  Fields:
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Group in the format: `groups/{group_id}`, where `group_id` is the
      unique ID assigned to the Group.
  """

  name = _messages.StringField(1)
class LookupMembershipNameResponse(_messages.Message):
  r"""A LookupMembershipNameResponse object.

  Fields:
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Membership being looked up. Format:
      `groups/{group_id}/memberships/{member_id}`, where `group_id` is the
      unique ID assigned to the Group to which Membership belongs to, and
      `member_id` is the unique ID assigned to the member.
  """

  name = _messages.StringField(1)
class Membership(_messages.Message):
  r"""Resource representing a Membership within a Group

  Fields:
    createTime: Creation timestamp of the Membership. Output only.
    expiryDetail: Expiry details of the Membership. It can be set only during
      the Membership creation/update time.
    name: [Resource name](https://cloud.google.com/apis/design/resource_names)
      of the Membership in the format:
      `groups/{group_id}/memberships/{member_id}`. Must be left blank while
      creating a Membership.
    preferredMemberKey: EntityKey of the entity to be added as the member.
      Must be set while creating a Membership, read-only afterwards.
      Currently allowed entity types: `Users`, `Groups`.
    roles: Roles for a member within the Group. Currently supported
      MembershipRoles: `"MEMBER"`.
    updateTime: Last updated timestamp of the Membership. Output only.
  """

  createTime = _messages.StringField(1)
  expiryDetail = _messages.MessageField('MembershipExpiryDetail', 2)
  name = _messages.StringField(3)
  preferredMemberKey = _messages.MessageField('EntityKey', 4)
  roles = _messages.MessageField('MembershipRole', 5, repeated=True)
  updateTime = _messages.StringField(6)
class MembershipExpiryDetail(_messages.Message):
  r"""Specifies Membership expiry attributes.

  Fields:
    expireTime: Expiration time for the Membership.
  """

  expireTime = _messages.StringField(1)
class MembershipRole(_messages.Message):
  r"""Resource representing a role within a Membership.

  Fields:
    expiryDetail: Expiry details of the MembershipRole. Currently supported
      MembershipRoles: `"MEMBER"`.
    name: MembershipRole in string format. Currently supported
      MembershipRoles: `"MEMBER", "OWNER", "MANAGER"`.
  """

  expiryDetail = _messages.MessageField('MembershipRoleExpiryDetail', 1)
  name = _messages.StringField(2)
class MembershipRoleExpiryDetail(_messages.Message):
  r"""Specifies Membership expiry attributes.

  Fields:
    expireTime: Expiration time for the Membership.
  """

  expireTime = _messages.StringField(1)
class Operation(_messages.Message):
  r"""This resource represents a long-running operation that is the result of
  a network API call.

  Messages:
    MetadataValue: Service-specific metadata associated with the operation.
      It typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata.
    ResponseValue: The normal response of the operation in case of success.
      If the original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource.

  Fields:
    done: If the value is `false`, it means the operation is still in
      progress. If `true`, the operation is completed, and either `error` or
      `response` is available.
    error: The error result of the operation in case of failure or
      cancellation.
    metadata: Service-specific metadata associated with the operation.
    name: The server-assigned name, which is only unique within the same
      service that originally returns it. If you use the default HTTP mapping,
      the `name` should be a resource name ending with
      `operations/{unique_id}`.
    response: The normal response of the operation in case of success.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    r"""The normal response of the operation in case of success. If the
    original method returns no data on success, such as `Delete`, the response
    is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name.

    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResponseValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  done = _messages.BooleanField(1)
  error = _messages.MessageField('Status', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
  name = _messages.StringField(4)
  response = _messages.MessageField('ResponseValue', 5)
class SearchGroupsResponse(_messages.Message):
  r"""A SearchGroupsResponse object.

  Fields:
    groups: List of Groups satisfying the search query.
    nextPageToken: Token to retrieve the next page of results, or empty if
      there are no more results available for specified query.
  """

  groups = _messages.MessageField('Group', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
"billhuang0916@gmail.com"
] | billhuang0916@gmail.com |
b02ad13a170a0375b85c0f5765f94655c17bb902 | c144f32b20e63da289b40b8fa0488254822e8115 | /accounts/migrations/0002_userprofile.py | 49d6c25a35c56716c8d4d336983b1ff09497b7f0 | [] | no_license | emna-malek/shop | 9d1a40476d11f90d9a25513c29fe1b62633cfe18 | 5275291e3589fec2380d81d16152aca5623358d5 | refs/heads/master | 2022-06-02T23:09:24.471225 | 2020-05-03T14:35:34 | 2020-05-03T14:35:34 | 260,722,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # Generated by Django 3.0.3 on 2020-04-19 22:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='userProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to='', width_field='width_field')),
('width_field', models.IntegerField(blank=True, default=0, null=True)),
('height_field', models.IntegerField(blank=True, default=0, null=True)),
('city', models.CharField(default='', max_length=100)),
('company', models.CharField(default='', max_length=100)),
('phone', models.IntegerField(default=0)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"noreply@github.com"
] | noreply@github.com |
f7ecbb6e60d58f81414014d4eb23c770a0e6acd9 | c4a8e44b171bbfcce4773fbd5820be40d991afab | /dispatcher_sample.fcgi | 299c4ad0e1726445c0909f317429f2fd66a4824f | [
"MIT"
] | permissive | sveetch/DjangoSveetchies | a2462c29839d60736077f647b3014396ce700f42 | 0fd4f23d601287dbfb5a93b4f9baa33481466a25 | refs/heads/master | 2021-01-01T20:48:08.824288 | 2013-03-10T12:14:56 | 2013-03-10T12:14:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | fcgi | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
FastCGI dispatcher for development environment
"""
import sys, os
sys.path.insert(0, '/home/django/py_libs') # An optionnal path where is installed some Python libs
sys.path.insert(0, '/home/django/gits/') # Path to the directory which contains 'DjangoSveetchies'
# Specify the temporary directory to use for Python Eggs
os.environ['PYTHON_EGG_CACHE'] = "/tmp"
# Set the DJANGO_SETTINGS_MODULE environment variable.
os.environ['DJANGO_SETTINGS_MODULE'] = "DjangoSveetchies.prod_settings"
from django.core.servers.fastcgi import runfastcgi
runfastcgi(method="threaded", daemonize="false")
| [
"sveetch@gmail.com"
] | sveetch@gmail.com |
6843650bcdbb47de6fea60445c591ac5a3354c45 | 16c2d6ab5f4d8d453b48c50e82cfcf57c86bd9b5 | /CT-GANs/Theano_classifier/CT_CIFAR-10_TE.py | d439631fb881c372fd75b5e4475419e123b147a3 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | jason096/CT-GAN | 221e5c3f475b6a132a3a48995b1482d23e4bc82f | 5ebc7e135307b77856926b5c8e0849aacf75c3c4 | refs/heads/master | 2020-10-02T08:40:43.955040 | 2018-04-28T23:59:19 | 2018-04-28T23:59:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,581 | py | import argparse
import time
import numpy as np
import theano as th
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne
import lasagne.layers as ll
from lasagne.init import Normal
from lasagne.layers import dnn
import nn
import sys
import plotting
import cifar10_data
from scipy import linalg
# settings
factor_M = 0.0
LAMBDA_2 = 1.0
prediction_decay = 0.6
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=2)
parser.add_argument('--seed_data', default=2)
parser.add_argument('--count', default=400)
parser.add_argument('--batch_size', default=100)
parser.add_argument('--unlabeled_weight', type=float, default=1.)
parser.add_argument('--learning_rate', type=float, default=0.0003)# learning rate, no decay
parser.add_argument('--data_dir', type=str, default='/home/bigdata/Desktop/CT-GANs') #add your own path
args = parser.parse_args()
print(args)
# fixed random seeds
rng_data = np.random.RandomState(args.seed_data)
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
# load CIFAR-10
trainx, trainy = cifar10_data.load(args.data_dir, subset='train')
testx, testy = cifar10_data.load(args.data_dir, subset='test')
#######
#pad
#######
trainx = np.pad(trainx, ((0, 0), (0, 0), (2, 2), (2, 2)), 'reflect')
trainx_unl_org = trainx.copy()
trainx_unl2_org = trainx.copy()
nr_batches_train = int(trainx.shape[0]/args.batch_size)
nr_batches_test = int(testx.shape[0]/args.batch_size)
# specify generative model input with 50 dim
noise_dim = (args.batch_size, 50)
noise = theano_rng.uniform(size=noise_dim)
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size,512,4,4)))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 4 -> 8
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 8 -> 16
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,3,32,32), (5,5), W=Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1)) # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])
## same as the original net the size in tempens 128 - 256
disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 128, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 256, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 512, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=256, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=128, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(nn.weight_norm(ll.DenseLayer(disc_layers[-1], num_units=10, W=Normal(0.05), nonlinearity=None), train_g=True, init_stdv=0.1))
disc_params = ll.get_all_params(disc_layers, trainable=True)
# costs
labels = T.ivector()
x_lab = T.tensor4()
x_unl = T.tensor4()
training_targets =T.matrix('targets')
training_targets2 = T.matrix('targets2')
temp = ll.get_output(gen_layers[-1], deterministic=False, init=True)
temp = ll.get_output(disc_layers[-1], x_lab, deterministic=False, init=True)
init_updates = [u for l in gen_layers+disc_layers for u in getattr(l,'init_updates',[])]
output_before_softmax_lab = ll.get_output(disc_layers[-1], x_lab, deterministic=False) # no softmax labeled dis output
output_before_softmax_unl,output_before_softmax_unl_ = ll.get_output([disc_layers[-1],disc_layers[-2]], x_unl, deterministic=False) # last two layers' output
output_before_softmax_gen = ll.get_output(disc_layers[-1], gen_dat, deterministic=False) #dis of generator output
l_lab = output_before_softmax_lab[T.arange(args.batch_size),labels]
l_unl = nn.log_sum_exp(output_before_softmax_unl)
l_unl_ = nn.log_sum_exp(output_before_softmax_unl_)
l_gen = nn.log_sum_exp(output_before_softmax_gen)
loss_lab = -T.mean(l_lab) + T.mean(T.mean(nn.log_sum_exp(output_before_softmax_lab)))
######################
#the consistency term
######################
loss_ct = T.mean(lasagne.objectives.squared_error(T.nnet.softmax(output_before_softmax_unl),T.nnet.softmax(training_targets)),axis = 1) #last layer should be with softmax,not only seperate the real from fake, but also the class of real it belongs to, D
loss_ct_ = T.mean(lasagne.objectives.squared_error(output_before_softmax_unl_,training_targets2),axis = 1) #D_
CT = LAMBDA_2*(loss_ct+loss_ct_*0.1)-factor_M # 1.0:0.1
CT_ = T.mean(T.maximum(CT,0.0*CT),axis = 0)
loss_unl = 0.5*(CT_ -T.mean(l_unl) + T.mean(T.nnet.softplus(l_unl)) -np.log(1) + T.mean(T.nnet.softplus(l_gen)))
zeros = np.zeros(100)
train_err = T.mean(T.neq(T.argmax(output_before_softmax_lab,axis=1),labels))
train_err2 = T.mean(T.le(T.max(output_before_softmax_lab,axis=1),zeros)) #mis-classification
# test error
output_before_softmax = ll.get_output(disc_layers[-1], x_lab, deterministic=True) # no training
test_err = T.mean(T.neq(T.argmax(output_before_softmax,axis=1),labels))
# Theano functions for training the disc net
lr = T.scalar()
disc_params = ll.get_all_params(disc_layers, trainable=True)
disc_param_updates = nn.adam_updates(disc_params, loss_lab + args.unlabeled_weight*loss_unl, lr=lr, mom1=0.5)
disc_param_avg = [th.shared(np.cast[th.config.floatX](0.*p.get_value())) for p in disc_params]
disc_avg_updates = [(a,a+0.0001*(p-a)) for p,a in zip(disc_params,disc_param_avg)]
disc_avg_givens = [(p,a) for p,a in zip(disc_params,disc_param_avg)]
init_param = th.function(inputs=[x_lab], outputs=None, updates=init_updates) # data based initialization
train_batch_disc = th.function(inputs=[x_lab,labels,x_unl,training_targets,training_targets2,lr], outputs=[loss_lab, loss_unl, train_err,train_err2,output_before_softmax_unl,output_before_softmax_unl_], updates=disc_param_updates+disc_avg_updates)
test_batch = th.function(inputs=[x_lab,labels], outputs=test_err, givens=disc_avg_givens)
samplefun = th.function(inputs=[],outputs=gen_dat)
# Theano functions for training the gen net
output_unl = ll.get_output(disc_layers[-2], x_unl, deterministic=False)
output_gen = ll.get_output(disc_layers[-2], gen_dat, deterministic=False)
m1 = T.mean(output_unl,axis=0)
m2 = T.mean(output_gen,axis=0)
loss_gen = T.mean(abs(m1-m2)) # feature matching loss, L1 loss
gen_params = ll.get_all_params(gen_layers, trainable=True)
gen_param_updates = nn.adam_updates(gen_params, loss_gen, lr=lr, mom1=0.5)
train_batch_gen = th.function(inputs=[x_unl,lr], outputs=loss_gen, updates=gen_param_updates)
# select labeled data
inds = rng_data.permutation(trainx.shape[0])
trainx = trainx[inds]
trainy = trainy[inds]
txs = []
tys = []
for j in range(10):
txs.append(trainx[trainy==j][:args.count])
tys.append(trainy[trainy==j][:args.count])
txs = np.concatenate(txs, axis=0)
tys = np.concatenate(tys, axis=0)
# //////////// perform training //////////////
start_epoch = 0
training_targets = np.float32(np.zeros((len(trainx_unl_org), 10))) # for saving the previous results
training_targets2 = np.float32(np.zeros((len(trainx_unl_org), 128)))
ensemble_prediction = np.float32(np.zeros((len(trainx_unl_org), 10)))
ensemble_prediction2 = np.float32(np.zeros((len(trainx_unl_org), 128)))
training_target_var = np.float32(np.zeros((100, 10)))
training_target_var2 = np.float32(np.zeros((100, 128)))
for epoch in range(1000): #no learning rate decay. More epochs may give better result
begin = time.time()
lr = args.learning_rate #no decay of learning rate
trainx = [] #empty
trainy = []
trainx_unl = []
trainx_unl2 = []
for t in range(int(np.ceil(trainx_unl_org.shape[0]/float(txs.shape[0])))):
inds = rng.permutation(txs.shape[0])
trainx.append(txs[inds]) #shuffle
trainy.append(tys[inds]) #shuffle 50000 labeled!
trainx = np.concatenate(trainx, axis=0)
trainy = np.concatenate(trainy, axis=0) # labeled data
indices_all = rng.permutation(trainx_unl_org.shape[0])
trainx_unl = trainx_unl_org[indices_all] # all can be treated as unlabeled examples
trainx_unl2 = trainx_unl2_org[rng.permutation(trainx_unl2_org.shape[0])] # trainx_unl2 not equals to trainx_unl, the indexs are different
training_target_var = training_targets[indices_all]
training_target_var2 = training_targets2[indices_all] #force the labeled and unlabeled to be the same 50000:50000 1:1
##################
##prepair dataset
##################
if epoch==0:
print(trainx.shape)
init_param(trainx[:1000]) # data based initialization
indices_l = trainx.shape[0]
indices_ul = trainx_unl.shape[0]
#inde = np.range()
noisy_a = []
for start_idx in range(0,indices_l): # from 0 to 50000
img_pre = trainx[start_idx]
if np.random.uniform() >0.5:
img_pre = img_pre[:,:,::-1] # reversal
t = 2
crop = 2
ofs0 = np.random.randint(-t, t + 1) + crop
ofs1 = np.random.randint(-t, t + 1) + crop
img_a = img_pre[:, ofs0:ofs0+32, ofs1:ofs1+32]
noisy_a.append(img_a)
noisy_a = np.array(noisy_a)
trainx = noisy_a
noisy_a, noisy_b,noisy_c = [], [], []
for start_idx in range(0,indices_ul): # from 0 to 50000
img_pre_a = trainx_unl[start_idx]
img_pre_b = trainx_unl2[start_idx]
if np.random.uniform() >0.5:
img_pre_a = img_pre_a[:,:,::-1]
if np.random.uniform() >0.5:
img_pre_b = img_pre_b[:,:,::-1]
img_pre_c = img_pre_a
t = 2
crop = 2
ofs0 = np.random.randint(-t, t + 1) + crop ##crop
ofs1 = np.random.randint(-t, t + 1) + crop
img_a = img_pre_a[:, ofs0:ofs0+32, ofs1:ofs1+32]
ofs0 = np.random.randint(-t, t + 1) + crop
ofs1 = np.random.randint(-t, t + 1) + crop
img_b = img_pre_b[:, ofs0:ofs0+32, ofs1:ofs1+32]
ofs0 = np.random.randint(-t, t + 1) + crop
ofs1 = np.random.randint(-t, t + 1) + crop
img_c = img_pre_c[:, ofs0:ofs0+32, ofs1:ofs1+32]
noisy_a.append(img_a)
noisy_b.append(img_b) # maybe used in the future
noisy_c.append(img_c) # maybe used in the future
noisy_a = np.array(noisy_a)
noisy_b = np.array(noisy_b)
noisy_c = np.array(noisy_c)
trainx_unl = noisy_a
trainx_unl2 = noisy_b
trainx_unl3 = noisy_c
epoch_predictions = np.float32(np.zeros((len(trainx_unl_org), 10)))
epoch_predictions2 = np.float32(np.zeros((len(trainx_unl_org), 128)))
training_targets = np.float32(training_targets)
training_targets2 = np.float32(training_targets2)
# train
loss_lab = 0.
loss_unl = 0.
train_err = 0.
train_err2 = 0.
gen_loss = 0.
for t in range(nr_batches_train):
ran_from = t*args.batch_size
ran_to = (t+1)*args.batch_size
ll, lu, te,te2,prediction,prediction2 = train_batch_disc(trainx[ran_from:ran_to],trainy[ran_from:ran_to],
trainx_unl[ran_from:ran_to],training_target_var[ran_from:ran_to],training_target_var2[ran_from:ran_to],lr)
indices = indices_all[ran_from:ran_to]
loss_lab += ll
loss_unl += lu
train_err += te
train_err2 +=te2
e = train_batch_gen(trainx_unl2[t*args.batch_size:(t+1)*args.batch_size],lr) # disc and gen for unlabeled data are different
gen_loss += float(e)
for i, j in enumerate(indices):
epoch_predictions[j] = prediction[i] # Gather epoch predictions.
epoch_predictions2[j] = prediction2[i] # Gather epoch predictions.
# record the results
ensemble_prediction = (prediction_decay * ensemble_prediction) + (1.0 - prediction_decay) * epoch_predictions
training_targets = ensemble_prediction / (1.0 - prediction_decay ** ((epoch - start_epoch) + 1.0))
ensemble_prediction2 = (prediction_decay * ensemble_prediction2) + (1.0 - prediction_decay) * epoch_predictions2
training_targets2 = ensemble_prediction2 / (1.0 - prediction_decay ** ((epoch - start_epoch) + 1.0))
loss_lab /= nr_batches_train
loss_unl /= nr_batches_train
train_err /= nr_batches_train
train_err2 /=nr_batches_train
# test
test_err = 0.
for t in range(nr_batches_test):
test_err += test_batch(testx[t*args.batch_size:(t+1)*args.batch_size],testy[t*args.batch_size:(t+1)*args.batch_size])
test_err /= nr_batches_test
# report
print("Epoch %d, time = %ds, loss_lab = %.4f, loss_unl = %.4f, train err = %.4f, train err2 = %.4f,gen loss = %.4f,test err = %.4f" % (epoch, time.time()-begin, loss_lab, loss_unl, train_err,train_err2,gen_loss,test_err))
sys.stdout.flush()
# generate samples from the model
sample_x = samplefun()
img_bhwc = np.transpose(sample_x[:100,], (0, 2, 3, 1))
img_tile = plotting.img_tile(img_bhwc, aspect_ratio=1.0, border_color=1.0, stretch=True)
img = plotting.plot_img(img_tile, title='CIFAR10 samples')
plotting.plt.savefig("cifar_sample_CT.png")
# save params
np.savez('disc_params.npz', *[p.get_value() for p in disc_params])
np.savez('gen_params.npz', *[p.get_value() for p in gen_params])
| [
"Bing@DESKTOP-U3JQ8RQ.localdomain"
] | Bing@DESKTOP-U3JQ8RQ.localdomain |
032e5734a831e772bdfff32f11217686a1c04444 | 2c851851595c45bb74dc3ee54c6fcb8930cced70 | /soojuspump.py | 122dc37e5bdf2ad6eb970199c31cd9f3c834fca4 | [] | no_license | RaivoKasepuu/UTpythonProject | 790c4c2f8f33c61959e37575ea734f08b1263671 | 67f63ae58421fada8689f9597090990e4bb01aae | refs/heads/master | 2023-01-30T19:49:24.414271 | 2020-12-16T12:10:34 | 2020-12-16T12:10:34 | 321,965,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | import json
import time
from inputimeout import inputimeout, TimeoutOccurred
# NB! sisesta käsureal pip install inputimeout
# soojuspump on vaikimisi sisselülitatud.
# Raspberry puhul tähendab see et, konkreetse soojuspumba tööd
# juhtiva pin-i tase on "1":
deviceOn = True
try:
fileName = inputimeout('Sisesta andmefaili nimi (vaikimisi maindata.txt): ', timeout=10)
except TimeoutOccurred:
print('valisin vaikimisi andmefailiks maindata.txt')
fileName = 'maindata.txt'
print("Alustame tööd. Kui terminalis on True, on seade sisse lülitatud, kui False, siis välja lülitatud")
# Timeri ploki algus:
while True:
# Mida teha siis, kui faili ei ole:
try:
with open(fileName, 'r') as filehandle:
mainData = json.load(filehandle)
filehandle.close()
except IOError:
print('file not found')
print('Raspberry puhul anname siin ühele väljundile pinge peale, et LED alarmeeriks')
# kontrollime, kas seade peab olema sisse- või väljalülitatud
# antud juhul on soojuspump top3 elektrihinnaga tundidel väljalülitatud
if mainData[0].get('top_3') is False:
deviceOn = False
else:
deviceOn = True
# Kontrolliks prindime terminali, mis on hetke seis:
print(deviceOn)
# kontrollime olukorda uuesti 5 min (300sec) pärast
# programmi testimiseks kasuta näiteks 3 sec: time.sleep(3)
time.sleep(3)
| [
"raivokasepuu@gmail.com"
] | raivokasepuu@gmail.com |
fd89ac55a8ee527c7f1559161412d97ed0ce200e | 1fd00dfd486f9b184eb5d7b3578ab5b182b3742e | /end2you/training/end2you/end2you/tfrecord_generator/generator.py | d94de92149c035b31d093695613a8e390fa7772f | [] | no_license | dntai/MuSe2020 | a607dc33875d6eace092287882515cd7766c016a | 25be7fa868e6db40563161dc9ede200b5806fc48 | refs/heads/master | 2023-05-14T15:07:17.466731 | 2021-02-11T07:54:00 | 2021-02-11T07:54:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,619 | py | import csv
import os
import numpy as np
import tensorflow as tf
import copy
import sys
import re
sys.path.append("..")
from pathlib import Path
from io import BytesIO
from abc import ABCMeta, abstractmethod
from moviepy.editor import VideoFileClip, AudioFileClip
from ..rw.file_reader import FileReader
from functools import partial
class Generator(metaclass=ABCMeta):
def __init__(self,
reader: FileReader,
input_type:str,
upsample:bool = True,
delimiter:str = ';'):
self.input_type = self._get_input_type(input_type.lower())
self.attributes_name, self.attributes_type, self.data = \
reader.read()
label_idx = self.attributes_name.index('label')
file_idx = self.attributes_name.index('file')
label_type = self.attributes_type[label_idx]
kwargs = {}
if label_type == 'str':
read_label_file = FileReader.read_delimiter_file
kwargs['delimiter'] = delimiter
else:
read_label_file = self._read_single_label
kwargs['label_type'] = label_type
self.dict_files = dict()
for row in self.data[:, [file_idx, label_idx]]:
data_file = row[0]
label_file = row[1]
if label_type != 'str':
kwargs['file'] = data_file
names, types, data = read_label_file(label_file, **kwargs)
time_idx = names.index('time')
labels_idx = np.delete(np.arange(0, len(data[0,:])), time_idx)
num_labels = len(data[0, 1:])
self.dict_files[data_file] = {
'time': np.reshape(self._get_label_type(data[:, time_idx], types[0]),
(-1,1)),
'labels': np.reshape(self._get_label_type(data[:, labels_idx], types[1]),
(-1, num_labels))
}
if upsample and num_labels == 1:
self.dict_files = self.upsample(self.dict_files)
def upsample(self, sample_data):
classes = [int(x['labels'][0]) for x in sample_data.values()]
class_ids = set(classes)
num_samples_per_class = {class_name: sum(x == class_name for x in classes) for class_name in class_ids}
max_samples = np.max(list(num_samples_per_class.values()))
augmented_data = copy.copy(sample_data)
for class_name, n_samples in num_samples_per_class.items():
n_samples_to_add = max_samples - n_samples
while n_samples_to_add > 0:
for key, value in sample_data.items():
label = int(value['labels'][0])
sample = key
if n_samples_to_add <= 0:
break
if label == class_name:
augmented_data[sample + '_' + str(n_samples_to_add)] = label
n_samples_to_add -= 1
return augmented_data
def _get_label_type(self, label, _type):
if 'float' in _type:
return list([np.float32(x) for x in label])
return list([np.int32(x) for x in label])
def _read_single_label(self, label, file=None, label_type=None):
clip = VideoFileClip
if 'audio' in self.input_type:
clip = AudioFileClip
end_time = clip(str(file)).duration
time = np.vstack([0.0, end_time])
label = np.reshape(np.repeat(self._get_label_type(label, label_type), 2), (-1, 1))
return ['time', 'labels'], ['float', label_type], np.reshape(np.hstack( [time, label]) , (-1, 2))
def _get_input_type(self, input_type):
correct_types = ['audio','video','audiovisual']
if input_type not in correct_types:
raise ValueError('input_type should be one of {}'.format(correct_types),
'[{}] found'.format(input_type))
return input_type
def _int_feauture(self, value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feauture(self, value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def write_tfrecords(self, tfrecords_folder):
if not os.path.exists(str(tfrecords_folder)):
os.system('mkdir -p {}'.format(tfrecords_folder))
print('\n Start generating tfrecords \n')
for data_file in self.dict_files.keys():
print('Writing file : {}'.format(data_file))
basename = os.path.basename(os.path.splitext(data_file)[0])
if re.search("_[0-9]+$", data_file):
add = os.path.splitext(data_file)[1].split('_')[1]
basename += '_' + add
data_file = re.sub(r'_[0-9]+$', '', data_file)
writer = tf.python_io.TFRecordWriter(
(Path(tfrecords_folder) / '{}.tfrecords'.format(basename)
).as_posix())
self.serialize_sample(writer, data_file, basename)
@abstractmethod
def _get_samples(self, data_file):
pass
@abstractmethod
def serialize_sample(self, writer, data_file, subject_id):
pass
| [
"stappen@ieee.org"
] | stappen@ieee.org |
62da721bc8ad43f04b7a9be6e96b270824ae74eb | 0b6a08b596bdceb97b64b69d3ea98ace038bf329 | /projects/migrations/0014_auto_20191027_0659.py | 8f511d4c7848e82d454ab5a93c1f01b518e88f61 | [] | no_license | sammydowds/projectautomation | 5806a3f1ca2cb9604433f658e90b41198f08322d | caec4774c2ae350f66a8e525729547bcc2cc9c97 | refs/heads/master | 2022-09-30T16:13:42.677455 | 2020-05-26T21:51:07 | 2020-05-26T21:51:07 | 167,870,654 | 0 | 0 | null | 2020-06-05T21:43:48 | 2019-01-27T23:26:50 | HTML | UTF-8 | Python | false | false | 2,110 | py | # Generated by Django 2.2.3 on 2019-10-27 11:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0013_auto_20191017_0425'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='scheduled',
new_name='Assembly_Scheduled',
),
migrations.AddField(
model_name='project',
name='Customer_Runoff_Scheduled',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Documentation_Scheduled',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Electrical_Release_Scheduled',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Finishing_Scheduled',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Install_FinishSchedulede',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Install_Start_Scheduled',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Internal_Runoff_Scheduled',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Manufacturing_Scheduled',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Mechanical_Release_Scheduled',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='Ship_Scheduled',
field=models.BooleanField(default=False),
),
]
| [
"sammydowds1993@gmail.com"
] | sammydowds1993@gmail.com |
fd3e5143b59240a71c1e3b8480a1b3ee447bf8b6 | c16c67a4f94490b60de17afd6a82237650d8a08a | /henry/environments.py | 1e5e4b4fba2415c63b84c09aaa90a40dd96895f1 | [] | no_license | kailIII/HenryFACTService | fc4305a992323139c14fd4c235ea8fea061fd6e7 | 328a47020099bd31631211c8662ee149fb853e4a | refs/heads/master | 2021-01-20T04:16:27.028413 | 2016-04-06T20:10:34 | 2016-04-06T20:10:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | import datetime
import os
from jinja2 import Environment, FileSystemLoader
from henry.invoice.dao import PaymentFormat
from henry.misc import id_type, fix_id, abs_string, value_from_cents, get_total
def my_finalize(x):
return '' if x is None else x
def fix_path(x):
return os.path.split(x)[1]
def display_date(x):
if isinstance(x, datetime.datetime):
return x.date().isoformat()
return x.isoformat()
def make_jinja_env(template_paths):
jinja_env = Environment(loader=FileSystemLoader(template_paths),
finalize=my_finalize)
jinja_env.globals.update({
'id_type': id_type,
'fix_id': fix_id,
'abs': abs_string,
'value_from_cents': value_from_cents,
'get_total': get_total,
'today': datetime.date.today,
'PaymentFormat': PaymentFormat,
'fix_path': fix_path,
'display_date': display_date,
})
return jinja_env
| [
"Han Qi"
] | Han Qi |
524a64d718c9a87331dcd95f4b5511761a102a97 | 3e397609ebd59d50ed0f9928e6bd039030e35f9a | /contract_api/lambda_handler.py | 4757ef1411c05ce3dff3425d4a41156dd03276bb | [] | no_license | prashantramangupta/marketplace | d8f64462668f1bb15c37fd52c17236d7565e5ae5 | acae91d90ec8626bc79ae46168c37a4d8bbab46a | refs/heads/master | 2020-06-05T15:48:19.063615 | 2019-06-26T05:28:16 | 2019-06-26T05:28:16 | 159,120,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,511 | py | import json
import logging
import re
import traceback
from schema import Schema, And
from common.constant import NETWORKS
from common.repository import Repository
from mpe import MPE
from registry import Registry
NETWORKS_NAME = dict((NETWORKS[netId]['name'], netId) for netId in NETWORKS.keys())
db = dict((netId, Repository(net_id=netId)) for netId in NETWORKS.keys())
def request_handler(event, context):
print(event)
if 'path' not in event:
return get_response(400, "Bad Request")
try:
payload_dict = None
resp_dta = None
path = event['path'].lower()
stage = event['requestContext']['stage']
net_id = NETWORKS_NAME[stage]
if event['httpMethod'] == 'POST':
body = event['body']
if body is not None and len(body) > 0:
payload_dict = json.loads(body)
elif event['httpMethod'] == 'GET':
payload_dict = event.get('queryStringParameters')
else:
return get_response(400, "Bad Request")
if path in ["/service", "/feedback"] or path[0:4] == "/org" or path[0:5] == "/user":
obj_reg = Registry(obj_repo=db[net_id])
if "/org" == path:
resp_dta = obj_reg.get_all_org()
elif re.match("(\/service)[/]{0,1}$", path):
if payload_dict is None:
payload_dict = {}
resp_dta = obj_reg.get_all_srvcs(qry_param=payload_dict)
elif re.match("(\/org\/)[^\/]*(\/service\/)[^\/]*(\/group)[/]{0,1}$", path):
params = path.split("/")
org_id = params[2]
service_id = params[4]
resp_dta = obj_reg.get_group_info(org_id=org_id, service_id=service_id)
elif "/channels" == path:
obj_mpe = MPE(net_id=net_id, obj_repo=db[net_id])
resp_dta = obj_mpe.get_channels_by_user_address(payload_dict['user_address'],
payload_dict.get('org_id', None),
payload_dict.get('service_id', None))
elif re.match("(\/user\/)[^\/]*(\/feedback)[/]{0,1}$", path):
params = path.split("/")
user_address = params[2]
resp_dta = get_user_feedback(user_address=user_address, obj_reg=obj_reg)
elif "/feedback" == path:
resp_dta = set_user_feedback(payload_dict['feedback'], obj_reg=obj_reg, net_id=net_id)
else:
return get_response(400, "Invalid URL path.")
if resp_dta is None:
err_msg = {'status': 'failed', 'error': 'Bad Request', 'api': event['path'], 'payload': payload_dict}
response = get_response(500, err_msg)
else:
response = get_response(200, {"status": "success", "data": resp_dta})
except Exception as e:
err_msg = {"status": "failed", "error": repr(e), 'api': event['path'], 'payload': payload_dict}
response = get_response(500, err_msg)
traceback.print_exc()
return response
def check_for_blank(field):
if field is None or len(field) == 0:
return True
return False
def get_user_feedback(user_address, obj_reg):
    """Return feedback records for *user_address* from the registry.

    Returns an empty list when the address is blank (None or empty).
    """
    if check_for_blank(user_address):
        return []
    return obj_reg.get_usr_feedbk(user_address)
def set_user_feedback(feedbk_info, obj_reg, net_id):
    """Validate and persist one user feedback record.

    Returns [] on success and None when validation or persistence fails
    (callers treat None as a bad-request / error response).
    """
    feedbk_recorded = False
    # Every field is mandatory: votes are booleans, everything else strings.
    schema = Schema([{'user_address': And(str),
                      'org_id': And(str),
                      'service_id': And(str),
                      'up_vote': bool,
                      'down_vote': bool,
                      'comment': And(str),
                      'signature': And(str)
                      }])
    try:
        feedback_data = schema.validate([feedbk_info])
        feedbk_recorded = obj_reg.set_usr_feedbk(feedback_data[0], net_id=net_id)
    except Exception as err:
        # Schema or repository failure: log and signal an error to the caller.
        print("Invalid Input ", err)
        return None
    if feedbk_recorded:
        return []
    return None
def get_response(status_code, message):
    """Build an API Gateway proxy response with permissive CORS headers.

    *message* is JSON-serialised into the body; *status_code* is passed
    through unchanged.
    """
    cors_headers = {
        'Content-Type': 'application/json',
        "X-Requested-With": '*',
        "Access-Control-Allow-Headers": 'Access-Control-Allow-Origin, Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with',
        "Access-Control-Allow-Origin": '*',
        "Access-Control-Allow-Methods": 'GET,OPTIONS,POST',
    }
    response = {
        'statusCode': status_code,
        'body': json.dumps(message),
    }
    response['headers'] = cors_headers
    return response
| [
"you@example.com"
] | you@example.com |
2085077660f03d47cd34ea2f24d5b600d7736f19 | b7661f8e74e6aec15a9eba85f90cf351005295dc | /plot_class.py | 5ed3353e25c4fde9f2dfe2624c30cfb9290648a1 | [
"MIT"
] | permissive | tabsa/P2P_market_MAD | 5f983e2961e6466c8d9756d4e7bab5bbbe9ee940 | ed54015a8dfd345e8b8de24928c408a01e63ff06 | refs/heads/main | 2023-07-13T05:40:00.684866 | 2021-08-28T17:52:29 | 2021-08-28T17:52:29 | 364,288,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,684 | py | ## Class for the plots
# This files contains the plots (as class) used in this repository.
# They are used in the P2P_market_sim.file or in the Jupyter notebooks
#%% Import packages
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import plotly.graph_objs as go
import plotly.express as px
#%% Plot parameters
plt_colmap = plt.get_cmap("tab10", 15)
sns.set_style("whitegrid")
#%% Plot for the total reward over episodes
def plot_reward_per_episode(df_score, y_opt, plt_label, plt_marker, axis_label, plt_title):
    """Plot one reward curve per RL agent across episodes.

    df_score  : list of per-agent DataFrames with 'total_rd' and 'final_step'
                columns (one row per episode) -- assumed; confirm with caller.
    y_opt     : 'total_reward' plots total reward, 'gamma_rate' plots
                total_rd / final_step (success rate per episode).
    Displays the figure via plt.show(); returns nothing.
    """
    # Get the plot parameters
    no_RL_agents = len(df_score)
    no_episodes = df_score[0].shape[0]
    plt.figure(figsize=(10,7))
    x = np.arange(0, no_episodes)
    for i in range(no_RL_agents):
        # y-axis option
        if y_opt == 'total_reward':
            y = df_score[i]['total_rd']
        elif y_opt == 'gamma_rate':
            y = df_score[i]['total_rd'] / df_score[i]['final_step'] # gamma_per_epi (success rate)
        # NOTE(review): if y_opt is neither option, `y` from the previous
        # iteration (or an UnboundLocalError on the first) is used.
        plt.plot(x,y, label=plt_label[i], marker=plt_marker[i], linestyle='--')
    # Legend and labels of the plot
    plt.legend(fontsize=16)
    plt.ylabel(axis_label[1], fontsize=16)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    plt.xlabel(axis_label[0], fontsize=16)
    plt.title(plt_title, fontsize=20)
    plt.show()
def plot_reward_distribution(df_score, plt_label, axis_label, plt_title):
    """Scatter total reward against final state, one colour per RL agent.

    df_score is a list of per-agent DataFrames with 'final_state' and
    'total_rd' columns.  Displays the figure via plt.show(); returns nothing.
    NOTE(review): only three colours are defined, so more than three agents
    would raise an IndexError -- confirm the intended agent count.
    """
    # Get the plot parameters
    no_RL_agents = len(df_score)
    plt_colmap = plt.get_cmap("tab10", no_RL_agents)  # currently unused below
    plt.figure(figsize=(10,7))
    color = ['blue', 'green', 'orange']
    # One scatter series per agent (all on the same axes).
    for i in range(no_RL_agents):
        x = df_score[i]['final_state']
        y = df_score[i]['total_rd']
        #plt.subplot(no_RL_agents,1,i+1)
        plt.scatter(x, y, label=plt_label[i], c=color[i])
    # Legend and labels of the plot
    plt.legend(fontsize=16)
    plt.xlabel(axis_label[0], fontsize=16)
    plt.ylabel(axis_label[1], fontsize=16)
    plt.show()
def plot_action_choice(agent, axis_label, plt_title):
    """Visualise an agent's per-trial choices in two stacked subplots.

    Top: scatter of the chosen offer id (agent.action_n[0, :]) per trial,
    coloured by offer.  Bottom: bar chart of agent.action_n[1, :]
    (presumably the traded quantity/energy per trial -- TODO confirm).
    Displays the figure via plt.show(); returns nothing.
    """
    plt.figure(figsize=(10,7))
    trials = np.arange(0, agent.env.no_trials)
    # Subplot 1: which offer was selected at each trial.
    plt.subplot(211) # 2 rows and 1 column
    plt.scatter(trials, agent.action_n[0,:], cmap=plt_colmap, c=agent.action_n[0,:], marker='.', alpha=1)
    plt.title(plt_title[0], fontsize=16)
    plt.xlabel(axis_label[0], fontsize=16)
    plt.ylabel(axis_label[1], fontsize=16)
    plt.yticks(list(range(agent.env.no_offers)))
    plt.colorbar()
    # Subplot 2: magnitude associated with each trial's action.
    plt.subplot(212)
    plt.bar(trials, agent.action_n[1,:])
    #plt.bar(trials, self.state_n)
    plt.title(plt_title[1], fontsize=16)
    plt.xlabel(axis_label[0], fontsize=16)
    plt.ylabel(axis_label[2], fontsize=16)
    #plt.legend()
    plt.show()
def plot_regret_prob(regret_prob, epi_id, plt_label, axis_label, plt_title):
    """Plot regret for episode *epi_id*, cumulative (top) and per-trial (bottom).

    regret_prob is indexed [agent, episode, trial]; regret per trial is
    computed as 1 - regret_prob.  Displays the figure via plt.show().
    """
    # Plot regret over trial (opportunity cost of selecting a better action)
    agents = regret_prob.shape[0] # number of RL agents
    plt.figure(figsize=(10, 7))
    # Subplot 1: cumulative regret over trials.
    plt.subplot(211) # 2 rows and 1 column
    for a in range(agents):
        plt.plot(np.cumsum(1 - regret_prob[a, epi_id, :]), label=plt_label[a]) # Plot per RL_agent
    plt.xlabel(axis_label[0], fontsize=16)
    plt.ylabel(axis_label[1], fontsize=16)
    plt.title(plt_title[0], fontsize=16)
    plt.legend()
    # Subplot 2: instantaneous regret per trial.
    plt.subplot(212)
    for a in range(agents):
        plt.plot(1 - regret_prob[a, epi_id, :], label=plt_label[a])
    plt.xlabel(axis_label[0], fontsize=16)
    plt.ylabel(axis_label[1], fontsize=16)
    plt.title(plt_title[1], fontsize=16)
    plt.legend()
    plt.show()
"tas.tiago.sousa@gmail.com"
] | tas.tiago.sousa@gmail.com |
699bfc4d77e051f4b0b9b95cde59fbb62b5cf72d | 4edbeb3e2d3263897810a358d8c95854a468c3ca | /python3/version/python_version.py | dec52afee6ab14dce005f6ae31bb0e544017de89 | [
"MIT"
] | permissive | jtraver/dev | f505d15d45b67a59d11306cc7252114c265f388b | 2197e3443c7619b856470558b737d85fe1f77a5a | refs/heads/master | 2023-08-06T02:17:58.601861 | 2023-08-01T16:58:44 | 2023-08-01T16:58:44 | 14,509,952 | 0 | 1 | MIT | 2020-10-14T18:32:48 | 2013-11-19T00:51:19 | Python | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python3
#!/usr/bin/python
import os
import platform
import sys
import aerospike
def main():
    """Print OS, Python interpreter, and Aerospike client version details."""
    print("\nos")
    print("os.name = %s" % str(os.name))
    print("sys.platform = %s" % str(sys.platform))
    print("platform.platform() = %s" % str(platform.platform()))
    print("\npython")
    print("sys.version = %s" % str(sys.version))
    print("sys.version_info = %s" % str(sys.version_info))
    print("sys.version_info[0] = %s" % str(sys.version_info[0]))
    print("\naerospike")
    try:
        print("aerospike client version is %s" % str(aerospike.__version__))
    except Exception as e:
        # Some client builds may lack __version__; report instead of crashing.
        print("e = %s" % str(e))
        pass


# Runs at import time: the original script has no __main__ guard.
main()
| [
"john@aeropsike.com"
] | john@aeropsike.com |
ee4db2ca88503d29c70e9cbeb5e52c9f5bdaf050 | c22ed655a42ca61fc1212c493fc099c05e81c0c3 | /ray_dqn_agent.py | 34d8d16321e8c8db00da6292077d9126fcfc0188 | [
"MIT"
] | permissive | xiawenwen49/Multi-Commander | 89b5e969b0dfa4a9936f1c0c0dba2f9607a48d6f | 88d1e10f593f0a3edfcdd196e2142020487ce736 | refs/heads/master | 2020-06-25T17:09:46.124740 | 2019-08-12T12:00:55 | 2019-08-12T12:00:55 | 199,373,782 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,615 | py | import ray
import ray.rllib.agents.dqn as dqn
from ray.rllib.agents.dqn import DQNTrainer
from ray.tune.logger import pretty_print
import gym
import gym_cityflow
from gym_cityflow.envs.cityflow_env import CityflowGymEnv
from utility import parse_roadnet
import logging
from datetime import datetime
from tqdm import tqdm
import argparse
import json
def env_config(args):
    """Build the CityFlow environment config dict from CLI args and JSON files.

    Loads the global config named by args.config, augments it with the
    roadnet's lane/phase info and CLI overrides, and derives the agent's
    state/action sizes.  Returns the combined config dict.
    """
    # preparing config
    # # for environment
    config = json.load(open(args.config))
    config["num_step"] = args.num_step
    cityflow_config = json.load(open(config['cityflow_config_file']))
    roadnetFile = cityflow_config['dir'] + cityflow_config['roadnetFile']
    config["lane_phase_info"] = parse_roadnet(roadnetFile)
    config["state_time_span"] = args.state_time_span
    config["time_span"] = args.time_span
    # # for agent
    # NOTE(review): only the first intersection's phases/lanes are used --
    # assumes a single-intersection roadnet; confirm for larger maps.
    intersection_id = list(config['lane_phase_info'].keys())[0]
    phase_list = config['lane_phase_info'][intersection_id]['phase']
    logging.info(phase_list)
    # State = vehicle count per start lane (the current phase is
    # intentionally excluded; see the commented-out alternative upstream).
    config["state_size"] = len(config['lane_phase_info'][intersection_id]['start_lane'])
    config["action_size"] = len(phase_list)
    config["batch_size"] = args.batch_size
    return config
def agent_config(config_env):
    """Build the RLlib DQN trainer config (CPU-only) wrapping *config_env*."""
    config = dqn.DEFAULT_CONFIG.copy()
    config["num_gpus"] = 0
    config["num_workers"] = 0
    config["num_cpus_per_worker"] = 8
    config["env"] = CityflowGymEnv
    config["env_config"] = config_env
    return config
def main():
    """Parse CLI arguments, build env/agent configs, and train DQN for 1000 iterations.

    Saves a checkpoint every 100 training iterations.
    """
    ray.init()
    logging.getLogger().setLevel(logging.INFO)
    date = datetime.now().strftime('%Y%m%d_%H%M%S')  # NOTE(review): unused below
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='config/global_config.json', help='config file')
    parser.add_argument('--algo', type=str, default='DQN', choices=['DQN', 'DDQN', 'DuelDQN'],
                        help='choose an algorithm')
    parser.add_argument('--inference', action="store_true", help='inference or training')
    parser.add_argument('--ckpt', type=str, help='inference or training')
    parser.add_argument('--epoch', type=int, default=100, help='number of training epochs')
    parser.add_argument('--num_step', type=int, default=10 ** 3,
                        help='number of timesteps for one episode, and for inference')
    parser.add_argument('--save_freq', type=int, default=100, help='model saving frequency')
    # NOTE(review): the help string below looks copy-pasted; the flag is the batch size.
    parser.add_argument('--batch_size', type=int, default=128, help='model saving frequency')
    parser.add_argument('--state_time_span', type=int, default=5, help='state interval to receive long term state')
    parser.add_argument('--time_span', type=int, default=30, help='time interval to collect data')
    args = parser.parse_args()
    config_env = env_config(args)
    config_agent = agent_config(config_env)
    trainer = DQNTrainer(
        env=CityflowGymEnv,
        config=config_agent)
    for i in range(1000):
        # Perform one iteration of training the policy with DQN.
        result = trainer.train()
        print(pretty_print(result))
        if (i+1) % 100 == 0:
            checkpoint = trainer.save()
            print("checkpoint saved at", checkpoint)


if __name__ == '__main__':
    main()
| [
"xiawenwen49@gmail.com"
] | xiawenwen49@gmail.com |
ec204e589862d7db078962cf5fe0c41711f5cbcb | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Logit/trend_Lag1Trend/cycle_30/ar_12/test_artificial_32_Logit_Lag1Trend_30_12_20.py | b308b4cbc07f69e3c5a3e07412703d09b5786f7b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 266 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 12); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
44bbd220a867b475b17b953f3f8b931f279851a6 | 76f55428277f1a951f648b202d5c6757bf9c446a | /utils/command_utils.py | 34d854f5bc8847615a2080e9a952c9a3bd523ee3 | [] | no_license | zdenekhynek/browser_snapshots | 5003378e01d73dbb877771097ad5ddb0a9aec997 | b63937032a0620f47b1f49fdd8e87ffd3a45a2d2 | refs/heads/master | 2023-01-30T14:10:48.321789 | 2018-12-02T22:39:32 | 2018-12-02T23:19:21 | 113,761,833 | 0 | 0 | null | 2023-01-12T23:55:40 | 2017-12-10T15:39:13 | Python | UTF-8 | Python | false | false | 1,091 | py | from races.models import RaceTask
from snapshots.models import Snapshot
def add_arguments(parser):
parser.add_argument('--limit', type=int)
parser.add_argument('--offset', type=int)
parser.add_argument('--override', type=bool)
parser.add_argument('--pk', type=int)
parser.add_argument('--race_id', type=int)
def get_snapshots_for_race(race_id):
sessions = RaceTask.objects.filter(race_id=race_id).values('task__session_id')
session_ids = [session['task__session_id'] for session in sessions]
return Snapshot.objects.filter(session_id__in=session_ids)
def get_snapshots(pk=False, race_id=False, limit=False, offset=False):
if pk:
snapshots = Snapshot.objects.filter(pk=pk)
elif race_id:
snapshots = get_snapshots_for_race(race_id)
else:
snapshots = Snapshot.objects.all()
if limit and offset:
snapshots = snapshots[offset:offset+limit]
elif limit:
snapshots = snapshots[:limit]
elif offset:
snapshots = snapshots[offset:]
else:
snapshots = snapshots
return snapshots
| [
"zdenek@signal-noise.co.uk"
] | zdenek@signal-noise.co.uk |
ec158af26b75ab0538218f360d8b6e41684761a2 | 4fe6a2c93d0895ea6e9e531d00d3eaec5fae6e41 | /affineCipher.py | f51c2da30e0b0d5083a3c0da70bcb0e43546dd16 | [] | no_license | thesrikarpaida/Few-Cryptography-Algorithms | fd10d21bf151ef34bb1cf1ce3c89392fc511a1d6 | 9f2a3c9b55b6b0a796b097dd6565f320adab3a1a | refs/heads/main | 2023-05-14T21:18:25.127188 | 2021-06-10T09:01:56 | 2021-06-10T09:01:56 | 375,634,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | # Affine Cipher encryption is the process of encrypting the data by multiplying each character
# with a key in Zn* and then adding a key in Zn.
print("--------------------\n--------------------\n--------------------\n")
print("In this program, we will encrypt the plain text using Affine Cipher.\n")
print("---------------------------------------------------------------------------------\n")
plainText = input("Enter the text (only alphabets) you want to encrypt:\n")
k1, k2 = input("Enter the key pair (space separated):").split() # k1 is multiplied and k2 is added
k1, k2 = int(k1), int(k2)
cipherText = ""
for i in range(len(plainText)):
ch = plainText[i]
ch = chr(((ord(ch)%32 - 1)*k1 + k2)%26 + (ord(ch)//32)*32 + 1)
# The above single line can replace the below 2 if conditions that have been quoted
'''
if ch.islower():
ch = chr(((ord(ch) - 97)*k1 + k2)%26 + 97)
elif ch.isupper():
ch = chr(((ord(ch) - 65)*k1 + k2)%26 + 65)
'''
#elif ch.isnumeric():
#ch = chr((ord(ch)*k1 + k2 - 48)%10 + 48)
cipherText = cipherText + ch
print("The encrypted text is:",cipherText) | [
"noreply@github.com"
] | noreply@github.com |
4c95071a51fe02bb18a1115c68728b88ea01fed9 | bcf3ef8d1c22efbce5d60b5824a084fb72ad9fbf | /mysite/blog/models.py | c71d46206a813d9558698fe44491cf59247e4800 | [] | no_license | wzpchris/djangotest | 2c1295b394acd782c637823d1ac0efb018a47ff1 | b8e5fa0d02fd746c1d6aa61bf53b6cb211173c6a | refs/heads/master | 2021-04-06T02:34:26.148195 | 2018-03-09T15:26:47 | 2018-03-09T15:26:47 | 124,557,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Article(models.Model):
title = models.CharField(max_length=32, default="title")
content = models.TextField(null=True)
pub_time = models.DateTimeField(null=True)
def __unicode__(self):
return self.title
| [
"271737050@qq.com"
] | 271737050@qq.com |
c16433c538e011662ea8eceff59a1b1c276665c8 | 8200911f29e15c30ead299c5eaf78a0181a95098 | /swiper/migrations/0016_userprofile_receive_email.py | 3cd3c06aff93ea1a0afce9b17ed0385c5aca8d67 | [
"MIT"
] | permissive | GFynbo/GoudaTime | a1a02ff0063e76ff47bcde1953a1ed7d68a823bd | add6723fd42f01be8ca12eee0f86c99f5600d8b9 | refs/heads/master | 2021-09-07T08:17:10.422540 | 2018-02-20T03:31:49 | 2018-02-20T03:31:49 | 108,687,629 | 2 | 5 | null | 2017-10-31T22:55:17 | 2017-10-28T22:05:46 | Python | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-11 00:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `receive_email` opt-in flag (default False) to UserProfile."""

    dependencies = [
        ('swiper', '0015_group'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='receive_email',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"gavfynbo@gmail.com"
] | gavfynbo@gmail.com |
712043d3b7115696d5bdd6313d0f7f1a528b8ed5 | 5aa93fb40c2b5f3f041094be5c67db72a38a8fe5 | /manage.py | 064aee261e8f6d7af01a257af05dfa810a644ef3 | [] | no_license | silviuz07/WebAtivLPC | 87dfee677493a1b40a908a29db9ac1260bccbde3 | 712ed5f70cdc981c409ea8d6a92294579dc3d303 | refs/heads/master | 2020-07-30T03:55:22.915936 | 2019-09-22T02:26:04 | 2019-09-22T02:26:04 | 210,071,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the WebAtivLPC project."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'WebAtivLPC.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"silviojrcarvalho@hotmail.com"
] | silviojrcarvalho@hotmail.com |
6fe4034060a6c7f766c9209cbf574bfaa93f0ddc | e321bf75e536df3565240e032caf812ed9467d59 | /main.py | 1d0f473df93d5d7e9069272555960e702901477b | [] | no_license | numaan0/Ping-pong-game-using-Kivy-python | 8e678c557679c4a72ee09c688e3bf0345fbe14f9 | 62479de34987a31526905e057c91c0b126772373 | refs/heads/master | 2023-03-20T18:25:50.277836 | 2021-03-07T08:24:39 | 2021-03-07T08:24:39 | 345,293,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty,ReferenceListProperty,ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
class PongPaddle(Widget):
    """Player paddle: tracks a score and reflects the ball on contact."""

    # Points scored by this paddle's player.
    score = NumericProperty(0)

    def bounce_ball(self,ball):
        # Reverse the ball's horizontal direction when it overlaps the paddle.
        if self.collide_widget(ball):
            ball.velocity_x *= -1
class PongBall(Widget):
    """The ball: advances by its velocity vector once per frame."""

    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    # Combined (vx, vy) view over the two components above.
    velocity = ReferenceListProperty(velocity_x,velocity_y)

    def move(self):
        # latest pos = current velocity + current position
        self.pos = Vector(*self.velocity) + self.pos
#update --moving the ball by calling the move and rest
class PongGame(Widget):
    """Root widget: owns the ball and both paddles and runs the game logic."""

    # Wired up from the kv file at runtime.
    ball = ObjectProperty(None)
    player1 = ObjectProperty(None)
    player2 = ObjectProperty(None)

    def serve_ball(self):
        # Launch the ball at speed 4 in a random direction.
        self.ball.velocity = Vector(4,0).rotate(randint(0, 360))

    def update(self,dt):
        """Advance one frame: move the ball, bounce it, and keep score."""
        self.ball.move()
        # Bounce off top and bottom, accelerating slightly each time.
        # NOTE(review): the literal 50 looks like the ball size in pixels --
        # confirm against the kv layout.
        if (self.ball.y <0 ) or (self.ball.y > self.height - 50):
            self.ball.velocity_y *= -1.1
        # Bounce off left/right walls; the opposing player scores a point.
        if (self.ball.x < 0) :
            self.ball.velocity_x *= -1
            self.player1.score +=1
        if (self.ball.x > self.width - 50):
            self.ball.velocity_x *= -1
            self.player2.score += 1
        # Paddle collisions.
        self.player1.bounce_ball(self.ball)
        self.player2.bounce_ball(self.ball)

    def on_touch_move(self, touch):
        # Dragging on the left quarter moves player1; right quarter, player2.
        if touch.x < self.width / 1/4:
            self.player1.center_y = touch.y
        if touch.x > self.width * 3/4:
            self.player2.center_y = touch.y
class PongApp(App):
    """Kivy application entry point: builds the game and starts the 60 FPS loop."""

    def build(self):
        game = PongGame()
        game.serve_ball()
        # Drive the game loop at 60 frames per second.
        Clock.schedule_interval(game.update,1.0/60.0)
        return game


# Runs at import time: the original script has no __main__ guard.
PongApp().run()
"syed07nomi@gmail.com"
] | syed07nomi@gmail.com |
125dfc3862cdc8ffda88fc4ac3b297c82fab1791 | b2f887a5590eada2a3914af12673211278c2941d | /hw2/multiAgents.py | 3dc28a3dcb557237bde3307b87a1bd4ec2adf67a | [] | no_license | anurag6/CSE537 | 7c269d08ef109556a9b1a704d70f58cfab137c13 | 2a82e86965ce031749dd3ec414a24fe39c906b2e | refs/heads/master | 2021-07-08T17:30:09.418956 | 2017-10-04T04:50:08 | 2017-10-04T04:50:08 | 105,730,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,264 | py | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
from game import Actions
class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function.
    """

    def getAction(self, gameState):
        """
        Choose, among the legal moves, one whose successor scores highest
        under self.evaluationFunction, breaking ties uniformly at random.
        Returns a Directions.X value ({North, South, West, East, Stop}).
        """
        # Collect legal moves and successor states
        legalMoves = gameState.getLegalActions()
        # Choose one of the best actions
        """print ("******New set of posns******")
        print ("current pos:",gameState.getPacmanPosition())
        print ("Legal neighbours:", Actions.getLegalNeighbors(gameState.getPacmanPosition(),gameState.getWalls()))"""
        scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
        bestScore = max(scores)
        bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
        chosenIndex = random.choice(bestIndices) # Pick randomly among the best
        "Add more of your code here if you want to"
        return legalMoves[chosenIndex]

    def evaluationFunction(self, currentGameState, action):
        """
        Score the successor state reached by taking *action* from
        *currentGameState* (higher is better): -400 for squares a
        non-scared ghost could reach next turn, +50 for eating food,
        minus the Manhattan distance to the nearest remaining food.
        """
        # Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood().asList()
        if successorGameState.getNumFood() > 0:
            minFoodDistance = min([manhattanDistance(newPos,pos) for pos in newFood])
        else:
            minFoodDistance = 0
        """print("***")
        print ("New food:",newFood)
        print ("newPos:", newPos)
        print ("minFoodDistance:",minFoodDistance)
        print ("successor has food:", successorGameState.hasFood(newPos[0],newPos[1]))
        print ("current has food", currentGameState.hasFood(newPos[0],newPos[1]))"""
        newGhostStates = successorGameState.getGhostStates()
        newGhostPositions = nextPossibleGhostStates(currentGameState)
        newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
        "*** YOUR CODE HERE ***"
        score = successorGameState.getScore()
        # Heavy penalty for stepping anywhere a dangerous ghost could move.
        if newPos in newGhostPositions:
            score -= 400
        # Reward moving onto a food square.
        if currentGameState.hasFood(newPos[0],newPos[1]):
            score += 50
        # Prefer positions closer to the nearest food.
        score -= minFoodDistance
        return score
def nextPossibleGhostStates(currentGameState):
    """Return every board position a non-scared ghost could occupy next turn.

    Scared ghosts (scaredTimer > 0) are harmless and therefore skipped.
    Agent index 0 is Pacman, so ghosts start at index 1.
    """
    result = []
    for index in range(1,currentGameState.getNumAgents()):
        ghostState = currentGameState.data.agentStates[index]
        if ghostState.scaredTimer > 0:
            continue
        validPositions=Actions.getLegalNeighbors(ghostState.getPosition(),currentGameState.getWalls())
        result.extend(validPositions)
    return result
def scoreEvaluationFunction(currentGameState):
    """
    This default evaluation function just returns the score of the state.
    The score is the same one displayed in the Pacman GUI.

    This evaluation function is meant for use with adversarial search agents
    (not reflex agents).
    """
    return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
    """
    Common base for the adversarial search agents (Minimax, AlphaBeta,
    Expectimax).  Abstract: it is extended, never instantiated directly.
    """

    def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
        self.index = 0 # Pacman is always agent index 0
        # Resolve the evaluation function by name from this module's globals.
        self.evaluationFunction = util.lookup(evalFn, globals())
        # Search depth in plies (one ply = every agent moves once).
        self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
    """
    Your minimax agent (question 2)
    """

    def getAction(self, gameState):
        """
        Returns the minimax action from the current gameState using self.depth
        and self.evaluationFunction.
        """
        "*** YOUR CODE HERE ***"
        successors = self.getSuccessorsWithValuesAction(gameState,0,0)
        value = max(successors,key=lambda s: s[0])
        return value[1]

    def getSuccessorsWithValuesAction(self, gameState, agentId, currDepth):
        """Return [(minimax value, action), ...] for agentId's legal moves."""
        if agentId == 0:
            # Pacman moving begins a new ply.
            currDepth+=1
        legalMoves = gameState.getLegalActions(agentId)
        if (agentId == gameState.getNumAgents()-1) and (currDepth == self.depth):
            # Last agent at the target depth: evaluate leaves directly
            # instead of expanding further successors.
            successorsValues = [(self.evaluationFunction(gameState.generateSuccessor(agentId,action)),action) for action in legalMoves]
        else:
            successors = [[gameState.generateSuccessor(agentId,action),action] for action in legalMoves]
            successorsValues = []
            for successor in successors:
                if successor[0].isWin() or successor[0].isLose(): #is a leaf node
                    value = self.evaluationFunction(successor[0])
                else:
                    if (agentId+1)%successor[0].getNumAgents() == 0:
                        # Next agent is Pacman: maximise over its options.
                        value = max(self.getSuccessorsWithValuesAction(successor[0],(agentId+1)%successor[0].getNumAgents(),currDepth),key=lambda s: s[0])[0]
                    else:
                        # Next agent is a ghost: minimise over its options.
                        value = min(self.getSuccessorsWithValuesAction(successor[0],(agentId+1)%successor[0].getNumAgents(),currDepth),key=lambda s: s[0])[0]
                successorsValues.append((value,successor[1]))
        return successorsValues
class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning (question 3)
    """

    def getAction(self, gameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        "*** YOUR CODE HERE ***"
        successors = self.getSuccessorsWithValuesAction(gameState, 0, 0,float("-inf"),float("inf"))
        value = max(successors, key=lambda s: s[0])
        return value[1]

    def getSuccessorsWithValuesAction(self, gameState, agentId, currDepth,alpha,beta):
        """
        Return [(value, action), ...] for agentId's legal moves, pruning
        sibling branches once alpha exceeds beta.
        """
        if agentId == 0:
            # Pacman moving begins a new ply.
            currDepth+=1
        legalMoves = gameState.getLegalActions(agentId)
        if (agentId == gameState.getNumAgents()-1) and (currDepth == self.depth):
            # Last agent at the target depth: evaluate leaves directly,
            # tightening beta (this is a minimising/ghost node) as we go.
            successorsValues = []
            for action in legalMoves:
                value = (self.evaluationFunction(gameState.generateSuccessor(agentId,action)))
                successorsValues.append((value,action))
                if value < beta:
                    beta = value
                if alpha > beta:
                    # Prune the remaining sibling actions.
                    break
        else:
            successorsValues = []
            for index,action in enumerate(legalMoves):
                successor = [gameState.generateSuccessor(agentId,action),action]
                if successor[0].isWin() or successor[0].isLose(): #is a leaf node
                    value = self.evaluationFunction(successor[0])
                    # Update alpha at Pacman (max) nodes, beta at ghost (min) nodes.
                    if (agentId) % successor[0].getNumAgents() == 0:
                        if value > alpha:
                            alpha = value
                    else:
                        if value < beta:
                            beta = value
                else:
                    if (agentId+1)%successor[0].getNumAgents() == 0:
                        # Next agent is Pacman: maximise over its options.
                        value = max(self.getSuccessorsWithValuesAction(successor[0],(agentId+1)%successor[0].getNumAgents(),currDepth,alpha,beta),key=lambda s: s[0])[0]
                    else:
                        # Next agent is a ghost: minimise over its options.
                        value = min(self.getSuccessorsWithValuesAction(successor[0],(agentId+1)%successor[0].getNumAgents(),currDepth,alpha,beta),key=lambda s: s[0])[0]
                    # Update alpha at Pacman (max) nodes, beta at ghost (min) nodes.
                    if (agentId) % successor[0].getNumAgents() == 0:
                        if value > alpha:
                            alpha = value
                    else:
                        if value < beta:
                            beta = value
                successorsValues.append((value,successor[1]))
                if alpha > beta:
                    # Prune the remaining sibling actions.
                    break
        return successorsValues
class ExpectimaxAgent(MultiAgentSearchAgent):
    """
    Your expectimax agent (question 4)
    """

    def getAction(self, gameState):
        """
        Returns the expectimax action using self.depth and self.evaluationFunction.

        All ghosts are modeled as choosing uniformly at random from their
        legal moves.
        """
        "*** YOUR CODE HERE ***"
        successors = self.getSuccessorsWithValuesAction(gameState, 0, 0)
        value = max(successors, key=lambda s: s[0])
        return value[1]

    def getSuccessorsWithValuesAction(self, gameState, agentId, currDepth):
        """Return [(expectimax value, action), ...] for agentId's legal moves."""
        if agentId == 0:
            # Pacman moving begins a new ply.
            currDepth+=1
        legalMoves = gameState.getLegalActions(agentId)
        if (agentId == gameState.getNumAgents()-1) and (currDepth == self.depth):
            # Last agent at the target depth: evaluate leaves directly
            # instead of expanding further successors.
            successorsValues = [(self.evaluationFunction(gameState.generateSuccessor(agentId,action)),action) for action in legalMoves]
        else:
            successors = [[gameState.generateSuccessor(agentId,action),action] for action in legalMoves]
            successorsValues = []
            for successor in successors:
                if successor[0].isWin() or successor[0].isLose(): #is a leaf node
                    value = self.evaluationFunction(successor[0])
                else:
                    if (agentId+1)%successor[0].getNumAgents() == 0:
                        # Next agent is Pacman: maximise over its options.
                        value = max(self.getSuccessorsWithValuesAction(successor[0],(agentId+1)%successor[0].getNumAgents(),currDepth),key=lambda s: s[0])[0]
                    else:
                        # Next agent is a ghost choosing uniformly at random:
                        # take the expected (mean) value of its options.
                        value = mean([suc[0] for suc in self.getSuccessorsWithValuesAction(successor[0],(agentId+1)%successor[0].getNumAgents(),currDepth)])
                successorsValues.append((value,successor[1]))
        return successorsValues
def mean(values):
    """Return the arithmetic mean of *values*, or 0.0 for an empty sequence.

    The max(..., 1) guard preserves the original behaviour of silently
    returning 0.0 instead of raising ZeroDivisionError on empty input.
    (Parameter renamed from `iter`, which shadowed the builtin.)
    """
    return float(sum(values)) / max(len(values), 1)
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: combines the built-in game score with
      * a -400 penalty when pacman's position is among the states returned
        by nextPossibleGhostStates (presumably the ghosts' possible next
        positions -- confirm against that helper),
      * a +50 bonus for standing on a food square,
      * minus the manhattan distance to the closest remaining pellet.
    """
    newPos = currentGameState.getPacmanPosition()
    newFood = currentGameState.getFood().asList()

    # Distance to the nearest remaining pellet (0 once the board is clear).
    if currentGameState.getNumFood() > 0:
        minFoodDistance = min(manhattanDistance(newPos, pos) for pos in newFood)
    else:
        minFoodDistance = 0

    newGhostPositions = nextPossibleGhostStates(currentGameState)

    score = currentGameState.getScore()
    if newPos in newGhostPositions:
        # A ghost may occupy this square next move - strongly discourage.
        score -= 400
    if currentGameState.hasFood(newPos[0], newPos[1]):
        score += 50
    # Prefer states closer to food.
    score -= minFoodDistance
    return score
# Abbreviation: short alias so the evaluation function can be referenced as 'better'
better = betterEvaluationFunction
| [
"anurag.porripireddi@stonybrook.edu"
] | anurag.porripireddi@stonybrook.edu |
bffc4998a73a001af96ff4d89986c7f07ba844b4 | 65f94b2fe3794b6fd682e52c7f4047a737cae6c7 | /env/bin/symilar | f88c0874a96d9c0b5e2366b5b1482cc352a5092d | [] | no_license | udoyen/vgg-project-challenge | 47e7e0c5352437f3df00aff9ac055dbadaadebb5 | 76a005edec6e77f9467b67bda20002c58abef7a9 | refs/heads/master | 2022-10-04T14:42:46.267458 | 2020-02-11T10:47:22 | 2020-02-11T10:47:22 | 238,899,753 | 0 | 1 | null | 2022-09-16T18:17:10 | 2020-02-07T10:45:53 | Python | UTF-8 | Python | false | false | 276 | #!/home/george/Documents/vgg-docs/vgg-project-challenge/env/bin/python3
# -*- coding: utf-8 -*-
"""Console entry point for pylint's ``symilar`` duplicate-code checker."""
import re
import sys
from pylint import run_symilar

if __name__ == '__main__':
    # Normalise argv[0]: strip setuptools' '-script.py(w)' / '.exe' suffixes.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
"datameshprojects@gmail.com"
] | datameshprojects@gmail.com | |
2d9f94c5939c209e95cd90f452b218045cd65527 | 373c43096384a2ea7f351fdedc64312660a1c344 | /src/cli.py | f3ccd5fb4c85d42a6d92d16f6863a85c68bacb64 | [
"MIT"
] | permissive | VanirLab/weever | 7ad69c76227ac0981b1dd0570e3dbae4dd67de21 | b602e90ddecb8e469a28e092da3ca7fec514e3dc | refs/heads/master | 2020-05-27T20:57:48.320430 | 2019-05-27T09:02:33 | 2019-05-27T09:02:33 | 188,788,722 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 49,577 | py | """
Implementation of weever's command line interface.
"""
import sys
import traceback
import argparse
import logging
import getpass
import typing as typ
from src.wrapper.bad_cluster import BadClusterWrapper
from src.wrapper.cluster_allocation import ClusterAllocation
from src.fat.fat_filesystem.fat_wrapper import create_fat
from src.fat.fat_filesystem.fattools import FATtools
from src.wrapper.file_slack import FileSlack
from src.metadata import Metadata
from src.wrapper.mft_slack import MftSlack
from src.wrapper.osd2 import OSD2
from src.wrapper.obso_faddr import FADDR
from src.wrapper.reserved_gdt_blocks import ReservedGDTBlocks
from src.wrapper.superblock_slack import SuperblockSlack
from src.wrapper.inode_padding import inodePadding
from src.wrapper.write_gen import write_gen
from src.wrapper.timestamp_hiding import timestampHiding
from src.wrapper.xfield_padding import xfieldPadding
LOGGER = logging.getLogger("cli")
def do_metadata(args: argparse.Namespace) -> None:
    """
    handles metadata subcommand execution

    :param args: argparse.Namespace with 'password' flag and 'metadata'
                 (an already-open binary stream, argparse.FileType('rb'))
    """
    if args.password:
        print("Please enter password: ")
        meta = Metadata(password=getpass.getpass())
    else:
        meta = Metadata()
    meta.read(args.metadata)
    meta.info()
def do_fattools(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles fattools subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    tools = FATtools(create_fat(device))
    # Exactly one of the listing modes runs per invocation.
    if args.fat:
        tools.list_fat()
    elif args.info:
        tools.list_info()
    elif args.list is not None:
        # --ls takes a cluster id; 0 means the root directory.
        tools.list_directory(args.list)
def do_fileslack(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles fileslack subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.info:
        # print file slack information; no metadata file involved
        FileSlack(device, Metadata(), args.dev).info(args.destination)
    if args.write:
        slacker = FileSlack(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into fileslack
            slacker.write(sys.stdin.buffer, args.destination)
        else:
            # write from file into fileslack
            with open(args.file, 'rb') as fstream:
                slacker.write(fstream, args.destination, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            slacker.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from slackspace to stdout
        FileSlack(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from slackspace into outfile
        FileSlack(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the slackspace occupied by hidden data
        FileSlack(device, _stored_metadata(), args.dev).clear()
def do_mftslack(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles mftslack subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.info:
        # print mft slack information; no metadata file involved
        MftSlack(device, Metadata(), args.dev).info(args.offset, args.limit)
    if args.write:
        # args.domirr additionally mirrors writes to $MFTMirr
        slacker = MftSlack(device, _fresh_metadata(), args.dev, args.domirr)
        if not args.file:
            # write from stdin into mftslack
            slacker.write(sys.stdin.buffer, offset=args.offset)
        else:
            # write from file into mftslack
            with open(args.file, 'rb') as fstream:
                slacker.write(fstream, args.file, args.offset)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            slacker.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from slackspace to stdout
        MftSlack(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from slackspace into outfile
        MftSlack(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the slackspace occupied by hidden data
        MftSlack(device, _stored_metadata(), args.dev).clear()
def do_addcluster(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles addcluster subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        allocator = ClusterAllocation(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into additional clusters
            allocator.write(sys.stdin.buffer, args.destination)
        else:
            # write from file into additional clusters
            with open(args.file, 'rb') as fstream:
                allocator.write(fstream, args.destination, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            allocator.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from additional clusters to stdout
        ClusterAllocation(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from additional clusters into outfile
        ClusterAllocation(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # release the additionally allocated clusters
        ClusterAllocation(device, _stored_metadata(), args.dev).clear()
def do_badcluster(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles badcluster subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        allocator = BadClusterWrapper(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into clusters marked as bad
            allocator.write(sys.stdin.buffer)
        else:
            # write from file into clusters marked as bad
            with open(args.file, 'rb') as fstream:
                allocator.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            allocator.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from bad clusters to stdout
        BadClusterWrapper(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from bad clusters into outfile
        BadClusterWrapper(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the clusters used for hiding
        BadClusterWrapper(device, _stored_metadata(), args.dev).clear()
def do_reserved_gdt_blocks(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles reserved_gdt_blocks subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        reserve = ReservedGDTBlocks(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into reserved GDT blocks
            reserve.write(sys.stdin.buffer)
        else:
            # write from file into reserved GDT blocks
            with open(args.file, 'rb') as fstream:
                reserve.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            reserve.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from reserved GDT blocks to stdout
        ReservedGDTBlocks(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from reserved GDT blocks into outfile
        ReservedGDTBlocks(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the reserved GDT blocks used for hiding
        ReservedGDTBlocks(device, _stored_metadata(), args.dev).clear()
    elif args.info:
        # show information about the reserved GDT blocks
        ReservedGDTBlocks(device, _stored_metadata(), args.dev).info()
def do_superblock_slack(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles superblock_slack subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        slack = SuperblockSlack(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into superblock slack
            slack.write(sys.stdin.buffer)
        else:
            # write from file into superblock slack
            with open(args.file, 'rb') as fstream:
                slack.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            slack.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from superblock slack to stdout
        SuperblockSlack(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from superblock slack into outfile
        SuperblockSlack(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the superblock slack used for hiding
        SuperblockSlack(device, _stored_metadata(), args.dev).clear()
    elif args.info:
        # show information about the superblock slack
        SuperblockSlack(device, _stored_metadata(), args.dev).info()
def do_osd2(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles osd2 subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        osd2 = OSD2(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into osd2 fields of inodes
            osd2.write(sys.stdin.buffer)
        else:
            # write from file into osd2 fields of inodes
            with open(args.file, 'rb') as fstream:
                osd2.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            osd2.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from osd2 fields to stdout
        OSD2(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from osd2 fields into outfile
        OSD2(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the osd2 fields used for hiding
        OSD2(device, _stored_metadata(), args.dev).clear()
    elif args.info:
        # show information about the osd2 hiding technique
        OSD2(device, _stored_metadata(), args.dev).info()
def do_obso_faddr(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles obso_faddr subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        faddr = FADDR(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into faddr fields
            faddr.write(sys.stdin.buffer)
        else:
            # write from file into faddr fields
            with open(args.file, 'rb') as fstream:
                faddr.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            faddr.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from faddr fields to stdout
        FADDR(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from faddr fields into outfile
        FADDR(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the faddr fields used for hiding
        FADDR(device, _stored_metadata(), args.dev).clear()
    elif args.info:
        # show information about the faddr hiding technique
        FADDR(device, _stored_metadata(), args.dev).info()
def do_inode_padding(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles inode_padding subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        ipad = inodePadding(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into inode padding
            ipad.write(sys.stdin.buffer)
        else:
            # write from file into inode padding
            with open(args.file, 'rb') as fstream:
                ipad.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            ipad.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from inode padding to stdout
        inodePadding(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from inode padding into outfile
        inodePadding(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the inode padding used for hiding
        inodePadding(device, _stored_metadata(), args.dev).clear()
def do_write_gen(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles write_gen subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        wgen = write_gen(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin
            wgen.write(sys.stdin.buffer)
        else:
            # write from file
            with open(args.file, 'rb') as fstream:
                wgen.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            wgen.metadata.write(metadata_out)
    elif args.read:
        # read hidden data to stdout
        write_gen(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data into outfile
        write_gen(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the hiding locations
        write_gen(device, _stored_metadata(), args.dev).clear()
def do_timestamp_hiding(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles timestamp_hiding subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        timestamp = timestampHiding(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into timestamps
            timestamp.write(sys.stdin.buffer)
        else:
            # write from file into timestamps
            with open(args.file, 'rb') as fstream:
                timestamp.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            timestamp.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from timestamps to stdout
        timestampHiding(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from timestamps into outfile
        timestampHiding(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the timestamps used for hiding
        timestampHiding(device, _stored_metadata(), args.dev).clear()
def do_xfield_padding(args: argparse.Namespace, device: typ.BinaryIO) -> None:
    """
    handles xfield_padding subcommand execution

    :param args: argparse.Namespace
    :param device: stream of the filesystem
    """
    def _fresh_metadata() -> Metadata:
        # New metadata container; encrypted when --password was given.
        if args.password:
            print("Please enter password: ")
            return Metadata(password=getpass.getpass())
        return Metadata()

    def _stored_metadata() -> Metadata:
        # Metadata restored from the file written by a previous 'write'.
        meta = _fresh_metadata()
        with open(args.metadata, 'rb') as metadata_file:
            meta.read(metadata_file)
        return meta

    if args.write:
        xfield = xfieldPadding(device, _fresh_metadata(), args.dev)
        if not args.file:
            # write from stdin into extended-field padding
            xfield.write(sys.stdin.buffer)
        else:
            # write from file into extended-field padding
            with open(args.file, 'rb') as fstream:
                xfield.write(fstream, args.file)
        # persist metadata so the hidden data can be recovered later
        with open(args.metadata, 'wb+') as metadata_out:
            xfield.metadata.write(metadata_out)
    elif args.read:
        # read hidden data from extended-field padding to stdout
        xfieldPadding(device, _stored_metadata(), args.dev).read(sys.stdout.buffer)
    elif args.outfile:
        # read hidden data from extended-field padding into outfile
        xfieldPadding(device, _stored_metadata(), args.dev).read_into_file(args.outfile)
    elif args.clear:
        # clear the extended-field padding used for hiding
        xfieldPadding(device, _stored_metadata(), args.dev).clear()
def build_parser() -> argparse.ArgumentParser:
"""
Get the cli parser
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description='Toolkit for filesystem based data hiding techniques.')
# TODO: Maybe this option should be required for hiding technique
# subcommand but not for metadata.... needs more thoughs than I
# currently have
parser.set_defaults(which='no_arguments')
parser.add_argument('-d', '--device', dest='dev', required=False, help='Path to filesystem')
parser.add_argument('-p', '--password', dest='password', action='store_true', required=False, help='Password for encryption of metadata')
# TODO Maybe we should provide a more fine grained option to choose between different log levels
parser.add_argument('--verbose', '-v', action='count', help="Increase verbosity. Use it multiple times to increase verbosity further.")
subparsers = parser.add_subparsers(help='Hiding techniques sub-commands')
# FAT Tools
fatt = subparsers.add_parser('fattools', help='List statistics about FAT filesystem')
fatt.set_defaults(which='fattools')
fatt.add_argument('-l', '--ls', dest='list', type=int, metavar='CLUSTER_ID', help='List files under cluster id. Use 0 for root directory')
fatt.add_argument('-f', '--fat', dest='fat', action='store_true', help='List content of FAT')
fatt.add_argument('-i', '--info', dest='info', action='store_true', help='Show some information about the filesystem')
# Metadata info
metadata = subparsers.add_parser('metadata', help='list information about a metadata file')
metadata.set_defaults(which='metadata')
metadata.add_argument('-m', '--metadata', dest='metadata', type=argparse.FileType('rb'), help="filepath to metadata file")
# FileSlack
fileslack = subparsers.add_parser('fileslack', help='Operate on file slack')
fileslack.set_defaults(which='fileslack')
fileslack.add_argument('-d', '--dest', dest='destination', action='append', required=False, help='absolute path to file or directory on filesystem, directories will be parsed recursively')
fileslack.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
fileslack.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from slackspace to stdout')
fileslack.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from slackspace to OUTFILE')
fileslack.add_argument('-w', '--write', dest='write', action='store_true', help='write to slackspace')
fileslack.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear slackspace')
fileslack.add_argument('-i', '--info', dest='info', action='store_true', help='print file slack information of given files')
fileslack.add_argument('file', metavar='FILE', nargs='?', help="File to write into slack space, if nothing provided, use stdin")
# MftSlack
mftslack = subparsers.add_parser('mftslack', help='Operate on mft slack')
mftslack.set_defaults(which='mftslack')
mftslack.add_argument('-s', '--seek', dest='offset', default=0, type=int, required=False, help='sector offset to the start of the first mft entry to be used when hiding data. To avoid overwriting data use the "Next position" provided by the last execution of this module.')
mftslack.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
mftslack.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from slackspace to stdout')
mftslack.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from slackspace to OUTFILE')
mftslack.add_argument('-w', '--write', dest='write', action='store_true', help='write to slackspace')
mftslack.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear slackspace')
mftslack.add_argument('-d', '--domirr', dest='domirr', action='store_true', help='write copy of data to $MFTMirr. Avoids detection with chkdsk')
mftslack.add_argument('-i', '--info', dest='info', action='store_true', help='print mft slack information of entries in limit')
mftslack.add_argument('-l', '--limit', dest='limit', default=-1, type=int, required=False, help='limit the amount of mft entries to print information for when using the "--info" switch')
mftslack.add_argument('file', metavar='FILE', nargs='?', help="File to write into slack space, if nothing provided, use stdin")
# Additional Cluster Allocation
addcluster = subparsers.add_parser('addcluster', help='Allocate more clusters for a file')
addcluster.set_defaults(which='addcluster')
addcluster.add_argument('-d', '--dest', dest='destination', required=False, help='absolute path to file or directory on filesystem')
addcluster.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
addcluster.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from allocated clusters to stdout')
addcluster.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from allocated clusters to OUTFILE')
addcluster.add_argument('-w', '--write', dest='write', action='store_true', help='write to additional allocated clusters')
addcluster.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear allocated clusters')
addcluster.add_argument('file', metavar='FILE', nargs='?', help="File to write into additionally allocated clusters, if nothing provided, use stdin")
# Additional Cluster Allocation
badcluster = subparsers.add_parser('badcluster', help='Allocate more clusters for a file')
badcluster.set_defaults(which='badcluster')
badcluster.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
badcluster.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from allocated clusters to stdout')
badcluster.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from allocated clusters to OUTFILE')
badcluster.add_argument('-w', '--write', dest='write', action='store_true', help='write to additional allocated clusters')
badcluster.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear allocated clusters')
badcluster.add_argument('file', metavar='FILE', nargs='?', help="File to write into additionally allocated clusters, if nothing provided, use stdin")
# Reserved GDT blocks
reserved_gdt_blocks = subparsers.add_parser('reserved_gdt_blocks', help='hide data in reserved GDT blocks')
reserved_gdt_blocks.set_defaults(which='reserved_gdt_blocks')
reserved_gdt_blocks.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
reserved_gdt_blocks.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from reserved GDT blocks to stdout')
reserved_gdt_blocks.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from reserved GDT blocks to OUTFILE')
reserved_gdt_blocks.add_argument('-w', '--write', dest='write', action='store_true', help='write to reserved GDT blocks')
reserved_gdt_blocks.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear reserved GDT blocks')
reserved_gdt_blocks.add_argument('-i', '--info', dest='info', action='store_true', help='show infor1mation about reserved gdt')
reserved_gdt_blocks.add_argument('file', metavar='FILE', nargs='?', help="File to write into reserved GDT blocks, if nothing provided, use stdin")
# Superblock slack
superblock_slack = subparsers.add_parser('superblock_slack', help='hide data in superblock slack')
superblock_slack.set_defaults(which='superblock_slack')
superblock_slack.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
superblock_slack.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from superblock slack to stdout')
superblock_slack.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from superblock slack to OUTFILE')
superblock_slack.add_argument('-w', '--write', dest='write', action='store_true', help='write to superblock slack')
superblock_slack.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear superblock slack')
superblock_slack.add_argument('-i', '--info', dest='info', action='store_true', help='show information about superblock')
superblock_slack.add_argument('file', metavar='FILE', nargs='?', help="File to write into superblock slack, if nothing provided, use stdin")
# OSD2
osd2 = subparsers.add_parser('osd2', help='hide data in osd2 fields of inodes')
osd2.set_defaults(which='osd2')
osd2.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
osd2.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from osd2 fields to stdout')
osd2.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from osd2 fields to OUTFILE')
osd2.add_argument('-w', '--write', dest='write', action='store_true', help='write to osd2 fields')
osd2.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear osd2 fields')
osd2.add_argument('-i', '--info', dest='info', action='store_true', help='show information about osd2')
osd2.add_argument('file', metavar='FILE', nargs='?', help="File to write into osd2 fields, if nothing provided, use stdin")
# obso_faddr
obso_faddr = subparsers.add_parser('obso_faddr', help='hide data in obso_faddr fields of inodes')
obso_faddr.set_defaults(which='obso_faddr')
obso_faddr.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
obso_faddr.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from obso_faddr fields to stdout')
obso_faddr.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from obso_faddr fields to OUTFILE')
obso_faddr.add_argument('-w', '--write', dest='write', action='store_true', help='write to obso_faddr fields')
obso_faddr.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear obso_faddr fields')
obso_faddr.add_argument('-i', '--info', dest='info', action='store_true', help='show information about obso_faddr')
obso_faddr.add_argument('file', metavar='FILE', nargs='?', help="File to write into obso_faddr fields, if nothing provided, use stdin")
# inode Padding
inode_padding = subparsers.add_parser('inode_padding', help='hide data in padding fields of inodes')
inode_padding.set_defaults(which='inode_padding')
inode_padding.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
inode_padding.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from padding fields to stdout')
inode_padding.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from padding fields to OUTFILE')
inode_padding.add_argument('-w', '--write', dest='write', action='store_true', help='write to padding fields')
inode_padding.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear padding fields')
inode_padding.add_argument('file', metavar='FILE', nargs='?', help="File to write into padding fields, if nothing provided, use stdin")
# write gen
write_gen = subparsers.add_parser('write_gen', help='hide data in write_gen fields of inodes')
write_gen.set_defaults(which='write_gen')
write_gen.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
write_gen.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from write_gen fields to stdout')
write_gen.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from write_gen fields to OUTFILE')
write_gen.add_argument('-w', '--write', dest='write', action='store_true', help='write to write_gen fields')
write_gen.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear write_gen fields')
write_gen.add_argument('file', metavar='FILE', nargs='?', help="File to write into write_gen fields, if nothing provided, use stdin")
# timestamp hiding
timestamp = subparsers.add_parser('timestamp_hiding', help='hide data in inode timestamps')
timestamp.set_defaults(which='timestamp_hiding')
timestamp.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
timestamp.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from timestamps to stdout')
timestamp.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from timestamps to OUTFILE')
timestamp.add_argument('-w', '--write', dest='write', action='store_true', help='write to timestamps')
timestamp.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear timestamps')
timestamp.add_argument('file', metavar='FILE', nargs='?', help="File to write into timestamps, if nothing provided, use stdin")
# xfield padding
xfield = subparsers.add_parser('xfield_padding', help='hide data in inode extended fields')
xfield.set_defaults(which='xfield_padding')
xfield.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
xfield.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from extended fields to stdout')
xfield.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from extended fields to OUTFILE')
xfield.add_argument('-w', '--write', dest='write', action='store_true', help='write to extended fields')
xfield.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear extended fields')
xfield.add_argument('file', metavar='FILE', nargs='?', help="File to write into extended fields, if nothing provided, use stdin")
return parser
def main():
    """CLI entry point: configure logging from -v flags, then dispatch to
    the subcommand selected by argparse (stored in ``args.which``)."""
    # Route any uncaught exception through the logging-aware handler.
    sys.excepthook = general_excepthook

    parser = build_parser()
    args = parser.parse_args()

    # Normalise verbosity (downstream do_* handlers may read args.verbose)
    # and map -v / -vv onto logging levels.
    if args.verbose is None: args.verbose = 0
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO)
    elif args.verbose >= 2:
        logging.basicConfig(level=logging.DEBUG)
    if args.verbose > 2:
        fish = """
.|_-
___.-´ /_.
.--´` `´`-,/ .
..--.-´-. ´-. /|
(o( o( o ) ./.
` ´ -
( `. /
-....-- .\ \--..- \\
`--´ -.-´ \.-
\|
"""
        LOGGER.debug(fish)
        LOGGER.debug("Thank you for debugging so hard! We know it is "
                     "a mess. So, here is a friend, who will support you :)")

    # Subcommands that do not need an opened filesystem image.
    if args.which == 'no_arguments':
        parser.print_help()
        return
    if args.which == 'metadata':
        do_metadata(args)
        return

    # Every remaining subcommand operates on the device image; map the
    # subcommand name to its handler instead of a long if-chain.
    handlers = {
        'fattools': do_fattools,
        'fileslack': do_fileslack,
        'mftslack': do_mftslack,
        'addcluster': do_addcluster,
        'badcluster': do_badcluster,
        'reserved_gdt_blocks': do_reserved_gdt_blocks,
        'osd2': do_osd2,
        'obso_faddr': do_obso_faddr,
        'inode_padding': do_inode_padding,
        'timestamp_hiding': do_timestamp_hiding,
        'xfield_padding': do_xfield_padding,
        'write_gen': do_write_gen,
        'superblock_slack': do_superblock_slack,
    }
    with open(args.dev, 'rb+') as device:
        handler = handlers.get(args.which)
        if handler is not None:
            handler(args, device)
def general_excepthook(errtype, value, tb):
    """
    This function serves as a general exception handler, who catches all
    exceptions, that were not handled at a higher level.

    It logs the exception (CRITICAL one-liner, full traceback at INFO)
    and terminates the process with exit status 1.
    """
    LOGGER.critical("Error: %s: %s.", errtype, value)
    # BUG FIX: the original passed the builtin ``type`` instead of the
    # ``errtype`` parameter to format_exception, naming the wrong class
    # in the formatted traceback.
    LOGGER.info("".join(traceback.format_exception(errtype, value, tb)))
    sys.exit(1)
if __name__ == "__main__":
    # Run the CLI only when executed as a script, not on import.
    main()
"noreply@github.com"
] | noreply@github.com |
913b22b55b2b66dd81a565174d6e79ac4d9ded03 | 7398b8196c769af2bb84f0c1e3e079c7c9bf0c22 | /Measurement Converter.py | 4f4a50715d39e3462e94bb4454942a540ea80674 | [] | no_license | Inglaciela/PastaPython | e15576da7d1f9fe45e361b7c6ab054982801160a | cc31d9f20c128c2ac5361e30ec6eb64bbdfa02d5 | refs/heads/main | 2023-08-27T10:18:53.959821 | 2021-10-25T16:09:08 | 2021-10-25T16:09:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | medida = float(input('Uma distancia em metros:'))
cm = medida * 100   # metres -> centimetres
mm = medida *1000   # metres -> millimetres
print('A medida de {} m corresponde a {} cm e {} mm'.format(medida, cm, mm))
# In the first and second {} you can add :.0f for a float with no decimal places.
# To also compute Kilometre(km), Hectometre(hm), Decametre(dam), Decimetre(dm), Centimetre(cm), Millimetre(mm):
#m = float(input('Uma distancia em metros:'))
#print('A medida de {}m corresponde a \n{}km \n{}hm \n{}dam \n{}dm \n{}cm \n{}mm '.format(m,m/1000,m/100,m/10,m*10,m*100,m*1000))
"noreply@github.com"
] | noreply@github.com |
a7e437ece982cc2ec71e49e9dc3b0d2230b15089 | 2d2aaf6b93bca99443463046c2f391588533bf7c | /lesson_8/instagram/settings.py | 903900de867961c74f12d01e24985226499ed577 | [] | no_license | chernova-ann/Data-collection-and-processing-methods | db86739c6876edcf2bb4c241444d9525bf72e395 | 238d4528f5ea108925a39f2d64a0a05e42f719aa | refs/heads/master | 2022-12-06T15:17:42.636713 | 2020-09-02T18:05:35 | 2020-09-02T18:05:35 | 282,038,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | # Scrapy settings for instagram project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'instagram'
SPIDER_MODULES = ['instagram.spiders']
NEWSPIDER_MODULE = 'instagram.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0'
LOG_ENABLED = True
LOG_LEVEL = 'DEBUG'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1.25
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'instagram.middlewares.InstagramSpiderMiddleware': 543,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'instagram.middlewares.InstagramDownloaderMiddleware': 543,
'instagram.middlewares.TooManyRequestsRetryMiddleware': 200
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'instagram.pipelines.InstagramPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = True
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"noreply@github.com"
] | noreply@github.com |
c4cbbf7636695a0ca4c879867e213c7d40e40e58 | a4f3eeecaac5d1cd43954997d1faffb98103aa89 | /topperProject/topperProject/topperApp/migrations/0003_addalbum_rating.py | 90d7f6be6972977b7a61d63568bab4a93d2bb9ce | [] | no_license | Ben-Stevenson/DjangoFullStack | f99874ba01e95feff0b235af7414beab77a1809d | 9b6425a38998c919d37087154fbb747aa6de08eb | refs/heads/master | 2022-11-27T20:44:20.626933 | 2020-08-05T14:54:45 | 2020-08-05T14:54:45 | 285,320,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-23 17:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the 1-5 star ``rating`` field to the ``addalbum`` model."""

    # Must follow the migration that added the review rating field.
    dependencies = [
        ('topperApp', '0002_addreview_rating'),
    ]

    operations = [
        migrations.AddField(
            model_name='addalbum',
            name='rating',
            # Defaults to 1; validators constrain values to the 1..5 range.
            field=models.IntegerField(default=1, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(1)]),
        ),
    ]
"bww.stevenson@btinternet.com"
] | bww.stevenson@btinternet.com |
dae9dc485e3fb180f377368fb642b0eeeb1004c6 | 1640189b5bf78114e2749a8ed1216e099bae9814 | /src/xmlsec/rsa_x509_pem/pyasn1/debug.py | 5aa42ced36ef65aadacddb629cebd74977b9d1a4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | hfalcic/pyXMLSecurity | fb69cce12c1b417928d85b91a4c3dc87f46935ec | b29a68e6d21a0485b9190be45d532b9042fdc918 | refs/heads/master | 2020-04-03T13:19:13.016532 | 2014-07-08T17:57:55 | 2014-07-08T17:57:55 | 21,471,398 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | import sys
from .compat.octets import octs2ints
from . import error
from . import __version__
# Bit flags selecting which pyasn1 subsystems emit debug output.
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff

# Maps user-facing debug category names to their flag bits.
flagMap = {
    'encoder': flagEncoder,
    'decoder': flagDecoder,
    'all': flagAll
}
class Debug:
    """Category-gated debug logger.

    Messages are written through a printer callable (stderr by default);
    the instance supports ``&`` with flag masks to test whether a debug
    category is enabled.
    """

    # Default sink: raw writes to standard error.
    defaultPrinter = sys.stderr.write

    def __init__(self, *flags):
        self._flags = flagNone
        self._printer = self.defaultPrinter
        self('running pyasn1 version %s' % __version__)
        for flag_name in flags:
            if flag_name not in flagMap:
                raise error.PyAsn1Error('bad debug flag %s' % (flag_name,))
            self._flags |= flagMap[flag_name]
            self('debug category \'%s\' enabled' % flag_name)

    def __str__(self):
        return 'logger %s, flags %x' % (self._printer, self._flags)

    def __call__(self, msg):
        self._printer('DBG: %s\n' % msg)

    def __and__(self, flag):
        return self._flags & flag

    def __rand__(self, flag):
        return flag & self._flags
# Module-level active debug logger; 0 (falsy) means debugging is disabled.
logger = 0

def setLogger(l):
    # Install a Debug instance (or 0 to disable) as the module-wide logger.
    global logger
    logger = l
def hexdump(octets):
    """Render *octets* as space-separated uppercase hex bytes, broken into
    lines of 16 with a zero-padded decimal offset prefix on each line."""
    pieces = []
    for offset, byte in enumerate(octs2ints(octets)):
        # Start a new line (with its offset label) every 16 bytes.
        prefix = ('\n%.5d: ' % offset) if offset % 16 == 0 else ''
        pieces.append('%s%.2X' % (prefix, byte))
    return ' '.join(pieces)
class Scope:
    """Stack of name tokens rendered as a dotted path, used to label the
    current position in debug traces."""

    def __init__(self):
        self._tokens = []

    def __str__(self):
        return '.'.join(self._tokens)

    def push(self, token):
        self._tokens.append(token)

    def pop(self):
        return self._tokens.pop()
# Shared singleton tracking the current position for debug output.
scope = Scope()
| [
"harvey.falcic@gmail.com"
] | harvey.falcic@gmail.com |
1bab025bfdd198402464361de2c9ab6092f42c10 | fb90c54f2b4f3d852a9380f23b837c9b79af9656 | /sort_calendar_mar_15.py | babce2930d0e4bdcc46ba57e3584e804347f1746 | [] | no_license | Daniel-Chin/airbnb | 0cd05209049e123dba763b32e7c55ae7688ccef7 | fddfd3861292fd6a191a76b969df533c67933119 | refs/heads/master | 2021-03-23T21:23:33.374301 | 2020-11-17T13:11:41 | 2020-11-17T13:11:41 | 247,485,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | import csv
import datetime
def distribute():
    """Split raw/calendar_mar_15.csv into one CSV per listing id under data_mar/.

    Rows are streamed in input order; a new output file is opened whenever the
    listing id changes.  Since a listing's rows may appear in several separate
    runs, ids seen before are reopened in append mode (header already written).
    """
    with open('raw/calendar_mar_15.csv', 'r') as f:
        now_id = None   # id of the run currently being written
        outF = None     # open handle for the current id's output file
        all_id = set()  # ids whose files (and header rows) already exist
        try:
            c = csv.reader(f)
            head = next(c)
            for line in c:
                id = line[0]
                if id != now_id:
                    # Coarse progress indicator: print roughly one id in 16.
                    if int(id) % 16 == 0: print(id)
                    now_id = id
                    # Finish the previous id's file before switching.
                    if outF is not None:
                        outF.flush()
                        outF.close()
                    if id in all_id:
                        # Seen before: append without rewriting the header.
                        outF = open('data_mar/' + id + '.csv', 'a', newline = '')
                        out = csv.writer(outF)
                    else:
                        # First occurrence: create the file and write the header.
                        outF = open('data_mar/' + id + '.csv', 'w+', newline = '')
                        out = csv.writer(outF)
                        out.writerow(head)
                        all_id.add(id)
                out.writerow(line)
        finally:
            # Ensure the last open file is closed even if parsing fails.
            if outF is not None:
                outF.close()
def check():
    """Verify every listing id in raw/calendar_mar_15.csv has a matching
    per-id CSV file in data_mar/; raises AssertionError otherwise."""
    import os
    # Collect the id part (filename stem) of every distributed file.
    known_ids = set()
    for name in os.listdir('data_mar/'):
        known_ids.add(name.split('.')[0])
    print('step 2')
    with open('raw/calendar_mar_15.csv', 'r') as f:
        reader = csv.reader(f)
        next(reader)  # skip header
        for row in reader:
            assert row[0] in known_ids
    print('ok')
def sortFile(filename):
    """Rewrite data_mar/<filename> with its rows sorted chronologically by
    the date column (column 1, format YYYY-MM-DD); header stays first.

    Note: rows sharing a date collapse to the last one seen (dict keyed by
    date), matching the original behaviour.
    """
    path = 'data_mar/' + filename
    with open(path, 'r') as src:
        reader = csv.reader(src)
        header = next(reader)
        all_dates = []
        row_by_date = {}
        for row in reader:
            parsed = datetime.datetime.strptime(row[1], '%Y-%m-%d')
            all_dates.append(parsed)
            row_by_date[parsed] = row
    with open(path, 'w', newline='') as dst:
        writer = csv.writer(dst)
        writer.writerow(header)
        for parsed in sorted(all_dates):
            writer.writerow(row_by_date[parsed])
def checkFileSort(filename):
    """Assert that data_mar/<filename> has its date column (column 1) in
    ascending order; AssertionError if any row is out of order."""
    parsed_dates = []
    with open('data_mar/' + filename, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # skip header
        for row in reader:
            parsed_dates.append(datetime.datetime.strptime(row[1], '%Y-%m-%d'))
    assert sorted(parsed_dates) == parsed_dates
def checkAll():
    """Run checkFileSort over every file in data_mar/, printing fractional
    progress every 8 files and 'ok' on success."""
    import os
    filenames = os.listdir('data_mar/')
    total = len(filenames)
    for index, name in enumerate(filenames):
        checkFileSort(name)
        if index % 8 == 0:
            print(index / total)
    print('ok')
# Interactive entry point: require explicit confirmation before the
# distribute step overwrites files under ./data_mar/.
print('Will distribute calendar_mar_15.csv to ./data_mar/')
assert(input('Files could be overwritten. Type "YES" to go: ') == 'YES')
distribute()
| [
"daniel_chin@yahoo.com"
] | daniel_chin@yahoo.com |
c9cbfca3f4c84cb5e219730e43194e7238cda653 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/358/usersdata/296/102792/submittedfiles/estatistica.py | 79ada402f0ec9ec03a71780b75717f4fa32662f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # -*- coding: utf-8 -*-
def media(lista):
    """Return the arithmetic mean of the values in *lista*."""
    total = 0
    for valor in lista:
        total = total + valor
    return total / len(lista)
def media(lista):
    """Return the arithmetic mean of *lista*.

    NOTE(review): this redefinition shadows the loop-based ``media``
    defined just above it in the file; only this one is ever called.
    """
    return sum(lista) / len(lista)
def desvio_padrao(lista):
    """Return the sample standard deviation of *lista* (n-1 denominator).

    Requires len(lista) >= 2.
    """
    # Compute the mean once instead of calling media() on every iteration
    # (the original was O(n^2)); this also makes the function self-contained.
    m = sum(lista) / len(lista)
    somatorio = 0
    for valor in lista:
        somatorio = somatorio + (m - valor) ** 2
    # BUG FIX: the original divided by a global ``n`` that is only defined
    # by the surrounding script (NameError when used standalone); use the
    # list's own length instead.
    return (somatorio / (len(lista) - 1)) ** 0.5
# Read the matrix dimensions: m lists, each with n elements.
m = int(input("Digite o número da lista: "))
n = int(input("Digite o número de elementos de cada lista: "))
# Read the m x n matrix element by element.
matriz=[]
for i in range (0,m,1):
    matriz_linha=[]
    for j in range (0,n,1):
        matriz_linha.append(int(input("Digite o elemento (%d,%d): "%(i+1,j+1))))
    matriz.append(matriz_linha)
# Print the mean and the standard deviation of each row.
for i in range (0,m,1):
    print(media(matriz[i]))
    print("%.2f"%(desvio_padrao(matriz[i])))
# Based on the function above, write the function to compute the standard deviation of a list.
# Finally, write the main program that reads the input and calls the functions created.
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
1dd263aa244648cfd3f1c586ba8b042eae7e4f39 | cb1d888146c36d2be4517baf34ffaabe00082afa | /prog/__main__.py | fa47c9db6bcf5392c943e308a23795e4e62e16c3 | [] | no_license | Cheeel666/BMSTU_CP_CG | 37d555d654c8d77f1b863ad4a89856f85bab883d | 0cde22840233f67d34d4ef0681c28e8f38205c61 | refs/heads/master | 2023-04-30T03:55:28.331290 | 2021-05-21T21:45:12 | 2021-05-21T21:45:12 | 320,270,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | from src.model import NLM
from tkinter import *
class Window:
    """Tk front end that collects NLM denoising parameters (image name,
    patch radius, window radius, sigma) and runs the model on submit."""

    def __init__(self):
        self.window = Tk()
        self.window.title("NLM")
        self.window.geometry('500x280')
        # Four caption/entry pairs stacked on consecutive grid rows.
        self.lbl, self.entry = self._add_row("Введите название изображения:", 0)
        self.lbl1, self.entry1 = self._add_row("Введите радиус участка:", 2)
        self.lbl2, self.entry2 = self._add_row("Введите радиус окна:", 4)
        self.lbl3, self.entry3 = self._add_row("Введите сигму:", 6)
        self.btn = Button(self.window, text="Ввод", width=50, command=self.submit)
        self.btn.grid(column=0, row=8)
        self.window.mainloop()

    def _add_row(self, caption, label_row):
        # Create a caption label and an entry on consecutive grid rows.
        label = Label(self.window, text=caption)
        label.grid(column=0, row=label_row)
        entry = Entry(self.window, width=50)
        entry.grid(column=0, row=label_row + 1)
        return label, entry

    def submit(self):
        """Collect the four entry values and hand them to the NLM model."""
        params = [self.entry.get(), self.entry1.get(),
                  self.entry2.get(), self.entry3.get()]
        NLM.setup(params).run()
if __name__ == '__main__':
    # Launch the GUI only when this module is run as a script.
    win = Window()
| [
"IChelyadinov"
] | IChelyadinov |
60f59e3d60f1e9f3547b430a782fc7a3067456ac | 582e74cace57aae609522f803776fc7c784fb203 | /Crypto/Decrypt.py | 6a202df9335c38485f11f4c4c46abe6819394c1c | [] | no_license | tirelesslearner-1901/MiniSpy | 5d8691e1e0e62e22ddfd34617b2f4372132c93da | b74e393d339c75e39de28f8e2a8deb28f31e9d95 | refs/heads/main | 2023-08-22T20:31:02.295392 | 2021-10-16T08:36:19 | 2021-10-16T08:36:19 | 417,756,656 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | from cryptography.fernet import Fernet
# Fernet key used to decrypt the captured log files; must be the same
# key the encrypting side used (left blank here on purpose).
key = " "

system_info_e = "e_system.txt"
clipboard_info_e = "e_clipboard.txt"
keys_info_e = "e_keys_log.txt"

encrypted_files = [system_info_e, clipboard_info_e, keys_info_e]

# Decrypt each file in place.
for path in encrypted_files:
    with open(path, 'rb') as f:
        data = f.read()
    fernet = Fernet(key)
    # BUG FIX: the original called fernet.encrypt(data) here, so this
    # "Decrypt" script actually re-encrypted the files a second time.
    decrypted = fernet.decrypt(data)
    with open(path, 'wb') as f:
        f.write(decrypted)
"noreply@github.com"
] | noreply@github.com |
19b0f1f8a7c09dc649e0cad037b8f1d8ebb8b242 | 81579ecd0678d652bbb57ff97529631fcfb74b12 | /corehq/motech/openmrs/tests/test_repeater_helpers.py | 83282f2a7427ec1fa865d3dd1356587c444939b9 | [
"BSD-3-Clause"
] | permissive | dungeonmaster51/commcare-hq | 64fece73671b03c1bca48cb9d1a58764d92796ea | 1c70ce416564efa496fb4ef6e9130c188aea0f40 | refs/heads/master | 2022-12-03T21:50:26.035495 | 2020-08-11T07:34:59 | 2020-08-11T07:34:59 | 279,546,551 | 1 | 0 | BSD-3-Clause | 2020-07-31T06:13:03 | 2020-07-14T09:51:32 | Python | UTF-8 | Python | false | false | 1,089 | py | from unittest import skip
from nose.tools import assert_regexp_matches
from corehq.motech.auth import BasicAuthManager
from corehq.motech.openmrs.repeater_helpers import generate_identifier
from corehq.motech.requests import Requests
# Credentials and endpoints for the public Bahmni demo server used by the
# (skipped) live integration test below.
DOMAIN = 'openmrs-test'
BASE_URL = 'https://demo.mybahmni.org/openmrs/'
USERNAME = 'superman'
PASSWORD = 'Admin123'

# Patient identifier type for use by the Bahmni Registration System
# https://demo.mybahmni.org/openmrs/admin/patients/patientIdentifierType.form?patientIdentifierTypeId=3
IDENTIFIER_TYPE = '81433852-3f10-11e4-adec-0800271c1b75'
@skip('Uses third-party web services')
def test_generate_identifier():
    # Live integration test against the Bahmni demo server: generates a
    # patient identifier and checks it matches the BAH###### pattern.
    # Skipped by default because it depends on an external service.
    auth_manager = BasicAuthManager(USERNAME, PASSWORD)
    requests = Requests(
        DOMAIN,
        BASE_URL,
        verify=False,  # demo.mybahmni.org uses a self-issued cert
        auth_manager=auth_manager,
        logger=dummy_logger,
    )
    identifier = generate_identifier(requests, IDENTIFIER_TYPE)
    assert_regexp_matches(identifier, r'^BAH\d{6}$')  # e.g. BAH203001
def dummy_logger(*args, **kwargs):
    """No-op logger handed to Requests so tests produce no log output."""
    return None
| [
"nhooper@dimagi.com"
] | nhooper@dimagi.com |
d157dc7adafb7e4b11b40e1cb6df040e6aa52c00 | b06f4dc6b1703f7e05026f2f3ff87ab776105b18 | /google/cloud/logging_v2/services/logging_service_v2/transports/grpc_asyncio.py | 1f33ad78a14fb422539ecb389430bec68882bee1 | [
"Apache-2.0"
] | permissive | anilit99/python-logging | 040eeb4eed7abe92757b285a1047db09242e89c9 | 9307ad72cb5a6d524ed79613a05858dbf88cc156 | refs/heads/master | 2023-06-01T04:49:38.975408 | 2021-06-17T10:52:15 | 2021-06-17T10:52:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,347 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.logging_v2.types import logging
from google.protobuf import empty_pb2 # type: ignore
from .base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO
from .grpc import LoggingServiceV2GrpcTransport
class LoggingServiceV2GrpcAsyncIOTransport(LoggingServiceV2Transport):
"""gRPC AsyncIO backend transport for LoggingServiceV2.
Service for ingesting and querying logs.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "logging.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs,
)
    def __init__(
        self,
        *,
        host: str = "logging.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        # Cache of RPC stubs, populated lazily by the stub properties below.
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No channel supplied: work out which SSL credentials to use.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Non-deprecated mTLS path; only used when the caller did not
                # already pass explicit ssl_channel_credentials.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )
        if not self._grpc_channel:
            # Build our own channel using the credentials/scopes resolved by
            # the base transport; -1 lifts gRPC's default message size limits.
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # The channel was created (or accepted from the caller) in __init__,
        # so we only ever hand back the cached instance here.
        return self._grpc_channel
@property
def delete_log(
self,
) -> Callable[[logging.DeleteLogRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete log method over gRPC.
Deletes all the log entries in a log. The log
reappears if it receives new entries. Log entries
written shortly before the delete operation might not be
deleted. Entries received after the delete operation
with a timestamp before the operation will be deleted.
Returns:
Callable[[~.DeleteLogRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_log" not in self._stubs:
self._stubs["delete_log"] = self.grpc_channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/DeleteLog",
request_serializer=logging.DeleteLogRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_log"]
@property
def write_log_entries(
self,
) -> Callable[
[logging.WriteLogEntriesRequest], Awaitable[logging.WriteLogEntriesResponse]
]:
r"""Return a callable for the write log entries method over gRPC.
Writes log entries to Logging. This API method is the
only way to send log entries to Logging. This method is
used, directly or indirectly, by the Logging agent
(fluentd) and all logging libraries configured to use
Logging. A single request may contain log entries for a
maximum of 1000 different resources (projects,
organizations, billing accounts or folders)
Returns:
Callable[[~.WriteLogEntriesRequest],
Awaitable[~.WriteLogEntriesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "write_log_entries" not in self._stubs:
self._stubs["write_log_entries"] = self.grpc_channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/WriteLogEntries",
request_serializer=logging.WriteLogEntriesRequest.serialize,
response_deserializer=logging.WriteLogEntriesResponse.deserialize,
)
return self._stubs["write_log_entries"]
@property
def list_log_entries(
self,
) -> Callable[
[logging.ListLogEntriesRequest], Awaitable[logging.ListLogEntriesResponse]
]:
r"""Return a callable for the list log entries method over gRPC.
Lists log entries. Use this method to retrieve log entries that
originated from a project/folder/organization/billing account.
For ways to export log entries, see `Exporting
Logs <https://cloud.google.com/logging/docs/export>`__.
Returns:
Callable[[~.ListLogEntriesRequest],
Awaitable[~.ListLogEntriesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_log_entries" not in self._stubs:
self._stubs["list_log_entries"] = self.grpc_channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/ListLogEntries",
request_serializer=logging.ListLogEntriesRequest.serialize,
response_deserializer=logging.ListLogEntriesResponse.deserialize,
)
return self._stubs["list_log_entries"]
@property
def list_monitored_resource_descriptors(
self,
) -> Callable[
[logging.ListMonitoredResourceDescriptorsRequest],
Awaitable[logging.ListMonitoredResourceDescriptorsResponse],
]:
r"""Return a callable for the list monitored resource
descriptors method over gRPC.
Lists the descriptors for monitored resource types
used by Logging.
Returns:
Callable[[~.ListMonitoredResourceDescriptorsRequest],
Awaitable[~.ListMonitoredResourceDescriptorsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_monitored_resource_descriptors" not in self._stubs:
self._stubs[
"list_monitored_resource_descriptors"
] = self.grpc_channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors",
request_serializer=logging.ListMonitoredResourceDescriptorsRequest.serialize,
response_deserializer=logging.ListMonitoredResourceDescriptorsResponse.deserialize,
)
return self._stubs["list_monitored_resource_descriptors"]
@property
def list_logs(
self,
) -> Callable[[logging.ListLogsRequest], Awaitable[logging.ListLogsResponse]]:
r"""Return a callable for the list logs method over gRPC.
Lists the logs in projects, organizations, folders,
or billing accounts. Only logs that have entries are
listed.
Returns:
Callable[[~.ListLogsRequest],
Awaitable[~.ListLogsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_logs" not in self._stubs:
self._stubs["list_logs"] = self.grpc_channel.unary_unary(
"/google.logging.v2.LoggingServiceV2/ListLogs",
request_serializer=logging.ListLogsRequest.serialize,
response_deserializer=logging.ListLogsResponse.deserialize,
)
return self._stubs["list_logs"]
@property
def tail_log_entries(
self,
) -> Callable[
[logging.TailLogEntriesRequest], Awaitable[logging.TailLogEntriesResponse]
]:
r"""Return a callable for the tail log entries method over gRPC.
Streaming read of log entries as they are ingested.
Until the stream is terminated, it will continue reading
logs.
Returns:
Callable[[~.TailLogEntriesRequest],
Awaitable[~.TailLogEntriesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "tail_log_entries" not in self._stubs:
self._stubs["tail_log_entries"] = self.grpc_channel.stream_stream(
"/google.logging.v2.LoggingServiceV2/TailLogEntries",
request_serializer=logging.TailLogEntriesRequest.serialize,
response_deserializer=logging.TailLogEntriesResponse.deserialize,
)
return self._stubs["tail_log_entries"]
__all__ = ("LoggingServiceV2GrpcAsyncIOTransport",)
| [
"noreply@github.com"
] | noreply@github.com |
585eae1b49fd2617ff07cd0c7343294fbf78333c | 96d9dc3f29661b1fbc732d6ae054eccb97e18843 | /ifind | a9971082afeb319cde78a1f9150a150dc3cc049c | [
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | mtxstarship/mattutils | 5ccd990fdb6252a9f318c6773f058a4dc763633b | 703df7a464e047399b3915b77241b73148502a1b | refs/heads/master | 2020-07-04T10:55:38.157363 | 2016-11-16T06:45:37 | 2016-11-16T06:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,937 | #!/usr/bin/env python3
'''
An ncurses wrapper around `find`.
'''
import curses, grp, os, pwd, random, shutil, stat, subprocess, sys, tempfile, time
EDITOR = os.environ.get('EDITOR', 'vim')   # program used by the 'e' (edit) action
PDF_VIEWER = 'evince'                      # viewer used for .pdf files by 'v'
VIEWER = os.environ.get('VIEWER', 'view')  # fallback viewer for the 'v' action
# Extensions handed to mplayer by the 'v' (view) action.
MPLAYABLE = ['.mp3', '.ogg', '.ogv', '.mp4', '.avi', '.flv', '.flac', '.wav', '.m4a']
class Item(object):
    """One `find` result: its path plus `ls -l`-style metadata for display.

    `path` is the NUL-separated bytes path emitted by `find -print0`.
    """

    def __init__(self, path):
        self.path = path.decode('utf-8')
        self.marked = False
        info = os.stat(path)
        # Owner/group are truncated and padded to 8 columns for the listing.
        self.permissions = str(stat.filemode(info.st_mode))
        self.hardlinks = info.st_nlink
        self.owner = pwd.getpwuid(info.st_uid).pw_name[:8].ljust(8)
        self.group = grp.getgrgid(info.st_gid).gr_name[:8].ljust(8)
        self.size = info.st_size
        self.timestamp = time.ctime(info.st_mtime)
def init():
    """Set up curses for full-screen use and return the screen object."""
    screen = curses.initscr()
    curses.start_color()
    curses.noecho()      # do not echo typed characters
    curses.cbreak()      # react to keys immediately, without Enter
    screen.keypad(1)     # translate arrow/function keys into constants
    curses.curs_set(0)   # hide the hardware cursor
    return screen
def deinit(screen):
    """Undo init(): restore the terminal to normal line-buffered echo mode."""
    curses.nocbreak()
    screen.keypad(0)
    curses.echo()
    curses.endwin()
def refresh(screen, found, errors, selected):
    """Redraw the whole UI: command line, one page of results, and help bar.

    `found` is the list of Item results, `errors` the count of stderr lines
    from find, and `selected` the index of the highlighted row.
    """
    height, width = screen.getmaxyx()
    # Rows available for results: total minus title, help bar and the two
    # paging-indicator rows.
    limit = height - 4
    page = int(selected / limit)
    screen.clear()
    # Title row: the find invocation we are displaying results for.
    screen.addstr(1, 1, ' '.join(sys.argv), curses.A_BOLD)
    for i, f in enumerate(found[page * limit:(page + 1) * limit]):
        # Highlight the currently-selected row.
        style = curses.A_REVERSE if page * limit + i == selected else 0
        screen.addstr(2 + i, 2, '[%s] ' % ('X' if f.marked else ' '), style)
        # ls -l style listing, clipped/padded to the remaining width.
        screen.addstr(2 + i, 6, ('%(permissions)s %(hardlinks)2d %(owner)s %(group)s %(size)6d %(timestamp)s %(path)s' % f.__dict__)[:width - 8].ljust(width - 8),
            style)
    screen.addstr(height - 2, 1, 'a = select all | d = delete | e = edit | n = select none | q = quit | r = -exec | s = shuffle | v = view | x = execute | space = mark/unmark',
        curses.A_BOLD)
    if errors > 0:
        # Show the error count in red at the right edge of the help bar.
        warning = '%d errors' % errors
        curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
        screen.addstr(height - 2, width - len(warning) - 1, warning,
            curses.A_BOLD|curses.color_pair(1))
    # Up/down arrows indicating more results on previous/next pages.
    if page > 0 and len(found) > limit:
        screen.addstr(2, width - 3, '/\\')
    if (page + 1) * limit < len(found):
        screen.addstr(2 + limit, width - 3, '\\/')
    screen.refresh()
def run(commands, screen):
    """Leave curses, run each command in `commands` (argv lists), re-enter.

    Returns the freshly re-initialised curses screen.  Command failures are
    deliberately best-effort: one unrunnable command must not abort the rest.
    """
    deinit(screen)
    for i, c in enumerate(commands):
        print('Running %s (%d/%d)...' % \
            (' '.join(c), i + 1, len(commands)))
        try:
            subprocess.call(c)
        except Exception:
            # Bug fix: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  Only ordinary errors (e.g. an
            # unspawnable command) should be ignored.
            pass
    scr = init()
    return scr
def get_files(found, selected):
    """Return the items an action applies to.

    If the currently-selected item is marked, the action applies to every
    marked item; otherwise it applies only to the selected item itself.
    """
    current = found[selected]
    if not current.marked:
        return [current]
    return [item for item in found if item.marked]
def main():
    """Run `find`, show the results in a curses UI, dispatch key actions.

    Returns a process exit status: 0 on success, -1 when find produced
    only errors and no results.
    """
    print('Running %s...' % ' '.join(sys.argv))
    # -print0 emits NUL-separated paths, safe against spaces and newlines.
    p = subprocess.Popen(['find'] + sys.argv[1:] + ['-print0'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if stderr:
        print(stderr.decode('utf-8'), end='', file=sys.stderr)
    found = list(map(Item, [x for x in stdout.split(b'\x00') if x != b'']))
    errors = stderr.count(b'\n')  # rough error count: one per stderr line
    selected = 0
    if len(found) == 0:
        if errors:
            print(stderr, end='', file=sys.stderr)
            return -1
        print('nothing relevant found')
        return 0
    scr = init()
    # Main event loop: redraw, read one key, act on it.
    while True:
        refresh(scr, found, errors, selected)
        c = scr.getch()
        if c == ord('a'):
            # Mark every result.
            for f in found:
                f.marked = True
        elif c == ord('d'):
            # Delete the marked (or selected) entries after confirmation.
            files = get_files(found, selected)
            deinit(scr)
            confirm = input('The following will be removed:\n %s\nAre you sure [y/N]? ' \
                % '\n '.join([x.path for x in files]))
            if confirm == 'y':
                for f in files:
                    if os.path.isdir(f.path):
                        shutil.rmtree(f.path)
                    else:
                        os.remove(f.path)
            scr = init()
        elif c == ord('e'):
            # Open entries in an editor; audio formats go to Audacity.
            files = get_files(found, selected)
            cmds = []
            for f in files:
                ext = os.path.splitext(f.path)[1].lower()
                if ext in ['.mp3', '.ogg', '.flac', '.wav']:
                    prog = ['audacity']
                else:
                    prog = [EDITOR]
                cmds.append(prog + [f.path])
            scr = run(cmds, scr)
        elif c == ord('n'):
            # Clear every mark.
            for f in found:
                f.marked = False
        elif c == ord('q'):
            break
        elif c == ord('r'):
            # Run a user-supplied shell command per file; '{}' expands to
            # the file path, mimicking find's -exec.
            files = get_files(found, selected)
            deinit(scr)
            base = input('command: ')
            if base:
                for i, cmd in enumerate([base.replace('{}', x.path) for x in files]):
                    print('Running %s (%d/%d)...' % (cmd, i + 1, len(files)))
                    try:
                        subprocess.call(cmd, shell=True)
                    except:
                        pass
                input(' -- Done -- ')
            scr = init()
        elif c == ord('s'):
            # Shuffle the display order (handy for media playback).
            random.shuffle(found)
        elif c == ord('v'):
            # View entries with a viewer appropriate to their extension.
            files = get_files(found, selected)
            files = [(f, os.path.splitext(f.path)[1].lower()) for f in files]
            cmds = []
            playlist = None
            if len(files) == len(list(filter(lambda x: x[1] in MPLAYABLE, files))):
                # Everything will be opened by mplayer, so put it in a playlist
                # so the user can easily navigate within mplayer.
                _, playlist = tempfile.mkstemp()
                with open(playlist, 'w') as p:
                    p.write('\n'.join([os.path.abspath(f[0].path) for f in files]))
                cmds.append(['mplayer', '-fs', '-playlist', playlist])
            else:
                for f in files:
                    ext = f[1]
                    if ext == '.pdf':
                        prog = [PDF_VIEWER]
                    elif ext in MPLAYABLE:
                        prog = ['mplayer', '-fs']
                    else:
                        prog = [VIEWER]
                    cmds.append(prog + [f[0].path])
            scr = run(cmds, scr)
            if playlist is not None:
                # Delete the temporary playlist we created.
                os.remove(playlist)
        elif c == ord('x'):
            # Execute the files themselves.
            files = get_files(found, selected)
            scr = run([[x.path] for x in files], scr)
        elif c == ord(' '):
            # Toggle the mark on the selected entry.
            found[selected].marked = not found[selected].marked
        elif c == curses.KEY_UP:
            if selected > 0:
                selected -= 1
        elif c == curses.KEY_DOWN:
            if selected < len(found) - 1:
                selected += 1
    deinit(scr)
    return 0
if __name__ == '__main__':
    # Exit status is main()'s return value (0, or -1 on find failure).
    sys.exit(main())
| [
"matthew.fernandez@gmail.com"
] | matthew.fernandez@gmail.com | |
080a6e7e8f5e0c897c92846f23df703ff1cf81f0 | a8750439f200e4efc11715df797489f30e9828c6 | /LeetCodeContests/87/845_longest_mountain.py | 141d5018c884992b88f6afeddcd2fd5ae122f0db | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | '''
Let's call any (contiguous) subarray B (of A) a mountain if the following properties hold:
B.length >= 3
There exists some 0 < i < B.length - 1 such that B[0] < B[1] < ... B[i-1] < B[i] > B[i+1] > ... > B[B.length - 1]
(Note that B could be any subarray of A, including the entire array A.)
Given an array A of integers, return the length of the longest mountain.
Return 0 if there is no mountain.
Example 1:
Input: [2,1,4,7,3,2,5]
Output: 5
Explanation: The largest mountain is [1,4,7,3,2] which has length 5.
Example 2:
Input: [2,2,2]
Output: 0
Explanation: There is no mountain.
Note:
0 <= A.length <= 10000
0 <= A[i] <= 10000
'''
class Solution:
def longestMountain(self, A):
"""
:type A: List[int]
:rtype: int
"""
size = len(A)
ans = 0
for i in range(1, size-1):
if A[i] > A[i-1] and A[i] > A[i+1]:
l = i - 1
r = i + 1
while l > 0 and A[l] > A[l-1]: l-= 1
while r < size-1 and A[r] > A[r+1]: r +=1
ans = max(ans, r - l + 1)
return ans
sol = Solution()
print(sol.longestMountain([0,1,2,3,4,5,4,3,2,1,0]))
| [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
2821a21f0162a4a77a2f98f168a897b8996859f9 | c75a22d65a2cc8afe905ef73ae9d5db7556ac2d6 | /python/Binary Tree/bst_recursion.py | 4a5f1db2e3ec076322b9a25f28e07ff267e431da | [
"MIT"
] | permissive | SoumyaMalgonde/AlgoBook | 2956885fe69451a61f03c8efc1607d6d88926ac9 | b21386e54a49d03c74f27eee9818b6d2a715371d | refs/heads/master | 2023-01-05T01:14:39.054932 | 2020-10-29T09:45:43 | 2020-10-29T09:45:43 | 308,100,608 | 3 | 0 | MIT | 2020-10-28T18:18:06 | 2020-10-28T18:02:55 | Jupyter Notebook | UTF-8 | Python | false | false | 2,248 | py | class Node:
def __init__(self,key):
self.left = None
self.right = None
self.value = key
def insert(root,key):
    """Insert key into the BST rooted at root; return the (possibly new) root.

    Duplicate keys are reported on stdout and not inserted again.
    """
    if root is None:
        return Node(key)
    if root.value == key:
        print("Value already exists")
    elif key > root.value:
        root.right = insert(root.right, key)
    else:
        root.left = insert(root.left, key)
    return root
def inorder(root):
    """Print the tree's keys in ascending (in-order) order, one per line."""
    if root is None:
        return
    inorder(root.left)
    print(root.value)
    inorder(root.right)
def search(root, key):
    """Look up key in the BST rooted at root.

    Prints whether the element was found and returns the matching node,
    or None when the key is absent.
    """
    if root is None:
        print("Element not found")
        return None
    if root.value == key:
        print("Element found")
        return root
    # Bug fix: the recursive calls previously discarded their result, so any
    # match below the root still returned None to the caller.
    if root.value > key:
        return search(root.left, key)
    return search(root.right, key)
def delete(root, key):
    """Remove key from the BST rooted at root and return the new root."""
    if root is None:
        return root
    if key < root.value:
        root.left = delete(root.left, key)
        return root
    if key > root.value:
        root.right = delete(root.right, key)
        return root
    # This is the node to remove.
    if root.left is None:
        return root.right   # also covers the leaf case (both children None)
    if root.right is None:
        return root.left
    # Two children: copy in the in-order successor's key, then delete that
    # successor from the right subtree.
    successor = getMin(root.right)
    root.value = successor.value
    root.right = delete(root.right, successor.value)
    return root
def getMin(node):
    """Return the left-most (minimum-key) node of the subtree at node."""
    while node.left is not None:
        node = node.left
    return node
if __name__=="__main__":
    # Interactive driver: seed the tree with one root key, then loop on a
    # numeric menu until the user chooses 5 (exit).
    r = insert(None,int(input("Enter root node: ")))
    while True:
        choice = int(input("1 - Insert Node, 2 - Print in inorder, 3 - Delete, 4 - Search: , 5 - Exit: "))
        if choice == 1:
            r = insert(r,int(input("Enter node value: ")))
        elif choice == 2:
            inorder(r)
        elif choice == 3:
            # delete returns the (possibly new) root, so rebind r.
            r = delete(r,int(input("Enter the value of the node to be deleted: ")))
        elif choice == 4:
            search(r,int(input("Enter the node value to be searched: ")))
        elif choice == 5:
            break
"pranavkarnani@icloud.com"
] | pranavkarnani@icloud.com |
56f61545798151002ae6430bfffe35590ecc58ae | 72c411cb982de8289f50b3794968c33373e776c5 | /requirements/area.py | f3c9a887aabc6dd64d864f58f8dcb48740b12020 | [] | no_license | xDrone-DSL/flask-backend | 3592b93d873732180fac7a7887b4593f7bdb072f | 3f3a7bbf1f035beab76d2cfe603d9ee63c4705de | refs/heads/master | 2022-08-26T03:10:25.143696 | 2021-04-02T02:53:16 | 2021-04-02T02:53:16 | 217,087,101 | 1 | 1 | null | 2022-08-06T05:36:44 | 2019-10-23T15:09:09 | Python | UTF-8 | Python | false | false | 530 | py |
class Area:
    """An axis-aligned 3-D box built from a dict of per-axis bounds.

    The constructor accepts the bounds of each axis in either order and
    normalises them so that low <= high.
    """

    def __init__(self, z):
        self.x_low, self.x_high = sorted((z["x_low"], z["x_high"]))
        self.y_low, self.y_high = sorted((z["y_low"], z["y_high"]))
        self.z_low, self.z_high = sorted((z["z_low"], z["z_high"]))

    def contains(self, x, y, z):
        """Return True iff (x, y, z) lies inside the box, bounds inclusive."""
        return (self.x_low <= x <= self.x_high
                and self.y_low <= y <= self.y_high
                and self.z_low <= z <= self.z_high)
| [
"mb4617@ic.ac.uk"
] | mb4617@ic.ac.uk |
883164030eb6e0bc556907b8b9b6df0a83a0820a | f67473326cda6fb53dfdf4a3bef851d395413a81 | /build/lib/keras_rcnn/datasets/__init__.py | dce2be54a159b5b284e7c9611f0f5dffb41a10dd | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | jeffreysijuntan/MaskRCNN | e846f5b2ded8c8f389c9fc9dfa27052e3f98b948 | a552e5958413f59d5c67bd7822a70b426c00b4a7 | refs/heads/master | 2020-03-06T20:59:04.326051 | 2018-03-28T18:42:37 | 2018-03-28T18:42:37 | 127,067,330 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | # -*- coding: utf-8 -*-
import json
import os.path
import keras.utils.data_utils
def load_data(name):
    """Download (and cache) the named dataset archive and load its annotations.

    Returns a (training, test) pair of annotation-dictionary lists whose image
    (and, when a masks directory exists in the archive, mask) pathnames have
    been resolved against the extracted archive.
    """
    origin = "http://storage.googleapis.com/tsjbucket/{}.tar.gz".format(name)
    pathname = keras.utils.data_utils.get_file(
        fname=name,
        origin=origin,
        untar=True
    )
    training_images_pathname = os.path.join(pathname, "train")
    testing_images_pathname = os.path.join(pathname, 'test')
    masks_pathname = os.path.join(pathname, "masks")
    # Masks are optional; signal their absence with None.
    if not os.path.exists(masks_pathname):
        masks_pathname = None
    training_pathname = '../data/training.json'
    training = get_file_data(training_pathname, training_images_pathname, masks_pathname)
    test_pathname = '../data/testing.json'
    # Bug fix: this previously referenced the misspelled name
    # `tesing_images_pathname`, raising NameError whenever load_data ran.
    test = get_file_data(test_pathname, testing_images_pathname)
    return training, test
def get_file_data(json_pathname, images_pathname, masks_pathname=None):
    """Load annotation dictionaries from json_pathname and resolve pathnames.

    Each dictionary's image pathname is joined onto images_pathname; mask
    pathnames are joined onto masks_pathname only when it is given.  A missing
    annotation file yields an empty list.
    """
    if not os.path.exists(json_pathname):
        return []
    with open(json_pathname) as handle:
        dictionaries = json.load(handle)
    for dictionary in dictionaries:
        image = dictionary["image"]
        image["pathname"] = os.path.join(images_pathname, image["pathname"])
        if masks_pathname:
            for instance in dictionary["objects"]:
                mask = instance["mask"]
                mask["pathname"] = os.path.join(masks_pathname, mask["pathname"])
    return dictionaries
| [
"sijuntan@yeah.net"
] | sijuntan@yeah.net |
426d140370b3d594e0a398fb1603285aa7ead675 | 0c212aa63d07e84fbad849d15f2ee6a72aea82d2 | /11-格式化文件储存/p03.py | 39a910a3f2badbfbeb222fdbbe1cf96c8e57ffde | [] | no_license | flyingtothe/Python | e55b54e1b646d391550c8ced12ee92055c902c63 | 064964cb30308a38eefa5dc3059c065fcb89dd9f | refs/heads/master | 2021-08-06T19:44:42.137076 | 2018-12-03T12:15:15 | 2018-12-03T12:15:15 | 145,518,863 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import xml.etree.ElementTree as et
tree = et.parse(r'to_edit.xml')
root = tree.getroot()
for e in root.iter('Name'):
print(e.text)
for stu in root.iter('Student'):
name = stu.find('Name')
if name != None:
name.set( 'test', name.text * 2)
stu = root.find('Student')
#生成一个新的 元素
e = et.Element('ADDer')
e.attrib = {'a':'b'}
e.text = '我加的'
stu.append(e)
# 一定要把修改后的内容写回文件,否则修改无效
tree.write('to_edit.xml')
| [
"heidemeirenai@163.com"
] | heidemeirenai@163.com |
9cab9dc37c46f3af6d44d688dd5c03fcf4425162 | 10c459a49cbc8ee2dc3bc2a8353c48b5a96f0c1d | /AI/nai_bayes.py | 131804de264e30fc0df16076a4ac00543533cbaf | [] | no_license | alinzel/Demo | 1a5d0e4596ab4c91d7b580da694b852495c4ddcc | cc22bbcdbd77190014e9c26e963abd7a9f4f0829 | refs/heads/master | 2020-03-10T22:26:30.247695 | 2018-04-15T15:37:28 | 2018-04-15T15:37:28 | 129,619,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,994 | py | import numpy as np
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from time import time
from pprint import pprint
import matplotlib.pyplot as plt
import matplotlib as mpl
def make_test(classfier):
print('分类器:', classfier)
alpha_can = np.logspace(-3, 2, 10)
model = GridSearchCV(classfier, param_grid={'alpha': alpha_can}, cv=5)
model.set_params(param_grid={'alpha': alpha_can})
t_start = time()
model.fit(x_train, y_train)
t_end = time()
t_train = (t_end - t_start) / (5 * alpha_can.size)
print('5折交叉验证的训练时间为:%.3f秒/(5*%d)=%.3f秒' % ((t_end - t_start), alpha_can.size, t_train))
print('最优超参数为:', model.best_params_)
t_start = time()
y_hat = model.predict(x_test)
t_end = time()
t_test = t_end - t_start
print('测试时间:%.3f秒' % t_test)
acc = metrics.accuracy_score(y_test, y_hat)
print('测试集准确率:%.2f%%' % (100 * acc))
name = str(classfier).split('(')[0]
index = name.find('Classifier')
if index != -1:
name = name[:index] # 去掉末尾的Classifier
return t_train, t_test, 1 - acc, name
if __name__ == "__main__":
remove = ('headers', 'footers', 'quotes')
categories = 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space' # 选择四个类别进行分类
# 下载数据
data_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=0, remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=0, remove=remove)
print('训练集包含的文本数目:', len(data_train.data))
print('测试集包含的文本数目:', len(data_test.data))
print('训练集和测试集使用的%d个类别的名称:' % len(categories))
categories = data_train.target_names
pprint(categories)
y_train = data_train.target
y_test = data_test.target
print(' -- 前10个文本 -- ')
for i in np.arange(10):
print('文本%d(属于类别 - %s):' % (i + 1, categories[y_train[i]]))
print(data_train.data[i])
print('\n\n')
# tf-idf处理
vectorizer = TfidfVectorizer(input='content', stop_words='english', max_df=0.5, sublinear_tf=True)
x_train = vectorizer.fit_transform(data_train.data)
x_test = vectorizer.transform(data_test.data)
print('训练集样本个数:%d,特征个数:%d' % x_train.shape)
print('停止词:\n', end=' ')
#pprint(vectorizer.get_stop_words())
feature_names = np.asarray(vectorizer.get_feature_names())
# 比较分类器结果
clfs = (MultinomialNB(), # 0.87(0.017), 0.002, 90.39%
BernoulliNB(), # 1.592(0.032), 0.010, 88.54%
)
result = []
for clf in clfs:
r = make_test(clf)
result.append(r)
print('\n')
result = np.array(result)
time_train, time_test, err, names = result.T
time_train = time_train.astype(np.float)
time_test = time_test.astype(np.float)
err = err.astype(np.float)
x = np.arange(len(time_train))
mpl.rcParams['font.sans-serif'] = ['simHei']
mpl.rcParams['axes.unicode_minus'] = False
plt.figure(figsize=(10, 7), facecolor='w')
ax = plt.axes()
b1 = ax.bar(x, err, width=0.25, color='#77E0A0')
ax_t = ax.twinx()
b2 = ax_t.bar(x + 0.25, time_train, width=0.25, color='#FFA0A0')
b3 = ax_t.bar(x + 0.5, time_test, width=0.25, color='#FF8080')
plt.xticks(x + 0.5, names)
plt.legend([b1[0], b2[0], b3[0]], ('错误率', '训练时间', '测试时间'), loc='upper left', shadow=True)
plt.title('新闻组文本数据不同分类器间的比较', fontsize=18)
plt.xlabel('分类器名称')
plt.grid(True)
plt.tight_layout(2)
plt.show()
| [
"944951481@qq.com"
] | 944951481@qq.com |
7782ae2cf530c772011e7e4dd7449946e8542ad0 | 959c5b935f4a039878a995ffa102b860ab0867fa | /hello/migrations/0001_initial.py | a9ab96cc6a3a3072da562e8286306f664344c8a5 | [] | no_license | hari2014/test | b92d2eee83b1aed9ffa91e2edb5c4651a712b407 | f4fb1109ac0665652a499292fb4cadaaa2191e3e | refs/heads/master | 2021-01-22T12:35:31.339143 | 2017-09-04T16:40:30 | 2017-09-04T16:40:30 | 102,350,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-04 09:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='detail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(default='', max_length=100)),
('last_name', models.CharField(default='', max_length=100)),
('email', models.CharField(default='', max_length=100)),
('phone', models.CharField(max_length=100)),
('password', models.CharField(default='', max_length=100)),
],
),
migrations.CreateModel(
name='login',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100)),
('password', models.CharField(max_length=100)),
],
),
]
| [
"noreply@github.com"
] | noreply@github.com |
8df2f07e84e4f4a4d53706f3fb37fdbd8c2be89b | 237ac9cdbb5edcf84abc736efd5833bc077961b6 | /bin/chardetect | b507bdb15b6d0f28815df91c67f821949496f79e | [
"MIT"
] | permissive | leonbroadbent/vmware-ansible | 3a8427ca4dc84231619dee21270d21c7b287a1a1 | 8253dfcbe3bb44ed859b9084006c8d81e143ba12 | refs/heads/main | 2022-12-30T09:29:40.038125 | 2020-10-07T00:54:35 | 2020-10-07T00:54:35 | 301,825,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | #!/home/leon/Documents/Projects/vmware-ansible/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"leon@leonbroadbent.com"
] | leon@leonbroadbent.com | |
06f235fd1143199f8fbe874e8717f8ab7ab6aadd | 9b524391b71abf948471d2c982fadcefc6f09c66 | /day04.py | c1483ed086e687e992a55bea27d0cf691be6e67b | [] | no_license | chena/aoc-2018 | a4d30cc3116e0e1e9f6fa54971c95c018a05f3ac | 2300ac22a30b5c9b1d03dd479b14d77206b2186c | refs/heads/master | 2020-04-09T05:16:32.015415 | 2019-02-23T02:22:24 | 2019-02-23T02:22:24 | 160,057,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | from util import get_input
from collections import defaultdict
import datetime
import re
def process():
    """Solve Advent of Code 2018 day 4.

    Parses the guard-shift logs, tracks which guard is asleep during each
    minute of the midnight hour, and returns the part-2 answer
    (minute * guard id for the guard most frequently asleep on the same
    minute).  The part-1 computation is performed too but its return is
    commented out below.

    NOTE: Python 2 code (xrange, tuple-parameter lambdas).
    """
    logs = []
    guard_hrs = {}  # guard id -> total minutes asleep
    # sort the logs
    # Each entry becomes (timestamp, content) where content is a guard id
    # (shift start), 'sleep', or 'wake'.
    for log in get_input('day04.txt'):
        timestamp = datetime.datetime.strptime(log[1:17], '%Y-%m-%d %H:%M')
        content = 'sleep'
        pattern = re.compile('Guard #(.+) begins')
        found_guard = pattern.match(log[19:])
        if found_guard:
            content = found_guard.group(1)
            guard_hrs[content] = 0
        elif log.find('wake') > 0:
            content = 'wake'
        logs.append((timestamp, content))
    logs.sort(key=lambda log: log[0])
    # calculate total hours slept
    index = 0
    track_minutes = defaultdict(list)  # minute -> guard ids asleep that minute
    while index < len(logs):
        time, guard = logs[index]
        index += 1
        # Consume alternating sleep/wake pairs for this guard's shift.
        while index < len(logs) and logs[index][1] == 'sleep':
            sleep_time = logs[index][0]
            index += 1
            wake_time = logs[index][0]
            index += 1
            sleep_duration = (wake_time - sleep_time).seconds / 60
            for i in xrange(sleep_time.minute, sleep_time.minute + sleep_duration):
                track_minutes[i].append(guard)
            guard_hrs[guard] += sleep_duration
    # part1: find the guard that has the most minutes asleep and the max minute the max guard is alseep at
    max_guard = max(guard_hrs, key=guard_hrs.get)
    max_minute = max(map(lambda (k, v): (k, v.count(max_guard)), track_minutes.items()), key=lambda t: t[1])[0]
    # return max_minute * int(max_guard)
    # part2: find guard that is most frequently asleep on the same minute
    for m, guards in track_minutes.items():
        distinct_g = list(set(guards))
        counts = zip(distinct_g, [guards.count(g) for g in distinct_g])
        track_minutes[m] = max(counts, key=lambda c: c[1])
    max_guard_minute = max(track_minutes.items(), key=lambda (k, v): v[1])
    return max_guard_minute[0] * int(max_guard_minute[1][0])
print(process())
| [
"alice@perka.com"
] | alice@perka.com |
7d084be3b90386de03a9247bbd700b449d4d3c35 | 1609e3853dd35a3bd7269ac909c487354dbd3307 | /kpsinvo/core_app/migrations/0001_initial.py | ded6f2331b2e608d204058594ae6f461d47f26d8 | [] | no_license | Hyrla/KPSInvo | 33a477ee34df04ab6382b8a7790ae4aa95f87e10 | 674d90456ea52ce65245db3279d0888e7f970242 | refs/heads/master | 2023-01-02T06:47:08.048760 | 2020-10-15T14:35:47 | 2020-10-15T14:35:47 | 304,132,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | # Generated by Django 3.1.2 on 2020-10-14 20:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Thing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('picture', models.FileField(upload_to='')),
('barcode_kps', models.CharField(max_length=30)),
('barcode_manufacturer', models.CharField(max_length=50)),
],
),
]
| [
"fournier@et.esiea.fr"
] | fournier@et.esiea.fr |
16b25ff5a3aed45017c060619a98e4dfce6a60d6 | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_sp_outer/Jobs/Pc/Pc_neut_neut_inner1_outer4/Pc_neut_neut_inner1_outer4.py | 416385fcff412563b8446a40888f5125d9823323 | [] | no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 5,279 | py | import sys
# Batch script: builds a pentacene (Pc) crystal model with a polarizable inner
# region (insize translation vectors) surrounded by a single-point outer region
# (outsize translation vectors), solves for the induced dipoles, and writes the
# resulting energies/dipoles to .dat/.csv files for gnuplot.
# Python 2 script (uses print statements).
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
name='Pc_neut_neut_inner1_outer4'
#For crystals here, all cubic and centred at centre
insize=1
#number of TVs in each dir central mol is from edge of inner region
outsize=4
# .xyz files for the two molecules of the unit cell: centre, surrounding
# (polarizable) region, and outer (single-point charge) region.
mols_cen=['Pc_mola_neut_aniso_cifstruct_chelpg.xyz','Pc_molb_neut_aniso_cifstruct_chelpg.xyz']
mols_sur=['Pc_mola_neut_aniso_cifstruct_chelpg.xyz','Pc_molb_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_Pc_mola_neut.xyz','sp_Pc_molb_neut.xyz']
#From cif:
'''
Pc
_cell_length_a 7.900
_cell_length_b 6.060
_cell_length_c 16.010
_cell_angle_alpha 101.90
_cell_angle_beta 112.60
_cell_angle_gamma 85.80
_cell_volume 692.384
'''
#Get translation vectors:
# Cell lengths converted from Angstrom to Bohr (1 Bohr = 0.5291772109217 A).
a=7.900/0.5291772109217
b=6.060/0.5291772109217
c=16.010/0.5291772109217
alpha=101.90*(pi/180)
beta=112.60*(pi/180)
# NOTE(review): gamma is hard-coded to 90 deg although the cif block above
# lists _cell_angle_gamma 85.80 — confirm this is intentional.
gamma=90*(pi/180)
cif_unit_cell_volume=692.384/(a*b*c*(0.5291772109217**3))
# Dimensionless volume factor of the triclinic cell (V / (a*b*c)).
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
# Interaction cutoff (Bohr) used when building the J matrix / dipoles.
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
# Zero external field: dipoles arise purely from the molecular charges.
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
jm = JMatrix(cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
# Total dipole moment = sum of all per-atom dipoles.
tot = np.matrix([0.,0.,0.])
for dd in split_d:
    tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
# 27.211 converts Hartree (atomic units) to eV.
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
# Total electrostatic + polarisation energy, in eV and in Hartree.
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
    dstr=str(dd)
    f.write(dstr)
    f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
print 'Job Completed Successfully.'
| [
"sheridan.few@gmail.com"
] | sheridan.few@gmail.com |
3b6132e8675885d7b3c34747abc099bd39c9fcd5 | 59b7d65c84575aa6dd3e4831a9a3923a456b1be3 | /query.py | 09c22e65dfef71c3b90646aba398175bab6f9a1e | [] | no_license | leehelenah/Analyze-H1B-PW-data | 84095b7809a8ed851aede5f1e1662d93b8f1b73a | 75f71c7e57b7834fef0e0f5c20975d40f3ee394a | refs/heads/main | 2023-08-31T11:05:49.149378 | 2021-09-26T02:53:07 | 2021-09-26T02:53:07 | 405,804,569 | 0 | 0 | null | 2021-09-25T05:37:07 | 2021-09-13T02:12:32 | Jupyter Notebook | UTF-8 | Python | false | false | 561 | py | LCA = """
SELECT *,
CASE WHEN WAGE_UNIT_OF_PAY='Year' THEN WAGE_RATE_OF_PAY_FROM ELSE WAGE_RATE_OF_PAY_FROM*50000 END AS ANNUAL_INCOME,
'{file_source}' AS FILE_SOURCE,
UPPER(WORKSITE_COUNTY) AS WORKSITE_COUNTY_UPPER
FROM {view}
WHERE CASE_STATUS='Certified'
AND FULL_TIME_POSITION='Y'
AND WAGE_UNIT_OF_PAY in ('Year', 'Hour')
"""
# Prevailing Wage (PW) query template: keeps only issued determinations for
# US employers. FILE_SOURCE tags the originating data file, and the worksite
# county is upper-cased for consistent joins against the LCA data.
# Placeholders: {file_source} (label string), {view} (table/view name).
PW = """
SELECT *,
'{file_source}' AS FILE_SOURCE,
UPPER(PRIMARY_WORKSITE_COUNTY) AS WORKSITE_COUNTY_UPPER
FROM {view}
WHERE CASE_STATUS='Determination Issued'
AND EMPLOYER_COUNTRY='UNITED STATES OF AMERICA'
"""
| [
"noreply@github.com"
] | noreply@github.com |
681440009127bf5750638d9a87a4155477b2fda3 | 43b34ed0be64771f236c086f716bc6a92ae3db32 | /kt_ph_n.py | 8073dcefb011746731594e8d83f99505915ad414 | [] | no_license | elonca/LWB-benchmark-generator | ad5b6dc5e591941184056476db3ad13f01900879 | 7a7f28800f7574c98a3883f6edccad727dd509bc | refs/heads/main | 2023-07-28T01:42:22.532324 | 2021-09-16T07:22:56 | 2021-09-16T07:22:56 | 407,061,367 | 0 | 0 | null | 2021-09-16T07:12:38 | 2021-09-16T07:12:37 | null | UTF-8 | Python | false | false | 135 | py | from k_ph_n import *
def kt_ph_n(n):
    """Render the KT pigeonhole formula of size *n*, without the outermost parentheses."""
    rendered = str(kt_ph_n_f(n))
    return rendered[1:-1]
def kt_ph_n_f(n):
    """Build the KT pigeonhole formula object: left(n) implies possibly(right(n))."""
    antecedent = left(n)
    consequent = Dia(right(n))
    return antecedent |IMPLIES| consequent
| [
"u6427001@anu.edu.au"
] | u6427001@anu.edu.au |
0daa3d13c4d74a0a76bdd1193d5124f2a006877e | ccda463cf6eb0ef6690f811e3c54c439d462019e | /Exercise 1_7_rps.py | 75bd8fd9459f355771463912ee3d4a92ac8f291a | [] | no_license | ViralGor/Assignment-1 | bae6e9fca993990a3afc50a294f55170a976e445 | 81d8184d962165398225d98f091adbfa1550a943 | refs/heads/master | 2021-08-26T08:12:20.412634 | 2017-11-22T11:37:03 | 2017-11-22T11:37:03 | 111,676,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | player1 = input("Enter player1?: ")
def rps_winner(player1, player2):
    """Return the rock-paper-scissors outcome for the two plays.

    Returns 'Player1 won', 'Player2 won', 'Tie', or 'Invalid input' when
    either play is not one of 'rock', 'paper', 'scissors'.
    """
    # Each key beats its value.
    beats = {"rock": "scissors", "paper": "rock", "scissors": "paper"}
    if player1 not in beats or player2 not in beats:
        return "Invalid input"
    if player1 == player2:
        # Fixes the original bug where scissors vs scissors printed "scissors".
        return "Tie"
    if beats[player1] == player2:
        return "Player1 won"
    return "Player2 won"


if __name__ == "__main__":
    player1 = input("Enter player1?: ")
    player2 = input("Enter player2?: ")
    print(rps_winner(player1, player2))
| [
"33891077+ViralGor@users.noreply.github.com"
] | 33891077+ViralGor@users.noreply.github.com |
498bf8c8f040a41ab9c636bdd1e38a5376c813cc | fec70eda5a0833887632dbf5a2408b2cb67a98a3 | /condicionales/main.py | 84e565a71213a2238a2360fd0f264bf3d1a1b761 | [] | no_license | jmav94/topicos-con-python-2021 | d07dedd76a8375b94b7e03fe7a8dd4698b0feae4 | 72797f8d59101f7f45b7ca668ba733e898bf75ad | refs/heads/master | 2023-05-23T23:17:16.427899 | 2021-06-14T05:10:27 | 2021-06-14T05:10:27 | 342,257,794 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | """
IF
SI se cumple esta condicion:
Ejecutar estas instrucciones
SI NO:
Se ejecutan estas otras instrucciones
if condicion:
instrucciones
else:
otras instrucciones
# Operadores de comparacion
== igual
!= diferente
< menor que
> mayor que
<= menor o igual que
>= mayor o igual que
# Operadores Logicos
and Y
or O
! negacion
not no
"""
# Ejemplo
color = "amarillo"
#color = input("Adivina cual es mi color favorito: ")
if color == "rojo":
print("Felicidades adivinaste el color.")
else:
print("Este no es mi color.")
print("########### condicionales comparando enteros con operadores relacionales ###############")
# ejemplo 2
anio = 2021
#anio = int(input("En que año estamos? "))
if anio < 2022:
print("Estamos antes del 2022")
else:
print("Es un año posterior al 2021")
print("________________________________")
# ejemplo 3 if anidados
"""nombre = input("Captura tu nombre: ")
apellido = input("Captura tu apellido: ")
numControl = int(input("Captura tu numero de control: "))
edad = int(input("Captura tu edad: "))
semestre = int(input("Captura el semestre: "))"""
nombre = "Juan"
apellido = "Perez"
numControl = 20100105
edad = 23
semestre = 9
if semestre >= 7:
print(f"{nombre} felicidades estas listo para elegir tu especialidad.")
if edad >= 21:
print("Tambien puedes realizar tu servicio social.")
else:
print("Por el momento no eres apto para realizar el servicio social.")
else:
print("No estas listo para seleccionar una especialidad.")
print("############## ejemplo con elif ##################")
# ejemplo 4 elif
#dia = int(input("Capture el numero de dia de la semana: "))
dia = 2
"""
if dia == 1:
print("Lunes")
else:
if dia == 2:
print("Martes")
else:
if dia == 3:
print("Miercoles")
else:
if dia == 4:
print("Jueves")
else:
if dia == 5:
print("Viernes")
else:
print("Es fin de semada")
"""
if dia == 1:
print("Lunes")
elif dia == 2:
print("Martes")
elif dia == 3:
print("Miercoles")
elif dia == 4:
print("Jueves")
elif dia == 5:
print("Viernes")
else:
print("Es fin de semada")
print("######## ejemplo edades - operadores realacionales y AND #########")
edad_minima = 18
edad_maxima = 65
# input de usuario
#edad = int(input("¿Tienes edad para trabajar? Captura tu edad: "))
edad = 38
if edad >= edad_minima and edad <= edad_maxima:
print("Estas en edad para trabajar.")
else:
print("No estas en edad para trabajar.")
print("######## ejemplo operadores relacionales y logicos con condicionales #########")
pais = "Mexico"
#pais = input("Capture el pais que desee validar: ")
if pais == "Mexico" or pais == "España" or pais == "Colombia":
print("En este pais se habla español")
else:
print("En este pais no se habla español")
print("######## ejemplo operadores relacionales y logicos con condicionales + not #########")
pais = "Mexico"
#pais = input("Capture el pais que desee validar: ")
if not (pais == "Mexico" or pais == "España" or pais == "Colombia"):
print(f"{pais} En este pais no se habla español")
else:
print(f"{pais} En este pais se habla español")
print("######## ejemplo operadores relacionales - != #########")
pais = input("Capture el pais que desee validar: ")
if pais != "Mexico" and pais != "España" and pais != "Colombia":
print(f"{pais} En este pais no se habla español")
else:
print(f"{pais} En este pais se habla español")
| [
"juan.ahumada94@gmail.com"
] | juan.ahumada94@gmail.com |
d13041f7c47daa9b42d651b0e7c12a895ce9ff78 | 41ecc22fe68d9849f956c282b03f9203c4c2dbe3 | /main/admin.py | 313fc75f2b07be72f8f4e2d55effb16426f71861 | [] | no_license | awmleer/one-word-one-story | 21e7dd5601b7457ffeb0fca528e2eb0017f7a3e2 | 28715f5e97da8772b5bc03e27613b67f67420543 | refs/heads/master | 2021-06-01T19:15:59.217416 | 2016-08-22T02:09:03 | 2016-08-22T02:09:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from django.contrib import admin
from .models import *
# Register your models here.
# Expose the app's models in the Django admin site.
for _model in (Person, Story, Word):
    admin.site.register(_model)
| [
"haoguangbo@yeah.net"
] | haoguangbo@yeah.net |
54255480175da94416aaacaf9d2c55cbf4dbef26 | 2d68548aeb61dc2c66b4b600580db4e496202c4a | /manage.py | 75ad49068f8a0d236bdf195daf97e02166ad5548 | [] | no_license | liuyi0906/netshop | 1bd0b1985ebe4b91271b70a51801bb70d80f4fdc | 4a93f03b2c20a9e7cb312facd3fca498683cbeb1 | refs/heads/master | 2021-05-15T02:23:05.573881 | 2020-03-26T13:17:04 | 2020-03-26T13:17:04 | 250,262,996 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings module before anything imports it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'netshop.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint: the usual causes are Django not being installed
        # or the project's virtualenv not being active.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the CLI arguments (runserver, migrate, ...) to Django.
    execute_from_command_line(sys.argv)
| [
"1776756669@qq.com"
] | 1776756669@qq.com |
a861c16f64b145bf95867c56ed01252b9a341a5e | 0e412122a9967f6788b94f7ef6965eff7578a080 | /Part 2/Py script vol/v1.0.2/__init__.py | 0901d3795f7199c008053ea8383189f23fc7cc9e | [] | no_license | ibrahimkhalilmasud/-intrusion-detection-system-host-base-with-Python-port-and-SSH-server-scanner | be7ccdb956c2fdc832fdfba2a3f91427789e6974 | 1b0939f12e8cb89aab9011b6bd77e64a6b4bce53 | refs/heads/master | 2022-06-01T04:11:47.068095 | 2020-05-04T06:44:44 | 2020-05-04T06:44:44 | 250,737,122 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | #__all__ = ["port", "Port2", "ssh_scanner_1"] | [
"36391855+ibrahimkhalilmasud@users.noreply.github.com"
] | 36391855+ibrahimkhalilmasud@users.noreply.github.com |
368c766224aa60b38f1fa2f175c3c59d12a092d9 | 16c8357d2df4c9fb02440db93881fa2c6f52d03b | /vst/urls.py | f6631b7cd7b3e04caa96bce7bf8076945b551303 | [] | no_license | nikhilmuz/Bioinfo_molecule_sorting | 09d458d2f21cc3145a5b316fbfc4cb68122ea216 | ca17ef5699745d1d2ab6348bccd5441392581373 | refs/heads/master | 2020-03-21T14:20:55.211606 | 2019-01-27T17:14:14 | 2019-01-27T17:14:14 | 135,333,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | """vst URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Delegate everything under /main/ to the `main` app's URLconf.
    url(r'^main/', include('main.urls')),
]
| [
"nikhil.nikhil.muz@gmail.com"
] | nikhil.nikhil.muz@gmail.com |
b15ef0ce97a9d687f88f8fcbea3e7e4fa94065ff | db79787a8726e136e48b25133fbe6724d25ec5f2 | /src/uikefutestcase/kefu_testcase02_myuserfeedback_alllist_noback.py | ed16c701208661bbe7f10597431d0d76fb24c159 | [] | no_license | cash2one/edaixi_python_selenium | a1d51ada40788c550f3014bf62a44360781b27b9 | ae63b323a46032dc3116c4515ee375ace67dddda | refs/heads/master | 2020-05-22T16:55:53.137551 | 2016-11-26T15:31:52 | 2016-11-26T15:31:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,355 | py | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re,ConfigParser
import appobjectkefu
class KefuTestcase02MyUserFeedbackAlllistNoback(unittest.TestCase):
    """Selenium UI test (Python 2): log into the kefu (customer-service) system,
    open the feedback list, issue a coupon from the "all feedbacks" tab, and
    confirm the browser alert. Credentials/URL come from a local config file."""

    def setUp(self):
        # Prepare the shared WebDriver and read connection settings.
        #self.driver = webdriver.Firefox()
        self.driver = appobjectkefu.GetInstance()
        self.driver.implicitly_wait(30)
        conf = ConfigParser.ConfigParser()
        conf.read("C:/edaixi_testdata/userdata_kefu.conf")
        # NOTE(review): CAIWU_URL is declared global but never assigned below
        # (KEFU_URL is assigned instead and stays local) — looks like leftover
        # copy/paste from a finance-system test; confirm.
        global CAIWU_URL,USER_NAME,PASS_WORD
        KEFU_URL = conf.get("kefusection", "uihostname")
        USER_NAME = conf.get("kefusection", "uiusername")
        PASS_WORD = conf.get("kefusection", "uipassword")
        print KEFU_URL,USER_NAME,PASS_WORD
        self.base_url = KEFU_URL
        #self.base_url = "http://kefu05.edaixi.cn:81/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_kefu_testcase02_myuserfeedback_alllist_noback(self):
        # Walk the UI: login page -> feedback list -> a specific customer row ->
        # "all feedbacks" tab -> issue a coupon and accept the confirm dialog.
        driver = self.driver
        driver.get(self.base_url + "/")
        #driver.find_element_by_link_text(u"登陆").click()
        driver.find_element_by_css_selector("div#container.container h3.text-center.text-primary a.btn.btn-success.text-center").click()
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys(USER_NAME)
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys(PASS_WORD)
        driver.find_element_by_id("login-submit").click()
        time.sleep(1)
        # Page title stays the same across these pages, so the title assert is
        # only a coarse "still inside the kefu system" check.
        self.assertEqual(driver.title,u"客服系统")
        driver.find_element_by_css_selector("div.container>div.navbar-collapse.collapse.navbar-responsive-collapse>ul.nav.navbar-nav>li:nth-child(2)>a").click()
        #driver.find_element_by_link_text(u"反馈总列表").click()
        #driver.find_element_by_link_text(u"踢").click()
        self.assertEqual(driver.title,u"客服系统")
        driver.find_element_by_css_selector("div#container.container div.panel.panel-primary ul.nav.nav-tabs li:first-child.active a").click()
        #driver.find_element_by_link_text(u"处理").click()
        self.assertEqual(driver.title,u"客服系统")
        # NOTE(review): the row id below (customer_1239520) is a hard-coded
        # test-data dependency — the test breaks if that record is absent.
        driver.find_element_by_css_selector("div#container.container div.panel.panel-primary table.table.table-stripe tbody#table_new_customer tr#customer_1239520 td a.btn.btn-success.btn-sm").click()
        self.assertEqual(driver.title,u"客服系统")
        driver.find_element_by_css_selector("div#container.container div.col-sm-6 ul#replies_navi.nav.nav-tabs li:first-child#ajax_customer_feedbacks_all.active a").click()
        self.assertEqual(driver.title,u"客服系统")
        driver.find_element_by_css_selector("div#container.container div.row div.col-sm-12 div.div a.btn.btn-info.pull-right").click()
        driver.find_element_by_css_selector("div#container.container table.table.table-striped tbody tr:first-child td:nth-child(8) a.btn.btn-sm.btn-info").click()
        time.sleep(2)
        # Issuing the coupon pops a JS confirm dialog; accept it.
        self.assertEqual(u"确认发券吗?", self.close_alert_and_get_its_text())
        print driver.title
        #self.assert_(driver.title, u"客服系统")
        self.assertEqual(driver.title,u"客服系统")

    def is_element_present(self, how, what):
        # Convenience wrapper: True iff the locator matches at least one element.
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True

    def is_alert_present(self):
        # True iff a JS alert/confirm dialog is currently open.
        try: self.driver.switch_to_alert()
        except NoAlertPresentException, e: return False
        return True

    def close_alert_and_get_its_text(self):
        # Accept or dismiss the open dialog (per accept_next_alert) and
        # return its message text.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True

    def tearDown(self):
        # Quit the browser and fail the test if soft verification errors piled up.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
| [
"ibmcuijun2015@126.com"
] | ibmcuijun2015@126.com |
c3b04dfc938039b9540e226bcf5b58bc739dc96d | 42803b9b279991cdfe8978ecc9a01918e8ab174c | /examples/courier_example.py | c852e2fed233d70ff129aa10c2e28e453216c87b | [
"MIT"
] | permissive | AfterShip/aftership-sdk-python | 0732159d0128dc0d7202343dba17cd7c4f3f5de7 | 32299e673cc859d1c7571240edd97aba98470418 | refs/heads/master | 2022-11-21T10:39:01.578286 | 2022-07-27T09:20:52 | 2022-07-27T09:20:52 | 20,640,069 | 43 | 20 | MIT | 2022-11-10T06:44:16 | 2014-06-09T08:50:12 | Python | UTF-8 | Python | false | false | 536 | py | import aftership
aftership.api_key = 'PUT_YOUR_AFTERSHIP_KEY_HERE'
def get_enabled_courier_names():
result = aftership.courier.list_couriers()
courier_list = [courier['name'] for courier in result['couriers']]
return courier_list
def get_supported_courier_names():
result = aftership.courier.list_all_couriers()
courier_list = [courier['name'] for courier in result['couriers']]
return courier_list
if __name__ == '__main__':
enabled_couriers = get_enabled_courier_names()
print(enabled_couriers)
| [
"alvie.zhang@gmail.com"
] | alvie.zhang@gmail.com |
2a62d0ce7d1490a12fab7d9049349edd7a6f7f38 | d515e4ee65e064b8bfc8895d9b55fd7a23572649 | /plugins/operators/dataflow_xcom_operator.py | aa9de87209b3ad0985391ba093f2d33481653527 | [
"Apache-2.0"
] | permissive | HocLengChung/docker-airflow | a8c995534c3102559d41e76d04245471900b2757 | b4b6f7e13d031fd40843884b27ae9e3f9a998afc | refs/heads/master | 2020-07-19T06:28:06.662885 | 2019-09-05T19:20:22 | 2019-09-05T19:20:22 | 206,391,809 | 0 | 0 | Apache-2.0 | 2019-09-04T18:58:11 | 2019-09-04T18:58:10 | null | UTF-8 | Python | false | false | 10,591 | py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import uuid
import copy
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.version import version
from airflow.utils.decorators import apply_defaults
class DataFlowJavaXcomKeysOperator(BaseOperator):
"""
Start a Java Cloud DataFlow batch job. The parameters of the operation
will be passed to the job. Supports pulling xcom keys as parameters
**Example**: ::
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date':
(2016, 8, 1),
'email': ['alex@vanboxel.be'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=30),
'dataflow_default_options': {
'project': 'my-gcp-project',
'zone': 'us-central1-f',
'stagingLocation': 'gs://bucket/tmp/dataflow/staging/',
}
}
dag = DAG('test-dag', default_args=default_args)
task = DataFlowJavaOperator(
gcp_conn_id='gcp_default',
task_id='normalize-cal',
jar='{{var.value.gcp_dataflow_base}}pipeline-ingress-cal-normalize-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY'
},
dag=dag)
.. seealso::
For more detail on job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
:param jar: The reference to a self executing DataFlow jar (templated).
:type jar: str
:param job_name: The 'jobName' to use when executing the DataFlow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` in ``options`` will be overwritten.
:type job_name: str
:param dataflow_default_options: Map of default job options.
:type dataflow_default_options: dict
:param options: Map of job specific options.
:type options: dict
:param gcp_conn_id: The connection ID to use connecting to Google Cloud
Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:type poll_sleep: int
:param job_class: The name of the dataflow job class to be executed, it
is often not the main class configured in the dataflow jar file.
:type job_class: str
:param xcom_keys: The xcom elements list of dictionaries containing the xcom data you want to pull data
from in order to pass as parameters to dataflow. e.g.
[{'xcom_key': xcom_key_value, 'task_id': task_id_value, 'dataflow_par_name': schema},...]
If you specify this value, the operator will pull a value from xcom with
key=xcom_key_value, task_id=task_id_value
It will then pass --schema xcom_key_value as pipeline parameter value to the dataflow job.
:type xcom_keys: dict
``jar``, ``options``, and ``job_name`` are templated so you can use variables in them.
Note that both
``dataflow_default_options`` and ``options`` will be merged to specify pipeline
execution parameter, and ``dataflow_default_options`` is expected to save
high-level options, for instances, project and zone information, which
apply to all dataflow operators in the DAG.
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
.. code-block:: python
default_args = {
'dataflow_default_options': {
'project': 'my-gcp-project',
'zone': 'europe-west1-d',
'stagingLocation': 'gs://my-staging-bucket/staging/'
}
}
You need to pass the path to your dataflow as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar (see documentation here:
https://beam.apache.org/documentation/runners/dataflow/#self-executing-jar).
Use ``options`` to pass on options to your job.
.. code-block:: python
t1 = DataFlowJavaOperator(
task_id='datapflow_example',
jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY',
'labels': {'foo' : 'bar'}
},
gcp_conn_id='gcp-airflow-service-account',
dag=my-dag)
"""
template_fields = ['options', 'jar', 'job_name']
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
jar,
job_name='{{task.task_id}}',
dataflow_default_options=None,
options=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
poll_sleep=10,
job_class=None,
xcom_element_list=None,
*args,
**kwargs):
super(DataFlowJavaXcomKeysOperator, self).__init__(*args, **kwargs)
dataflow_default_options = dataflow_default_options or {}
options = options or {}
options.setdefault('labels', {}).update(
{'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')})
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.jar = jar
self.job_name = job_name
self.dataflow_default_options = dataflow_default_options
self.options = options
self.poll_sleep = poll_sleep
self.job_class = job_class
self.xcom_element_list = xcom_element_list
def execute(self, context):
bucket_helper = GoogleCloudBucketHelper(
self.gcp_conn_id, self.delegate_to)
self.jar = bucket_helper.google_cloud_to_local(self.jar)
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep)
dataflow_options = copy.copy(self.dataflow_default_options)
dataflow_options.update(self.options)
# Legacy code for xcom key
if 'xcom_key' in dataflow_options:
value = context['task_instance'].xcom_pull(key=dataflow_options['xcom_key'])
dataflow_options['queryParameters'] = value
del dataflow_options['xcom_key']
# Code for xcom_keys (to be implemented sanity check)
if self.xcom_element_list is not None:
for xcom_element in self.xcom_element_list:
# Sanity check:'
if any(key in xcom_element for key in ['xcom_key', 'task_id', 'dataflow_par_name']):
pulled_xcom_value = \
context['task_instance'].xcom_pull(key=xcom_element['xcom_key'],
task_ids=xcom_element['task_id'])
dataflow_options[xcom_element['dataflow_par_name']] = pulled_xcom_value
else:
raise Exception("ERROR: one of the fields ['xcom_key', 'task_id', 'dataflow_par_name']"
" is not non-existent")
print("dataflow_options: ", dataflow_options)
hook.start_java_dataflow(self.job_name, dataflow_options,
self.jar, self.job_class)
class GoogleCloudBucketHelper(object):
"""GoogleCloudStorageHook helper class to download GCS object."""
GCS_PREFIX_LENGTH = 5
def __init__(self,
gcp_conn_id='google_cloud_default',
delegate_to=None):
self._gcs_hook = GoogleCloudStorageHook(gcp_conn_id, delegate_to)
def google_cloud_to_local(self, file_name):
"""
Checks whether the file specified by file_name is stored in Google Cloud
Storage (GCS), if so, downloads the file and saves it locally. The full
path of the saved file will be returned. Otherwise the local file_name
will be returned immediately.
:param file_name: The full path of input file.
:type file_name: str
:return: The full path of local file.
:rtype: str
"""
if not file_name.startswith('gs://'):
return file_name
# Extracts bucket_id and object_id by first removing 'gs://' prefix and
# then split the remaining by path delimiter '/'.
path_components = file_name[self.GCS_PREFIX_LENGTH:].split('/')
if len(path_components) < 2:
raise Exception(
'Invalid Google Cloud Storage (GCS) object path: {}'
.format(file_name))
bucket_id = path_components[0]
object_id = '/'.join(path_components[1:])
local_file = '/tmp/dataflow{}-{}'.format(str(uuid.uuid4())[:8],
path_components[-1])
self._gcs_hook.download(bucket_id, object_id, local_file)
if os.stat(local_file).st_size > 0:
return local_file
raise Exception(
'Failed to download Google Cloud Storage (GCS) object: {}'
.format(file_name))
| [
"hoc.leng.chung@devoteam.com"
] | hoc.leng.chung@devoteam.com |
5fbfc72f14ae2926b33488a6f8779cdf247fa0b7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_397/ch12_2020_03_04_11_23_47_173814.py | 6f4b82bd91b75fe7a2fadd6fbd05585a048320bf | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | def resolve_equacao_1o_grau (a, b):
X=(0-b)/a
return X | [
"you@example.com"
] | you@example.com |
f6399a661fc91b6428b91b95ac912b34f0274bb8 | 6ad7f2266ce14e9556a98ae8a919c268d5ec2f01 | /model_measuring/kamal/core/callbacks/logging.py | 79c6119daaf1643b251c96f61f14d0cfac23e9e9 | [
"Apache-2.0"
] | permissive | Gouzhong1223/Dubhe | 3fc3fbb259e71013c2ddc12c27dbd39b98e56534 | 8959a51704410dc38b595a0926646b9928451c9a | refs/heads/master | 2022-07-26T23:24:54.295338 | 2021-12-27T05:58:18 | 2021-12-27T05:58:18 | 442,334,231 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,167 | py | """
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
from .base import Callback
import numbers
from tqdm import tqdm
class MetricsLogging(Callback):
    """Callback that reports selected ``engine.state.metrics`` entries.

    Numeric metrics are appended to a one-line summary and, when an
    ``engine.tb_writer`` is attached, mirrored to TensorBoard; list/tuple
    metrics are appended to the summary only.
    """

    def __init__(self, keys):
        super(MetricsLogging, self).__init__()
        self._keys = keys  # metric names to report each invocation

    def __call__(self, engine):
        # Idiom fix: compare to None with ``is``, not ``==``.
        if engine.logger is None:
            return
        state = engine.state
        content = "Iter %d/%d (Epoch %d/%d, Batch %d/%d)" % (
            state.iter, state.max_iter,
            state.current_epoch, state.max_epoch,
            state.current_batch_index, state.max_batch_index
        )
        for key in self._keys:
            value = state.metrics.get(key, None)
            if value is None:
                continue  # metric not produced this step
            if isinstance(value, numbers.Number):
                content += " %s=%.4f" % (key, value)
                if engine.tb_writer is not None:
                    engine.tb_writer.add_scalar(key, value, global_step=state.iter)
            elif isinstance(value, (list, tuple)):
                content += " %s=%s" % (key, value)
        engine.logger.info(content)
class ProgressCallback(Callback):
    """Callback that advances a tqdm progress bar once per invocation.

    NOTE(review): the bar is created only in ``reset()`` (creation in
    ``__init__`` is commented out), so ``reset()`` must run before the first
    ``__call__`` or ``self._pbar`` is undefined -- confirm with callers.
    """
    def __init__(self, max_iter=100, tag=None):
        # Total step count and optional description shown next to the bar.
        self._max_iter = max_iter
        self._tag = tag
        #self._pbar = tqdm(total=self._max_iter, desc=self._tag)
    def __call__(self, engine):
        self._pbar.update(1)
        # Close the bar as soon as the final step is reached.
        if self._pbar.n==self._max_iter:
            self._pbar.close()
    def reset(self):
        # (Re)create the bar; intended to be called at the start of a pass.
        self._pbar = tqdm(total=self._max_iter, desc=self._tag)
| [
"tianshu@zhejianglab.com"
] | tianshu@zhejianglab.com |
60ac420c2fdafe2fef8b1cbd79cbee62bb4ae15b | bb05406a5abd415807d9f1a9fc8d47502657c7a8 | /Test3(square tracking face and eyes)/final.py | f3b0d0e844697b6d295e9bcea748ebb68d989e77 | [
"Apache-2.0"
] | permissive | spell1612/Face-Overlay-AR | fce26a9677f9f965af30e6807f5037bf4af018b1 | 7350d13999c2647412b34637a70c817cf5940350 | refs/heads/master | 2021-04-15T14:01:21.444735 | 2021-03-19T18:36:28 | 2021-03-19T18:36:28 | 126,498,107 | 6 | 3 | null | 2020-09-08T08:27:09 | 2018-03-23T14:40:59 | Python | UTF-8 | Python | false | false | 986 | py | import numpy as np
import cv2
# Haar cascade classifiers for frontal faces and eyes; the XML files are
# expected in the working directory.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(0)  # default webcam
count=1  # NOTE(review): unused here -- the eye-cropping code that used it is commented out
while True:
    # Grab a frame and run face detection on its grayscale version.
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.2, 5)
    for (x,y,w,h) in faces:
        # Blue rectangle around each face, then search for eyes inside it.
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex,ey,ew,eh) in eyes:
            #print(count)
            #crop_img = roi_color[ey: ey + eh, ex: ex + ew]
            # Green rectangle per eye (coordinates are face-relative).
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
            #s1='tmp/{}.jpg'.format(count)
            #count=count+1
            #cv2.imwrite(s1,crop_img)
    cv2.imshow('img',img)
    # Exit on ESC (keycode 27).
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
| [
"spell1612@gmail.com"
] | spell1612@gmail.com |
c0d4a1418c35fa87353788017933553e0217d445 | 0a1a0b8034d3f6cfc39dd756f0bdb1981ef0a990 | /main.py | 8edc314eef8c22d308a7d004e1b9b5d0d9f900a1 | [] | no_license | dirac292/Password_Manager | e4638c4b902aaa7fb103c32a85812a0ccc9bd017 | 49e11121dab1d8f50e8283dc553e2f3c82bb9de7 | refs/heads/main | 2023-06-26T15:20:13.937690 | 2021-07-20T11:41:38 | 2021-07-20T11:41:38 | 368,448,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,689 | py | import json
import re
import base64
import random
import os
from cryptography.fernet import Fernet
from tkinter import *
from tkinter import messagebox
from tkinter import simpledialog
from tkinter import ttk
from ttkthemes import ThemedTk
import pyperclip
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
import os.path
import time
from threading import Event
import hash
import pass_check
import csv
# import matplotlib
# matplotlib.use('Agg')
# ---------------------------- PASSWORD AND KEY GENERATOR ------------------------------- #
# if os.environ.get('DISPLAY', '') == '':
# print('no display found. Using :0.0')
# os.environ.__setitem__('DISPLAY', ':0.0')
autocompleteList = []
with open('user.csv', 'r') as f:
file = csv.DictReader(f)
autocompleteList = []
for col in file:
autocompleteList.append(col['Username'])
def gen_key(master_pass):
    """Derive a urlsafe-base64 Fernet key from the master password.

    Uses PBKDF2-HMAC-SHA256 with a fixed salt so the same password always
    yields the same key across runs (required to decrypt earlier data).
    """
    # Fixed salt: deliberate, since the derived key must be reproducible.
    fixed_salt = b'b9\xcc\x8d_B\xdd\xe9@.\xcf\xb1;\xac\x8f\xac'
    derivation = PBKDF2HMAC(
        algorithm=hashes.SHA256,
        length=32,
        salt=fixed_salt,
        iterations=100000,
        backend=default_backend(),
    )
    raw_key = derivation.derive(master_pass.encode())
    return base64.urlsafe_b64encode(raw_key)
def gen_pass():
    """Generate a random password, insert it into the password field, and
    copy it to the clipboard.

    Composition: 8-10 letters, 2-4 symbols and 2-4 digits, shuffled.
    Side effects: writes to the module-level ``password_entry`` widget and
    to the system clipboard via pyperclip.
    """
    letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
               'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
               'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
    # Pick a random number of characters from each pool, then shuffle the mix.
    pass_let = [random.choice(letters) for _ in range(random.randint(8, 10))]
    pass_sym = [random.choice(symbols) for _ in range(random.randint(2, 4))]
    pass_num = [random.choice(numbers) for _ in range(random.randint(2, 4))]
    pass_list = pass_let+pass_sym+pass_num
    random.shuffle(pass_list)
    password = "".join(pass_list)
    password_entry.insert(0, password)
    pyperclip.copy(password)
def reset_pass():
    """Delete the encrypted store (and any plaintext data.json), wiping all
    saved passwords; informs the user either way."""
    if os.path.isfile("encrypted_data.txt"):
        os.remove("encrypted_data.txt")
        if os.path.isfile('data.json'):
            os.remove("data.json")
        messagebox.showinfo(
            title="Prompt", message="All Password directories cleared.")
    else:
        messagebox.showinfo(
            title="Prompt", message="Directories already empty.")
# ---------------------------- SAVE PASSWORD ------------------------------- #
def del_f():
    """Ask for confirmation, then delete the plaintext data.json if present."""
    if messagebox.askokcancel("Prompt", "Are you sure you want to delte the file?"):
        if os.path.isfile('data.json'):
            os.remove("data.json")
            messagebox.showinfo(
                title="Prompt", message="File Successfuly Removed.")
        else:
            messagebox.showinfo(
                title="Prompt", message="File is not on the system.")
def new_del():
    """Silently delete the plaintext data.json if it exists (no dialogs)."""
    plaintext = 'data.json'
    if os.path.isfile(plaintext):
        os.remove(plaintext)
def encryp(key):
    """Encrypt data.json with the Fernet ``key`` into encrypted_data.txt,
    then delete the plaintext file."""
    cipher = Fernet(key)
    with open("data.json", 'rb') as f:
        e_file = f.read()
    encrypted_file = cipher.encrypt(e_file)
    with open("encrypted_data.txt", 'wb') as ef:
        ef.write(encrypted_file)
        print("written")  # debug trace left in by the author
    os.remove("data.json")
def decryp(key):
    """Decrypt encrypted_data.txt with the Fernet ``key`` and write the
    plaintext back to data.json (caller is responsible for re-deleting it)."""
    cipher = Fernet(key)
    with open('encrypted_data.txt', 'rb') as df:
        encrypted_data = df.read()
    decrypted_file = cipher.decrypt(encrypted_data)
    with open('data.json', 'wb') as df:
        df.write(decrypted_file)
# def view_txt():
# # some code to time
# if os.path.isfile("encrypted_data.txt"):
# master_pass = simpledialog.askstring(
# title='Test', prompt='Enter the master password?', show="*")
# if hash.check_pass(master_pass):
# key = gen_key(master_pass)
# decryp(key)
# messagebox.showinfo(
# title="Prompt", message="File will be delted in a minute automatically.")
# os.system("start " + "data.json")
# else:
# messagebox.showinfo(title="Oops", message="Check password again")
# else:
# messagebox.showinfo(
# title="Prompt", message="Password Directory Empty.")
# window.after(60000, new_del)
# time.sleep(25)
# new_del()
# ---------------------------- AUTO COMPLETE FEATURE ------------------------------- #
# Class Autocomplete Code Credits: uroshekic https: // gist.github.com/uroshekic/11078820 #
class AutocompleteEntry(ttk.Entry):
    """ttk.Entry with a drop-down Listbox of suggestions filtered as you type.

    Credits: uroshekic (https://gist.github.com/uroshekic/11078820).
    Extra keyword args: ``listboxLength`` (rows shown, default 8) and
    ``matchesFunction(fieldValue, acListEntry)`` -> truthy on a match.
    Up/Down navigate the suggestions; Right arrow or a click accepts one.
    """
    def __init__(self, autocompleteList, *args, **kwargs):
        # Listbox length
        if 'listboxLength' in kwargs:
            self.listboxLength = kwargs['listboxLength']
            del kwargs['listboxLength']
        else:
            self.listboxLength = 8
        # Custom matches function
        if 'matchesFunction' in kwargs:
            self.matchesFunction = kwargs['matchesFunction']
            del kwargs['matchesFunction']
        else:
            # Default filter: case-insensitive substring match.
            def matches(fieldValue, acListEntry):
                pattern = re.compile(
                    '.*' + re.escape(fieldValue) + '.*', re.IGNORECASE)
                return re.match(pattern, acListEntry)
            self.matchesFunction = matches
        ttk.Entry.__init__(self, *args, **kwargs)
        self.focus()
        self.autocompleteList = autocompleteList
        # Track the entry's text variable; re-filter on every change.
        self.var = self["textvariable"]
        if self.var == '':
            self.var = self["textvariable"] = StringVar()
        self.var.trace('w', self.changed)
        self.bind("<Right>", self.selection)
        self.bind("<Up>", self.moveUp)
        self.bind("<Down>", self.moveDown)
        # Whether the suggestion Listbox is currently displayed.
        self.listboxUp = False
    def changed(self, name, index, mode):
        """Trace callback: rebuild (or hide) the suggestion list for the current text."""
        if self.var.get() == '':
            if self.listboxUp:
                self.listbox.destroy()
                self.listboxUp = False
        else:
            words = self.comparison()
            if words:
                # Lazily create the Listbox just below the entry widget.
                if not self.listboxUp:
                    self.listbox = Listbox(
                        width=self["width"], height=self.listboxLength)
                    self.listbox.bind("<Button-1>", self.selection)
                    self.listbox.bind("<Right>", self.selection)
                    self.listbox.place(
                        x=self.winfo_x(), y=self.winfo_y() + self.winfo_height())
                    self.listboxUp = True
                self.listbox.delete(0, END)
                for w in words:
                    self.listbox.insert(END, w)
            else:
                # No matches: hide the suggestion list entirely.
                if self.listboxUp:
                    self.listbox.destroy()
                    self.listboxUp = False
    def selection(self, event):
        """Accept the highlighted suggestion and close the drop-down."""
        if self.listboxUp:
            self.var.set(self.listbox.get(ACTIVE))
            self.listbox.destroy()
            self.listboxUp = False
            self.icursor(END)
    def moveUp(self, event):
        """Move the highlight one row up (indices are handled as strings by Tk)."""
        if self.listboxUp:
            if self.listbox.curselection() == ():
                index = '0'
            else:
                index = self.listbox.curselection()[0]
            if index != '0':
                self.listbox.selection_clear(first=index)
                index = str(int(index) - 1)
                self.listbox.see(index) # Scroll!
                self.listbox.selection_set(first=index)
                self.listbox.activate(index)
    def moveDown(self, event):
        """Move the highlight one row down."""
        if self.listboxUp:
            if self.listbox.curselection() == ():
                index = '0'
            else:
                index = self.listbox.curselection()[0]
            if index != END:
                self.listbox.selection_clear(first=index)
                index = str(int(index) + 1)
                self.listbox.see(index) # Scroll!
                self.listbox.selection_set(first=index)
                self.listbox.activate(index)
    def comparison(self):
        """Return the autocomplete entries accepted by the match predicate."""
        return [w for w in self.autocompleteList if self.matchesFunction(self.var.get(), w)]
# autocompleteList = ['Gmail', 'YouTube', 'Facebook',
# 'Zoom', 'Reddit', 'Netflix', 'Microsoft', 'Amazon', 'Instagram', 'Google', 'Twitch', 'Twitter', 'Apple Inc', 'Adobe', 'Linkedin',
# 'Hotstar', 'Quora', 'Dropbox']
def matches(fieldValue, acListEntry):
    """Case-insensitive prefix match used by the autocomplete entry.

    Returns the ``re.Match`` object (truthy) when ``acListEntry`` starts
    with ``fieldValue``, otherwise ``None``.
    """
    prefix_pattern = re.compile(re.escape(fieldValue) + '.*', re.IGNORECASE)
    return prefix_pattern.match(acListEntry)
def is_present(arr, entry):
    """Return whether ``entry`` occurs in ``arr``.

    Replaces the original manual scan with the idiomatic membership test.
    Callers only truth-test the result, so returning ``False`` instead of the
    original implicit ``None`` on a miss is backward-compatible.
    """
    return entry in arr
def save_pass():
    """Save the website/username/password triple from the form fields.

    Flow: record the website for autocomplete (user.csv), check the password
    against pass_check.pwned_api_check, confirm with the user, verify the
    master password, then merge the entry into data.json and re-encrypt it.
    Side effects: reads the module-level entry widgets and clears them on
    success; shows several dialogs.
    """
    global autocompleteList
    website = website_entry.get()
    row = [f'{website}']
    # Remember new websites for the autocomplete drop-down.
    if(not (is_present(autocompleteList, website))):
        autocompleteList.append(website)
        with open('user.csv', 'a', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(row)
    email = email_entry.get()
    password = password_entry.get()
    # Breach check: count > 0 means the password appeared in known leaks.
    count = pass_check.pwned_api_check(password)
    question = 0
    if count:
        question = messagebox.askquestion(
            title="Warning", message=f"This password was found {count} times.Do you wish to proceed ?")
    new_data = {
        website: {
            "username": email,
            "password": password,
        }
    }
    if len(website) == 0 or len(password) == 0:
        messagebox.showinfo(
            title="Oops", message="Please don't leave any fields empty!")
    elif question == 'yes' or count == 0:
        is_ok = messagebox.askokcancel(title=f"{website}", message=f"These are the details entered:\n Email: {email}"
                                       f"\n Password: {password} \n Is it ok to save?")
        if is_ok:
            master_pass = simpledialog.askstring(
                title='Test', prompt='Enter the master password?', show="*")
            # Sample Password For now
            if hash.check_pass(master_pass):
                key = gen_key(master_pass)
                # Decrypt the existing store (if any) so we can merge into it.
                if os.path.isfile('encrypted_data.txt'):
                    decryp(key)
                if (not os.path.isfile('data.json')):
                    # First entry ever: create the store from scratch.
                    with open("data.json", "w") as data_file:
                        json.dump(new_data, data_file, indent=4)
                    website_entry.delete(0, END)
                    password_entry.delete(0, END)
                    encryp(key)
                else:
                    # Merge the new entry into the decrypted store.
                    with open("data.json", 'r') as data_file:
                        data = json.load(data_file)
                        data.update(new_data)
                    with open("data.json", "w") as data_file:
                        json.dump(data, data_file, indent=4)
                    website_entry.delete(0, END)
                    password_entry.delete(0, END)
                    encryp(key)
            else:
                messagebox.showinfo(
                    title="Oops", message="Check password again")
def on_closing():
    """Window-close handler: confirm, destroy the window, and remove any
    leftover plaintext data.json."""
    if messagebox.askokcancel("Quit", "Do you want to quit?"):
        window.destroy()
        new_del()
def create_login(create_pass):
    """Store a new hashed master password via the ``hash`` helper module."""
    # os.remove('encrypted_data.txt')
    hash.create_pass(create_pass)
    messagebox.showinfo(
        title="Prompt", message="Password Sucessfuly Created")
def searchpass():
    """Look up the credentials for the website typed in the form.

    Verifies the master password, decrypts the store, shows the entry (and
    copies the password to the clipboard) if found.
    NOTE(review): this leaves the decrypted data.json on disk; cleanup
    appears to rely on the module-level ``window.after(60000, new_del)``
    timer -- confirm.
    """
    website = website_entry.get()
    print(website)
    if(website == ""):
        messagebox.showinfo(title="Prompt",
                            message="Please Don't leave the website entry empty.")
    else:
        if os.path.isfile("encrypted_data.txt"):
            master_pass = simpledialog.askstring(
                title='Prompt', prompt='Enter the master password?', show="*")
            if hash.check_pass(master_pass):
                key = gen_key(master_pass)
                decryp(key)
                with open("data.json") as data_file:
                    data = json.load(data_file)
                if website in data:
                    email = data[website]["username"]
                    password = data[website]["password"]
                    pyperclip.copy(password)
                    messagebox.showinfo(title=website,
                                        message=f"Username: {email}\nPassword: {password}\nCopied to Clipboard")
                else:
                    messagebox.showinfo(
                        title="Error Occured", message=f"No Username or Password Found for the {website}.")
            else:
                messagebox.showinfo(
                    title="Oops", message="Check password again")
        else:
            messagebox.showinfo(
                title="Prompt", message="Password Directory Empty.")
window.after(60000, new_del)
start = time.time()
incorrect_tries = 0
window = ThemedTk(theme="arc")
style = ttk.Style(window)
style.theme_use("xpnative")
# window.get_themes()
# window.set_theme("clearlooks")
window.title("Password Manager")
window.iconbitmap(r'padlock.ico')
window.state("zoomed")
window.geometry("1000x1000")
back_image = PhotoImage(file="new.png")
if(not os.path.isfile("hashed_pass.txt")):
master_pass = simpledialog.askstring(
title='Register', prompt='Create Master Password', show="*")
create_login(master_pass)
master_pass = simpledialog.askstring(
title='Test', prompt='Enter the master password?', show="*", parent=window)
if(master_pass == None):
messagebox.showinfo(
title="Prompt", message="Error Occured")
window.destroy()
exit()
# ---------------------------- UI SETUP ------------------------------- #
while(incorrect_tries <= 2):
if hash.check_pass(master_pass):
canvas = Canvas(window, width=1000, height=1000)
canvas.pack(fill="both", expand=True)
canvas.create_image(0, 0, image=back_image, anchor="nw")
canvas.create_text(500, 150, text="ManagePass",
font=("Helvetica", 45), fill="white")
canvas.create_text(500, 250, text="Website: ",
font=("Helvetica"), fill="white")
canvas.create_text(500, 280, text="Username:",
font=("Helvetica"), fill="white")
canvas.create_text(500, 310, text="Password:",
font=("Helvetica"), fill="white")
# # Entries
website_entry = AutocompleteEntry(
autocompleteList, window, listboxLength=6, width=35, matchesFunction=matches)
website_entry_window = canvas.create_window(
550, 240, anchor="nw", window=website_entry)
email_entry = ttk.Entry(window, width=35)
email_entry_window = canvas.create_window(
550, 270, anchor="nw", window=email_entry)
password_entry = ttk.Entry(window, width=21, show="*")
password_entry_window = canvas.create_window(
550, 300, anchor="nw", window=password_entry)
search_pass = ttk.Button(window, text="Search", command=searchpass)
search_pass_window = canvas.create_window(
780, 240, anchor="nw", window=search_pass)
gen_pass = ttk.Button(
window, text="Generate Password", command=gen_pass)
gen_pass_window = canvas.create_window(
700, 300, anchor="nw", window=gen_pass)
add_button = ttk. Button(
window, text="Add", width=35, command=save_pass)
add_button_window = canvas.create_window(
550, 330, anchor="nw", window=add_button)
clear_button = ttk.Button(window, text="Reset",
width=35, command=reset_pass)
clear_button_window = canvas.create_window(
550, 360, anchor="nw", window=clear_button)
# view_pass = ttk.Button(
# window, text="View Password", width=35, command=view_txt)
# view_pass_window = canvas.create_window(
# 550, 360, anchor="nw", window=view_pass)
# del_file = ttk.Button(window, text="Delete File",
# width=35, command=del_f)
# del_file_window = canvas.create_window(
# 550, 420, anchor="nw", window=del_file)
window.protocol("WM_DELETE_WINDOW", on_closing)
window.mainloop()
break
else:
incorrect_tries += 1
master_pass = simpledialog.askstring(
title='Prompt', prompt='Password Incorrect.Enter the master password again?', show="*")
if (incorrect_tries > 2):
messagebox.showinfo(
title="Warning", message="Incorrect Password entered 3 times")
window.destroy()
| [
"noreply@github.com"
] | noreply@github.com |
d93a39b389a01a60c09bd2155e8c4f7003ac6379 | 67c849d4de48ed2ced0de758d2f19fb1b86663f9 | /ledcontroller.py | cd3222b566e8d10777b199e68dc864b9a9fe0b42 | [] | no_license | s-dasa/led-proj | 66fdb84e45f8bb1fea3bc767a35d3ef2e43fda1a | 465a547a4c167ee1f4c515931b1a4666c812f0d5 | refs/heads/master | 2020-06-12T14:39:05.404403 | 2019-07-04T20:01:44 | 2019-07-04T20:01:44 | 194,332,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,855 | py | from gpiozero import *
import RPi.GPIO as GPIO
from time import sleep
from led_database import *
#from tester import *
class controller():
    """Interactive LED blink driver built on ``led_class`` from led_database.

    Workflow: ``userInput()`` collects color/count/speed, ``lightUp()``
    switches the LED on, ``execution()`` blinks it ``times`` times at
    ``speed`` blinks per second.
    """

    def __init__(self):
        self.led_obj = led_class()
        #self.my_tester = testing()
        self.color = ""
        self.times = 0
        self.rate = 1
        self.led_obj.setUp()
        # (dropped the original's no-op ``self = self`` statement)
        self.speed = 0

    def userInput(self):
        """Prompt for the color, blink count and blink speed; re-prompt on invalid color."""
        self.color = input("Enter your desired color (from ROYGBIV and White) or its RGB value in the format: [R, G, B].")
        # BUG FIX: the original evaluated ``self.times`` on its own line and
        # discarded the following int(input(...)) result; store it instead.
        self.times = int(input("How many times do you want it to blink?"))
        self.speed = int(input("And how fast do you want it to blink"))
        # NOTE(review): input() always returns a str, so the list branch can
        # only trigger if checkList/getColor accept string input -- confirm.
        if (isinstance(self.color, list) and self.led_obj.checkList(self.color)):
            self.color = self.led_obj.getColor(self.color)
            print(self.color)
        elif (isinstance(self.color, str) and self.led_obj.checkList(self.color)):
            self.color = self.color.lower()
        else:
            print("That's not a valid input!")
            # BUG FIX: was a bare ``userInput()`` (NameError at runtime);
            # re-prompt through the bound method.
            self.userInput()

    def lightUp(self):
        """Initialise the hardware and switch the chosen color on."""
        self.led_obj.setUp()
        self.led_obj.lightUp(self.color)
        #self.my_tester.powerOn(self.color)

    def execution(self):
        """Blink the LED ``self.times`` times, pausing 1/``self.speed`` s between toggles."""
        rate = 1 / self.speed
        led = LED(self.led_obj.getPin(self.color))
        for _ in range(self.times):
            sleep(rate)
            led.on()
            sleep(rate)
            led.off()
        again = input("Again? Y or N")
        if again == "Y":
            # BUG FIX: was ``userInput(self)`` (NameError); call the method.
            self.userInput()
        else:
            print("Thank you for using the LedController. Have a nice day")
#ontrol = controller()
c = controller()
| [
"noreply@github.com"
] | noreply@github.com |
2eefca90d0978c3e073c76b1c622a1f5013e263f | e4f42366983ee0e08e22dfd305f1b161ca1173c4 | /DesignPattern/proxy.py | cd0e860db878b04004fe232f37141dc402220f6e | [] | no_license | GhostZCH/python-examples | c949aba3d07993164474d21ebec902be6ad9a99a | 383384c2769ccd1bf470c62c790236331d2dae37 | refs/heads/master | 2021-10-01T14:19:26.388354 | 2018-11-27T00:01:37 | 2018-11-27T00:01:37 | 40,755,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | class FactorialProxy:
    def __init__(self, n):
        # n: the operand whose factorial will be computed lazily later.
        self.n = n
def factorial(self):
'real computing'
result = 1
for i in range(self.n):
result *= i + 1
return result
if __name__ == '__main__':
    # Proxies are constructed up front, but each factorial is only computed
    # lazily below when the proxy is encountered while building the string.
    item_list = ['100! = ', FactorialProxy(100),';' , '200! = ', FactorialProxy(200), '.'] # init but not execute
    string = ''
    for item in item_list:
        if isinstance(item, FactorialProxy):
            string += str(item.factorial()) # real execute
        else:
            string += item
    print(string)
"noreply@github.com"
] | noreply@github.com |
eff8eacaddbcb5f0b247e7725566f85fd0320b10 | 4f7bb02e4fcba432f673615886e80dafdabb6942 | /munges/creature_heatmap2_standard.py | 6e84607bfd3ea2cdc70ed68b3da62354d0e42bf7 | [] | no_license | robbintt/mtg-data-processing | ec0e31676d7785b51e5ce4bc1e697fd9cceec1ed | c2d386d0709e199bd43b9b255507ffea5b62d9e4 | refs/heads/master | 2016-09-10T17:16:29.582837 | 2015-07-20T06:44:10 | 2015-07-20T06:44:10 | 34,285,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,474 | py | """
Gather all the ability text for a provided sqlite db and generate a
word frequency dict for it.
"""
import sqlite3
import re
import utils.wordfreq
import copy
# This uses a symlink, may break in non posix.
# use os.path.join here if you wish.
sample_db = "../db/standard.sqlite"
def clean_text(dirtytext):
    """Replace every disallowed character with a space and lowercase the result.

    Characters outside [a-zA-Z0-9'/{}+-] each become one space, so the
    returned string has the same length as the input. Per the original
    author's warning: this can lose information -- know your database.
    """
    cleaned = re.sub(r"[^a-zA-Z0-9'/{}+-]", ' ', dirtytext)
    return cleaned.lower()
"""
# ditch the tuple wrapping in each item of the list.
# utils.wordfreq.convert_to_frequency_dict requires a list of strings.
ability_strings = list()
for tuple_wrapped_string in ability_list:
ability_strings.append(clean_text(tuple_wrapped_string[0].encode('utf-8')))
"""
"""
ability_freq_dict = utils.wordfreq.convert_to_frequency_dict(ability_strings)
sorted_ability_words = sorted([(v,k) for k,v in ability_freq_dict.iteritems()])
for c,i in reversed(sorted_ability_words):
print c, i
"""
def try_int(v):
    """Return ``int(v)`` when the conversion succeeds; otherwise return
    ``v`` unchanged (EAFP -- only ValueError is treated as "not a number")."""
    try:
        return int(v)
    except ValueError:
        return v
statement = "select Nname,Nconverted_manacost,Npower,Ntoughness from Ncards where Npower<>'' or Ntoughness<>''"
conn = sqlite3.connect(sample_db)
cur = conn.cursor()
cur.execute(statement)
fetched_cards = cur.fetchall()
names, cmc, powers, toughnesses = zip(*fetched_cards)
names = list(names)
# Map to int if possible with try_int
cmc = map(try_int, cmc)
powers = map(try_int, powers)
toughnesses = map(try_int, toughnesses)
# Test if all card names are unique.
names_test = copy.deepcopy(names)
while len(names_test) > 0:
name = names_test.pop()
if name in names_test:
print("Names has a nonunique: {}".format(name))
print("All card names are unique, {} cards.".format(len(names)))
creature_size = zip(cmc, powers, toughnesses)
print sorted(creature_size)
# Clean out non-ints.
bad_entries = list()
for i in range(len(creature_size)):
try:
int(creature_size[i][0])
int(creature_size[i][1])
int(creature_size[i][2])
except:
bad_entries.append(i)
print("Unfiltered creature size: {}".format(len(creature_size)))
# Del from top to bottom to preserve the index.
# This could have been done above but was too clever for good code.
print bad_entries
for i in reversed(bad_entries):
del creature_size[i]
print("Static-sized creature size: {}".format(len(creature_size)))
from matplotlib import pyplot as plt
from matplotlib import cm
import numpy as np
# Rudimentary sort and organization of relevant data.
cmc, powers, toughnesses = zip(*sorted(creature_size))
legend = ["cmc", "power", "toughness"]
xlabel = "cmc"
ylabel = "power"
#plt.legend(legend)
#plt.xlabel(xlabel)
#plt.ylabel(ylabel)
# clear plot
plt.clf()
bins = (max(powers), max(toughnesses))
heatmap, xedges, yedges = np.histogram2d(powers, toughnesses, bins=bins)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.axis([min(powers), max(powers), min(toughnesses), max(toughnesses)])
plt.imshow(heatmap, extent=extent, origin="lower", cmap=cm.get_cmap('spectral'))
cb = plt.colorbar()
cb.set_label('mean value')
plt.show()
| [
"robbintt@gmail.com"
] | robbintt@gmail.com |
fb50b6df0ff5b04d2d8eec90f3141572a7c00c53 | 822772bb8718e315658e6f9a6b3628d839f852a3 | /venv/bin/pyi-makespec | 40dfbe18d0f42732935edf35d2503912a75060e0 | [] | no_license | tuhindewan/EDiaries | dff23ce72081fcfafc0b076cf733ba5e9427d319 | 2753be83c30379e1abd21e631a4ad1297a060681 | refs/heads/master | 2022-11-23T14:23:14.381225 | 2020-07-30T06:55:35 | 2020-07-30T06:55:35 | 277,780,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | #!/home/tuhin/EDiaries/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'PyInstaller==3.6','console_scripts','pyi-makespec'
__requires__ = 'PyInstaller==3.6'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('PyInstaller==3.6', 'console_scripts', 'pyi-makespec')()
)
| [
"tuhinsshadow@gmail.com"
] | tuhinsshadow@gmail.com | |
c483568c2315f978744b6101a583b3a86f7d31ca | 8e6e3f7fc065548cb25825632c49d83964bf9f30 | /Network/TelnetApplication.py | 49defd5da817e2f396c626560ae3413f2b1f7161 | [] | no_license | raviwithu/Scripts | 1d13c9f368ed9ab966fda434d022acd9f71d3f1d | cba34cdceee121ce696bc1b30faf19a1fc126eda | refs/heads/master | 2021-01-22T02:13:18.252626 | 2018-05-03T01:53:16 | 2018-05-03T01:53:16 | 92,339,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py |
def open_telnet_conn(ip):
    """Telnet into ``ip``, log in, enter config mode, and replay ``cmd_file``.

    NOTE(review): relies on module-level ``username``, ``password``,
    ``READ_TIMEOUT`` and ``cmd_file`` plus ``telnetlib``/``time`` imports,
    none of which appear in this file -- confirm the surrounding context.
    """
    try:
        connection = telnetlib.Telnet(ip, 23, 5)
        # Answer the login prompts ("name:"/"word:" match Username:/Password:).
        output = connection.read_until("name:", READ_TIMEOUT)
        connection.write(username + "\n")
        output = connection.read_until("word:", READ_TIMEOUT)
        connection.write(password + "\n")
        time.sleep(1)
        connection.write("\n")
        connection.write("configure terminal\n")
        time.sleep(1)
        # Send each configuration command, pausing a second between lines.
        selected_cmd_file = open(cmd_file, 'r')
        selected_cmd_file.seek(0)
        for each_line in selected_cmd_file.readlines():
            connection.write(each_line + '\n')
            time.sleep(1)
        selected_cmd_file.close()
        connection.close()
    except IOError:
        print "Input parameter error! Please check username, password and file name."
open_telnet_conn(ip)
| [
"miravishankar@yahoo.co.in"
] | miravishankar@yahoo.co.in |
ac7e69a00d6ae49d7a169955c6ba1228e4f0a064 | 85466f270dd3ebef93a80db7b6fe5227dd504c81 | /lstm/nn-trajectory-prediction/train.py | af7824ca9f42e5a2631f4c680319950058e3e0f0 | [] | no_license | vineetsk1/cs231a-project | e5bc20e713a58bbc42677bd2d1c6487ef06a60f1 | f6adc7733144502fa891cabf21392a51534de306 | refs/heads/master | 2021-03-24T12:31:24.626838 | 2018-03-22T22:08:23 | 2018-03-22T22:08:23 | 122,318,431 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,200 | py | #training script for neural nets
import numpy as np
from pooling_gru import PoolingGRU
from baseline.naive_gru import NaiveGRU
from utils import *
import pdb
import argparse
import math
_id = 0
_position = 1
_class = 2
#hyperparameters
__INPUT_DIM = 2
__OUTPUT_DIM = 2
__HIDDEN_DIM = 128
__NUM_EPOCHS = 2 # 100
__LEARNING_RATE = 0.003
__POOLING_SIZE = 20
__NUM_SCENES = 4
classes = ["Pedestrian", "Biker", "Skater", "Cart"]
def map_tensor_index(pos, ref_pos):
    """Map an absolute position into pooling-grid cell coordinates.

    Each cell spans 8 units; the offset of +9 places ``ref_pos`` near the
    centre of the 20x20 grid. Returns an (x, y) pair of ints.
    """
    col = int(math.ceil((pos[0] - ref_pos[0]) / 8) + 9)
    row = int(math.ceil((pos[1] - ref_pos[1]) / 8) + 9)
    return (col, row)
def pool_hidden_states(member_id, position, hidden_states):
    """Build the 20x20 grid of neighbours' hidden states around ``position``.

    ``hidden_states`` maps occupant id -> (position, hidden_vector); every
    other occupant inside the pooling window is written into the cell its
    position maps to. The agent's own state is excluded.
    """
    # BUG FIX: the original built the grid as [[[0]*H]*P]*P, where list
    # multiplication aliases every row (and every cell within a row) to the
    # same objects -- a single assignment clobbered entire rows. Build
    # independent cells with nested comprehensions instead.
    pooled_tensor = [[[0] * __HIDDEN_DIM for _ in range(__POOLING_SIZE)]
                     for _ in range(__POOLING_SIZE)]
    bound = __POOLING_SIZE * 8 / 2
    window_limits_upper_bound = (position[0] + bound, position[1] + bound)
    window_limits_lower_bound = (position[0] - bound, position[1] - bound)
    for ID in hidden_states:
        if ID == member_id:
            continue  # never pool the agent's own hidden state
        pos = hidden_states[ID][0]
        within_upper_bound = (pos[0] <= window_limits_upper_bound[0]) and (pos[1] <= window_limits_upper_bound[1])
        within_lower_bound = (pos[0] > window_limits_lower_bound[0]) and (pos[1] > window_limits_lower_bound[1])
        if within_upper_bound and within_lower_bound:
            x, y = map_tensor_index(pos, position)
            pooled_tensor[x][y] = hidden_states[ID][1]
    return pooled_tensor
def step_through_scene(models, scene, learning_rates, epoch, num_epochs, calculate_loss):
    """Replay one scene frame-by-frame, socially pooling neighbour states.

    ``scene`` is (outlay_dict, class_dict, path_dict): frame -> occupant ->
    position, occupant -> class label, and frame -> occupant -> trajectory.
    With calculate_loss=True returns the mean loss per class; otherwise runs
    one SGD step per trajectory that has 18 observed points plus a target.
    """
    outlay_dict = scene[0]
    class_dict = scene[1]
    path_dict = scene[2]
    frames = outlay_dict.keys()
    frames = sorted(frames)
    cost = {c: [] for c in classes}
    prev_hidden_states = {}
    pooled_tensors = {}
    for frame in frames:
        print "EPOCH {} / {} : FRAME {} / {}".format(epoch+1, num_epochs, frame, frames[-1])
        frame_occupants = outlay_dict[frame].keys()
        hidden_states = {}
        for occupant in frame_occupants:
            if occupant not in pooled_tensors:
                pooled_tensors[occupant] = []
            #pool tensors
            # Gather neighbours' hidden states around this occupant, then
            # advance its class-specific GRU one time step.
            position = outlay_dict[frame][occupant]
            c = class_dict[occupant]
            pooled_tensor = pool_hidden_states(occupant, position, hidden_states)
            pooled_tensors[occupant].append(pooled_tensor)
            h = prev_hidden_states[occupant][1] if occupant in prev_hidden_states else [0] * __HIDDEN_DIM
            ns, nh = models[c].time_step(position, pooled_tensor, h)
            hidden_states[occupant] = (position, nh.tolist())
            path = path_dict[frame][occupant]
            if len(path) > 18:
                # 18 observed positions (plus their pooled tensors) predict the 19th.
                y = path[-1]
                x = path[-19:-1]
                H = pooled_tensors[occupant][-18:]
                if calculate_loss:
                    cost[c].append(models[c].loss(x, H, y))
                else:
                    models[c].sgd_step(x, H, y, learning_rates[c])
        prev_hidden_states = hidden_states
    if calculate_loss:
        return {c: sum(cost[c])/len(cost[c]) for c in cost}
def train_with_pooling(models, num_scenes, learning_rates, num_epochs, evaluate_loss_after=5):
    """Train the per-class PoolingGRU models over every scene for num_epochs.

    Every ``evaluate_loss_after`` epochs the per-class cost is accumulated
    across scenes; a class whose cost rose gets its learning rate halved.
    All models are saved after each epoch.
    """
    prev_cost = {c: float("inf") for c in classes}
    for epoch in range(num_epochs):
        cost = {c: 0 for c in classes}
        for s in range(num_scenes):
            scene = load_processed_scene(s)
            if (epoch + 1) % evaluate_loss_after == 0:
                cost_update = step_through_scene(models, scene, learning_rates, epoch, num_epochs, True)
                cost = {c : cost[c] + cost_update[c] for c in cost}
                if (s+1) == num_scenes:
                    # Last scene of an evaluation epoch: report and adapt LRs.
                    for c in cost:
                        print "{} COST : {}".format(c, cost[c])
                        if cost[c] > prev_cost[c]:
                            learning_rates[c] *= 0.5
                            print "LEARNING RATE FOR {} WAS HALVED".format(c)
                    prev_cost = cost
            step_through_scene(models, scene, learning_rates, epoch, num_epochs, False)
        for c in models:
            save_model(models[c], c, True)
def train_naively(model, num_scenes, learning_rate, num_epochs, category, evaluate_loss_after=1): #5):
    """Train a single NaiveGRU for one agent ``category`` (no social pooling).

    Evaluates the accumulated cost every ``evaluate_loss_after`` epochs and
    halves the learning rate when it rises; saves the model after each epoch.
    """
    last_cost = float("inf")
    for epoch in range(num_epochs):
        print "EPOCH: {} /{}".format(epoch+1, num_epochs)
        cost = 0
        for s in range(num_scenes):
            x_train, y_train = load_training_set(s, category)
            print "SCENE: {} /{}".format(s+1, num_scenes)
            if ((epoch+1) % evaluate_loss_after == 0):
                cost += model.cost(x_train, y_train)
                if (s+1) == num_scenes:
                    print("CURRENT COST IS {}".format(cost))
                    if (cost > last_cost):
                        learning_rate = learning_rate * 0.5
                        print "Learning rate was halved to {}".format(learning_rate)
                    last_cost = cost
            # One SGD step per (trajectory, target) pair in the scene.
            for example in range(len(y_train)):
                model.sgd_step(x_train[example], y_train[example], learning_rate)
        save_model(model, category, False)
# CLI entry point: 'pooling' trains one social-pooling GRU per agent class,
# 'naive' trains a single plain GRU for the hard-coded Biker class.
parser = argparse.ArgumentParser(description='Pick Training Mode.')
parser.add_argument('mode', type=str, nargs=1, help="which mode to use for training? either 'pooling' or 'naive'")
mode = parser.parse_args().mode[-1]
if mode == "pooling":
    print 'creating models'
    models = {label: PoolingGRU(__INPUT_DIM, __OUTPUT_DIM, __POOLING_SIZE, __HIDDEN_DIM) for label in classes}
    learning_rates = {model : __LEARNING_RATE for model in classes}
    train_with_pooling(models, __NUM_SCENES, learning_rates, __NUM_EPOCHS)
elif mode == "naive":
    print 'creating model'
    model = NaiveGRU(__INPUT_DIM, __OUTPUT_DIM, __HIDDEN_DIM)
    CLASS = "Biker"
    train_naively(model, __NUM_SCENES, __LEARNING_RATE, __NUM_EPOCHS, CLASS)
else:
    print("enter a valid mode: either 'pooling' or 'naive'")
| [
"vineetsk1@gmail.com"
] | vineetsk1@gmail.com |
f040059ed129aa620990f466c803b2a2a026b103 | 00a3f91db1e0bd349a0a120d8980429363446d67 | /api/migrations/0004_merge_20180805_1637.py | a0c874598dea406d580c4e4f2a4cf32728b0e65b | [] | no_license | junkluis/cenecu_web | 59130894d0584479b352fcd7a119aa2c6185a5e5 | 078b59308a93e40514b63c130c4506f98f929be4 | refs/heads/master | 2020-03-25T09:08:02.327198 | 2018-08-05T21:42:21 | 2018-08-05T21:42:21 | 143,649,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-08-05 21:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: reconciles two divergent 0003 branches
    # of the 'api' app into one history; intentionally has no operations.
    dependencies = [
        ('api', '0003_auto_20180730_0141'),
        ('api', '0003_auto_20180729_2237'),
    ]
    operations = [
    ]
| [
"lufezuro@gmail.com"
] | lufezuro@gmail.com |
a8744d0bca6c674118b972ff9c176d33cf41a3a3 | 4207ab9e68a139a0dd9cc2c8ed2c6166946d4e54 | /Py/cutsTest.py | ffac1bc92e75a920bb020947ab3117e1697b715e | [] | no_license | ThenukaDK/Windguard | da869038785e0504176118b6b3208f484c4b389a | 4c600f7769b85767524cd7b7369317702894d61f | refs/heads/master | 2021-06-06T22:18:56.162397 | 2016-08-25T16:08:47 | 2016-08-25T16:08:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | import os
import cv2
import numpy as np
cut_count = 0
i = 0
cut_cascade = cv2.CascadeClassifier("stage11.xml")
for filename in os.listdir("../img/imagemap"):
if filename.endswith("jpg"):
img = cv2.imread("../img/imagemap/"+filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cuts = cut_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in cuts:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
cut_count = cut_count + 1
cv2.imwrite("../img/detected_images/image"+str(i)+".jpg",img)
i = i+1
cv2.waitKey(0)
| [
"thenukaa@gmail.com"
] | thenukaa@gmail.com |
125f3eca8985b6337481c57ecb01d50d4d12cd2f | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_25145.py | cc20ebccbcceb580e0202cead14e2afc85781e87 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | # How to find / import the win32security in python?
import
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
6d3560c2179b9703d5955c377a9989b5a7a236c0 | 34d88082307281333ef4aeeec012a3ff5f8ec06e | /w3resource/basic/Q098.py | c3ed4f05918371d4d44df3cbbb34edc91b0fb439 | [] | no_license | JKChang2015/Python | a6f8b56fa3f9943682470ae57e5ad3266feb47a7 | adf3173263418aee5d32f96b9ea3bf416c43cc7b | refs/heads/master | 2022-12-12T12:24:48.682712 | 2021-07-30T22:27:41 | 2021-07-30T22:27:41 | 80,747,432 | 1 | 8 | null | 2022-12-08T04:32:06 | 2017-02-02T17:05:19 | HTML | UTF-8 | Python | false | false | 148 | py | # -*- coding: UTF-8 -*-
# Q098
# Created by JKChang
# Wed, 31/05/2017, 15:34
# Tag:
# Description: Write a Python program to get the system time.
| [
"jkchang2015@gmail.com"
] | jkchang2015@gmail.com |
894482ee3334014d91285e7f29af8f4772c1e0bf | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/tools/cr/main.py | dced8cd4069ceea9d47ee5b9b17ca6fc164b8c81 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown"
] | permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 3,092 | py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium cr tool main module.
Holds the main function and all it's support code.
"""
import os
import sys
import cr
import cr.auto.user
import cr.autocomplete
import cr.loader
_CONTACT = 'iancottrell@chromium.org'
def Main():
"""Chromium cr tool main function.
This is the main entry point of the cr tool, it finds and loads all the
plugins, creates the context and then activates and runs the specified
command.
"""
# Add the users plugin dir to the cr.auto.user package scan
user_path = os.path.expanduser(os.path.join('~', '.config', 'cr'))
cr.auto.user.__path__.append(user_path)
cr.loader.Scan()
# Build the command context
context = cr.Context(
description='The chrome dev build tool.',
epilog='Contact ' + _CONTACT + ' if you have issues with this tool.',
)
# Install the sub-commands
for command in cr.Command.Plugins():
context.AddSubParser(command)
# test for the special autocomplete command
if context.autocompleting:
# After plugins are loaded so pylint: disable=g-import-not-at-top
cr.autocomplete.Complete(context)
return
# Speculative argument processing to add config specific args
context.ParseArgs(True)
cr.plugin.Activate(context)
# At this point we should know what command we are going to use
command = cr.Command.GetActivePlugin(context)
# Do some early processing, in case it changes the build dir
if command:
command.EarlyArgProcessing(context)
# Update the activated set again, in case the early processing changed it
cr.plugin.Activate(context)
# Load the build specific configuration
found_build_dir = cr.base.client.LoadConfig(context)
# Final processing or arguments
context.ParseArgs()
cr.plugin.Activate(context)
# If we did not get a command before, it might have been fixed.
if command is None:
command = cr.Command.GetActivePlugin(context)
# If the verbosity level is 3 or greater, then print the environment here
if context.verbose >= 3:
context.DumpValues(context.verbose > 3)
if command is None:
print context.Substitute('No command specified.')
exit(1)
if command.requires_build_dir:
if not found_build_dir:
if not context.Find('CR_OUT_FULL'):
print context.Substitute(
'No build directory specified. Please use cr init to make one.')
else:
print context.Substitute(
'Build {CR_BUILD_DIR} not a valid build directory')
exit(1)
if context.Find('CR_VERSION') != cr.base.client.VERSION:
print context.Substitute(
'Build {CR_BUILD_DIR} is for the wrong version of cr')
print 'Please run cr init to reset it'
exit(1)
cr.Platform.Prepare(context)
if context.verbose >= 1:
print context.Substitute(
'Running cr ' + command.name + ' for {CR_BUILD_DIR}')
# Invoke the given command
command.Run(context)
if __name__ == '__main__':
sys.exit(Main())
| [
"karun.matharu@gmail.com"
] | karun.matharu@gmail.com |
d79019f5641bb3c85f070279e42ae59776f382d0 | 350a592c614f533833e8a0ac9afa37cef6d4d03d | /python2/exercise6/ex6.py | cb4c09c56cdcbd009ad545388bfde21f76d2e542 | [] | no_license | jcclarke/learnpythonthehardwayJC | 7a53f15c9e1cda2f56ce064b6cc08cdc32278746 | 88d2a10d4c04468ae31e29e82f78454b52e5743e | refs/heads/master | 2023-01-19T01:52:01.067881 | 2020-11-28T06:28:19 | 2020-11-28T06:28:19 | 258,010,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | #!/usr/bin/env python2
x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"
y = "Those who know %s and those who %s." % (binary, do_not)
print x
print y
print "I said: %r" % x
print "I also said: '%s'" % y
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r."
print joke_evaluation % hilarious
w = "This is the left side of..."
e = "a string with a right side."
print w + e
| [
"jeanclaudelclarke@gmail.com"
] | jeanclaudelclarke@gmail.com |
8c136c45800c0ff8c2165c758acbdb6f43f14c3f | 963b15e58ba6f769253af9ce80a83c2f003a5386 | /pose_estimation/for_middle_feature_vis.py | d30694848b96b5900bbd465bda096466893b5ade | [] | no_license | hsk9767/pose | aa3100b6315065732e56aedabeeea472370627a3 | 8f0981219423e298554f0216235ed350d896acbe | refs/heads/main | 2023-03-03T07:14:31.480532 | 2021-02-17T06:59:59 | 2021-02-17T06:59:59 | 339,636,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,658 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import shutil
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import _init_paths
from core.config import config
from core.config import update_config
from core.config import update_dir
from core.config import get_model_name
from core.loss import JointsMSELoss
from core.function import train
from core.function import validate
from utils.utils import get_optimizer
from utils.utils import save_checkpoint
from utils.utils import create_logger
import dataset
import models
from utils.vis import save_batch_image_with_joints_original_size, get_masked_image
import cv2
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
args, rest = parser.parse_known_args()
# update config
update_config(args.cfg)
# training
parser.add_argument('--frequent',
help='frequency of logging',
default=config.PRINT_FREQ,
type=int)
parser.add_argument('--gpus',
help='gpus',
type=str)
parser.add_argument('--workers',
help='num of dataloader workers',
type=int)
args = parser.parse_args()
return args
def reset_config(config, args):
if args.gpus:
config.GPUS = args.gpus
if args.workers:
config.WORKERS = args.workers
args = parse_args()
reset_config(config, args)
logger, final_output_dir, tb_log_dir = create_logger(
config, args.cfg, 'train')
cudnn.benchmark = config.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = config.CUDNN.ENABLED
model = eval('models.'+config.MODEL.NAME+'.get_pose_net_practice')(
config, is_train=False
)
this_dir = os.path.dirname(__file__)
shutil.copy2(
os.path.join(this_dir, '../lib/models', config.MODEL.NAME + '.py'),
final_output_dir)
gpus = [int(i) for i in config.GPUS.split(',')]
model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = eval('dataset.'+config.DATASET.DATASET)(
config,
config.DATASET.ROOT,
config.DATASET.TRAIN_SET,
True,
transforms.Compose([
transforms.ToTensor(),
normalize,
])
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.TRAIN.BATCH_SIZE*len(gpus),
shuffle=config.TRAIN.SHUFFLE,
num_workers=config.WORKERS,
pin_memory=True
)
## model inference start
criterion = JointsMSELoss(use_target_weight=config.LOSS.USE_TARGET_WEIGHT, use_gain_loss = config.LOSS.USE_GAIN_LOSS)
train_loader = iter(train_loader)
for i in range(10):
input, target, target_weight, meta = next(train_loader)
input = input.cuda()
x1, x2, x3, x4, x5, x6, x = model(input)
mean=(0.485, 0.456, 0.406)
std=(0.229, 0.224, 0.225)
dtype = input.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=input.device)
std = torch.as_tensor(std, dtype=dtype, device=input.device)
if mean.ndim == 1:
mean = mean.view(-1, 1, 1)
if std.ndim == 1:
std = std.view(-1, 1, 1)
input = input[0].mul_(std).add_(mean).mul(255).clamp(0, 255).permute(1,2,0).byte().cpu().numpy()
x = x.sum(dim=1).sub(x.min())
x = x.div(x.max()).mul(255).clamp(0, 255).permute(1,2,0).byte().cpu().numpy()
x1 = x1.sum(dim=1).sub(x1.min())
x1 = x1.div(x1.max()).mul(255).clamp(0, 255).permute(1,2,0).byte().cpu().numpy()
x2 = x2.sum(dim=1).sub(x2.min())
x2 = x2.div(x2.max()).mul(255).clamp(0, 255).permute(1,2,0).byte().cpu().numpy()
x3 = x3.sum(dim=1).sub(x3.min())
x3 = x3.div(x3.max()).mul(255).clamp(0, 255).permute(1,2,0).byte().cpu().numpy()
x4 = x4.sum(dim=1).sub(x4.min())
x4 = x4.div(x4.max()).mul(255).clamp(0, 255).permute(1,2,0).byte().cpu().numpy()
x5 = x5.sum(dim=1).sub(x5.min())
x5 = x5.div(x5.max()).mul(255).clamp(0, 255).permute(1,2,0).byte().cpu().numpy()
x6 = x6.sum(dim=1).sub(x6.min())
x6 = x6.div(x6.max()).mul(255).clamp(0, 255).permute(1,2,0).byte().cpu().numpy()
pics = [input, x1, x2, x3, x4, x5, x6, x]
max_w = max(input.shape[1], x1.shape[1], x2.shape[1], x3.shape[1], x4.shape[1], x5.shape[1], x6.shape[1], x.shape[1])
max_h = max(input.shape[0], x1.shape[0], x2.shape[0], x3.shape[0], x4.shape[0], x5.shape[0], x6.shape[0], x.shape[0])
total_w = sum([input.shape[1], x1.shape[1], x2.shape[1], x3.shape[1], x4.shape[1], x5.shape[1], x6.shape[1], x.shape[1]])
total_h = sum([input.shape[0], x1.shape[0], x2.shape[0], x3.shape[0], x4.shape[0], x5.shape[0], x6.shape[0], x.shape[0]])
canvas = np.zeros(shape=(max_h, total_w, 3))
current_w = 0
for j in range(8):
w = pics[j].shape[1]
h = pics[j].shape[0]
canvas[:h, current_w:current_w+w, :] = pics[j]
current_w += w
#imwrite
cv2.imwrite(f'{i}_th_image.jpg', canvas) | [
"noreply@github.com"
] | noreply@github.com |
3c19759b050e380232f30dc447ee3888d2535db2 | 117d0dae50e628646c3603382e9c070a9f702cb6 | /AliExp_OrderListParser/AliOrder.py | d2fc758e395f72846ebd7acf481ec42741aa4262 | [] | no_license | kurtjcu/AliExpressHTMLOrderParser | 58c708c18c37161fb025b0e999618e5d367cecbd | 0e4d78a82fede17f883ecacf18fc6829832d4acb | refs/heads/master | 2021-05-07T18:03:29.236901 | 2017-10-30T04:45:02 | 2017-10-30T04:45:02 | 108,781,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | #
#
# class for storing an order details
#
#
import time
from AliExp_OrderListParser.AliItem import AliItem
class AliOrder:
def __init__(self):
self.itemList = []
self.seller = "someones shop"
self.orderNum = 666
self.datetime = time.strptime('1979-05-16', "%Y-%m-%d")
self.orderAmount = 0
def setShipping(self):
totalItemsCost = 0
for item in self.itemList:
totalItemsCost += item.itemPrice * item.numUnits
if totalItemsCost == self.orderAmount:
return
else:
totalShipping = self.orderAmount - totalItemsCost
for item in self.itemList:
myRatio = (item.itemPrice * item.numUnits) / totalItemsCost
item.shippingCost = totalShipping * myRatio
#print(item.description + str(item.shippingCost)) | [
"Kurt@kurtsch.com.au"
] | Kurt@kurtsch.com.au |
bfa68a39b7b3efae040c6b9329a4a337d7d01419 | fa3f1e168efe701127c50a0aaa8d61c4757be286 | /python5problemset1read.py | f5b00453fca0f3fab4ca5bfa640f28f559390cce | [] | no_license | bryandngo/PFB2017_problemsets | b0ecdca6a6e05f074a5a6056a857963ca26220da | f57da8decaf0976d16a9a4786d678f1aa1ed0f1c | refs/heads/master | 2021-07-15T05:07:15.765951 | 2017-10-22T21:18:38 | 2017-10-22T21:18:38 | 107,168,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | #!/usr/bin/env python3
file_object = open("/Users/admin/Python5ProblemSet/Python_05.txt","r")
contents = file_object.read()
print(contents)
file.close()
print("File Was Opened and Read 10-19-17")
| [
"admin@pfb16.cshl.edu"
] | admin@pfb16.cshl.edu |
2bfeb26827945a143da5f6f4a5afd5ac7c7c8f47 | 3dbe9d208e8fd03649fd8fbfeafd2f44e5d4165d | /bot.py | 8032c0bb5285d6c60064d19864f9ad7d240c3641 | [] | no_license | tylerlv3/project_poseidon | 37438fe2daae000ba79edc2a4a5b375434f0e8b6 | 56132f8acd5f889f94d8a8ad3b6db6531b9fd396 | refs/heads/master | 2020-05-01T00:50:40.531047 | 2019-03-22T17:02:56 | 2019-03-22T17:02:56 | 177,179,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,794 | py | import discord
from discord.ext import commands
import random
import asyncio
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import youtube_dl
import time
TOKEN = 'NTU1NTQ5ODQ3ODgxNzc3MTUz.D2tHxA.DY7FV9ULgEQoH2oUpsxQ69Q7wbY'
extensions = ['rndfacts']
client = commands.Bot(command_prefix='.')
client.remove_command('help')
client.remove_command('play')
#web scraping
# Events
@client.event
async def on_member_join(member):
join_msg = 'Welcome to the server, we hope you enjoy! To use any commands please use the prefix "." before the command desired. For help type ".help"'
await client.send_message(member, join_msg)
join_serv_message = ' Just joined, Welcome!'
await client.send_message(discord.Object(id='555893854952226837'), member.mention + join_serv_message)
print('A user has joined the server')
@client.event
async def on_ready():
await client.change_presence(game=discord.Game(name="In the Best Server Ever!"))
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
if __name__ == '__main__':
for extension in extensions:
try:
client.load_extensions(extension)
except Exception as error:
print('{} Cannot load [{}]'.format(extension, error))
# Commands
@client.command(pass_context = True)
async def clear(ctx, amount=100):
channel = ctx.message.channel
messages = []
if amount < 2:
await client.say('You must delete atlesast 2 messages!')
async for message in client.logs_from(channel, limit=int(amount)):
messages.append(message)
await client.delete_messages(messages)
await client.say('Messages Deleted.')
print('A member has used a command')
@client.command()
async def help():
await client.say('For Help Please DM <@&555587892349632514>, for commands please use ".commands"')
print('A member has used a command')
@client.command()
async def flip():
flip_list = ["Heads", "Tails"]
flipped = random.choice(flip_list)
await client.say('You got: ' + flipped)
print('A member has used a command')
@client.command(pass_context=True)
async def commands(ctx):
author = ctx.message.author
cmd_list = '" .help " - Gives instructions for what to do if you need help\n" .commands " - Shows a list of availble commands and what their function is\n" .clear " - Used to clear the previous messages, enter a number after command for a specific amount you want deleted\n" .flip " - Chooses heads or tails at random\n" .bitcoin " - Shows the price of bitcoin at the time the command is used'
await client.send_message(author, cmd_list)
print('A member has used a command')
players = {}
queues = {}
def check_queue(id):
if queues[id] != []:
player = queues[id].pop(8)
players[id] = player
player.start()
@client.command(pass_context=True)
async def join(ctx):
channel_voice = ctx.message.author.voice.voice_channel
try:
await client.join_voice_channel(channel_voice)
except:
await client.say("You Must Be In a Channel for me to Join. (Try Again Once in a Channel)")
@client.command(pass_context=True)
async def leave(ctx):
server = ctx.message.server
voice_client = client.voice_client_in(server)
try:
await voice_client.disconnect()
except:
await client.say("I must be in a Voice channel to leave.")
print("A member has used a command")
@client.command(pass_context=True)
async def play(ctx):
if "www.youtube.com" not in ctx.message.content:
await client.say("Your request must have a YouTube URL in it.")
else:
try:
channel_voice = ctx.message.author.voice.voice_channel
await client.join_voice_channel(channel_voice)
try:
yt_url = ctx.message.content
link = yt_url.replace('.play ', '')
await client.say("Playing :white_check_mark:")
url = link.strip()
print(url)
print('test')
server = ctx.message.server
voice_client = client.voice_client_in(server)
player = await voice_client.create_ytdl_player(url, after=lambda: check_queue(server.id))
players[server.id] = player
player.start()
except:
await client.say("Must be a YouTube URL")
except:
await client.send_message(ctx.message.channel, "You Must Be In a Channel for me to Join. (Try Again Once in a Channel)")
@client.command(pass_context=True)
async def pause(ctx):
try:
id = ctx.message.server.id
players[id].pause()
print("A member has paused audio")
await client.say("Paused")
except:
await client.say("There Must be audio playing for me to pause.")
@client.command(pass_context=True)
async def stop(ctx):
try:
id = ctx.message.server.id
players[id].stop()
print("A member has stopped audio")
except:
await client.say("There Must be audio playing for me to stop.")
@client.command(pass_context=True)
async def resume(ctx):
try:
id = ctx.message.server.id
players[id].resume()
print("A member has resumed audio")
except:
await client.say("There Must be audio paused for me to resume.")
@client.command(pass_context=True)
async def queue(ctx, url):
server = ctx.message.server
voice_client = client.voice_client_in(server)
player = await voice_client.create_ytdl_player(url, after=lambda: check_queue(server.id))
if server.id in queues:
queues[server.id].append(player)
else:
queues[server.id] = [player]
await client.say("Audio Queued.")
@client.command(pass_context=True)
async def ethereum(ctx):
author = ctx.message.author
site = 'https://cointelegraph.com/ethereum-price-index'
uClient = uReq(site)
pg_html = uClient.read()
uClient.close()
page_souped = soup(pg_html, "html.parser")
et_price = page_souped.find("div", {"class": "price-value"})
et_vol = page_souped.find("div", {"class": "day-percent"})
et_vol_text = et_vol.get_text()
et_price_text = et_price.get_text()
await client.say('The current price of Ethereum is: ' + et_price_text)
await client.say("Change From Yesterday: " + et_vol_text)
print('A member has used a command')
client.run(TOKEN)
#@client.command()
#async def CommandName()
#print('A member has used a command') | [
"noreply@github.com"
] | noreply@github.com |
a27bdc5dbe7caef23e65b8f108991616833d3a01 | 193a5d50b5dc12a16190fce1c95eb727717f8d50 | /tutorials/Images Classifiers/vgg19.py | 34350d7abea832d29740cabc1d07308c235d5536 | [
"MIT"
] | permissive | AnkitmB125/tflearn | 2ef2e07aa61bc69841ba5d4cf2d5ba132621bf26 | 19c29122a4b58d5c0718562039423b702c897fde | refs/heads/master | 2020-04-25T18:02:13.201352 | 2019-02-27T19:23:22 | 2019-02-27T19:23:22 | 172,970,611 | 0 | 0 | NOASSERTION | 2019-02-27T18:47:34 | 2019-02-27T18:47:33 | null | UTF-8 | Python | false | false | 2,916 | py | """ Very Deep Convolutional Networks for Large-Scale Visual Recognition.
Applying VGG 19-layers convolutional network to Imagenet classification task.
References:
Very Deep Convolutional Networks for Large-Scale Image Recognition.
K. Simonyan, A. Zisserman. arXiv technical report, 2014.
Links:
http://arxiv.org/pdf/1409.1556
"""
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
# Building 'VGG Network'
input_layer = input_data(shape=[None, 224, 224, 3])
block1_conv1 = conv_2d(input_layer, 64, 3, activation='relu', name='block1_conv1')
block1_conv2 = conv_2d(block1_conv1, 64, 3, activation='relu', name='block1_conv2')
block1_pool = max_pool_2d(block1_conv2, 2, strides=2, name = 'block1_pool')
block2_conv1 = conv_2d(block1_pool, 128, 3, activation='relu', name='block2_conv1')
block2_conv2 = conv_2d(block2_conv1, 128, 3, activation='relu', name='block2_conv2')
block2_pool = max_pool_2d(block2_conv2, 2, strides=2, name = 'block2_pool')
block3_conv1 = conv_2d(block2_pool, 256, 3, activation='relu', name='block3_conv1')
block3_conv2 = conv_2d(block3_conv1, 256, 3, activation='relu', name='block3_conv2')
block3_conv3 = conv_2d(block3_conv2, 256, 3, activation='relu', name='block3_conv3')
block3_conv4 = conv_2d(block3_conv3, 256, 3, activation='relu', name='block3_conv4')
block3_pool = max_pool_2d(block3_conv4, 2, strides=2, name = 'block3_pool')
block4_conv1 = conv_2d(block3_pool, 512, 3, activation='relu', name='block4_conv1')
block4_conv2 = conv_2d(block4_conv1, 512, 3, activation='relu', name='block4_conv2')
block4_conv3 = conv_2d(block4_conv2, 512, 3, activation='relu', name='block4_conv3')
block4_conv4 = conv_2d(block4_conv3, 512, 3, activation='relu', name='block4_conv4')
block4_pool = max_pool_2d(block4_conv4, 2, strides=2, name = 'block4_pool')
block5_conv1 = conv_2d(block4_pool, 512, 3, activation='relu', name='block5_conv1')
block5_conv2 = conv_2d(block5_conv1, 512, 3, activation='relu', name='block5_conv2')
block5_conv3 = conv_2d(block5_conv2, 512, 3, activation='relu', name='block5_conv3')
block5_conv4 = conv_2d(block5_conv3, 512, 3, activation='relu', name='block5_conv4')
block4_pool = max_pool_2d(block5_conv4, 2, strides=2, name = 'block4_pool')
flatten_layer = tflearn.layers.core.flatten (block4_pool, name='Flatten')
fc1 = fully_connected(flatten_layer, 4096, activation='relu')
dp1 = dropout(fc1, 0.5)
fc2 = fully_connected(dp1, 4096, activation='relu')
dp2 = dropout(fc2, 0.5)
network = fully_connected(dp2, 1000, activation='rmsprop')
regression = tflearn.regression(network, optimizer='adam',
loss='categorical_crossentropy',
learning_rate=0.001)
model = tflearn.DNN(regression, checkpoint_path='vgg19',
tensorboard_dir="./logs") | [
"abhadage@gmail.com"
] | abhadage@gmail.com |
032083abede80dbe6bb0ad327dfe57258ab046c1 | 30595788d5c3cd43eda9c06ee5783a2522d2b04f | /import json.py | cb5f0ef787abafe11079078c9619f344b2800178 | [
"MIT"
] | permissive | OfficialPouya/SmartClock | 603e7a5da89eefebff4a65d57b75e8498535d8b9 | 1b8cdcf5bc6c26eade41abc8d0ee9524431387bc | refs/heads/master | 2020-12-19T05:07:27.630689 | 2020-08-16T13:34:16 | 2020-08-16T13:34:16 | 235,630,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | import json
# read file
with open('city.list.json', 'r') as myfile:
data=myfile.read()
# parse file
obj = json.loads(data)
# show values
print("Name: " + str(obj['name']))
| [
"pouya.akbar@gmail.com"
] | pouya.akbar@gmail.com |
0107bba8f664413fd57f0ac0e15ae23805251d21 | 8a6f2a6b5cf3be2bea2795f6a09c93e33f7a5909 | /4 İşlem.py | e2cbcc7a3cef9e013ed0022b4175aa0506baaf7f | [] | no_license | SezaiAraplarli/4-islem | 36c67bb6003d0c487a379d14784e4e71d7f7c915 | 4972a00fc6ece8b5f513105c9f4acadb1f3a0b59 | refs/heads/master | 2020-06-25T05:27:06.128626 | 2019-07-27T21:36:46 | 2019-07-27T21:36:46 | 199,215,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | # Toplama İşlemi
def topla(a,b):
top=(a+b)
print(a,"+",b,"=",(a+b))
# Çıkarma İşlemi
def cikar(a,b):
cik=(a-b)
print(a,"-",b,"=",(a-b))
# Çarpma İşlemi
def carp(a,b):
car=(a*b)
print(a,"x",b,"=",(a*b))
# Bölme İşlemi
def bolme(a,b):
bol=(a/b)
print(a,"/",b,"=",(a/b))
print("Yapılacak İşlemi Seçin.")
print("-__-__-__-__-__-__-")
print("1)Toplama")
print("2)Çıkarma")
print("3)Çarpma")
print("4)Bölme")
# Hangi işlemin uygulanacağı kısım burası
while True:
secim = (input("?"))
x = int(input("İlk Sayıyı Giriniz"))
y = int(input("İkinci Sayıyı Giriniz"))
if secim=="1":
topla(x,y)
elif secim=="2":
cikar(x,y)
elif secim=="3":
carp(x,y)
elif secim=="4":
bolme(x,y)
else:
print("Öyle bir işlem yok!")
break
#Program gayet iyi çalışıyor ama
#1,2,3 veya 4 dışında bir sayı
#girilirse ilk ve ikinci sayıyı
#sorup ondan sonra geçersiz yazdırıyor. | [
"noreply@github.com"
] | noreply@github.com |
31ee5c556c25850ddd15704dede4c1ff0190e717 | 83e2824b060cea6290563a63dfc5a2caaddccc32 | /problem019.py | 3ab4e40b4de9a056bbdaac0afe9cb807e5089a42 | [] | no_license | 1tux/project_euler | b8f731155eb59c5cdb92efe68f0140695c5b6353 | 9a6c04d08a77d6e80eb15d203c8003870645415a | refs/heads/master | 2021-01-09T20:17:35.502613 | 2016-10-03T16:19:50 | 2016-10-03T16:19:50 | 61,938,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | import datetime
c = 0
for x in xrange(1901, 2001):
for y in xrange(1, 13):
if datetime.date(x, y, 1).weekday() == 1:
c+=1
print c | [
"root"
] | root |
eead725f503c98b5e5286f15c4a5ef579502c793 | a19f028c9bbbb2e0046c9d41267a4659f5e4a660 | /venv/Scripts/easy_install-script.py | 22cfdc9a6aefd0bbfe94f8adb485c88850b83481 | [] | no_license | weni09/scrapyprograms | 0d70d4ac63ed366052989b4960e10225d746ce8f | 0a564d2543327b7427c2a655b6ee367a21136d45 | refs/heads/master | 2022-11-12T00:03:48.884181 | 2020-06-29T06:18:10 | 2020-06-29T06:18:10 | 275,747,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | #!D:\PycharmProjects\scrapyprograms\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"wenwei@163.com"
] | wenwei@163.com |
29d48b41bbe62a669fe720e8989acc66a8f7c7bd | b51a9ff5685d522d99429b69df59c352fc34e99e | /test_model1.py | fd6dc3195acd7dd7b446d657720b6b20428f8a93 | [
"MIT"
] | permissive | nvikramraj/Neural_Networks-cifar10 | fba35b86cb3b6c6509150f2208b848b0bb205a0b | 7d23541cb941dcdc6ab11f3f8a24dbc7c94929b1 | refs/heads/master | 2022-07-18T02:17:19.097053 | 2020-05-23T05:01:04 | 2020-05-23T05:01:04 | 266,134,090 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | import os
import cv2
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
#Neural Network
# 2 conv layer with batch norm , 3 linear layer
# Adam optimizer and MSELoss , one hot vector - labels
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 8, 5)
self.pool = nn.MaxPool2d(2, 2)
self.bn1 = torch.nn.BatchNorm2d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2 = nn.Conv2d(8, 16, 5)
self.bn2 = torch.nn.BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward (self,x):
x = self.bn1(x)
x = self.pool(F.relu(self.conv1(x)))
x = self.bn2(x)
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x,dim =1) #using activation function at output to get % or 0-1 values
model = Model()
save_path = os.path.join("model2-10.pt")
model.load_state_dict(torch.load(save_path))
model.eval()
model.double()
test_data = np.load("test_data2.npy",allow_pickle=True) # loading data set
test_X = torch.tensor([i[0] for i in test_data])
test_y = torch.tensor([np.eye(10)[i[1]] for i in test_data])
#print(test_X[0])
batch_size = 100
acc = 0
label = { "aeroplane":0,"automobile":0,"bird":0,"cat":0,"deer":0,
"dog":0,"frog":0,"horse":0,"ship":0,"truck":0 }
for i in tqdm(range(0,len(test_X),batch_size)):
batch_X = test_X[i:i+batch_size].view(-1,3,32,32)
batch_y = test_y[i:i+batch_size]
batch_X = batch_X.type(torch.DoubleTensor)
output = model(batch_X)
for i,j in zip(output,batch_y):
x = torch.argmax(i)
y = torch.argmax(j)
if x == y :
acc += 1
if y == 0:
label["aeroplane"] += 1
elif y == 1:
label["automobile"] += 1
elif y == 2:
label["bird"] += 1
elif y == 3:
label["cat"] += 1
elif y == 4:
label["deer"] += 1
elif y == 5:
label["dog"] += 1
elif y == 6:
label["frog"] += 1
elif y == 7:
label["horse"] += 1
elif y == 8:
label["ship"] += 1
elif y == 9:
label["truck"] += 1
total_accuracy = acc/len(test_X) *100
print("Total accuracy : ",total_accuracy)
#Getting accuracy of each element
for i in label:
label[i] = label[i]/1000 *100
print(f" {i} : {label[i]} ")
#checking for last 10 images
pic = test_X[-10:]
prediction = output[-10:]
titles = { 0:"aeroplane",1:"automobile",2:"bird",3:"cat",4:"deer",
5:"dog",6:"frog",7:"horse",8:"ship",9:"truck" }
c = 1
for i in range(10):
x = pic[i].numpy() #plotting the images
y = torch.argmax(prediction[i]).tolist()
image = cv2.merge((x[2],x[1],x[0]))
plt.subplot(2,5,c)
plt.axis("off")
plt.title(titles[y])
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
c += 1
plt.show()
#X = test_X[0].view(-1,3,32,32)
#X = X.type(torch.DoubleTensor)
#print(X.dtype)
#output = model(X)
#print(torch.argmax(output) , torch.argmax(test_y[0]))
| [
"nvikramraj"
] | nvikramraj |
df7cd173843e26f38510ef061189991744f18bb2 | c4aece6955cb571e5a79c370d016ee191b84746b | /compliments.py | 5c0cda2d878c2d853a92c0c5997ef278cf235e6f | [] | no_license | radikal95/goodwords_bot | de403fdb1273631764d23ffe8cb144db74eb9e59 | 00711612879af84362a3b72ac6ae09053a455d95 | refs/heads/master | 2020-03-28T20:04:22.347551 | 2018-09-16T21:01:48 | 2018-09-16T21:01:48 | 149,035,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,270 | py | import random
compliment = ['Ты молодец, не расстраивайся!', 'У тебя всё получится!''Ты молодец',
'Ты умница',
'Ты герой',
'Ты отлично сегодня выглядишь',
'Ты отлично выглядишь, невозможно налюбоваться',
'Ты умеешь расположить к себе',
'Нет таких сложностей, с которыми ты не справишься! Ты же почти гений',
'Ты со всем справишься',
'У тебя всё обязательно получится',
'Никто лучше тебя не справится',
'Ты просто молодец',
'Ты солнышко',
'Тебя точно ждет успех',
'В тебя можно влюбиться с первого взгляда',
'Всем бы твой характер',
'Твоему упорству можно позавидовать',
'Таких друзей, как ты, не сыскать на всём белом свете',
'Ты столько всего делаешь',
'Ты можешь собой гордиться',
'У тебя золотые руки',
'Ты классный друг',
'У тебя отличный вкус',
'Всем бы твой вкус',
'Всем бы твоё чувство стиля',
'Ты отлично разбираешься в людях',
'Ты просто золото',
'С тобой никогда не соскучишься',
'Ты добьешься всего, чего захочешь',
'Все получится, я точно знаю',
'Ты просто идеал: и ум, и фигура, и доброта - всё при тебе',
'Кое-что ты умеешь очень хорошо, а именно быть лучшим во всём!',
'С тобой весело',
'Ты всегда находишь правильные слова',
'Без всякой лести - ты большой молодец! ',
'Знакомство с тобой - большая удача для любого человека',
'Тебе обязательно повезёт',
'Общение с тобой доставляет большое удовольствие',
'Ты не перестаешь удивлять',
'Ты каждый день открываешься с новой стороны',
'С тебя нужно брать пример, ты просто молодец',
'Ты вдохновляешь всех вокруг',
'У тебя очень красивая улыбка',
'С тобой никогда не скучно',
'Как искусно ты подбираешь гардероб, ты выглядишь замечательно!',
'Иногда твои глаза сияют - это так красиво',
'Всем бы твой тонкий ум',
'У тебя потрясающее чувство юмора',
'Ты умеешь шутить и веселиться',
'Ты, конечно, не ангел. Но кому они нужны? Зато с тобой никогда не будет скучно',
'С тобой всегда интересно']
random_compliment = lambda: random.choice(compliment) | [
"radushin.arseny@yandex.ru"
] | radushin.arseny@yandex.ru |
7a2ac4da6b55db2ccc0c022304cb9443b0d99e2e | 8ccca2a9b3a77ed2e65beccbe2c846d8c8355f77 | /alipay/aop/api/domain/AlipayMarketingCampaignDrawcampTriggerModel.py | f2a5cd7fe99db5417ddbca73ef3ad59a258ae3df | [] | no_license | jkjkiiiii/KDFP-server | 9d1887409641e26d0dcc491d2726b356e5955256 | c196a6d9aa18b59917bfdc4ea1d452b32631c860 | refs/heads/master | 2020-04-26T04:45:55.400935 | 2019-04-01T08:51:40 | 2019-04-01T08:51:40 | 173,312,850 | 0 | 0 | null | 2019-03-01T14:10:59 | 2019-03-01T14:10:59 | null | UTF-8 | Python | false | false | 4,316 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingCampaignDrawcampTriggerModel(object):
    """Request model for triggering a marketing-campaign lucky draw.

    Every field is exposed as a plain pass-through property and is
    serialized to / rebuilt from the gateway's dict representation
    under a key with the same name.
    """

    # Serializable field names; property names and dict keys are identical.
    _FIELD_NAMES = (
        'bind_mobile', 'camp_id', 'camp_source', 'channel_info',
        'client_ip', 'json_ua', 'login_id', 'user_id',
    )

    def __init__(self):
        # All fields start unset; falsy fields are skipped on serialization.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    def _passthrough(storage_name):
        # Build a trivial getter/setter property over one private attribute.
        return property(
            lambda self: getattr(self, storage_name),
            lambda self, value: setattr(self, storage_name, value),
        )

    bind_mobile = _passthrough('_bind_mobile')
    camp_id = _passthrough('_camp_id')
    camp_source = _passthrough('_camp_source')
    channel_info = _passthrough('_channel_info')
    client_ip = _passthrough('_client_ip')
    json_ua = _passthrough('_json_ua')
    login_id = _passthrough('_login_id')
    user_id = _passthrough('_user_id')

    del _passthrough

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict for the gateway."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            # Nested models know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Rebuild a model from a gateway dict; empty input yields None."""
        if not d:
            return None
        o = AlipayMarketingCampaignDrawcampTriggerModel()
        for name in AlipayMarketingCampaignDrawcampTriggerModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| [
"1056871944@qq.com"
] | 1056871944@qq.com |
e23d1e57cc1d9d0e903427af7e40d12428563d84 | 32d6473c5ea02315a8ab40641f0b3ac36c19ee52 | /monerdnode/utils.py | 10b04e8ebbccdee45deeeaeab7be29907183d6e7 | [] | no_license | nkapashi/MDCLI | a496349d95bebbba6556112278a2e9c89c58421c | ad65d1a27c0a50785b6130b7581e69070b8fede7 | refs/heads/master | 2020-03-21T16:48:48.780447 | 2018-06-26T21:46:46 | 2018-06-26T21:46:46 | 138,787,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | from datetime import datetime, timedelta
import sys
# Generic fallback shown when monerod is unreachable or returns an error payload.
generalErrorText = """ERROR:
Either no response was received by monerod or the response contained an error.
Double check your connection details and any input data."""
def calcUptime(timestamp):
    """Compute node uptime and start date from a unix start timestamp.

    Args:
        timestamp (int): The time of the start in unix time stamp format.

    Returns:
        tuple: (uptime as a "Xd, Yh, Zm" string, start date as 'DD-Mon-YY').
    """
    start = datetime.fromtimestamp(timestamp)
    startDate = start.strftime('%d-%b-%y')
    # total_seconds() keeps the full elapsed span; the old `.seconds`
    # attribute silently dropped whole days, wrapping multi-day uptimes.
    uptime = int((datetime.now() - start).total_seconds())
    d = datetime(1, 1, 1) + timedelta(seconds=uptime)
    return (f"{d.day - 1}d, {d.hour}h, {d.minute}m", startDate)
def fromUnixTime(timestamp):
    """Convert a unix timestamp to 'DD-Mon-YY HH:MM', or 'Pending'.

    Any timestamp the platform cannot represent (e.g. the placeholder
    values carried by unconfirmed transactions) is reported as "Pending".

    Args:
        timestamp (int): Unix timestamp in seconds.

    Returns:
        str: Local time formatted as 'DD-Mon-YY HH:MM', or "Pending".
    """
    try:
        return datetime.fromtimestamp(timestamp).strftime('%d-%b-%y %H:%M')
    except (OverflowError, OSError, ValueError):
        # fromtimestamp() raises OverflowError or OSError for values outside
        # the platform's time range, and ValueError for years beyond 9999;
        # the original code only caught OverflowError and crashed otherwise.
        return "Pending"
def assertToExit(data):
    """Exit with the generic error message unless *data* is a plain dict.

    The RPC helpers return a dict on success, so anything else is treated
    as a failed/han­dled-elsewhere response and aborts the program.

    Args:
        data: The value returned by an RPC helper.
    """
    # Explicit check replaces the old `assert`, which is stripped under
    # `python -O` and therefore never exited (the inline TODO asked for this).
    if type(data) is not dict:
        sys.exit(generalErrorText)
def print_table(lines, separate_head=True):
    """Print a two-dimensional array as an aligned ASCII table.

    All credit goes to:
    http://blog.paphus.com/blog/2012/09/04/simple-ascii-tables-in-python/

    Args:
        lines (list): A list consisting of tuples (one tuple per row).
        separate_head (bool): Print a dashed rule under the first row.
    """
    # Widest cell seen so far in each column; grows to fit ragged rows.
    widths = []
    for row in lines:
        for col, cell in enumerate(row):
            size = len(str(cell))
            while col >= len(widths):
                widths.append(0)
            if size > widths[col]:
                widths[col] = size

    # One "{i:w}" placeholder per column, joined by the cell separator.
    fmt = " | ".join(
        "{" + str(col) + ":" + str(width) + "}"
        for col, width in enumerate(widths)
    )
    if not fmt:
        return

    rule = "-" * (sum(widths) + 3 * (len(widths) - 1))
    for row_index, row in enumerate(lines):
        print(fmt.format(*row))
        if row_index == 0 and separate_head:
            print(rule)
| [
"nikolay.kapashikov@gmail.com"
] | nikolay.kapashikov@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.