seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70735315874 | # %%
import os
import glob
import random
import monai
from os import makedirs
from os.path import join
from tqdm import tqdm
from copy import deepcopy
from time import time
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from datetime import datetime
import cv2
import argparse
from matplotlib import pyplot as plt
import segmentation_models_pytorch as smp
import monai
# %%
# Command-line configuration for the training run
parser = argparse.ArgumentParser(description='Train DeepLabV3Plus')
parser.add_argument('-i', '--data_root', type=str, default='', help='Two subfolders for training data: imgs and gts')
parser.add_argument('-o', '--ckpt_dir', type=str, default='', help='Checkpoint save directory')
parser.add_argument('-b', '--batch_size', type=int, default=600, help='batch size')
parser.add_argument('--num_workers', type=int, default=30, help='number of workers for dataloader')
parser.add_argument("--max_epochs", type=int, default=500, help="number of epochs")
parser.add_argument('--compile', action='store_true', help='compile model')
args = parser.parse_args()
model_compile = args.compile
num_epochs = args.max_epochs
resume = None  # set to a checkpoint path to resume training; None trains from scratch
device = torch.device("cuda:0")  # assumes a CUDA device is available
data_root = args.data_root
ckpt_dir = args.ckpt_dir
batch_size = args.batch_size
num_workers = args.num_workers
makedirs(ckpt_dir, exist_ok=True)
# %%
torch.cuda.empty_cache()
# 'high' enables TF32 matmuls on supporting GPUs for faster training
torch.set_float32_matmul_precision('high')
def show_mask(mask, ax, random_color=False):
    """Overlay a binary mask on a matplotlib axes as a translucent RGBA image.

    Args:
        mask: array whose last two dimensions are the image height and width.
        ax: axes-like object providing an ``imshow`` method.
        random_color: if True, use a random RGB with fixed alpha; otherwise a
            fixed translucent yellow.
    """
    if random_color:
        rgba = np.concatenate([np.random.random(3), np.array([0.45])], axis=0)
    else:
        rgba = np.array([251 / 255, 252 / 255, 30 / 255, 0.45])
    height, width = mask.shape[-2:]
    # Broadcast the (H, W, 1) mask against the (1, 1, 4) color vector
    overlay = mask.reshape(height, width, 1) * rgba.reshape(1, 1, -1)
    ax.imshow(overlay)
def show_box(box, ax):
    """Draw an axis-aligned bounding box outline on *ax*.

    Args:
        box: (x_min, y_min, x_max, y_max) coordinates.
        ax: matplotlib axes to draw on.
    """
    x_min, y_min, x_max, y_max = box[0], box[1], box[2], box[3]
    rect = plt.Rectangle(
        (x_min, y_min),
        x_max - x_min,
        y_max - y_min,
        edgecolor='blue',
        facecolor=(0, 0, 0, 0),  # transparent fill, outline only
        lw=2,
    )
    ax.add_patch(rect)
# %%
class NpyDataset(Dataset):
    """Dataset of pre-saved .npy image / ground-truth pairs for prompt-based
    segmentation.

    ``data_root`` must contain two subfolders:
      - ``imgs``: (H, W, 3) image arrays saved as .npy
      - ``gts``:  (H, W) integer label maps (0 = background), same file names
    Only ground-truth files that have a same-named image file are kept.

    Each item is (image + bbox-prompt tensor (4, S, S), binary mask (1, S, S),
    bbox (4,), file name), where S = ``image_size``.
    """

    def __init__(self, data_root, image_size=224, bbox_shift=5, data_aug=False):
        self.data_root = data_root
        self.gt_path = join(data_root, 'gts')
        self.img_path = join(data_root, 'imgs')
        self.gt_path_files = sorted(glob.glob(join(self.gt_path, '**/*.npy'), recursive=True))
        # keep only gt files whose matching image file exists
        self.gt_path_files = [file for file in self.gt_path_files if os.path.isfile(join(self.img_path, os.path.basename(file)))]
        self.image_size = image_size
        self.bbox_shift = bbox_shift  # max random perturbation (pixels) of each bbox edge
        self.data_aug = data_aug
        print(f'number of images: {len(self.gt_path_files)}')

    def __len__(self):
        return len(self.gt_path_files)

    def __getitem__(self, index):
        img_name = os.path.basename(self.gt_path_files[index])
        # BUGFIX: the original assert here compared img_name against itself
        # (always true) and its failure message referenced a nonexistent
        # attribute (self.npy_files). Image/gt pairing is already guaranteed
        # by the filtering in __init__, so the assert was removed.
        img_3c = np.load(join(self.img_path, img_name), 'r', allow_pickle=True)  # (H, W, 3)
        resize_img_cv2 = cv2.resize(
            img_3c,
            (self.image_size, self.image_size),
            interpolation=cv2.INTER_AREA
        )
        # min-max normalize to [0, 1]; clip the denominator to avoid /0 on flat images
        resize_img_cv2_01 = (resize_img_cv2 - resize_img_cv2.min()) / np.clip(resize_img_cv2.max() - resize_img_cv2.min(), a_min=1e-8, a_max=None)
        # convert the shape to (3, H, W)
        resize_img = np.transpose(resize_img_cv2_01, (2, 0, 1))
        assert np.max(resize_img) <= 1.0 and np.min(resize_img) >= 0.0, 'image should be normalized to [0, 1]'
        gt = np.load(self.gt_path_files[index], 'r', allow_pickle=True)  # multiple labels [0, 1,4,5...]
        if gt.shape[0] != self.image_size or gt.shape[1] != self.image_size:
            gt_resize = cv2.resize(
                gt, (self.image_size, self.image_size),
                interpolation=cv2.INTER_NEAREST  # nearest-neighbour keeps label values intact
            )
            gt_resize = np.uint8(gt_resize)
        else:
            gt_resize = gt
        # pick one random foreground label and binarize the mask to it
        label_ids = np.unique(gt_resize)[1:]
        label_id = random.choice(label_ids.tolist())
        gt2D = np.uint8(gt_resize == label_id)  # only one label
        assert np.max(gt2D) == 1 and np.min(gt2D) == 0, 'ground truth should be 0, 1, got: ' + str(np.unique(gt2D))
        # data augmentation: random horizontal/vertical flips applied to image and mask together
        if self.data_aug:
            if random.random() > 0.5:
                resize_img = np.ascontiguousarray(np.flip(resize_img, axis=-1))
                gt2D = np.ascontiguousarray(np.flip(gt2D, axis=-1))
            if random.random() > 0.5:
                resize_img = np.ascontiguousarray(np.flip(resize_img, axis=-2))
                gt2D = np.ascontiguousarray(np.flip(gt2D, axis=-2))
        # tight bbox around the mask, then jitter each edge by up to bbox_shift px
        y_indices, x_indices = np.where(gt2D > 0)
        x_min, x_max = np.min(x_indices), np.max(x_indices)
        y_min, y_max = np.min(y_indices), np.max(y_indices)
        H, W = gt2D.shape
        x_min = max(0, x_min - random.randint(0, self.bbox_shift))
        x_max = min(W, x_max + random.randint(0, self.bbox_shift))
        y_min = max(0, y_min - random.randint(0, self.bbox_shift))
        y_max = min(H, y_max + random.randint(0, self.bbox_shift))
        bboxes = np.array([x_min, y_min, x_max, y_max])
        # append the bbox prompt as a 4th channel: 1 inside the box, 0 outside
        resize_img_bbox = np.concatenate([resize_img, np.zeros((1, self.image_size, self.image_size))], axis=0)
        resize_img_bbox[-1, y_min:y_max, x_min:x_max] = 1.0
        return torch.tensor(resize_img_bbox).float(), torch.tensor(gt2D[None, :, :]).long(), torch.tensor(bboxes).float(), img_name
# %%
# Build a DeepLabV3+ with a ResNet-50 encoder; the 4th input channel carries
# the bounding-box prompt mask produced by NpyDataset.
model = smp.DeepLabV3Plus(
    encoder_name="resnet50",  # encoder model type
    encoder_weights="imagenet",  # use `imagenet` pretrained weights for encoder initialization
    in_channels=4,  # Additional channel for bounding box prompt
    classes=1,  # model output channels (number of classes in your dataset)
    activation=None  # Output logits
)
model.to(device)
if model_compile:
    print("Compiling model...")
    model = torch.compile(model)
# %%
optimizer = torch.optim.Adam(
    model.parameters(),
    lr=0.001,
    weight_decay=4e-5,
)
# Step decay: multiply the learning rate by 0.94 every 2 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.94)
# %%
train_dataset = NpyDataset(data_root=data_root, data_aug=False, bbox_shift=5, image_size=224)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
# %%
# loss function: combined Dice + cross-entropy on the single-logit output
seg_loss = monai.losses.DiceCELoss(sigmoid=True, squared_pred=True, reduction='mean', to_onehot_y=False)
# %%
# training
if resume is not None:
    checkpoint = torch.load(resume)
    # NOTE(review): ``model._orig_mod`` only exists after torch.compile wraps
    # the model — this resume path fails for an uncompiled model; confirm
    # that resume is only used together with --compile.
    model._orig_mod.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    best_loss = checkpoint['best_loss']
    start_epoch = checkpoint['epoch'] + 1
    print(f"Resuming training from epoch {start_epoch} with best loss {best_loss:.4f}")
else:
    best_loss = 1e10
    best_epoch = 0
    start_epoch = 0
# Training loop: one full pass over the dataloader per epoch; saves the
# latest checkpoint every epoch and the best one (lowest mean epoch loss).
for epoch in range(start_epoch, num_epochs):
    epoch_loss = [1e10 for _ in range(len(train_dataloader))]
    pbar = tqdm(train_dataloader)
    for step, (image, gt2D, boxes, img_names) in enumerate(pbar):
        optimizer.zero_grad()
        image, gt2D = image.to(device), gt2D.to(device)
        pred = model(image)
        loss = seg_loss(pred, gt2D)
        epoch_loss[step] = loss.item()
        loss.backward()
        optimizer.step()
        pbar.set_description(f"Epoch {epoch} at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}, loss: {loss.item():.4f}")
    epoch_loss_reduced = sum(epoch_loss) / len(epoch_loss)
    # BUGFIX: ``model._orig_mod`` only exists when torch.compile wrapped the
    # model; fall back to the model itself so --compile is truly optional.
    # (Also removed an unused boxes_np conversion, a redundant second
    # zero_grad per step, and a dead reset of epoch_loss_reduced.)
    model_weights = getattr(model, "_orig_mod", model).state_dict()
    checkpoint = {
        "model": model_weights,
        "epoch": epoch,
        "optimizer": optimizer.state_dict(),
        "loss": epoch_loss_reduced,
        "best_loss": best_loss,
    }
    torch.save(checkpoint, join(ckpt_dir, "deeplabv3plus_latest.pt"))
    if epoch_loss_reduced < best_loss:
        print(f"New best loss: {best_loss:.4f} -> {epoch_loss_reduced:.4f}")
        best_loss = epoch_loss_reduced
        torch.save(checkpoint, join(ckpt_dir, "deeplabv3plus_best.pt"))
    lr_scheduler.step()
| bowang-lab/MedSAM | comparisons/DeepLabV3+/train_deeplabv3_res50.py | train_deeplabv3_res50.py | py | 8,501 | python | en | code | 1,269 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.cuda.empty_cac... |
2837694708 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Pull the version tuple from the package itself so it is defined in one place.
version = __import__('pipetter').VERSION
setup(
name='Pipetter',
version='.'.join([str(v) for v in version]),  # e.g. (1, 0, 2) -> "1.0.2"
description='Uniform registration and processing of inclusion tags for information pulled from other sources, such as websites.',
packages = find_packages(),
# Optional feature sets: install with e.g. ``pip install Pipetter[twitter]``
extras_require = {
'twitter': ['pytwitter'],
'noaa': ['BeautifulSoup']
}
)
) | melinath/django-pipetter | setup.py | setup.py | py | 430 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 11,
"usage_type": "call"
}
] |
3243322415 |
# Residual Dense Network for Image Super-Resolution
# https://arxiv.org/abs/1802.08797
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from model import common
from model.matrix import *
def make_model(args, parent=False):
    """Factory entry point used by the surrounding framework to build MetaRDN.

    ``parent`` is accepted for interface compatibility with the framework's
    model loader but is unused here.
    """
    return MetaRDN(args)
## ------------------ Meta Magnitude Modules -------------------- ##
class WL2Weight(nn.Module):
    """Hypernetwork mapping a band's central wavelength (and optionally its
    bandwidth) to the parameters of a custom conv layer.

    A small MLP produces a flat vector that is reshaped into a
    (outC, inC, k, k) weight tensor, with an optional (outC,) bias.
    """

    def __init__(self, inC, outC, kernel_size=1, use_bw=True, bias=True):
        super(WL2Weight, self).__init__()
        self.kernel_size = kernel_size
        self.inC = inC
        self.outC = outC
        self.channel = 1 + use_bw  # MLP input size: wavelength alone, or wavelength + bandwidth
        self.use_bw = use_bw
        self.bias = bias
        # Produces all conv weights (and biases, if requested) in one shot.
        self.meta_block = nn.Sequential(
            nn.Linear(self.channel, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, self.outC * (self.inC * self.kernel_size * self.kernel_size + self.bias)),
            nn.Sigmoid()
        )

    def forward(self, wl, bw):
        """Predict conv parameters from wavelength ``wl`` and bandwidth ``bw``.

        Returns ``(weight, bias)`` when constructed with ``bias=True``,
        otherwise just ``weight`` of shape (outC, inC, k, k).
        """
        # Shift/scale the physical quantities into small centred ranges.
        wl_feat = (wl.float() - 550) / 10
        bw_feat = bw.float() - 55
        if self.use_bw:
            meta_in = torch.stack([wl_feat, bw_feat]).unsqueeze(0)
        else:
            meta_in = wl_feat.unsqueeze(0).unsqueeze(0)
        flat = self.meta_block(meta_in)
        k = self.kernel_size
        if not self.bias:
            return flat.contiguous().view(self.outC, self.inC, k, k)
        bias_vec = flat[:, -self.outC:].squeeze(0)
        weight = flat[:, :-self.outC].contiguous().view(self.outC, self.inC, k, k)
        return weight, bias_vec
## ------------------ Full Network ------------------------- ##
class MetaRDN(nn.Module):
    """Meta residual dense network for spectral super-resolution.

    Each input band is processed by a shared head whose final 1x1 conv
    weights are predicted from the band's central wavelength/bandwidth
    (WL2Weight); the per-band features are fused, passed through a
    residual-dense backbone, and decoded band-by-band in the tail.
    """

    def __init__(self, args):
        super(MetaRDN, self).__init__()
        self.args = args
        self.scale = self.args.scale
        H0 = self.args.H0
        G0 = self.args.G0  # base number of feature channels
        kSize = self.args.RDNkSize  # conv kernel size used throughout
        self.scale_idx = 0
        self.device = torch.device('cpu' if self.args.cpu else 'cuda')
        # Per-channel mean/std (de)normalization helpers
        self.sub_mean = partial(common.mean_shift_1d, add=False)
        self.add_mean = partial(common.mean_shift_1d, add=True)
        # Shallow feature extraction net
        self.SFENet1 = nn.Conv2d(
            1, G0, kSize, padding=(kSize - 1) // 2, stride=1)
        self.SFENet2 = nn.Conv2d(
            G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
        # Central Wavelength weight prediction Network
        self.wlwp1 = WL2Weight(inC=G0, outC=G0,
                               kernel_size=self.args.WLkSize1, use_bw=1-self.args.skip_bw, bias=True)
        self.wlwp2 = WL2Weight(inC=G0+3, outC=G0,
                               kernel_size=self.args.WLkSize2, use_bw=1-self.args.skip_bw, bias=True)
        # Redidual Dense Blocks
        if self.args.head_blocks > 0:
            self.RDBs_head = nn.ModuleList()
            for i in range(self.args.head_blocks):
                self.RDBs_head.append(
                    common.RDB(growRate0=G0, growRate=self.args.G_hidden,
                               nConvLayers=self.args.rdb_conv_num)
                )
            # Global Feature Fusion
            self.GFF_head = nn.Sequential(*[
                nn.Conv2d(self.args.head_blocks * G0,
                          G0, 1, padding=0, stride=1),
                nn.Conv2d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
            ])
        m_head = [
            common.ResBlock(
                common.default_conv, G0, 3, act=nn.ReLU(), res_scale=args.res_scale
            ) for _ in range(self.args.head_blocks)
        ]
        self.head = nn.Sequential(*m_head)
        if self.args.body_blocks > 0:
            self.RDBs_body = nn.ModuleList()
            for _ in range(self.args.body_blocks):
                self.RDBs_body.append(
                    common.RDB(growRate0=G0, growRate=self.args.G_hidden,
                               nConvLayers=self.args.rdb_conv_num)
                )
            # NOTE(review): for odd body_blocks this appends one extra RDB
            # that forward() never iterates over (it loops exactly
            # body_blocks times) — presumably to keep an even parameter
            # count; confirm the intent.
            if (self.args.body_blocks % 2) == 1:
                self.RDBs_body.append(
                    common.RDB(growRate0=G0, growRate=self.args.G_hidden,
                               nConvLayers=self.args.rdb_conv_num)
                )
            self.GFF_body = nn.Sequential(*[
                nn.Conv2d((self.args.body_blocks) *
                          G0, G0, 1, padding=0, stride=1),
                nn.Conv2d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
            ])
        m_tail = [
            common.ResBlock(
                common.default_conv, G0, 3, act=nn.ReLU(), res_scale=args.res_scale
            ) for _ in range(self.args.tail_blocks)
        ]
        m_tail.append(common.default_conv(G0, 1, 3))  # project back to one channel per output band
        self.tail = nn.Sequential(*m_tail)
        self.upsample = nn.Sequential(*[
            common.Upsampler(common.default_conv,
                             self.scale, G0, act=False),
            common.default_conv(G0, G0, 3)])

    def forward(self, x, lr_rgb, hr_rgb, info):
        """ Network forward function.
        Args:
            x: input LR hyperspectral tensor, indexed as (B, C, h, w) below.
            lr_rgb: low-resolution RGB guide, indexed as (B, 3, h, w).
            hr_rgb: high-resolution RGB guide; normalized IN PLACE here and
                concatenated to the upsampled features in parallel_tail.
            info: per-band metadata object with central wavelengths
                (cwl_in/cwl_rgb), bandwidths (bw_in/bw_rgb) and channel
                means/stds (mean_*/std_*) for the HSI bands and RGB channels.
        """
        self.B, self.C, *_ = x.shape
        # ------------------------------------- Head ------------------------------------- #
        # Process each of the C HSI bands plus the 3 RGB channels separately,
        # applying band-specific conv weights predicted from wavelength/bandwidth.
        head_feat = []
        for i in range(self.C+3):
            if i < self.C:
                slice_x = x[:, i, :, :]
            else:
                slice_x = lr_rgb[:, i-self.C, :, :]
            mean_i = info.mean_in[i].repeat(self.B).reshape(self.B, 1, 1, 1) if i < self.C else \
                info.mean_rgb[i-self.C].repeat(self.B).reshape(self.B, 1, 1, 1)
            std_i = info.std_in[i].repeat(self.B).reshape(self.B, 1, 1, 1) if i < self.C else \
                info.std_rgb[i-self.C].repeat(self.B).reshape(self.B, 1, 1, 1)
            cwl_i = info.cwl_in[i] if i < self.C else info.cwl_rgb[i-self.C]
            bw_i = info.bw_in[i] if i < self.C else info.bw_rgb[i-self.C]
            slice_x = slice_x.unsqueeze(1)
            slice_x = self.sub_mean(slice_x, mean_i, std_i)
            if i >= self.C:
                # also normalize the matching HR RGB channel (in place)
                hr_rgb[:, i-self.C, :, :] = self.sub_mean(
                    hr_rgb[:, i-self.C, :, :].unsqueeze(1), mean_i, std_i).squeeze(1)
            slice_x = self.SFENet1(slice_x)
            slice_x = self.head(slice_x)
            weight1, bias1 = self.wlwp1(cwl_i, bw_i)
            slice_x = F.conv2d(slice_x, weight1, bias1, stride=1, padding=0)
            head_feat.append(slice_x)
        # ----------------------------------- Concate ----------------------------------- #
        # Average all per-band feature maps into one fused feature tensor
        f1 = torch.mean(torch.cat([feat.unsqueeze(0)
                                   for feat in head_feat], dim=0), dim=0)
        x = f1
        x = self.SFENet2(x)
        # ----------------------------------- Backbone ----------------------------------- #
        if self.args.body_blocks > 0:
            RDBs_out_body = []
            for i in range(self.args.body_blocks):
                x = self.RDBs_body[i](x)
                RDBs_out_body.append(x)
            x = self.GFF_body(torch.cat(RDBs_out_body, 1))
            x += f1  # global residual connection back to the fused head feature
        # -------------------------------- Tail(Upsample) -------------------------------- #
        # Original parallel split-and-cat method
        # hsi_out = self.parallel_tail(x, hr_rgb, info.cwl_out, info.bw_out, info.mean_out, info.std_out)
        hsi_out = self.parallel_tail(x, hr_rgb, info.cwl_in, info.bw_in, info.mean_in, info.std_in)
        return hsi_out

    def parallel_tail(self, x, hr_rgb, cwl, bw, mean, std):
        """Decode one output band per wavelength in ``cwl``.

        Upsamples the fused feature, concatenates the HR RGB guide, applies a
        wavelength-conditioned conv per band (wlwp2), then the shared tail and
        per-band de-normalization. Returns a (B, len(cwl), H, W) tensor.
        """
        out_images = []
        x = self.upsample(x)
        x = torch.cat([x, hr_rgb], 1)
        for i in range(len(cwl)):
            x_tail = x
            weight2, bias2 = self.wlwp2(cwl[i], bw[i])
            x_tail = F.conv2d(x_tail, weight2, bias2, stride=1, padding=0)
            out = self.tail(x_tail)
            out = self.add_mean(out, mean[i].repeat(self.B).reshape(
                self.B, 1, 1, 1), std[i].repeat(self.B).reshape(self.B, 1, 1, 1))
            out_images.append(out)
        out_images = torch.cat(out_images, dim=1)
        return out_images
| miracleyoo/Meta-SSSR-Pytorch-Publish | model/metafrdn.py | metafrdn.py | py | 8,590 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
20356849460 | from copy import deepcopy
from tqdm import tqdm
class Tokenizer:
    """Abstract two-way mapping between data representations A and B."""

    def tokenize_A2B(self, data):
        """Map *data* from representation A to B. Override in subclasses."""
        pass

    def tokenize_B2A(self, data):
        """Map *data* from representation B to A. Override in subclasses."""
        pass


class DictTokenizer(Tokenizer):
    """Dict-backed tokenizer.

    Builds a reverse dictionary when the mapping is one-to-one so that
    labels can be mapped back to data. If the mapping is not one-to-one,
    ``label2data`` is set to None (tokenize_B2A is then unavailable).
    """

    def __init__(self, data2label):
        self.data2label = data2label
        label2data = dict()
        for k, v in data2label.items():
            # BUGFIX: the original checked ``k in label2data`` (comparing a
            # data key against the label keyspace) and returned mid-__init__,
            # which left ``self.label2data`` undefined and made tokenize_B2A
            # raise AttributeError. Check the *label* for duplicates instead:
            # a repeated label means the mapping is not invertible.
            if v in label2data:
                self.label2data = None
                return
            label2data[v] = k
        self.label2data = label2data

    def tokenize(self, data, data2label):
        """Apply *data2label* elementwise to a scalar, a 1-D list, or a 2-D list.

        The input is deep-copied first, so the original is never mutated.
        """
        result = deepcopy(data)
        if isinstance(data, list):
            if isinstance(data[0], list):
                # 2-D list
                for i in range(len(data)):
                    for j in range(len(data[i])):
                        result[i][j] = data2label[data[i][j]]
            else:
                # 1-D list
                for i in range(len(data)):
                    result[i] = data2label[data[i]]
        else:
            # scalar
            result = data2label[data]
        return result

    def tokenize_A2B(self, data):
        return self.tokenize(data, self.data2label)

    def tokenize_B2A(self, labels):
        return self.tokenize(labels, self.label2data)
class bertTokenizer(Tokenizer):
    """Wraps a HuggingFace BertTokenizer to map token strings <-> token ids.

    Unlike DictTokenizer, both tokenize methods modify ``data`` IN PLACE and
    return None.
    """

    # in-place conversion
    def __init__(self, bert_path):
        # Local import keeps transformers an optional dependency.
        from transformers import BertTokenizer
        super().__init__()
        self.tokenizer = BertTokenizer.from_pretrained(bert_path)

    def tokenize_A2B(self, data, add_sep=True):
        # Convert each token sequence to ids, optionally wrapping it with the
        # special [CLS] ... [SEP] markers. Modifies ``data`` in place.
        SEP = self.tokenizer.convert_tokens_to_ids("[SEP]")
        CLS = self.tokenizer.convert_tokens_to_ids("[CLS]")
        for i in tqdm(range(len(data))):
            data[i] = self.tokenizer.convert_tokens_to_ids(data[i])
            if add_sep:
                data[i] = [CLS] + data[i] + [SEP]

    def tokenize_B2A(self, data, remove_sep=False):
        # Decode each id sequence back to text, in place.
        for i in range(len(data)):
            data[i] = self.tokenizer.decode(data[i])
            if remove_sep:
                # NOTE(review): after decode, data[i] is assigned the decode
                # result and ``.remove`` is then called on it — if decode
                # returns a str (as the name suggests), str has no ``remove``
                # and this branch raises AttributeError. Confirm and replace
                # with string stripping if so.
                if "[SEP]" in data[i]: data[i].remove("[SEP]")
                if "[CLS]" in data[i]: data[i].remove("[CLS]")
| ReturnTR/PytorchModelCode | CommonTools/TorchTool.py | TorchTool.py | py | 2,338 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "copy.deepcopy",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "transformers.BertTokenizer.from_pretrained",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "transformers.BertTokenizer",
"line_number": 57,
"usage_type": "name"
},
{
... |
19157644863 | import json
from enum import Enum
"""
Innehåller funktioner för att serialisera paket. Alla paket skapas och skickas som ett JSON objekt.
"""
class Types(Enum):
    """All packet types used by the protocol (hard-coded list).

    The numeric values are part of the wire format, so the pre-existing
    values (time/play/pause) are kept unchanged. The serializer functions in
    this module also reference createRoom, connectRoom, disconnectRoom,
    publicKey, message, requestRoomsList, roomsList, connectedRoom and
    chatHistory, which were missing from this enum and made every such
    function raise AttributeError; they are added here with new values
    appended after the existing ones. NOTE(review): if another peer
    implementation defines different numbers for these members, the values
    below must be aligned with it.
    """
    time = 0
    play = 1
    pause = 2
    createRoom = 3
    connectRoom = 4
    disconnectRoom = 5
    publicKey = 6
    message = 7
    requestRoomsList = 8
    roomsList = 9
    connectedRoom = 10
    chatHistory = 11
def _encode(_msg):
    """Serialize a packet dict to UTF-8 JSON bytes; all packets share this path.

    Returns None after logging if encoding fails. (The original per-function
    try/except blocks fell through to ``return _send`` with ``_send`` unbound
    on failure, raising UnboundLocalError instead of reporting the problem;
    several of them also contained duplicated print statements.)
    """
    try:
        return json.dumps(_msg).encode()
    except (UnicodeError, ValueError) as e:
        print("Failed to encode message! " + str(_msg) + " " + str(e))
        return None


def serializeCreateRoom(_alias, _name, _password, _private=False, _publicKey=None):
    """Build a create-room packet: client alias, room name, password,
    privacy flag and (optional) public key."""
    return _encode({
        "type": Types.createRoom.value,
        "alias": _alias,
        "roomName": _name,
        "password": _password,
        "private": _private,
        "publicKey": _publicKey,
    })


def serializeConnectRoom(_alias, _name, _password, _publicKey):
    """Build a connect-room packet: room name, client alias, password and public key."""
    return _encode({
        "type": Types.connectRoom.value,
        "roomName": _name,
        "alias": _alias,
        "password": _password,
        "publicKey": _publicKey,
    })


def serializeDisconnectRoom():
    """Build a disconnect-room packet (carries only the packet type)."""
    return _encode({
        "type": Types.disconnectRoom.value,
    })


def serializePublickey(_pKey):
    """Build a packet announcing the client's public key."""
    return _encode({
        "type": Types.publicKey.value,
        "publicKey": _pKey,
    })


def serializeMessage(_alias, _message, _roomName, _signature=None):
    """Build a chat-message packet: message text, client alias, room name and
    optional signature."""
    return _encode({
        "type": Types.message.value,
        "message": _message,
        "alias": _alias,
        "roomName": _roomName,
        "signature": _signature
    })


def serializeRequestRoomsList():
    """Build a request for the server's room list (type only, no payload)."""
    return _encode({
        "type": Types.requestRoomsList.value,
    })


def serializeSendRoomsList(_rooms):
    """Build a packet carrying the list of room names."""
    return _encode({
        "type": Types.roomsList.value,
        "rooms": _rooms
    })


def serializeConnectedRoom(_name, _private=False):
    """Build the confirmation packet telling a client it joined room ``_name``."""
    return _encode({
        "type": Types.connectedRoom.value,
        "roomName": _name,
        "private": _private,
    })


def serializeChatHistory(_history):
    """Build a packet with previous chat messages for newly joined clients."""
    return _encode({
        "type": Types.chatHistory.value,
        "history": _history
    })


def serializeTime(_time):
    """Build a playback-position packet carrying the current media time."""
    return _encode({
        "type": Types.time.value,
        "time": _time,
    })


def serializePlay():
    """Build a play command packet (type only)."""
    return _encode({
        "type": Types.play.value,
    })


def serializePause():
    """Build a pause command packet (type only)."""
    return _encode({
        "type": Types.pause.value,
    })
def parsePacket(_packet):
    """Decode a received JSON packet into a dict of its fields (including
    the packet type).

    Returns None if the payload is not valid JSON.
    """
    try:
        return json.loads(_packet)
    except ValueError:
        print("Error decoding json message in packet! ", _packet)
        return None
| Simpag/PythonVideoPlayer | packet.py | packet.py | py | 6,890 | python | sv | code | 0 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 70,
... |
39560119331 | import numpy as np
import multiprocessing as mp
from itertools import repeat
import time
import pandas as pd
# Custom functions from other files we wrote
import PSOTestFuncs as tf
from PSOInit import pso_init
from PSOInit import qpso_init
from PSOUpdate import veloc_update
from PSOUpdate import point_update
from PSOUpdate import qpoint_update
############################ Contents ############################
# Defines the 4 algorithms that will be used:
# - PSO - pso_algo()
# - QPSO - qpso_algo()
# - Parallelized PSO - pso_algo_par()
# - Parallelized QPSO - qpso_algo_par()
# Runs 50 simulations of each algorithm on each test function within PSOTestFuncs.py
# Saves the output to a CSV
############################ Variable descriptions ############################
# n is the number of dimensions (int)
# s is the number of particles (int)
# bounds of the search area - of the form [[x1min, x1max], ... , [xnmin, xnmax]]
# f is the function to be optimized
# params are the necessary parameters (omega, c1, c2) for PSO
# t is the current iteration of the algorithm
# sims is number of simulations to run on each function
# maxrounds is the maximum number of iterations allowed
# tol is the amount of change in fgbest to be considered an improvement
# nochange is the number or iterations without a sufficient improvement in fgbest before stopping
# samebest is a counter for how many rounds in a row with improvement in fgbest of less than tol
# pcurr is the current position of each particles
# vcurr is the current velocity of each particles (PSO and parallelized PSO only)
# pbest is the best position of each particle
# fbest is the minimum value found for each particle
# fgbest is the overall minimum value found of all particles
# pgbest is the overall best position of each particle
# x
# mbest
# newp is a temporary calculation of the new position, saved to pcurr if inside the bounds
# newx_id is a temporary calculation of the new x, saved to x if inside the bounds
# fgbest_compare
# rpg
# phi
# u
# beta
# changeParam
# coinToss - 50% chance of being True / False, used in QPSO to decide to add or subtract changeParam
############################ Algorithm Functions ############################
# Takes in f, s, bounds, params, maxrounds, tol, nochange
# Runs PSO on f over the search area bounds using s particles and parameters params,
# and stopping criteria specified by maxrounds, tol, nochange
# Returns pgbest, fgbest, and t
def pso_algo(f, s, bounds, params, maxrounds, tol, nochange):
    """Serial particle swarm optimization (minimization).

    Args:
        f: objective function to minimize.
        s: number of particles.
        bounds: search box of the form [[x1min, x1max], ..., [xnmin, xnmax]].
        params: (omega, c1, c2) velocity-update coefficients.
        maxrounds: maximum number of iterations.
        tol: minimum change in the global best value to count as improvement.
        nochange: stop after this many consecutive non-improving iterations.

    Returns:
        (pgbest, fgbest, t): best position found, its objective value, and
        the number of iterations performed.
    """
    n = len(bounds)
    pcurr, vcurr, pbest, fbest, pgbest, fgbest = pso_init(f, s, bounds)
    t = 0
    samebest = 0  # consecutive iterations with improvement below tol
    while t < maxrounds:
        fgbest_compare = fgbest
        for i in range(s):
            for d in range(n):
                vcurr[i][d] = veloc_update(pcurr[i][d], vcurr[i][d], pbest[i][d], pgbest[d], params)
            newp = pcurr[i] + vcurr[i]
            for d in range(n):
                # accept the move only along dimensions that stay in bounds
                if newp[d] > bounds[d][0] and newp[d] < bounds[d][1]:
                    # Adding 0 creates a new object in memory instead of variable that references same object
                    pcurr[i][d] = newp[d] + 0
            fcurr = f(pcurr[i])
            if fcurr < fbest[i]:
                fbest[i] = fcurr + 0
                pbest[i] = pcurr[i] + 0
            if fcurr < fgbest:
                fgbest = fcurr + 0
                pgbest = pcurr[i] + 0
        t += 1
        # stopping criterion: no improvement > tol for `nochange` rounds in a row
        if abs(fgbest_compare - fgbest) > tol:
            samebest = 0
        else:
            samebest += 1
        if samebest >= nochange:
            break
    return pgbest, fgbest, t
# Takes in f, s, bounds, maxrounds, tol, nochange
# Runs QPSO on f over the search area bounds using s particles,
# and stopping criteria specified by maxrounds, tol, nochange
# Returns pgbest, fgbest, and t
def qpso_algo(f, s, bounds, maxrounds, tol, nochange):
    """Serial quantum-behaved PSO (QPSO), minimization.

    Args:
        f: objective function to minimize.
        s: number of particles.
        bounds: search box of the form [[x1min, x1max], ..., [xnmin, xnmax]].
        maxrounds: maximum number of iterations.
        tol: minimum change in the global best value to count as improvement.
        nochange: stop after this many consecutive non-improving iterations.

    Returns:
        (pgbest, fgbest, t): best position found, its objective value, and
        the number of iterations performed.
    """
    n = len(bounds)
    pcurr, pbest, fbest, pgbest, fgbest = qpso_init(f, s, bounds)
    x = np.copy(pcurr, order="k")
    t = 0
    samebest = 0  # consecutive iterations with improvement below tol
    while t < maxrounds:
        fgbest_compare = fgbest
        mbest = np.mean(pbest, axis=0)  # mean of all personal-best positions
        # contraction-expansion coefficient, decays linearly from 1.0 to 0.5
        beta = 0.5*(maxrounds-t)/maxrounds + 0.5
        for i in range(s):
            for d in range(n):
                phi = np.random.uniform()
                u = np.random.uniform()
                coinToss = np.random.uniform() < 0.5
                # local attractor between personal best and global best
                pcurr[i,d] = phi*pbest[i,d] + (1- phi)*pgbest[d]
                changeParam = beta * abs(mbest[d] - x[i, d]) * (-1) * np.log(u)
                newx_id = pcurr[i, d] + changeParam if coinToss else pcurr[i, d] - changeParam
                if newx_id > bounds[d][0] and newx_id < bounds[d][1]:
                    # Adding 0 creates a new object in memory instead of variable that references same object
                    x[i,d] = newx_id + 0
            fcurr = f(x[i])
            if fcurr < fbest[i]:
                fbest[i] = fcurr + 0
                pbest[i] = x[i] + 0
            if fcurr < fgbest:
                fgbest = fcurr + 0
                pgbest = x[i] + 0
        t += 1
        # stopping criterion: no improvement > tol for `nochange` rounds in a row
        if abs(fgbest_compare - fgbest) > tol:
            samebest = 0
        else:
            samebest += 1
        if samebest >= nochange:
            break
    return pgbest, fgbest, t
# Takes in f, s, bounds, params, maxrounds, tol, nochange
# Runs parallelized PSO on f over the search area bounds using s particles and parameters params,
# and stopping criteria specified by maxrounds, tol, nochange
# We update all the points in an iteration at once, so no communication within an iteration
# Returns pgbest, fgbest, and t
def pso_algo_par(f, s, bounds, params, maxrounds, tol, nochange):
    """Parallelized PSO: all particle updates within an iteration are computed
    at once via ``pool.starmap`` (no communication inside an iteration).

    NOTE(review): relies on a module-level multiprocessing ``pool`` that is
    expected to be created before this is called (presumably under
    ``if __name__ == '__main__'``); it is not defined in this module's
    visible top level — confirm.

    Returns (pgbest, fgbest, t) as in pso_algo.
    """
    n = len(bounds)
    pcurr, vcurr, pbest, fbest, pgbest, fgbest = pso_init(f, s, bounds)
    t = 0
    samebest = 0  # consecutive iterations with improvement below tol
    while t < maxrounds:
        fgbest_compare = fgbest
        # one task per particle; shared read-only values are repeat()-ed
        inputs = zip(pcurr, vcurr, pbest, fbest, repeat(pgbest), repeat(params), repeat(bounds), repeat(f))
        results_0 = pool.starmap(point_update, inputs)
        # transpose [(p, v, pb, fb), ...] -> [all p, all v, all pb, all fb]
        results = list(map(list, zip(*results_0)))
        pcurr = np.array(list(results)[0])
        vcurr = np.array(list(results)[1])
        pbest = np.array(list(results)[2])
        fbest = np.array(list(results)[3])
        if min(fbest) < fgbest:
            # Adding 0 creates a new object in memory instead of variable that references same object
            fgbest = min(fbest) + 0
            pgbest = np.copy(pbest[fbest == fgbest], order="k")[0]
        t += 1
        # stopping criterion: no improvement > tol for `nochange` rounds in a row
        if abs(fgbest_compare - fgbest) > tol:
            samebest = 0
        else:
            samebest += 1
        if samebest >= nochange:
            break
    return pgbest, fgbest, t
# Takes in f, s, bounds, maxrounds, tol, nochange
# Runs parallelized QPSO on f over the search area bounds using s particles and parameters params,
# and stopping criteria specified by maxrounds, tol, nochange
# We update all the points in an iteration at once, so no communication within an iteration
# Returns pgbest, fgbest, and t
def qpso_algo_par(f, s, bounds, maxrounds, tol, nochange):
    """Parallelized QPSO: all particle updates within an iteration are
    computed at once via ``pool.starmap`` (no communication inside an
    iteration).

    NOTE(review): like pso_algo_par, this relies on a module-level
    multiprocessing ``pool`` that is not defined in this module's visible
    top level — confirm it is created before calling.

    Returns (pgbest, fgbest, t) as in qpso_algo.
    """
    pcurr, pbest, fbest, pgbest, fgbest = qpso_init(f, s, bounds)
    x = np.copy(pcurr, order="k")
    t = 0
    samebest = 0  # consecutive iterations with improvement below tol
    while t < maxrounds:
        fgbest_compare = fgbest
        mbest = np.mean(pbest, axis=0)  # mean of all personal-best positions
        # contraction-expansion coefficient, decays linearly from 1.0 to 0.5
        beta = 0.5*(maxrounds-t)/maxrounds + 0.5
        # one task per particle; shared read-only values are repeat()-ed
        inputs = zip(x, pcurr, pbest, fbest, repeat(mbest), repeat(pgbest), repeat(beta), repeat(bounds), repeat(f))
        results_0 = pool.starmap(qpoint_update, inputs)
        # transpose [(x, p, pb, fb), ...] -> [all x, all p, all pb, all fb]
        results = list(map(list, zip(*results_0)))
        x = np.array(list(results)[0])
        pcurr = np.array(list(results)[1])
        pbest = np.array(list(results)[2])
        fbest = np.array(list(results)[3])
        if min(fbest) < fgbest:
            # Adding 0 creates a new object in memory instead of variable that references same object
            fgbest = min(fbest) + 0
            pgbest = np.copy(pbest[fbest == fgbest], order="k")[0]
        t += 1
        # stopping criterion: no improvement > tol for `nochange` rounds in a row
        if abs(fgbest_compare - fgbest) > tol:
            samebest = 0
        else:
            samebest += 1
        if samebest >= nochange:
            break
    return pgbest, fgbest, t
############################ Simulations and Testing ############################
if __name__ == '__main__':
    # Algorithm parameters shared by all runs, and the number of simulations
    s = 50                      # swarm size
    params = [0.715, 1.7, 1.7]  # PSO parameters (inertia + acceleration weights)
    maxrounds = 1000
    tol = 10**(-9)
    nochange = 20
    sims = 50
    # Per-function test setup: display name, callable, location of the true
    # minimum, and search bounds (one [lo, hi] pair per dimension)
    funcnamelist = ["X-Squared", "Booth", "Beale", "ThreeHumpCamel", "GoldsteinPrice", "Levi_n13", "Sphere", "Rosebrock", "StyblinskiTang", "Ackley", "Schaffer_n2", "Eggholder", "McCormick", "Rastrigin", "Schaffer_n4", "Easom", "Bukin_n6", "Matyas"]
    functionlist = [tf.xsq, tf.booth, tf.beale, tf.threehumpcamel, tf.goldsteinprice, tf.levi_n13, tf.sphere, tf.rosenbrock, tf.Styblinski_Tang, tf.ackley, tf.schaffer_n2, tf.eggholder, tf.mccormick, tf.rastrigin, tf.schaffer_n4, tf.easom, tf.bukin_n6, tf.matyas]
    pminlist = [[0], [1,3], [3,0.5], [0,0], [0, -1],[1,1], [0,0,0,0], [1,1,1,1], [-2.903534,-2.903534,-2.903534,-2.903534,-2.903534,-2.903534], [0,0], [0,0], [512, 404.2319], [-0.54719, -1.54719], [0,0,0,0,0,0,0,0], [0,1.25313], [np.pi, np.pi], [-10,1], [0,0]]
    boundlist = [[[-200, 200]], [[-10, 10], [-10, 10]], [[-4.5, 4.5], [-4.5, 4.5]], [[-5, 5], [-5, 5]], [[-2, 2], [-2, 2]], [[-10, 10], [-10, 10]], [[-100, 100], [-100, 100], [-100, 100], [-100, 100]], [[-100, 100], [-100, 100], [-100, 100], [-100, 100]], [[-5, 5], [-5, 5], [-5, 5], [-5, 5], [-5, 5], [-5, 5]], [[-5, 5], [-5, 5]], [[-100, 100], [-100, 100]], [[-512, 512], [-512, 512]], [[-1.5, 4], [-3, 4]], [[-5.12, 5.12], [-5.12, 5.12], [-5.12, 5.12], [-5.12, 5.12], [-5.12, 5.12], [-5.12, 5.12], [-5.12, 5.12], [-5.12, 5.12]], [[-100, 100], [-100, 100]], [[-100, 100], [-100, 100]], [[-15, -5], [-3, 3]], [[-10.00, 10.00], [-10.00, 9.00]]]
    # Collect one result row per (function, simulation, method).
    # BUGFIX: DataFrame.append was removed in pandas 2.0 (and was quadratic);
    # accumulate plain rows and build the DataFrame once at the end.
    rows = []
    # Sets up for parallel computing; `pool` is a module-level global used by
    # the *_par algorithms above.
    cores = mp.cpu_count()
    pool = mp.Pool(processes=cores)
    # For each function, run all 4 algorithms `sims` times, timing each run
    for j in range(len(functionlist)):
        f = functionlist[j]
        bounds = boundlist[j]
        trueval = f(pminlist[j])
        for k in range(sims):
            for method, runner in (
                ("PSO", lambda: pso_algo(f, s, bounds, params, maxrounds, tol, nochange)),
                ("PSO_Par", lambda: pso_algo_par(f, s, bounds, params, maxrounds, tol, nochange)),
                ("QPSO", lambda: qpso_algo(f, s, bounds, maxrounds, tol, nochange)),
                ("QPSO_Par", lambda: qpso_algo_par(f, s, bounds, maxrounds, tol, nochange)),
            ):
                start = time.time()
                pmin, fmin, nrounds = runner()
                end = time.time()
                rows.append([k, funcnamelist[j], method, end - start, nrounds,
                             pmin, pminlist[j], fmin, trueval])
    pool.close()
    outdata = pd.DataFrame(rows, columns=["Simulation#", "Function", "Method", "time",
                                          "rounds", "FoundMinLoc", "TrueMinLoc",
                                          "FoundMinVal", "TrueMinVal"])
    outdata.sort_values(["Function", "Method"], inplace = True)
    outdata = outdata.reset_index(drop = True)
outdata.to_csv("OutputData.csv") | anyapriya/PSO | PSOMain.py | PSOMain.py | py | 12,258 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PSOInit.pso_init",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "PSOUpdate.veloc_update",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "PSOInit.qpso_init",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.co... |
5409197519 | from copy import deepcopy
from flask_camp.models import DocumentVersion, User
from flask_camp import current_api
from sqlalchemy.orm.attributes import flag_modified
from c2corg_api.models import ARTICLE_TYPE, XREPORT_TYPE
class DocumentRest:
    """Legacy v6-style document/version operations backed by flask_camp models."""
    # on v6, a document can be created and exists without a version.
    # it's not the case here, when a document is created, a first version must exists
    # on first call of create_new_version() for a given document, we do nothing as
    # the version still exists. On any other call, we actually create new versions
    # NOTE(review): this set is keyed on id(document); CPython can reuse ids
    # after garbage collection, so a stale entry could suppress a new version —
    # verify the lifetime assumptions of `document` objects.
    create_new_version_called = set()
    @staticmethod
    def create_new_version(document, author):
        """Record a new version of `document` attributed to `author`.

        On the first call for a given document, the initial version created
        alongside the document is reused (only the author is stamped onto
        articles/xreports). Subsequent calls clone the latest version's data
        into a fresh DocumentVersion.
        """
        last_version = document._document.last_version
        if id(document) not in DocumentRest.create_new_version_called:
            DocumentRest.create_new_version_called.add(id(document))
            if last_version.data["type"] in (ARTICLE_TYPE, XREPORT_TYPE):
                # Articles/xreports carry authorship inline in the version data
                last_version.data |= {"author": {"user_id": author}}
                flag_modified(last_version, "data")
        else:
            # Clone the latest version's payload into a brand-new version row
            version = DocumentVersion(
                document=document._document,
                user=User.get(id=author),
                comment="no comment",
                data=deepcopy(last_version.data),
            )
            document._document.last_version = version
            document._version = version
            current_api.database.session.add(last_version)
            current_api.database.session.add(version)
    @staticmethod
    def update_version(document, user_id, comment, update_types, changed_langs):
        """Set the comment on the document's latest version.

        NOTE(review): `user_id`, `update_types` and `changed_langs` are
        accepted for legacy signature compatibility but unused here — confirm
        against callers.
        """
        document._document.last_version.comment = comment
| c2corg/c2c_api-poc | c2corg_api/legacy/views/document.py | document.py | py | 1,673 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "c2corg_api.models.ARTICLE_TYPE",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "c2corg_api.models.XREPORT_TYPE",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.attributes.flag_modified",
"line_number": 28,
"usage_type": ... |
26573286125 | import torch
import torchvision
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
import torch.optim as optim
import torchvision.models as models
from torchvision.utils import make_grid
import time
import copy
class Variables:
    """Central configuration constants for data loading and training."""
    # number of subprocesses to use for data loading
    num_workers = 0
    # how many samples per batch to load
    batch_size = 32
    # percentage of training set to use as validation
    valid_size = 0.2
    # train on the first GPU when available, else fall back to CPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # number of training epochs
    num_epochs = 20
    # root folder with one subfolder per flower class (ImageFolder layout)
    data_dir = '../input/flowers-recognition/flowers/flowers'
class FlowerRecognition:
    """Transfer-learning pipeline for 5-class flower classification.

    Wraps data loading (ImageFolder + subset samplers), model construction
    (frozen ResNet-34 backbone with a new head), training with best-weight
    tracking, and final test-set evaluation.
    """

    def make_dataloader(self, data_dir, valid_size, batch_size, num_workers):
        """Build train/validation/test DataLoaders from one image directory.

        80% of the images form the train pool (further split into train and
        validation using `valid_size`); the remaining 20% form the test set.
        Returns (train_loader, valid_loader, test_loader).
        """
        train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                               transforms.RandomResizedCrop(224),
                                               transforms.RandomHorizontalFlip(),
                                               transforms.ToTensor(),
                                               transforms.Normalize([0.485, 0.456, 0.406],
                                                                    [0.229, 0.224, 0.225])])
        test_transforms = transforms.Compose([transforms.Resize(255),
                                              transforms.CenterCrop(224),
                                              transforms.ToTensor(),
                                              transforms.Normalize([0.485, 0.456, 0.406],
                                                                   [0.229, 0.224, 0.225])])
        # BUGFIX: previously a single ImageFolder built with the *training*
        # augmentations was split, so the test set was evaluated under random
        # augmentation and test_transforms was never used. Build two views of
        # the same directory and split them with one shared index permutation.
        train_view = datasets.ImageFolder(data_dir, transform=train_transforms)
        test_view = datasets.ImageFolder(data_dir, transform=test_transforms)
        num_total = len(train_view)
        len_train_set = int(0.8 * num_total)
        perm = torch.randperm(num_total).tolist()
        train_data = torch.utils.data.Subset(train_view, perm[:len_train_set])
        test_data = torch.utils.data.Subset(test_view, perm[len_train_set:])
        # Split the train pool into disjoint train/validation index sets
        num_train = len(train_data)
        indices = list(range(num_train))
        np.random.shuffle(indices)
        split = int(np.floor(valid_size * num_train))
        train_idx, valid_idx = indices[split:], indices[:split]
        # define samplers for obtaining training and validation batches
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        # prepare data loaders (combine dataset and sampler)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                                   num_workers=num_workers, sampler=train_sampler)
        valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                                   num_workers=num_workers, sampler=valid_sampler)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
        return train_loader, valid_loader, test_loader

    def make_model(self, device):
        """Build a ResNet-34 pretrained on ImageNet with a fresh 5-class head.

        The backbone is frozen; only the new fully-connected layer is trained.
        Returns (model, optimizer, lr_scheduler, criterion).
        """
        model_conv = torchvision.models.resnet34(pretrained=True)
        for param in model_conv.parameters():
            param.requires_grad = False  # freeze the backbone
        # Parameters of newly constructed modules have requires_grad=True by default
        num_ftrs = model_conv.fc.in_features
        model_conv.fc = torch.nn.Linear(num_ftrs, 5)
        model_conv = model_conv.to(device)
        # Only the new head's parameters are optimized
        optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
        exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
        criterion = torch.nn.CrossEntropyLoss()
        return model_conv, optimizer_conv, exp_lr_scheduler, criterion

    def train_model(self, model, criterion, optimizer, scheduler, device, num_epochs=25):
        """Train with per-epoch validation, restoring the best-val-acc weights.

        NOTE(review): reads the module-level globals `dataloaders` and
        `dataset_sizes` set up in the __main__ block — consider passing them
        in explicitly.
        """
        since = time.time()
        best_model_wts = copy.deepcopy(model.state_dict())
        best_acc = 0.0
        for epoch in range(num_epochs):
            print('Epoch {}/{}'.format(epoch, num_epochs - 1))
            print('-' * 10)
            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    model.train()  # Set model to training mode
                else:
                    model.eval()  # Set model to evaluate mode
                running_loss = 0.0
                running_corrects = 0
                # Iterate over data.
                for inputs, labels in dataloaders[phase]:
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    # zero the parameter gradients
                    optimizer.zero_grad()
                    # forward; track history only while training
                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = model(inputs)
                        _, preds = torch.max(outputs, 1)
                        loss = criterion(outputs, labels)
                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                    # statistics (loss is sample-weighted)
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)
                if phase == 'train':
                    scheduler.step()
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                    phase, epoch_loss, epoch_acc))
                # deep copy the model when validation accuracy improves
                if phase == 'val' and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
            print()
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))
        # load best model weights
        model.load_state_dict(best_model_wts)
        return model

    def test_model(self, model, device):
        """Report accuracy of `model` on the module-level `test_loader`.

        NOTE(review): `test_loader` is a global created in the __main__ block —
        consider passing it in explicitly.
        """
        model.eval()  # BUGFIX: put batch-norm/dropout into inference mode
        result = 0
        counter = 0
        with torch.no_grad():
            for inputs, labels in test_loader:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # BUGFIX: previously called the global `model_conv`, silently
                # ignoring the `model` argument passed by the caller.
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                result += int(sum(labels == preds))
                counter += len(labels)
        print('Correct_answers - {0}, Total_answers - {1}, Percent_corrects - {2}'.format(result, counter, result / counter))
if __name__ == '__main__':
    flowers = FlowerRecognition()
    # These names are module-level globals that the FlowerRecognition methods
    # (train_model / test_model) read directly.
    train_loader, valid_loader, test_loader = flowers.make_dataloader(Variables.data_dir, Variables.valid_size,
                                                                      Variables.batch_size, Variables.num_workers)
    dataloaders = {'train': train_loader, 'val': valid_loader}
    # BUGFIX: len(DataLoader) is the number of *batches*; loss/accuracy must be
    # normalized by the number of samples, i.e. the sampler length.
    dataset_sizes = {x: len(dataloaders[x].sampler) for x in ['train', 'val']}
    model_conv, optimizer_conv, exp_lr_scheduler, criterion = flowers.make_model(Variables.device)
    model_ft = flowers.train_model(model_conv, criterion, optimizer_conv, exp_lr_scheduler, Variables.device, Variables.num_epochs)
    flowers.test_model(model_ft, Variables.device)
| Constanter/Kaggle | ComputerVision/ImageClassification/FlowersRecognition/PytorchSolution/main.py | main.py | py | 7,551 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torchvision.tran... |
17540865999 | import torch
import scanpy as sc
import numpy as np
import pytorch_lightning as pl
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from torch_geometric.data import HeteroData, InMemoryDataset
from torch_geometric.loader import DataLoader
from data.helpers import downsample_adata, check_normalized
class CellGeneDataset(InMemoryDataset):
    """Bipartite cell-gene graph dataset built from an AnnData (.h5ad) file.

    An edge (cell -> 'expresses' -> gene) exists wherever the expression
    count exceeds `threshold`; edge attributes carry the raw counts, cell
    nodes carry one-hot cell-type features, and gene nodes a constant
    feature. The processed graph is cached to disk by InMemoryDataset.
    """
    def __init__(self, root, h5ad_file, threshold=1, transform=None, pre_transform=None, downsample=False):
        self.h5ad_file = h5ad_file
        self.threshold = threshold
        self.downsample = downsample
        # InMemoryDataset.__init__ runs process() when no processed file exists
        super(CellGeneDataset, self).__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_file_names(self):
        # Single .h5ad source file
        return [self.h5ad_file]
    @property
    def processed_file_names(self):
        return ['data.pt']
    def download(self):
        # Data is expected to exist locally; nothing to download
        pass
    def get_num_cell_types(self):
        """Return the number of distinct cell-type labels in the graph."""
        return len(self.data["cell"].y.unique().tolist())
    def process(self):
        """Build the HeteroData graph from the .h5ad file and cache it."""
        adata = sc.read_h5ad(self.h5ad_file)
        if self.downsample:
            adata = downsample_adata(adata)
        # Check if the data is normalized
        check_normalized(adata)
        # Create a binary adjacency matrix based on the threshold (count must be higher than threshold, for relationship to matter)
        # NOTE(review): .toarray()/.A1 below assume adata.X is a scipy sparse
        # matrix — confirm for the datasets used.
        adj_matrix = (adata.X > self.threshold).astype(int).toarray() # Convert to dense array
        # Define the number of cell and gene nodes
        n_cells, n_genes = adj_matrix.shape
        # Extract the edges from the adjacency matrix
        cell_nodes, gene_nodes = np.where(adj_matrix == 1)
        edges = torch.tensor(np.array([cell_nodes, gene_nodes]), dtype=torch.long)
        # Extract counts for the edges
        edge_counts = adata.X[cell_nodes, gene_nodes].A1
        edge_counts = torch.tensor(edge_counts, dtype=torch.float).view(-1, 1)
        # Create a PyTorch Geometric HeteroData graph
        graph = HeteroData()
        # Encode cell type labels as integers
        le = LabelEncoder()
        cell_type_int = le.fit_transform(adata.obs['cell_ontology_class'].values)
        # One-hot encode the cell type labels
        ohe = OneHotEncoder(sparse_output=False)
        cell_features = ohe.fit_transform(cell_type_int.reshape(-1, 1))
        # Prepare gene features (constant feature vector)
        gene_features = np.ones((n_genes, 1))
        # Set node features for cell and gene nodes
        graph['cell'].x = torch.tensor(cell_features, dtype=torch.float)
        graph['gene'].x = torch.tensor(gene_features, dtype=torch.float)
        # Set edge index and edge features for the ('cell', 'expresses', 'gene') relation
        graph['cell', 'expresses', 'gene'].edge_index = edges
        graph['cell', 'expresses', 'gene'].edge_attr = edge_counts
        # Set cell type labels for the cell nodes
        graph['cell'].y = torch.tensor(cell_type_int, dtype=torch.long)
        # Collate the single graph into InMemoryDataset's storage format
        data, slices = self.collate([graph])
        torch.save((data, slices), self.processed_paths[0])
# def __repr__(self):
# return '{}()'.format(self.__class__.__name__)
# def __len__(self):
# return len(self.data)
# def get(self, idx):
# return self.data[idx]
class CellGeneDataModule(pl.LightningDataModule):
    """Lightning data module wrapping pre-split cell-gene graph datasets."""

    def __init__(self, train_dataset, val_dataset, batch_size=32):
        super().__init__()
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.batch_size = batch_size

    def train_dataloader(self):
        # Only the training split is shuffled
        return DataLoader(self.train_dataset, shuffle=True, batch_size=self.batch_size)

    def val_dataloader(self):
        # Deterministic order for validation
        return DataLoader(self.val_dataset, batch_size=self.batch_size)
# Usage
# dataset = CellGeneDataset(h5ad_file='path/to/your/h5ad/file', threshold=0.5)
# dataset = dataset.shuffle()
# train_dataset = dataset[:int(len(dataset) * 0.8)]
# val_dataset = dataset[int(len(dataset) * 0.8):]
# data_module = CellGeneDataModule(train_dataset, val_dataset)
| srdsam/TranscriptomicsGNN | data/CellGeneDataset.py | CellGeneDataset.py | py | 4,055 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch_geometric.data.InMemoryDataset",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scanpy.read_h5ad",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "data... |
28303693280 | # Write py4DSTEM formatted .h5 files.
#
# See filestructure.txt for a description of the file structure.
import h5py
import numpy as np
from collections import OrderedDict
from os.path import exists
from os import remove as rm
from .read_utils import is_py4DSTEM_file, get_py4DSTEM_topgroups
from .metadata import metadata_to_h5
from ..datastructure import DataCube, DiffractionSlice, RealSlice, CountedDataCube
from ..datastructure import DataObject, PointList, PointListArray, Metadata
from ...process.utils import tqdmnd
from ...version import __version__
def save(filepath, data, overwrite=False, topgroup='4DSTEM_experiment', **kwargs):
    """
    Saves data to a new py4DSTEM .h5 file at filepath.

    Accepts:
        filepath        path where the file will be saved
        data            a single DataObject or a list of DataObjects
        overwrite       boolean controlling behavior when an existing file
                        is found at filepath. If overwrite is True, deletes the
                        existing file and writes a new one. Otherwise,
                        raises an error.
        topgroup        name of the h5 toplevel group containing the py4DSTEM
                        file of interest
        compression     (keyword-only) if True, gzip-compress DataCube datasets
    """
    # Open the file
    if exists(filepath):
        if not overwrite:
            if is_py4DSTEM_file(filepath):
                # If the file exists and is a py4DSTEM .h5, determine
                # if we are writing a new topgroup to an existing .h5
                tgs = get_py4DSTEM_topgroups(filepath)
                if topgroup in tgs:
                    raise Exception("This py4DSTEM .h5 file already contains a topgroup named '{}'. Overwrite the whole file using overwrite=True, or add another topgroup.".format(topgroup))
                else:
                    f = h5py.File(filepath,'r+')
            else:
                raise Exception('A file already exists at path {}. To overwrite the file, use overwrite=True. To append new objects to an existing file, use append() rather than save().'.format(filepath))
        else:
            rm(filepath)
            f = h5py.File(filepath,'w')
    else:
        f = h5py.File(filepath,'w')

    # Construct dataobject list
    if isinstance(data, DataObject):
        dataobject_list = [data]
    elif isinstance(data, list):
        assert all([isinstance(item,DataObject) for item in data]), "If 'data' is a list, all items must be DataObjects."
        dataobject_list = data
    else:
        raise TypeError("Error: unrecognized value for argument data. Must be a DataObject or list of DataObjects")
    assert np.sum([isinstance(dataobject_list[i],Metadata) for i in range(len(dataobject_list))])<2, "Multiple Metadata instances were passed"

    # Handle keyword arguments
    use_compression = kwargs.get('compression',False)

    ##### Make .h5 file #####
    # Make topgroup
    grp_top = f.create_group(topgroup)
    grp_top.attrs.create("emd_group_type",2)
    grp_top.attrs.create("version_major",__version__.split('.')[0])
    grp_top.attrs.create("version_minor",__version__.split('.')[1])
    grp_top.attrs.create("version_release",__version__.split('.')[2])

    # Make data groups, one per supported DataObject type
    group_data = grp_top.create_group("data")
    grp_dc = group_data.create_group("datacubes")
    grp_cdc = group_data.create_group("counted_datacubes")
    grp_ds = group_data.create_group("diffractionslices")
    grp_rs = group_data.create_group("realslices")
    grp_pl = group_data.create_group("pointlists")
    grp_pla = group_data.create_group("pointlistarrays")

    # Make metadata group and identify any metadata, either passed as
    # arguments or attached to DataCubes
    grp_md = grp_top.create_group("metadata")
    inds = np.nonzero([isinstance(dataobject_list[i],Metadata) for i in range(len(dataobject_list))])[0]
    metadata_list = []
    for i in inds[::-1]:
        metadata_list.append(dataobject_list.pop(i))
    for dataobject in dataobject_list:
        if isinstance(dataobject,DataCube):
            if hasattr(dataobject,'metadata'):
                metadata_list.append(dataobject.metadata)
    if len(metadata_list)>1:
        assert(all([id(metadata_list[0])==id(metadata_list[i]) for i in range(1,len(metadata_list))])), 'Error: multiple distinct Metadata objects found'
        md = metadata_list[0]
    elif len(metadata_list)==1:
        md = metadata_list[0]
    else:
        md = None

    # Decide on a name, destination group and writer for each object.
    # lookupTable maps a DataObject type to (default basename, group, writer);
    # `counters` tracks per-type indices used to name unnamed objects.
    lookupTable = {
        'DataCube':        ('datacube_',          grp_dc,  save_datacube_group),
        'CountedDataCube': ('counted_data_cube_', grp_cdc, save_counted_datacube_group),
        'DiffractionSlice':('diffractionslice_',  grp_ds,  save_diffraction_group),
        'RealSlice':       ('realslice_',         grp_rs,  save_real_group),
        'PointList':       ('pointlist_',         grp_pl,  save_pointlist_group),
        'PointListArray':  ('pointlistarray_',    grp_pla, save_pointlistarray_group)
    }
    counters = {dtype: 0 for dtype in lookupTable}
    names,grps,save_fns = [],[],[]
    for dataobject in dataobject_list:
        name = dataobject.name
        dtype = type(dataobject).__name__
        basename,grp,save_fn = lookupTable[dtype]
        if name == '':
            # BUGFIX: the original incremented a local copy of the index, so
            # every unnamed object of a given type was assigned the same name,
            # making the later create_group call fail on the duplicate.
            name = basename+str(counters[dtype])
            counters[dtype] += 1
        names.append(name)
        grps.append(grp)
        save_fns.append(save_fn)

    # Save metadata
    if md is not None:
        metadata_to_h5(filepath,md,overwrite=overwrite,topgroup=topgroup)
    else:
        metadata_to_h5(filepath,Metadata(),overwrite=overwrite,topgroup=topgroup)

    # Save data
    for name,grp,save_fn,do in zip(names,grps,save_fns,dataobject_list):
        new_grp = grp.create_group(name)
        print("Saving {} '{}'...".format(type(do).__name__,name))
        if save_fn is save_datacube_group:
            # BUGFIX: the 'compression' kwarg was parsed but never forwarded
            save_fn(new_grp,do,use_compression=use_compression)
        else:
            save_fn(new_grp,do)

    # Flush and release the file handle (previously left open)
    f.close()
#### Functions for writing dataobjects to .h5 ####
def save_datacube_group(group, datacube, use_compression=False):
    """Write a DataCube into an open h5py group, EMD-style.

    Regular arrays (ndarray or an h5py Dataset) are written directly,
    optionally gzip-compressed with per-pattern chunks; K2DataArray
    datacubes serialize themselves. Four uncalibrated dimension vectors
    (dim1..dim4) are attached alongside the data.
    """
    group.attrs.create("emd_group_type",1)

    if isinstance(datacube.data, (np.ndarray, h5py.Dataset)):
        if use_compression:
            data_datacube = group.create_dataset(
                "data", data=datacube.data,
                chunks=(1,1,datacube.Q_Nx,datacube.Q_Ny), compression='gzip')
        else:
            data_datacube = group.create_dataset("data", data=datacube.data)
    else:
        # handle K2DataArray datacubes
        data_datacube = datacube.data._write_to_hdf5(group)

    assert len(data_datacube.shape)==4, "Shape of datacube is {}".format(len(data_datacube))

    # One uncalibrated pixel axis per dimension: dim1..dim4
    axis_labels = ("R_x", "R_y", "Q_x", "Q_y")
    for axis, (size, label) in enumerate(zip(data_datacube.shape, axis_labels), start=1):
        dim = group.create_dataset("dim{}".format(axis), (size,))
        dim[...] = np.arange(0, size)
        dim.attrs.create("name", np.string_(label))
        dim.attrs.create("units", np.string_("[pix]"))
    # TODO: Calibrate axes, if calibrations are present
def save_counted_datacube_group(group,datacube):
    """Write a CountedDataCube (per-pixel electron events) into `group`.

    Stores one variable-length event record per scan position, the
    coordinate description, the indexing coordinates, and four
    uncalibrated dimension vectors (dim1..dim4).
    """
    if datacube.data._mmap:
        # memory mapped CDC's aren't supported yet
        print('Data not written. Memory mapped CountedDataCube not yet supported.')
        return
    group.attrs.create("emd_group_type",1)
    # if datacube.metadata is not None:
    #     group.attrs.create("metadata",datacube.metadata._ind)
    # else:
    #     group.attrs.create("metadata",-1)
    pointlistarray = datacube.electrons
    try:
        n_coords = len(pointlistarray.dtype.names)
    except:
        # unstructured dtype: treat as a single coordinate
        n_coords = 1
    #coords = np.string_(str([coord for coord in pointlistarray.dtype.names]))
    group.attrs.create("coordinates", np.string_(str(pointlistarray.dtype)))
    group.attrs.create("dimensions", n_coords)
    # One variable-length record of events per scan position
    pointlist_dtype = h5py.special_dtype(vlen=pointlistarray.dtype)
    name = "data"
    dset = group.create_dataset(name,pointlistarray.shape,pointlist_dtype)
    print('Writing CountedDataCube:',flush=True)
    for (i,j) in tqdmnd(dset.shape[0],dset.shape[1]):
        dset[i,j] = pointlistarray.get_pointlist(i,j).data
    # indexing coordinates:
    dt = h5py.special_dtype(vlen=str)
    data_coords = group.create_dataset('index_coords',shape=(datacube.data._mode,),dtype=dt)
    if datacube.data._mode == 1:
        data_coords[0] = datacube.data.index_key
    else:
        data_coords[0] = datacube.data.index_key.ravel()[0]
        data_coords[1] = datacube.data.index_key.ravel()[1]
    # Dimensions
    R_Nx,R_Ny,Q_Nx,Q_Ny = datacube.data.shape
    data_R_Nx = group.create_dataset("dim1",(R_Nx,))
    data_R_Ny = group.create_dataset("dim2",(R_Ny,))
    data_Q_Nx = group.create_dataset("dim3",(Q_Nx,))
    data_Q_Ny = group.create_dataset("dim4",(Q_Ny,))
    # Populate uncalibrated dimensional axes
    data_R_Nx[...] = np.arange(0,R_Nx)
    data_R_Nx.attrs.create("name",np.string_("R_x"))
    data_R_Nx.attrs.create("units",np.string_("[pix]"))
    data_R_Ny[...] = np.arange(0,R_Ny)
    data_R_Ny.attrs.create("name",np.string_("R_y"))
    data_R_Ny.attrs.create("units",np.string_("[pix]"))
    data_Q_Nx[...] = np.arange(0,Q_Nx)
    data_Q_Nx.attrs.create("name",np.string_("Q_x"))
    data_Q_Nx.attrs.create("units",np.string_("[pix]"))
    data_Q_Ny[...] = np.arange(0,Q_Ny)
    data_Q_Ny.attrs.create("name",np.string_("Q_y"))
    data_Q_Ny.attrs.create("units",np.string_("[pix]"))
def save_diffraction_group(group, diffractionslice):
    """Write a DiffractionSlice into an open h5py group.

    Writes the data array, two uncalibrated diffraction-space pixel axes
    (dim1, dim2), and — for 3D stacks — the slice labels as dim3.
    """
    group.attrs.create("depth", diffractionslice.depth)
    group.create_dataset("data", data=diffractionslice.data)

    shape = diffractionslice.data.shape
    assert len(shape)==2 or len(shape)==3

    # Dimensions 1 and 2: uncalibrated pixel axes
    for axis, (size, label) in enumerate(zip(shape[:2], ("Q_x", "Q_y")), start=1):
        dim = group.create_dataset("dim{}".format(axis), (size,))
        dim[...] = np.arange(0, size)
        dim.attrs.create("name", np.string_(label))
        dim.attrs.create("units", np.string_("[pix]"))
    # TODO: Calibrate axes, if calibrations are present

    # Dimension 3: slice labels, when this is a stack of slices
    if len(shape)==3:
        group.create_dataset("dim3", data=np.array(diffractionslice.slicelabels).astype("S64"))
def save_real_group(group, realslice):
    """Write a RealSlice into an open h5py group.

    Writes the data array, two uncalibrated real-space pixel axes
    (dim1, dim2), and — for 3D stacks — the slice labels as dim3.
    """
    group.attrs.create("depth", realslice.depth)
    group.create_dataset("data", data=realslice.data)

    shape = realslice.data.shape
    assert len(shape)==2 or len(shape)==3

    # Dimensions 1 and 2: uncalibrated pixel axes
    for axis, (size, label) in enumerate(zip(shape[:2], ("R_x", "R_y")), start=1):
        dim = group.create_dataset("dim{}".format(axis), (size,))
        dim[...] = np.arange(0, size)
        dim.attrs.create("name", np.string_(label))
        dim.attrs.create("units", np.string_("[pix]"))
    # TODO: Calibrate axes, if calibrations are present

    # Dimension 3: slice labels, when this is a stack of slices
    if len(shape)==3:
        group.create_dataset("dim3", data=np.array(realslice.slicelabels).astype("S64"))
def save_pointlist_group(group, pointlist):
    """Write a PointList into an open h5py group.

    Each named coordinate becomes a subgroup holding its dtype and a
    1D 'data' dataset; the group's attrs record the coordinate names,
    dimensionality and number of points.
    """
    coord_names = pointlist.dtype.names
    group.attrs.create("coordinates", np.string_(str([coord for coord in coord_names])))
    group.attrs.create("dimensions", len(coord_names))
    group.attrs.create("length", pointlist.length)

    # One subgroup per coordinate, carrying its dtype and data column
    for coord in coord_names:
        coord_grp = group.create_group(coord)
        coord_grp.attrs.create("dtype", np.string_(pointlist.dtype[coord]))
        coord_grp.create_dataset("data", data=pointlist.data[coord])
def save_pointlistarray_group(group, pointlistarray):
    """Write a PointListArray into an open h5py group.

    Each (i,j) entry is stored as one variable-length record in a 2D
    dataset whose element dtype is the pointlists' structured dtype.
    """
    #if pointlistarray.metadata is not None:
    #    group.attrs.create("metadata",pointlistarray.metadata._ind)
    #else:
    #    group.attrs.create("metadata",-1)
    try:
        n_coords = len(pointlistarray.dtype.names)
    except:
        # unstructured dtype: treat as a single coordinate
        n_coords = 1
    #coords = np.string_(str([coord for coord in pointlistarray.dtype.names]))
    group.attrs.create("coordinates", np.string_(str(pointlistarray.dtype)))
    group.attrs.create("dimensions", n_coords)
    # One variable-length record per (i,j) scan position
    pointlist_dtype = h5py.special_dtype(vlen=pointlistarray.dtype)
    name = "data"
    dset = group.create_dataset(name,pointlistarray.shape,pointlist_dtype)
    for (i,j) in tqdmnd(dset.shape[0],dset.shape[1]):
        dset[i,j] = pointlistarray.get_pointlist(i,j).data
| magnunor/py4DSTEM | py4DSTEM/io/native/write.py | write.py | py | 13,944 | python | en | code | null | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "read_utils.is_py4DSTEM_file",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "read_utils.get_py4DSTEM_topgroups",
"line_number": 37,
"usage_type": "call"
},
{
"api_... |
71464292195 | import pandas as pd
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objs as go
df = pd.read_csv('data/info_data_job_market_research.csv')
def graph_npuestos_ciudad(df):
    """Grouped bar chart: postings per city for Data Analyst vs Data Scientist.

    Shows the six most frequent cities plus an 'Otras ciudades' bucket,
    sorted by the Data Analyst counts.
    """
    def _counts_by_city(job_type):
        # Top-6 cities by posting count, plus one bucket for everything else
        cities = df[df['Tipo puesto'] == job_type]['Ubicación'].value_counts()
        top = cities[:6]
        # BUGFIX: the original bucketed cities[7:], silently dropping the
        # 7th-ranked city from the chart entirely.
        other = pd.Series([cities[6:].count()], index=['Otras ciudades'])
        # BUGFIX: Series.append was removed in pandas 2.0; use pd.concat.
        combined = pd.concat([top, other])
        combined.rename(job_type, inplace=True)
        return pd.DataFrame(combined)

    result = pd.concat([_counts_by_city('Data Analyst'),
                        _counts_by_city('Data Scientist')], axis=1)
    # BUGFIX: sort_values returns a copy; the original discarded the result
    result = result.sort_values(by=['Data Analyst'], ascending=False)
    labels = list(result.index)

    graph_npuestos_ciudad = go.Figure()
    graph_npuestos_ciudad.add_trace(go.Bar(
        x=labels,
        y=result['Data Analyst'],
        name='Data Analyst',
        marker_color='green'
    ))
    graph_npuestos_ciudad.add_trace(go.Bar(
        x=labels,
        y=result['Data Scientist'],
        name='Data Scientist',
        marker_color='rgba(48,45,255)'
    ))
    graph_npuestos_ciudad.update_layout(barmode='group', xaxis_tickangle=-45, template="plotly_white")
    return graph_npuestos_ciudad
def graph_dias_da(df):
    """Bar chart: number of active Data Analyst postings bucketed by week."""
    active = df[df['Tipo puesto'] == 'Data Analyst']['Días activos'].copy()

    # Anything measured in seconds/minutes/hours/days counts as the current week
    recent_units = ['segundo', 'segundos', 'minuto', 'minutos', 'hora', 'horas', 'día', 'días']
    for unit in recent_units:
        active[active.str.contains(unit)] = '03-08-21'

    # Relative ages ('hace N semanas/meses') mapped to week-start dates,
    # applied in the same order as the original chained replacements
    week_map = [(' 1 semana', '03-01-21'), (' 2 semanas', '02-22-21'),
                (' 3 semanas', '02-15-21'), (' 4 semanas', '02-08-21'),
                (' 1 mes', '02-01-21'), (' 2 meses', '01-25-21'),
                (' 3 meses', '01-18-21'), (' 4 meses', '01-11-21'),
                (' 5 meses', '01-04-21')]
    for pattern, week in week_map:
        active[active.str.contains(pattern)] = week

    counts = active.value_counts()
    counts.drop('no_data', inplace=True)
    plot_df = pd.DataFrame(counts)
    plot_df.reset_index(inplace=True)
    plot_df.rename(columns={'index': 'Fechas'}, inplace=True)
    plot_df['Fechas'] = pd.to_datetime(plot_df['Fechas'])
    plot_df.sort_values('Fechas', ascending=True, inplace=True)

    fig = px.bar(plot_df, x='Fechas', y='Días activos',
                 template="presentation",
                 labels={'Fechas': 'Semanas',
                         'Días activos': 'Nº puestos activos'})
    fig.update_traces(marker=dict(color='green'))
    fig.update_layout(title_text='Nº publicaciones activas D. Analyst por semanas')
    fig['layout']['xaxis']['autorange'] = "reversed"
    return fig
def graph_dias_ds(df):
    """Bar chart of active Data Scientist postings bucketed into weekly dates.

    Mirrors graph_dias_da but for 'Data Scientist' rows.  The original
    filtered on 'Data Analyst' (copy-paste slip), so this chart showed
    analyst data under a scientist title; that filter is fixed here.
    """
    diasact_ds = df[df['Tipo puesto'] == 'Data Scientist']  # was 'Data Analyst': copy-paste bug
    diasact_ds = diasact_ds['Días activos'].copy()
    # Postings younger than a week (seconds/minutes/hours/days old) collapse
    # into the most recent week bucket.
    lista_dias = ['segundo', 'segundos', 'minuto', 'minutos', 'hora', 'horas', 'día', 'días']
    for i in lista_dias:
        diasact_ds[diasact_ds.str.contains(i)] = '03-08-21'
    # "N weeks/months ago" labels map onto the start date of that week.
    diasact_ds[diasact_ds.str.contains(' 1 semana')] = '03-01-21'
    diasact_ds[diasact_ds.str.contains(' 2 semanas')] = '02-22-21'
    diasact_ds[diasact_ds.str.contains(' 3 semanas')] = '02-15-21'
    diasact_ds[diasact_ds.str.contains(' 4 semanas')] = '02-08-21'
    diasact_ds[diasact_ds.str.contains(' 1 mes')] = '02-01-21'
    diasact_ds[diasact_ds.str.contains(' 2 meses')] = '01-25-21'
    diasact_ds[diasact_ds.str.contains(' 3 meses')] = '01-18-21'
    diasact_ds[diasact_ds.str.contains(' 4 meses')] = '01-11-21'
    diasact_ds[diasact_ds.str.contains(' 5 meses')] = '01-04-21'
    diasact_ds = diasact_ds.value_counts()
    diasact_ds.drop('no_data', inplace=True)
    df_diasact_ds = pd.DataFrame(diasact_ds)
    df_diasact_ds.reset_index(inplace=True)
    df_diasact_ds.rename(columns={'index': 'Fechas'}, inplace=True)
    df_diasact_ds['Fechas'] = pd.to_datetime(df_diasact_ds['Fechas'])
    df_diasact_ds.sort_values('Fechas', ascending=True, inplace=True)
    graph_dias_ds = px.bar(df_diasact_ds, x='Fechas', y='Días activos',
                           template="presentation",
                           labels={'Fechas': 'Semanas',
                                   'Días activos': 'Nº puestos activos'})
    graph_dias_ds.update_traces(marker=dict(color='rgba(48,45,255)'))
    graph_dias_ds.update_layout(title_text='Nº publicaciones activas D. Scientist por semanas')
    graph_dias_ds['layout']['xaxis']['autorange'] = "reversed"
    return graph_dias_ds
def graph_nsolicitudes(df):
    """Horizontal grouped bar chart: top-10 companies by total applications
    ('Nº Solicitudes'), one bar series per job type."""
    def _top10(role):
        # Sum applications per company for the given role, keep the 10 largest.
        totals = df[df['Tipo puesto'] == role].groupby(['Nombre empresa'])['Nº Solicitudes'].sum()
        top = pd.DataFrame(totals.sort_values(ascending=False)[:10])
        top.reset_index(inplace=True)
        return top

    top_da = _top10('Data Analyst')
    top_ds = _top10('Data Scientist')
    fig = go.Figure(data=[
        go.Bar(name='Data Analysis', x=top_da['Nº Solicitudes'],
               y=top_da['Nombre empresa'], orientation='h'),
        go.Bar(name='Data Scientist', x=top_ds['Nº Solicitudes'],
               y=top_ds['Nombre empresa'], orientation='h')
    ])
    fig.update_layout(barmode='group', xaxis_tickangle=-45,
                      template="plotly_white"
                      )
    return fig
def graph_nexp(df):
    """Side-by-side donut charts of 'Nivel experiencia' for DA vs DS postings.

    The original built the right-hand (DS) donut from 'Data Analyst' rows
    (copy-paste slip); it now uses 'Data Scientist'.
    """
    nexp_da = df[df['Tipo puesto'] == 'Data Analyst']
    nexp_da = nexp_da['Nivel experiencia'].value_counts()
    nexp_da = pd.DataFrame(nexp_da)
    nexp_da.drop('no_data', inplace=True)
    nexp_ds = df[df['Tipo puesto'] == 'Data Scientist']  # was 'Data Analyst': copy-paste bug
    nexp_ds = nexp_ds['Nivel experiencia'].value_counts()
    nexp_ds = pd.DataFrame(nexp_ds)
    # NOTE(review): unlike the DA side, 'no_data' is not dropped here —
    # confirm whether that asymmetry is intentional.
    labels1 = list(nexp_da.index)
    labels2 = list(nexp_ds.index)
    graph_nexp = make_subplots(rows=1, cols=2, specs=[[{'type': 'domain'}, {'type': 'domain'}]])
    graph_nexp.add_trace(go.Pie(labels=labels1, values=nexp_da['Nivel experiencia']),
                         1, 1)
    graph_nexp.add_trace(go.Pie(labels=labels2, values=nexp_ds['Nivel experiencia']),
                         1, 2)
    graph_nexp.update_traces(hole=.4, hoverinfo="label+percent+name")
    graph_nexp.update_layout(
        annotations=[dict(text='DA', x=0.18, y=0.5, font_size=20, showarrow=False),
                     dict(text='DS', x=0.82, y=0.5, font_size=20, showarrow=False)])
    # Pie subplots have no cartesian y-axis; this only adds an unused yaxis
    # entry to the layout.  Kept so the emitted figure JSON stays identical.
    graph_nexp['layout']['yaxis']['autorange'] = "reversed"
    return graph_nexp
def graph_jornada(df):
    """Side-by-side donut charts of contract type ('Tipo jornada') for
    Data Analyst vs Data Scientist postings."""
    def _counts(role):
        return pd.DataFrame(df[df['Tipo puesto'] == role]['Tipo jornada'].value_counts())

    da_counts = _counts('Data Analyst')
    ds_counts = _counts('Data Scientist')
    ds_counts.drop('no_data', inplace=True)
    fig = make_subplots(rows=1, cols=2, specs=[[{'type': 'domain'}, {'type': 'domain'}]])
    fig.add_trace(go.Pie(labels=list(da_counts.index), values=da_counts['Tipo jornada']),
                  1, 1)
    fig.add_trace(go.Pie(labels=list(ds_counts.index), values=ds_counts['Tipo jornada']),
                  1, 2)
    fig.update_traces(hole=.4, hoverinfo="label+percent+name")
    fig.update_layout(
        title_text="Tipo jornada por puesto",
        annotations=[dict(text='DA', x=0.18, y=0.5, font_size=20, showarrow=False),
                     dict(text='DS', x=0.82, y=0.5, font_size=20, showarrow=False)])
    return fig
| cespanac/ds_job_market_research | graphs.py | graphs.py | py | 8,698 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"li... |
73146219235 | import warnings
import matplotlib.pyplot as plt
from utils.utils import *
from utils.objective import *
if __name__ == '__main__':
    warnings.simplefilter('ignore')
    asset_list = ['VTI', 'VEA', 'VWO', 'IAU', 'DBC', 'XLB', 'XLE', 'XLF', 'XLI', 'XLK', 'XLP', 'XLU', 'XLV', 'XLY']
    price_df = get_price_df(asset_list)
    rebal_dates = get_rebal_dates(price_df)
    price_df = price_df.drop(['year','month'], axis=1)
    # Daily log returns.
    rtn_df = np.log(price_df / price_df.shift(1)).dropna()
    # (weights, plot label) for every allocation strategy, in the original
    # computation/plot order.
    strategies = [
        (weights_by_stock2vec(rtn_df, rebal_dates, inverse_volatility_min_vol_optimize),
         'Stock2Vec Inverse Volatility + Min Vol'),
        (weights_by_stock2vec(rtn_df, rebal_dates, double_inverse_volatility_optimize),
         'Stock2Vec Double Inverse Volatility'),
        (weights_by_optimize(rtn_df, rebal_dates, rp_optimize), 'Risk Parity'),
        (weights_by_optimize(rtn_df, rebal_dates, min_vol_optimize), 'Min Vol'),
        (weights_by_optimize(rtn_df, rebal_dates, max_sharpe_optimize), 'Max Sharpe'),
        (weights_by_vanilla(rtn_df, rebal_dates, 'equal'), 'Equal Weights'),
        (weights_by_vanilla(rtn_df, rebal_dates, 'inverse'), 'Inverse Volatility'),
    ]
    cum_rtns = [(compute_portfolio_cum_rtn(price_df, weights).sum(axis=1), label)
                for weights, label in strategies]
    plt.figure(figsize=(20, 10))
    for cum_rtn, label in cum_rtns:
        cum_rtn.plot(label=label)
    plt.legend()
    plt.savefig('result/cum_return.png')
    plt.show()
| hobinkwak/Stock2Vec-Inverse-Volatility | main.py | main.py | py | 1,977 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "warnings.simplefilter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matpl... |
25156253486 | import re
import os
import copy
from collections import defaultdict
from collections import Counter
def memory_game(starting, n):
    """Return the n-th number spoken (1-indexed) in the AoC 2020 day-15 memory game.

    Rules: after the starting numbers, each turn speaks 0 if the previous
    number was then spoken for the first time, otherwise the gap between its
    two most recent turns.  Tracking only the last turn each value was spoken
    makes this O(n); the original rebuilt a Counter over the whole spoken
    history on every turn, which cannot finish for 30,000,000 turns.

    starting: list of the seed numbers, spoken on turns 1..len(starting).
    n: the turn whose spoken number is returned (n >= 1).
    """
    if n <= len(starting):
        return starting[n - 1]
    # value -> the (1-indexed) turn it was most recently spoken, excluding
    # the number spoken on the latest turn, which is tracked in `current`.
    last_seen = {value: turn for turn, value in enumerate(starting[:-1], start=1)}
    current = starting[-1]
    for turn in range(len(starting), n):
        previous_turn = last_seen.get(current)
        last_seen[current] = turn
        current = 0 if previous_turn is None else turn - previous_turn
    return current


if __name__ == '__main__':
    # The input file is read as in the original script (the starting numbers
    # below are the hard-coded puzzle input); the handle is now closed
    # instead of leaked.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    with open(dir_path + '/inputs/day_15.txt', 'r') as file:
        lines = file.read().strip().split('\n')
    # 30,000,000th spoken number — matches the original spoken[29999999].
    print(memory_game([16, 1, 0, 18, 12, 14, 19], 30000000))
| jonfriskics/advent_of_code2020 | day_15.py | day_15.py | py | 1,195 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
... |
11507239217 | import numpy as np
from scipy import io
import matplotlib.pyplot as plt
import hio
import dynamic_support
import Error_Reduction
# --- Load the measured diffraction data ---
filepath = 'D:\IP_20220311_25a\IP_20220311\patched_rawdata_20220311.mat'
patch_data = io.loadmat(filepath)
measured_intensity = (patch_data['sym_rawdata'])
measured_amp = np.sqrt(measured_intensity)
patched_ROI = patch_data['patched_rawdataROI']==1 # boolean masks round-trip through .mat as 1/0; rebuild a True/False mask here
center = patch_data['sym_center'][0]
# Crop a 1200x1200 window around the symmetry center; NaNs are zeroed.
cut_inensity = measured_intensity[round(center[0])-600:round(center[0])+600,round(center[1])-600:round(center[1])+600]
cut_inensity[np.isnan(cut_inensity)] = 0
cut_amp = measured_amp[round(center[0])-600:round(center[0])+600,round(center[1])-600:round(center[1])+600]
cut_amp[np.isnan(cut_amp)] = 0
cut_ROI = patched_ROI[round(center[0])-600:round(center[0])+600,round(center[1])-600:round(center[1])+600]
# --- Initial HIO state ---
random_Phase = np.random.uniform((-1)*np.pi,np.pi,np.shape(cut_amp)) # one set of random starting phases
G_init = cut_amp* np.exp(1j*random_Phase)
rho_p_init = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(G_init)))
# --- Support from the autocorrelation (inverse FT of the masked intensity) ---
autoCorr_rho_p_init = np.abs(np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(cut_inensity*cut_ROI))))
autoCorr_rho_p_init = autoCorr_rho_p_init.astype(int)
autocorr_support = np.full(np.shape(autoCorr_rho_p_init), False)
autocorr_support[autoCorr_rho_p_init > 0] = True
support = np.copy(autocorr_support)
# --- Warm-up HIO iterations ---
last_rho_init = np.zeros(np.shape(rho_p_init)) # arbitrary seed for the first support constraint
for iter in range(20):
    new_rho_p,new_rho,diff_err = hio.HIO(rho_p_init, last_rho_init, support, 0.9, cut_amp, cut_ROI, "both")
    rho_p_init1 = np.copy(new_rho_p) # this pass's output before the support constraint
    last_rho_init1 = np.copy(new_rho) # previous iterate after the support constraint
    # NOTE(review): the results go into *_init1, which is never read, so this
    # warm-up loop does not feed back into rho_p_init/last_rho_init —
    # confirm whether that is intended.
# Pipeline: HIO > dynamic support > error reduction > HIO
hio_iter = 300
dynamic_support_freq = 6
std_deviation = 0.0255
delta_std_deviation = 0.0005
std_deviation_threshold = 0.01
guassion_threshold = 19/100
dynSupp_hio_iter = 10
error_reduction_freq = 100
ErrReduc_iter = 10
ErrReduc_hio_iter = 20
for n in range(1,hio_iter+1):
    print(n)
    new_rho_p,new_rho,diff_err = hio.HIO(rho_p_init, last_rho_init, support, 0.9, cut_amp, cut_ROI, "both")
    rho_p_init = np.copy(new_rho_p) # this pass's output before the support constraint
    last_rho_init = np.copy(new_rho) # previous iterate after the support constraint
    if n % dynamic_support_freq == 0:
        # Refresh the support periodically, narrowing the Gaussian width each
        # time (down to a floor) — behavior of dynamic_supp not visible here.
        std_deviation = std_deviation-delta_std_deviation
        if std_deviation < std_deviation_threshold:
            std_deviation = np.copy(std_deviation_threshold)
        new_support = dynamic_support.dynamic_supp(new_rho_p, guassion_threshold, std_deviation)
        support = np.copy(new_support)
        for m in range(dynSupp_hio_iter):
            new_rho_p,new_rho,diff_err = hio.HIO(rho_p_init, last_rho_init, support, 0.9, cut_amp, cut_ROI, "both")
            rho_p_init = np.copy(new_rho_p) # this pass's output before the support constraint
            last_rho_init = np.copy(new_rho) # previous iterate after the support constraint
    if n % error_reduction_freq == 0:
        # Interleave a few error-reduction passes, then settle with HIO again.
        for k in range(ErrReduc_iter):
            reduct_rho_p,new_rho,reduct_diff_err = Error_Reduction.errReduction(rho_p_init, support, 0, cut_amp, cut_ROI, "both")
            rho_p_init = np.copy(reduct_rho_p) # this pass's output before the support constraint
            last_rho_init = np.copy(new_rho) # previous iterate after the support constraint
        for p in range(ErrReduc_hio_iter):
            new_rho_p,new_rho,diff_err = hio.HIO(rho_p_init, last_rho_init, support, 0.9, cut_amp, cut_ROI, "both")
            rho_p_init = np.copy(new_rho_p) # this pass's output before the support constraint
            last_rho_init = np.copy(new_rho) # previous iterate after the support constraint
plt.figure()
plt.imshow(support)
plt.title("final_support"+str(std_deviation))
plt.figure()
plt.imshow(np.real(rho_p_init))
plt.title("hio result errF:"+str(diff_err))
# --- Relax to a square support and run a few final HIO passes ---
release_support = np.full(np.shape(cut_inensity), False)
support_center = [round((np.shape(support)[0]+1)/2),round((np.shape(support)[1]+1)/2)]
rang = 150
release_support[support_center[0]-rang:support_center[0]+rang , support_center[1]-rang:support_center[1]+rang] = True
for iter2 in range(5):
    print(iter2)
    new_rho_p,new_rho,diff_err = hio.HIO(rho_p_init, last_rho_init, release_support, 0.9, cut_amp, cut_ROI, "both")
    rho_p_init = np.copy(new_rho_p) # this pass's output before the support constraint
    last_rho_init = np.copy(new_rho) # previous iterate after the support constraint
plt.figure()
plt.imshow(np.real(rho_p_init))
plt.title("release hio result errF:"+str(diff_err))
plt.show()
{
"api_name": "scipy.io.loadmat",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 1... |
411212400 | # pylint: disable=W0621,C0114,C0116,W0212,W0613
import pathlib
import textwrap
import pytest
from dae.annotation.annotation_pipeline import AnnotatorInfo, AttributeInfo
from dae.annotation.annotation_factory import AnnotationConfigParser
from dae.genomic_resources import build_genomic_resource_repository
from dae.genomic_resources.repository import GenomicResourceRepo
from dae.testing import setup_directories, convert_to_tab_separated
@pytest.fixture
def test_grr(tmp_path: pathlib.Path) -> GenomicResourceRepo:
    """Build a throwaway directory-backed GRR with labelled score resources.

    Layout: three top-level scores (score_one/two: position_score with
    labels foo/bar/baz; score_three: np_score) plus nested
    scores/scoredir_{one,two,three}/subscore variants — enough shapes to
    exercise wildcard and label matching in the parser tests.
    """
    root_path = tmp_path
    setup_directories(
        root_path, {
            "grr.yaml": textwrap.dedent(f"""
                id: reannotation_repo
                type: dir
                directory: "{root_path}/grr"
            """),
            "grr": {
                "score_one": {
                    "genomic_resource.yaml": textwrap.dedent("""
                        type: position_score
                        table:
                            filename: data.txt
                        scores:
                        - id: score
                          type: float
                          name: s1
                        meta:
                          labels:
                            foo: ALPHA
                            bar: GAMMA
                            baz: sub_one
                    """),
                    "data.txt": convert_to_tab_separated("""
                        chrom pos_begin s1
                        foo 1 0.1
                    """)
                },
                "score_two": {
                    "genomic_resource.yaml": textwrap.dedent("""
                        type: position_score
                        table:
                            filename: data.txt
                        scores:
                        - id: score
                          type: float
                          name: s2
                        meta:
                          labels:
                            foo: BETA
                            bar: GAMMA
                            baz: sub_two
                    """),
                    "data.txt": convert_to_tab_separated("""
                        chrom pos_begin s2
                        foo 1 0.2
                    """)
                },
                "score_three": {
                    "genomic_resource.yaml": textwrap.dedent("""
                        type: np_score
                        table:
                            filename: data.txt
                            reference:
                              name: ref
                            alternative:
                              name: alt
                        scores:
                        - id: s3
                          name: s3
                          type: float
                          desc: ""
                    """),
                    "data.txt": convert_to_tab_separated("""
                        chrom pos_begin ref alt s3
                        foo 1 A G 0.2
                    """)
                },
                "scores": {
                    "scoredir_one": {
                        "subscore": {
                            "genomic_resource.yaml": textwrap.dedent("""
                                type: position_score
                                table:
                                    filename: data.txt
                                scores:
                                - id: score
                                  type: float
                                  name: s1
                                meta:
                                  labels:
                                    foo: ALPHA
                                    bar: DELTA
                            """),
                            "data.txt": convert_to_tab_separated("""
                                chrom pos_begin s1
                                foo 1 0.1
                            """)
                        },
                    },
                    "scoredir_two": {
                        "subscore": {
                            "genomic_resource.yaml": textwrap.dedent("""
                                type: position_score
                                table:
                                    filename: data.txt
                                scores:
                                - id: score
                                  type: float
                                  name: s2
                                meta:
                                  labels:
                                    foo: BETA
                                    bar: DELTA
                            """),
                            "data.txt": convert_to_tab_separated("""
                                chrom pos_begin s2
                                foo 1 0.2
                            """)
                        },
                    },
                    "scoredir_three": {
                        "subscore": {
                            "genomic_resource.yaml": textwrap.dedent("""
                                type: np_score
                                table:
                                    filename: data.txt
                                    reference:
                                      name: ref
                                    alternative:
                                      name: alt
                                scores:
                                - id: s3
                                  name: s3
                                  type: float
                                  desc: ""
                            """),
                            "data.txt": convert_to_tab_separated("""
                                chrom pos_begin ref alt s3
                                foo 1 A G 0.2
                            """)
                        },
                    },
                },
            },
        }
    )
    # The repository definition itself lives in the grr.yaml written above.
    return build_genomic_resource_repository(file_name=str(
        root_path / "grr.yaml"
    ))
def test_simple_annotator_simple() -> None:
    """A mapping-style annotator entry parses into a single AnnotatorInfo."""
    parsed = AnnotationConfigParser.parse_str("""
    - annotator:
        resource_id: resource
    """)
    expected = [
        AnnotatorInfo("annotator", [], {"resource_id": "resource"},
                      annotator_id="A0")
    ]
    assert parsed == expected
def test_short_annotator_config() -> None:
    """`annotator: resource` shorthand expands to a resource_id parameter."""
    parsed = AnnotationConfigParser.parse_str("""
    - annotator: resource
    """)
    expected = [
        AnnotatorInfo(
            "annotator", [], {"resource_id": "resource"}, annotator_id="A0"
        )
    ]
    assert parsed == expected
def test_minimal_annotator_config() -> None:
    """A bare annotator name parses with no attributes and no parameters."""
    parsed = AnnotationConfigParser.parse_str("""
    - annotator
    """)
    expected = [AnnotatorInfo("annotator", [], {}, annotator_id="A0")]
    assert parsed == expected
def test_annotator_config_with_more_parameters() -> None:
    """Extra mapping keys beyond resource_id land in the parameters dict."""
    parsed = AnnotationConfigParser.parse_str("""
    - annotator:
        resource_id: resource
        key: value
    """)
    expected = [
        AnnotatorInfo(
            "annotator", [], {"resource_id": "resource", "key": "value"},
            annotator_id="A0"
        )
    ]
    assert parsed == expected
def test_annotator_config_with_attributes() -> None:
    """Every supported attribute spelling normalizes into an AttributeInfo."""
    parsed = AnnotationConfigParser.parse_str("""
    - annotator:
        attributes:
        - att1
        - name: att2
        - name: att3
          source: some_score
        - name: att4
          source: some_score
          att_param: foo
        - name: att5
          att_param: raz
          internal: true
        - source: att6
    """)
    expected_attributes = [
        AttributeInfo("att1", "att1", False, {}),
        AttributeInfo("att2", "att2", False, {}),
        AttributeInfo("att3", "some_score", False, {}),
        AttributeInfo("att4", "some_score", False, {"att_param": "foo"}),
        AttributeInfo("att5", "att5", True, {"att_param": "raz"}),
        AttributeInfo("att6", "att6", False, {}),
    ]
    assert parsed == [
        AnnotatorInfo("annotator", expected_attributes, {}, annotator_id="A0")
    ]
def test_annotator_config_with_params_and_attributes() -> None:
    """Parameters and attributes can be combined in one annotator entry."""
    parsed = AnnotationConfigParser.parse_str("""
    - annotator:
        resource_id: resource
        attributes:
        - att1
        - att2
    """)
    expected = [
        AnnotatorInfo("annotator", [
            AttributeInfo("att1", "att1", False, {}),
            AttributeInfo("att2", "att2", False, {}),
        ], {"resource_id": "resource"}, annotator_id="A0")
    ]
    assert parsed == expected
def test_empty_config() -> None:
    """An empty document parses into an empty pipeline."""
    parsed = AnnotationConfigParser.parse_str("")
    assert parsed == []
def test_effect_annotator_extra_attributes() -> None:
    """Effect-annotator attributes keep renames, formats and internal flags."""
    parsed = AnnotationConfigParser.parse_str("""
    - effect_annotator:
        gene_models: hg38/gene_models/refSeq_20200330
        genome: hg38/genomes/GRCh38-hg38
        promoter_len: 100
        attributes:
        - source: genes
          name: list_of_genes
          format: list
          internal: yes
        - source: genes
          format: str
        - source: genes_LGD
        - genes_missense
    """)
    expected_attributes = [
        AttributeInfo("list_of_genes", "genes", True, {"format": "list"}),
        AttributeInfo("genes", "genes", False, {"format": "str"}),
        AttributeInfo("genes_LGD", "genes_LGD", False, {}),
        AttributeInfo("genes_missense", "genes_missense", False, {}),
    ]
    expected_parameters = {
        "gene_models": "hg38/gene_models/refSeq_20200330",
        "genome": "hg38/genomes/GRCh38-hg38",
        "promoter_len": 100,
    }
    assert parsed == [
        AnnotatorInfo("effect_annotator", expected_attributes,
                      expected_parameters, annotator_id="A0")
    ]
def test_wildcard_basic(test_grr: GenomicResourceRepo) -> None:
    """A trailing wildcard expands to one annotator per matching resource."""
    parsed = AnnotationConfigParser.parse_str("""
    - position_score: score_*
    """, grr=test_grr)
    expected = [
        AnnotatorInfo(
            "position_score", [], {"resource_id": rid},
            annotator_id=f"A0_{rid}"
        )
        for rid in ("score_one", "score_two")
    ]
    assert parsed == expected
def test_wildcard_directory(test_grr: GenomicResourceRepo) -> None:
    """A '**' wildcard matches resources nested in subdirectories."""
    parsed = AnnotationConfigParser.parse_str("""
    - position_score: "scores/**/subscore"
    """, grr=test_grr)
    expected = [
        AnnotatorInfo(
            "position_score", [], {"resource_id": rid},
            annotator_id=f"A0_{rid}"
        )
        for rid in ("scores/scoredir_one/subscore",
                    "scores/scoredir_two/subscore")
    ]
    assert parsed == expected
def test_wildcard_label_single(test_grr: GenomicResourceRepo) -> None:
    """A single label filter restricts the wildcard expansion."""
    parsed = AnnotationConfigParser.parse_str("""
    - position_score: score_*[foo=ALPHA]
    """, grr=test_grr)
    expected = [
        AnnotatorInfo(
            "position_score", [], {"resource_id": "score_one"},
            annotator_id="A0_score_one"
        )
    ]
    assert parsed == expected
def test_wildcard_label_and_dir(test_grr: GenomicResourceRepo) -> None:
    """A label filter on a bare '*' matches both flat and nested resources."""
    parsed = AnnotationConfigParser.parse_str("""
    - position_score: "*[foo=ALPHA]"
    """, grr=test_grr)
    expected = [
        AnnotatorInfo(
            "position_score", [], {"resource_id": rid},
            annotator_id=f"A0_{rid}"
        )
        for rid in ("score_one", "scores/scoredir_one/subscore")
    ]
    assert parsed == expected
def test_wildcard_label_multiple(test_grr: GenomicResourceRepo) -> None:
    """Multiple label conditions joined with 'and' must all hold."""
    parsed = AnnotationConfigParser.parse_str("""
    - position_score: "*[foo=ALPHA and bar=GAMMA]"
    """, grr=test_grr)
    expected = [
        AnnotatorInfo(
            "position_score", [], {"resource_id": "score_one"},
            annotator_id="A0_score_one"
        ),
    ]
    assert parsed == expected
def test_wildcard_label_substring(test_grr: GenomicResourceRepo) -> None:
    """Label values may themselves be wildcard patterns."""
    parsed = AnnotationConfigParser.parse_str("""
    - position_score: "*[baz=sub_*]"
    """, grr=test_grr)
    expected = [
        AnnotatorInfo(
            "position_score", [], {"resource_id": rid},
            annotator_id=f"A0_{rid}"
        )
        for rid in ("score_one", "score_two")
    ]
    assert parsed == expected
| iossifovlab/gpf | dae/dae/annotation/tests/test_annotation_pipeline_config.py | test_annotation_pipeline_config.py | py | 13,205 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "dae.testing.setup_directories",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "textw... |
24901354288 | from hvac import Client
client = Client(
url='Your-Cluster-URL',
namespace='admin',
verify=False
)
client.auth.approle.login(
role_id='Your-Role-Id',
secret_id='Your-Secret-Id'
)
secret = client.read('Your-Secret-Path')
print(secret)
| AbdulManan10/hvac | main.py | main.py | py | 260 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "hvac.Client",
"line_number": 3,
"usage_type": "call"
}
] |
21979960588 | from flask import Flask
from flask import request
from flask import jsonify
from flask.json import dumps
from flask.wrappers import Response
from flask_pymongo import MongoClient
from datetime import datetime
import os
#import sys
#from pymongo import collection, mongo_client
from flask_cors import CORS
# Connection settings come from the environment so no credentials live in
# the source; all three raise KeyError at import time if unset.
CON_STR=f"{os.environ['CON_STR']}"
MONGO_U=f"{os.environ['MONGO_U']}"
MONGO_P=f"{os.environ['MONGO_P']}"
app = Flask(__name__)
app.config["MONGO_URI"] = CON_STR
# Allow cross-origin requests from the frontend.
CORS(app)
# 'db' is the Mongo hostname — presumably the docker-compose service name;
# verify against the deployment config.
client = MongoClient('db',
                     username=MONGO_U,
                     password=MONGO_P,
                     authSource='admin',
                     authMechanism='SCRAM-SHA-256')
mongoDB= client["myDatabase"]
# Collections: visitor comments and unique-visitor records.
comentarys=mongoDB["comentarys"]
views=mongoDB["views"]
@app.route('/product/<name>')
def get_product(name):
    """Echo the product name captured from the URL path segment."""
    return f"The product is {name}"
@app.route('/comentarys')
def getComentarys():
    """Return every stored comment as a JSON list of plain dicts
    (ObjectId is stringified so the documents are serializable)."""
    response = [
        {
            "id": str(doc["_id"]),
            "name": doc['name'],
            "email": doc['email'],
            "date": doc['date'],
            "commentary": doc['commentary'],
        }
        for doc in comentarys.find()
    ]
    return jsonify(response)
@app.route('/comentary/<id>')
def getComentary(id):
    # Fetch a single comment by its _id; returns JSON null when not found.
    # NOTE(review): `id` arrives as a str, but documents inserted by
    # setComentary get an ObjectId `_id`, so this lookup presumably never
    # matches — verify whether it should convert with bson.ObjectId(id).
    result=comentarys.find_one({"_id":id})
    return jsonify(result)
@app.route('/comentary', methods=['POST'])
def setComentary():
    """Store a new comment from the JSON request body.

    Expects "name", "email" and "commentary" keys; the server stamps the
    submission time.  Returns JSON true on success.
    """
    dateTimeObj = datetime.now()
    timestampStr = dateTimeObj.strftime("%d-%b-%Y (%H:%M:%S.%f)")
    content = request.json
    user = {
        "name": content["name"],
        "email": content["email"],
        "date": timestampStr,
        "commentary": content["commentary"]
    }
    # Collection.insert() was removed in PyMongo 4; insert_one() is the
    # supported equivalent (the return value was unused anyway).
    comentarys.insert_one(user)
    return jsonify(True)
@app.route('/views')
def getviews():
    """Return the total number of recorded unique-visitor documents."""
    # Collection.count() was removed in PyMongo 4; count_documents({}) is
    # the supported way to count all documents in a collection.
    result = views.count_documents({})
    return jsonify(result)
@app.route('/view', methods=['POST'])
def setview():
    """Record the caller's IP once and echo it back as JSON.

    Bug fix: the original returned jsonify(resp["ip"]) unconditionally,
    which raised TypeError for a first-time visitor (resp is None right
    after the insert).  A first visit now answers with the submitted IP.
    """
    ip = request.headers["X-Real-IP"]
    _view = {
        "ip": ip,
    }
    resp = views.find_one(_view)
    if resp is None:
        # First visit from this IP: store it.  insert() was removed in
        # PyMongo 4, so use insert_one().
        views.insert_one(_view)
        return jsonify(ip)
    return jsonify(resp["ip"])
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"li... |
24421756975 | from django.urls import path
from .views import*
from .views import delete_user, accept_user,pause_client
urlpatterns = [
    # Admin dashboard and authentication.
    path('controle/', controle, name="control"),
    path('log', login_admin, name="log"),
    path('index2', index2_admin, name="index2_admin"),
    # Client management (pause requests, registrations).
    path('pause', pause_client, name="pause_client"),
    path('inscrit', inscrit_client, name="inscrit_client"),
    #path('users/', user_list, name='user_list'),
    path('users/delete/<int:user_id>/', delete_user, name='delete_user'),
    path('users/accept/<int:user_id>/', accept_user, name='accept_user'),
    path('pause_details/<int:id>/', pause_details, name="pause_details"),
    path('inscrit_details/<str:id>/', inscrit_details, name="inscrit_details"),
    path('accepter/<int:id>/',accepter, name="accepter"),
    path('refuser/<int:id>/',refuser,name="refuser"),
    # Accountant login and logout.
    path('comptable',comptable_log, name="comptable_log"),
    path('logouts', mylogout, name='logouts'),
    # Agent workflows (registration, client lists, assignments).
    path('agent', myagent, name='agents'),
    path('agent_register', agent_register, name='agents_register'),
    path('suivant', agent_suivant, name='suiv'),
    path('liste_client', liste, name="list_clients"),
    path('agent_details/<str:id>/', agent_detail, name="agent_details"),
    path('news/<str:id>/',new_assign,name="new_assign"),
    # Finance-agent authentication and identification.
    path('finance', finance_agent, name="finance_agent"),
    path('finance_login', login_agent, name= "login_agent" ),
    path('agent_connect', agent_connection, name="agent_connection" ),
    path('agent_logout', agentlogout, name="agentlogout"),
    path('agent_identifs', agent_identification, name="agent_identification"),
    path('identificationreussi/<str:id>/', identif_reussi, name="identif_reussi")
]
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
73111182754 | # -*- coding: utf-8 -*-
import scrapy
import time
import re
import datetime
from scrapy.http import Request
from loguru import logger
from SafetyInformation.items import SafeInfoItem
from SafetyInformation.settings import SLEEP_TIME, TOTAL_PAGES
class SecpulseSpider(scrapy.Spider):
    """Scrapy spider for the secpulse.com news archive.

    Walks the paginated listing (up to TOTAL_PAGES from settings), yielding
    one SafeInfoItem per article with an absolute publication date derived
    from the relative "N weeks/months ago" labels on the page.
    """
    name = 'secpulse'
    allowed_domains = ['secpulse.com']
    start_urls = ['https://www.secpulse.com/archives/category/news']
    # Current listing page; incremented after each parsed page.
    page = 1
    headers = {
        'Host': 'www.secpulse.com',
        'Referer': 'https://www.secpulse.com/archives/category/news',
    }
    source = 'https://www.secpulse.com'
    def parse(self, response):
        """Extract every article on one listing page, then queue the next page."""
        logger.info("==========当前正在抓取第{}页==========".format(self.page))
        item = SafeInfoItem()
        info_list = response.xpath('//div[@id="main"]/div[2]/div[2]/ul/li')
        for info in info_list:
            title = info.xpath('./div/div[@class="slide_text fl"]/p/a/text()').extract_first('')
            link = info.xpath('./div/div[@class="slide_text fl"]/p/a/@href').extract_first('')
            intro = info.xpath('./div/div[@class="slide_text fl"]/p[2]/text()').extract_first('').strip()
            date = info.xpath('./div/div[@class="slide_text fl"]/div[@class="top"]/div[1]/a[contains(@class,"time")]/text()').extract_first('').strip()
            # NOTE(review): \d{0,2} matches the empty string when `date` does
            # not start with a digit, making days='' and int(days) raise —
            # presumably every relative label here starts with a digit; verify.
            days = re.findall(r'\d{0,2}', date, re.S)[0]
            delta = datetime.timedelta(days=int(days))
            current_date = datetime.datetime.today().date()
            if len(date.split('-')) == 3:
                # Already an absolute 'Y-m-d ...' date: keep the date part.
                date = date.split(' ')[0]
            elif len(date.split('-')) == 1:
                # Relative label: subtract the leading day count from today.
                date = (current_date - delta).strftime('%Y-%m-%d')
            author = info.xpath('./div/div[@class="slide_text fl"]/div[@class="top"]/div[1]/a[2]/span/text()').extract_first('').strip()
            source = self.source
            info_type = 'news'
            item['title'] = title
            item['link'] = link
            item['intro'] = intro
            item['date'] = date
            item['author'] = author
            item['source'] = source
            item['type'] = info_type
            logger.info(item)
            yield item
        # Throttle between listing pages, then follow the next one.
        time.sleep(SLEEP_TIME)
        self.page += 1
        next_url = self.start_urls[0] + '/page/{}'.format(self.page)
        if self.page <= TOTAL_PAGES:
            yield Request(url=next_url, headers=self.headers, callback=self.parse)
| Silentsoul04/SafetyInformation | SafetyInformation/spiders/secpulse.py | secpulse.py | py | 2,422 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "loguru.logger.info",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "SafetyInformation... |
34144549460 | import numpy as np
from typing import Union
from sklearn.metrics import mutual_info_score
def mutual_info(
dataX: Union[str, bytes, list, tuple],
dataY: Union[str, bytes, list, tuple],
base=2,
):
"""
Return the mutual information of given dataX and dataY.
Parameters
----------
dataX: str, bytes, or iterable
dataY: str, bytes, or iterable
base: int
The logarithmic base to use, default is 2.
basis_dict: list
The basis of the information, default is None. If None, the basis will be the unique elements in data.
Returns
-------
mi: float
The mutual information of given dataX and dataY.
"""
x = []
if type(dataX) is bytes:
x = list(dataX)
elif type(dataX) is str:
for char in dataX:
x.append(char)
else:
x = dataX
y = []
if type(dataY) is bytes:
y = list(dataY)
elif type(dataY) is str:
for char in dataY:
y.append(char)
else:
y = dataY
if len(x) != len(y):
print("Error: The length of dataX and dataY should be equal.")
raise ValueError
mi = mutual_info_score(x, y) / np.log(base)
return mi
if __name__ == "__main__":
    # Smoke test: two equal-length byte strings with partially shared symbols.
    dataX = b"hello world!"
    dataY = b"wheel rolled"
    print(mutual_info(dataX, dataY))
| Jim137/Entropy | src/mutual_info.py | mutual_info.py | py | 1,365 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.mutual_info_score",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.log",... |
10236991454 | from __future__ import division
import argparse
import logging
import os
import random
import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch import FloatTensor
from torch.autograd import Variable, grad
from torch.utils.data import DataLoader
import torchvision.utils as vutil
from data import TagImageDataset
from model import Generator, Discriminator
# Command-line options for GAN training.
# NOTE(review): the description says 'PyTorch SRResNet' but the script
# trains a Generator/Discriminator pair — looks copy-pasted; verify.
parser = argparse.ArgumentParser(description='PyTorch SRResNet')
parser.add_argument('--cuda', action='store_true', help='use cuda')
parser.add_argument('--gpus', default='0', type=str, help='gpu ids (default: 0)')
parser.add_argument('--tag', type=str, required=True, help='the path to tags')
parser.add_argument('--image', type=str, required=True, help='the path to images')
parser.add_argument('--threads', type=int, default=8, help='number of threads for data loader to use')
parser.add_argument('--batch', type=int, default=64, help='training batch size')
parser.add_argument('--image_size', type=int, default=128, help='the height and width of images')
parser.add_argument('--noise_size', type=int, default=128, help='the length of noise vector')
parser.add_argument('--features', type=int, default=30, help='the number of features')
parser.add_argument('--lr', type=float, default=2e-4, help='learning rate')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam')
parser.add_argument('--step', type=int, default=10, help='decay the learning rate to the initial LR every n epoches')
parser.add_argument('--resume', default='', type=str, help='path to checkpoint (default: none)')
parser.add_argument('--pre_trained', default='', type=str, help='path to pretrained model (default: none)')
parser.add_argument('--epoch', type=int, default=100, help='number of epoches to train for')
parser.add_argument('--start_epoch', default=1, type=int, help='manual epoch number (useful on restarts)')
parser.add_argument('--check_step', default=10, type=int, help='save checkpoint after so many epoch')
opt = parser.parse_args()
# Log DEBUG and above to both train.log and the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logfile = logging.FileHandler('train.log')
console = logging.StreamHandler()
logger.addHandler(logfile)
logger.addHandler(console)
def main():
    """Build data loader, models and optimizers, optionally restore state, then train.

    Relies on the module-level ``opt`` (parsed CLI flags) and ``logger``.
    """
    if opt.cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus
        if not torch.cuda.is_available():
            raise Exception('No GPU found or Wrong gpu id, please run without --cuda')
    logger.info('[INFO] Loading datasets')
    train_set = TagImageDataset(tag_path=opt.tag, img_path=opt.image)
    # drop_last=True keeps every batch exactly opt.batch in size; the
    # fixed-size reusable buffers in train() depend on this.
    train_loader = DataLoader(train_set, num_workers=opt.threads, batch_size=opt.batch, shuffle=True, drop_last=True)
    logger.info('[INFO] Building model')
    G = Generator(opt.features)
    D = Discriminator(opt.features)
    criterion = nn.BCEWithLogitsLoss()
    logger.info('[INFO] Setting Optimizer')
    G_optim = optim.Adam(G.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
    D_optim = optim.Adam(D.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
    logger.info('[INFO] Setting GPU')
    if opt.cuda:
        G = G.cuda()
        D = D.cuda()
        criterion = criterion.cuda()
    if opt.resume:
        # Checkpoints produced by save_stage() store plain state_dicts.
        if os.path.isfile(opt.resume):
            logger.info('[LOAD] Loading checkpoint {}'.format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint['epoch'] + 1
            G.load_state_dict(checkpoint['g'])
            D.load_state_dict(checkpoint['d'])
            G_optim.load_state_dict(checkpoint['g_optim'])
            D_optim.load_state_dict(checkpoint['d_optim'])
        else:
            logger.warning('[ERROR] No checkpoint found at {}'.format(opt.resume))
    if opt.pre_trained:
        # NOTE(review): unlike the checkpoint branch, this expects the file to
        # contain whole module/optimizer objects (state_dict() is called on
        # each entry) — confirm the pre-trained file format matches.
        if os.path.isfile(opt.pre_trained):
            logger.info('[LOAD] Loading model {}'.format(opt.pre_trained))
            weights = torch.load(opt.pre_trained)
            G.load_state_dict(weights['g'].state_dict())
            D.load_state_dict(weights['d'].state_dict())
            G_optim.load_state_dict(weights['g_optim'].state_dict())
            D_optim.load_state_dict(weights['d_optim'].state_dict())
        else:
            logger.warning('[ERROR] No model found at {}'.format(opt.pre_trained))
    logger.info('[INFO] Start Training')
    train(train_loader, G, D, G_optim, D_optim, criterion)
def adjust_learning_rate(optimizer, epoch):
    """Step-decay the optimizer's LR: multiply opt.lr by 0.1 every opt.step epochs."""
    decayed_lr = opt.lr * (0.1 ** (epoch // opt.step))
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
def fake_features(tensor):
    """Fill ``tensor`` in place with one random, well-formed tag vector.

    ``tensor`` must be 2-D with 30 feature columns: 13 hair colors (0-12),
    4 hair styles (13-16), 11 eye colors (17-27), smile (28) and blush (29).
    One option per category is set to 1 for every row; the two boolean
    features are each switched on with probability 1/2.

    Bug fix: the original wrote ``tensor.data[hair_color] = 1``, which sets an
    entire ROW (one sample's 30 features) to 1 instead of one feature column.
    Generalization: sizes are derived from the tensor itself instead of the
    module-level ``opt``, so any batch size works.
    """
    assert tensor.dim() == 2 and tensor.size(1) == 30
    tensor.data.fill_(0)
    hair_color = random.randint(0, 12)
    hair_style = random.randint(13, 16)
    eyes_color = random.randint(17, 27)
    is_smile = random.choice((False, True))
    is_blush = random.choice((False, True))
    # Index feature COLUMNS; the layout is (batch, n_features).
    tensor.data[:, hair_color] = 1
    tensor.data[:, hair_style] = 1
    tensor.data[:, eyes_color] = 1
    if is_smile:
        tensor.data[:, -2] = 1
    if is_blush:
        tensor.data[:, -1] = 1
# Weight of the real/fake adversarial loss relative to the tag loss.
# NOTE(review): this reuses opt.features (default 30) as the weight —
# presumably intentional, but confirm it is not a copy/paste slip.
lambda_adv = opt.features
# Weight of the gradient-penalty term.
lambda_gp = 0.5
def train(train_loader, gen, dis, g_optim, d_optim, criterion):
    """Run the full GAN training loop.

    Each iteration trains the discriminator on a real batch, a generated
    batch and a gradient-penalty term, then trains the generator on a fresh
    generated batch.  Losses combine a real/fake label term (weighted by
    lambda_adv) and a per-tag BCE term.  State is persisted once per epoch
    via save_stage().
    """
    start_time = time.time()
    # Pre-allocated buffers reused every iteration; sizes are fixed to
    # opt.batch (the loader uses drop_last=True to guarantee this).
    # NOTE(review): FloatTensor must be imported earlier in the file
    # (e.g. from torch) — not visible in this chunk.
    X = Variable(FloatTensor(opt.batch, 3, opt.image_size, opt.image_size))
    z = Variable(FloatTensor(opt.batch, opt.noise_size))
    tag_real = Variable(FloatTensor(opt.batch, opt.features))
    tag_fake = Variable(FloatTensor(opt.batch, opt.features))
    # y_real = Variable(torch.ones(opt.batch))
    # y_fake = Variable(torch.zeros(opt.batch))
    labels = Variable(FloatTensor(opt.batch))
    if opt.cuda:
        X, z = X.cuda(), z.cuda()
        tag_real, tag_fake = tag_real.cuda(), tag_fake.cuda()
        # y_real, y_fake = y_real.cuda(), y_fake.cuda()
        labels = labels.cuda()
    for epoch in range(opt.start_epoch, opt.epoch+1):
        adjust_learning_rate(g_optim, epoch)
        adjust_learning_rate(d_optim, epoch)
        for iteration, (tag, img) in enumerate(train_loader, start=1):
            # Copy the batch into the reusable buffers in place.
            X.data.copy_(img)
            tag_real.data.copy_(tag)
            ##########################
            # Training discriminator #
            ##########################
            dis.zero_grad()
            # trained with real image: target label 1 plus the real tags.
            pred_real, pred_real_t = dis(X)
            labels.data.fill_(1.0)
            d_real_label_loss = criterion(pred_real, labels)
            d_real_tag_loss = criterion(pred_real_t, tag_real)
            d_real_loss = lambda_adv * d_real_label_loss + d_real_tag_loss
            d_real_loss.backward()
            # trained with fake image: target label 0 plus the sampled tags.
            z.data.normal_(0, 1)
            fake_features(tag_fake)
            vec = torch.cat((z, tag_fake), 1)
            fake_X = gen(vec)
            # NOTE(review): fake_X is not detached, so this backward also
            # accumulates gradients in gen; they are discarded by
            # gen.zero_grad() below — wasteful but not incorrect.
            pred_fake, pred_fake_t = dis(fake_X)
            labels.data.fill_(0.0)
            d_fake_label_loss = criterion(pred_fake, labels)
            d_fake_tag_loss = criterion(pred_fake_t, tag_fake)
            d_fake_loss = lambda_adv * d_fake_label_loss + d_fake_tag_loss
            d_fake_loss.backward()
            # gradient penalty on points perturbed around the real data
            # (DRAGAN-style perturbation — verify against the intended paper).
            shape = [opt.batch] + [1 for _ in range(X.dim()-1)]
            alpha = torch.rand(*shape)
            beta = torch.rand(X.size())
            if opt.cuda:
                alpha, beta = alpha.cuda(), beta.cuda()
            x_hat = Variable(alpha*X.data + (1-alpha)*(X.data+0.5*X.data.std()*beta), requires_grad=True)
            pred_hat, _ = dis(x_hat)
            grad_out = torch.ones(pred_hat.size())
            if opt.cuda:
                grad_out = grad_out.cuda()
            gradients = grad(outputs=pred_hat, inputs=x_hat, grad_outputs=grad_out,
                             create_graph=True, retain_graph=True, only_inputs=True)[0]
            gradient_penalty = lambda_gp * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
            gradient_penalty.backward()
            # d_loss is only used for logging; each term was back-propagated above.
            d_loss = d_real_loss + d_fake_loss + gradient_penalty
            d_optim.step()
            ######################
            # Training generator #
            ######################
            gen.zero_grad()
            z.data.normal_(0, 1)
            fake_features(tag_fake)
            vec = torch.cat((z, tag_fake), 1)
            gen_X = gen(vec)
            pred_gen, pred_gen_t = dis(gen_X)
            # Generator wants its samples labelled as real (1.0).
            labels.data.fill_(1.0)
            g_label_loss = criterion(pred_gen, labels)
            g_tag_loss = criterion(pred_gen_t, tag_fake)
            g_loss = lambda_adv * g_label_loss + g_tag_loss
            g_loss.backward()
            g_optim.step()
            elapsed = time.time() - start_time
            # NOTE(review): `.data[0]` is pre-0.4 PyTorch scalar indexing;
            # on modern PyTorch this would need `.item()`.
            logger.info('[%03d/%d] [%03d/%d] elapsd: %-10.4f loss_d: %10.4f loss_g: %10.4f' % (epoch, opt.epoch, iteration, len(train_loader), elapsed, d_loss.data[0], g_loss.data[0]))
        # Persist models/optimizers and dump a sample image grid once per epoch.
        save_stage(gen, dis, g_optim, d_optim, epoch)
def save_stage(gen, dis, gen_optim, dis_optim, epoch):
    """Save a training checkpoint (every opt.check_step epochs) and write a
    grid of generated sample images (every epoch).

    Checkpoints store plain state_dicts keyed 'epoch'/'g'/'d'/'g_optim'/'d_optim',
    matching what main()'s --resume branch expects.
    """
    if not os.path.exists('checkpoint'):
        os.makedirs('checkpoint')
    if not os.path.exists('samples'):
        os.makedirs('samples')
    checkpoint_out_path = os.path.join('checkpoint', 'checkpoint_epoch_{:03d}.pth'.format(epoch))
    state = {
        'epoch': epoch,
        'g': gen.state_dict(),
        'd': dis.state_dict(),
        'g_optim': gen_optim.state_dict(),
        'd_optim': dis_optim.state_dict(),
    }
    # Checkpoint only every opt.check_step epochs; samples below run every call.
    if epoch % opt.check_step == 0:
        torch.save(state, checkpoint_out_path)
        logger.info('[DUMP] checkpoint in epoch {} saved'.format(epoch))
    samples_out_path = os.path.join('samples', 'samples_epoch_{:03d}.png'.format(epoch))
    # Fresh noise + random tag vector for the sample grid.
    z = Variable(FloatTensor(opt.batch, opt.noise_size))
    tags = Variable(FloatTensor(opt.batch, opt.features))
    z.data.normal_(0, 1)
    fake_features(tags)
    if opt.cuda:
        z, tags = z.cuda(), tags.cuda()
    sample = gen(torch.cat((z, tags), 1))
    vutil.save_image(sample.data.view(opt.batch, 3, opt.image_size, opt.image_size), samples_out_path)
    logger.info('[DEMO] samples in epoch {} saved'.format(epoch))
# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| CirQ/AnimeNet | main.py | main.py | py | 10,620 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "logging.... |
27933091980 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn.model_selection import train_test_split
import numpy as np
import tensorflow as tf
import pickle
import scipy.ndimage as img
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
    """tf.estimator model_fn for an AlexNet-style CNN over 64x64 grayscale images.

    Args:
        features: dict with key "x" holding the flattened image batch.
        labels:   integer class ids (82 classes), unused in PREDICT mode.
        mode:     a tf.estimator.ModeKeys value selecting train/eval/predict.

    Returns:
        A tf.estimator.EstimatorSpec appropriate for `mode`.
    """
    # Input Layer
    # Reshape our images to NHWC with a single channel.
    input_layer = tf.reshape(features["x"], [-1, 64, 64, 1])
    # Batch normalization has proven to be effective at reducing our training
    # time requirements and accuracy on the validation set.
    batchN1 = tf.layers.batch_normalization(
        inputs = input_layer)
    # Convolutional Layer #1
    # Perform convolution with ReLU activation,
    # and don't normalize because we have back to back convolutions.
    conv1 = tf.layers.conv2d(
        inputs=batchN1,
        filters=64,
        kernel_size=[9, 9],
        padding="same",
        strides = 2,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.05),
        bias_initializer=tf.random_uniform_initializer(minval=0.0, maxval=0.05),
        activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[3, 3], strides=2)
    normd1 = tf.nn.local_response_normalization(
        input = pool1,
        name="norm1")
    # Convolutional Layer #2
    # Start with batch normalization, perform convolution with ReLU activation,
    # and then pool the results before normalizing them
    batchN2 = tf.layers.batch_normalization(
        inputs = normd1)
    conv2 = tf.layers.conv2d(
        inputs=batchN2,
        filters=256,
        kernel_size=[5, 5],
        padding="same",
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.05),
        bias_initializer=tf.random_uniform_initializer(minval=0.0, maxval=0.05),
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[3, 3], strides=1)
    normd2 = tf.nn.local_response_normalization(
        input = pool2,
        name="normd2")
    # Convolutional Layer #3
    # Start with batch normalization, perform convolution with ReLU activation,
    # and then pool the results before normalizing them
    batchN3 = tf.layers.batch_normalization(
        inputs = normd2)
    conv3 = tf.layers.conv2d(
        inputs=batchN3,
        filters=384,
        kernel_size=[3, 3],
        padding="same",
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.05),
        bias_initializer=tf.random_uniform_initializer(minval=0.0, maxval=0.05),
        activation=tf.nn.relu)
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[3, 3], strides=1)
    normd3 = tf.nn.local_response_normalization(
        input = pool3,
        name="normd3")
    # Convolutional Layer #4
    # Start with batch normalization, perform convolution with ReLU activation,
    # and then pool the results before normalizing them
    batchN4 = tf.layers.batch_normalization(
        inputs = normd3)
    conv4 = tf.layers.conv2d(
        inputs=batchN4,
        filters=384,
        kernel_size=[3, 3],
        padding="same",
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.05),
        bias_initializer=tf.random_uniform_initializer(minval=0.0, maxval=0.05),
        activation=tf.nn.relu)
    pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[3, 3], strides=2)
    normd4 = tf.nn.local_response_normalization(
        input = pool4,
        name="normd4")
    # Convolutional Layer #5 (not counting the inception module layers)
    # Start with batch normalization, perform convolution with ReLU activation,
    # and do not use local response normalization. Only pool the results
    batchN5 = tf.layers.batch_normalization(
        inputs = normd4)
    conv5 = tf.layers.conv2d(
        inputs=batchN5,
        filters=256,
        kernel_size=[3, 3],
        padding="same",
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.05),
        bias_initializer=tf.random_uniform_initializer(minval=0.0, maxval=0.05),
        activation=tf.nn.relu)
    pool5 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[3, 3], strides=2)
    # Reshape the convolution results into (batch, features).
    # NOTE(review): 8*8*16 = 1024 is hard-coded; pool5 has 256 channels, so
    # verify this matches pool5's actual H*W*C — with -1 in the batch slot a
    # mismatch is silently absorbed into the batch dimension.
    pool2_flat = tf.reshape(pool5, [-1, 8 * 8 * 16])
    # This is a large fully connected layer. Nearly 50% of these neurons
    # will be dropped to make our model more robust to variations and
    # potentially reduce overfit
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.5, training=mode==tf.estimator.ModeKeys.TRAIN)
    # This is another large fully connected layer. Nearly 50% of these neurons
    # will be dropped to make our model more robust to variations and
    # potentially reduce overfit.
    dense2 = tf.layers.dense(inputs=dropout, units=512, activation=tf.nn.relu)
    dropout2 = tf.layers.dropout(
        inputs=dense2, rate=0.5, training=mode==tf.estimator.ModeKeys.TRAIN)
    # Final logits over the 82 classes.
    logits = tf.layers.dense(inputs=dropout2, units=82)
    predictions = {
        # This generates predictions by finding the array location with the
        # largest probability.
        "classes": tf.argmax(input=logits, axis=1),
        # Our softmax layer acts on our last fully connected layer.
        # The results are used for preedictions
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # This will activate only during training mode.
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=82)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=onehot_labels, logits=logits)
    # Our optimizer:
    # We use larger values of epsilon and learning rate during the initial
    # training phases of our bigger architectures.
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, epsilon=0.00000001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # This will give us an accuracy result on our validation set
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
    """Train the CNN on pickled data, evaluate it, and write test predictions.

    Reads x/y training pickles, splits 75/25 for train/validation, trains the
    estimator, then runs prediction over the pickled test set and writes a
    Kaggle-style Id,Label CSV.

    Bug fixes: the predict input previously fed an undefined name
    `procdtestset` (NameError) — it now feeds the prepared `x_test`; the CSV
    was written via `io.open` although `io` was never imported — the builtin
    `open` (identical semantics in Python 3) is used instead; all file handles
    are now closed via context managers.
    """
    # Load training and eval data.
    print('loading picked data...')
    with open('x_train_data.pkl', 'rb') as x_in, open('y_train_data.pkl', 'rb') as y_in:
        x = pickle.load(x_in)  # load from pickle
        y = pickle.load(y_in)
    print('done loading data!')
    y = np.asarray(y, dtype=np.int32)
    # The data set will be preprocessed. Each image is done individually.
    print('preproccessing data...')
    #x = binarywithdilation(x, 0.97)
    #x= binarynormalization(x,0.71)
    print('donepreprocessing data!')
    x = np.asarray(x, dtype=np.float32)
    # Split the data up into our training and validation set.
    train_data, eval_data, train_labels, eval_labels = train_test_split(x, y, test_size=0.25)
    # We build our estimator based on the tensorflow recommendations.
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/comp551_theGoogleNetded10")
    # Log softmax probabilities periodically to track progress; the tensor
    # name comes from cnn_model_fn ("softmax_tensor").
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=1000)
    # Train the model. Random batch sampling exposes it to more variation.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        # Increasing the batch size dramatically increases runtime.
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    mnist_classifier.train(
        input_fn=train_input_fn,
        # This is not enough steps. We will run this model several times
        # to make sure the accuracy and loss are stable.
        steps=1000,
        hooks=[logging_hook])
    # Evaluate the model and print results.
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
    # Load the test data.
    print('loading picked data...')
    with open('x_test_data.pkl', 'rb') as x_test_in:
        x_test = pickle.load(x_test_in)
    print('done loading data!')
    # NOTE(review): binarynormalization is not defined in this file's visible
    # scope (the training-path call above is commented out) — confirm the
    # helper exists before running the prediction path.
    x_test = binarynormalization(x_test, 0.71)
    x_test = np.asarray(x_test, dtype=np.float32)
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": x_test},
        num_epochs=1,
        shuffle=False)
    predictions = list(mnist_classifier.predict(input_fn=predict_input_fn))
    predicted_classes = [p["classes"] for p in predictions]
    # Write the predictions, in order, as "Id,Label" rows starting at Id 1.
    with open('google_pred9.csv', 'w', encoding='utf-8') as output:
        output.write(u'Id,Label\n')
        for count, predicted in enumerate(predicted_classes, start=1):
            output.write(str(count) + u',' + str(predicted) + u'\n')
# tf.app.run parses flags and dispatches to main(unused_argv).
if __name__ == "__main__":
    tf.app.run()
| mrosen95/COMP551_A3 | CNN1.py | CNN1.py | py | 9,960 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.logging.set_verbosity",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reshape",
"line_number": 15,
"usage_type": "call"
},
{
"api_n... |
38663862525 | # -------------------------------------------------------
# Assignment 1
# Written by Raghav Sharda
# For COMP 472 Section ABJX – Summer 2020
# --------------------------------------------------------
import numpy as np
import shapefile as shp
import matplotlib as mpl
import matplotlib.pyplot as plt
import time
from itertools import product
from matplotlib.colors import ListedColormap
from Graph import *
def createBinnedCorodinates(xAxis, yAxis):
    """Return one {'X': x, 'Y': y} dict for every (x, y) in the cartesian
    product of the two axis value sequences, in product() order."""
    return [{'X': x_val, 'Y': y_val} for x_val, y_val in product(xAxis, yAxis)]
"""
This is called to generate first graph
Divided graphs into 2 to prevent crowding
"""
def createGridGraph(x,y,gridSize ,thresohold,xdistance,ydistance):
    """Plot the binned crime-count grid and return the histogram data.

    Returns (crimeRates, xAxis, yAxis, threshold_percentile): the 2-D count
    matrix, the bin edges, and the count value at the requested percentile.
    """
    crimeRates,xAxis,yAxis=np.histogram2d(x, y, bins = gridSize)
    # print(np.transpose(crimeRates)[::-1])
    threshold_percentile = np.percentile(crimeRates,thresohold)
    fig, ax = plt.subplots()
    plt.title('Montreal Crime Analytics', fontweight ="bold")
    plt.xticks(rotation=90)
    ax.set_xticks(xAxis)
    ax.set_yticks(yAxis)
    plt.xlabel("Latitudes")
    plt.ylabel("Longitudes")
    # Single-colour map; cells below the threshold stay unfilled (cmin).
    cmap22 = mpl.colors.ListedColormap(['#D68910'])
    cmap22.set_under('#D68910',1)
    ax.set_aspect("equal")
    hist, xbins, ybins, im = ax.hist2d(x,y, bins=gridSize,cmap=cmap22 , cmin=threshold_percentile)
    # Annotate each cell with its count, centred in the cell.
    # NOTE(review): the y offset also uses xdistance — looks like it should
    # use ydistance; confirm before relying on label placement.
    for i in range(len(ybins)-1):
        for j in range(len(xbins)-1):
            ax.text(xbins[j]+((xdistance/gridSize)/2),ybins[i]+((xdistance/gridSize)/2), hist.T[i,j], color="black", ha="center", va="center",fontsize=6.5)
    plt.show()
    return(crimeRates,xAxis,yAxis,threshold_percentile)
"""
This is called to generate second graph
Provides that final path that A* returns
"""
def createGridGraph2(X,Y,gridSize ,thresohold,res,touch):
    """Redraw the crime grid and overlay the final A* path.

    ``res`` is the ordered list of path vertices (objects with
    xCoordinate/yCoordinate); ``touch`` holds vertices the search visited,
    which are marked with '+'.  Also prints basic crime-count statistics.
    """
    crimeRates,xAxis,yAxis=np.histogram2d(X, Y, bins = gridSize)
    print("Average " , np.average(crimeRates))
    print("Standard Deviation " , np.std(crimeRates))
    xAxis =np.around(xAxis,3)
    yAxis =np.around(yAxis,3)
    threshold_percentile = np.percentile(crimeRates,thresohold)
    print("Threshold Value " ,threshold_percentile)
    fig, ax = plt.subplots()
    ax.set_aspect("equal")
    cmap22 = mpl.colors.ListedColormap(['#D68910'])
    cmap22.set_under('#D68910',1)
    xyz = plt.hist2d(X,Y,bins=gridSize,cmin=threshold_percentile,cmap=cmap22)
    # crimeRates = np.transpose(crimeRates)[::-1] # for visual, to see crime rate.
    plt.title('Montreal Crime Analytics', fontweight ="bold")
    plt.xticks(rotation=90)
    ax.set_xticks(xAxis)
    ax.set_yticks(yAxis)
    plt.xlabel("Latitudes")
    plt.ylabel("Longitudes")
    # Collect the path's vertex coordinates in order and draw the polyline.
    xPlotList=[]
    yPlotList=[]
    i=0
    for l in res:
        xPlotList.append(res[i].xCoordinate)
        yPlotList.append(res[i].yCoordinate)
        i+=1
    plt.plot(xPlotList, yPlotList)
    # Mark every vertex the search touched.
    for y in touch:
        plt.scatter(y.xCoordinate, y.yCoordinate,marker = "+")
    plt.grid()
    plt.show()
def getDataFromShapeFile():
    """Get crime point data from the shapefile.

    Returns two parallel lists (X, Y), each coordinate rounded to 4 decimal
    places for better calculations downstream.
    """
    reader = shp.Reader(r'Shape\crime_dt.shp')
    x_values, y_values = [], []
    for index in range(len(reader.shapes())):
        geo = reader.shape(index).__geo_interface__
        x_val, y_val = geo["coordinates"]
        x_values.append(round(x_val, 4))
        y_values.append(round(y_val, 4))
    return (x_values, y_values)
def examples():
    """Print the expected input formats before main() prompts the user."""
    # NOTE(review): leading whitespace of the original literal was lost in
    # extraction; the box content is kept exactly as recovered.
    s= """
===================================================================================
|                                Instructions                                     |
|                                                                                 |
| Enter cell size in the format '0.00X': 0.002                                    |
| Enter the percentage threshold value in the format of 'XX': 50                  |
| Enter Starting Point Coordinates in the format of '-XX.XXX,XX.XXX':-73.590,45.490 |
| Enter Final Point Coordinates in the format of '-XX.XXX,XX.XX':-73.590,45.490   |
| Maximize the graph window to see it properly                                    |
===================================================================================
"""
    print(s)
def main():
    """Prompt for grid parameters, build the vertex graph, run A*, plot results."""
    examples()
    gridSize = float(input("Enter cell size as per instructions and press enter:"))
    threshold = float(input("Enter the percenatge threshold value as per instructions and press enter':"))
    startingPoint = input("Enter Starting Point Coordinates as per instructions and press enter:")
    finalPoint = input("Enter Final Point Coordinates as per instructions and press enter:")
    # Cell size in degrees -> bin count, e.g. 0.002 * 10000 = 20 bins.
    gridSize=int(gridSize*10000)
    X,Y = getDataFromShapeFile()
    xdistance = max(X)-min(X)
    ydistance = max(Y)-min(Y)
    # First figure: the binned crime grid with per-cell counts.
    crimeRates,xAxis,yAxis,threshold_percentile=createGridGraph(X,Y,gridSize,threshold,xdistance,ydistance)
    # Build graph vertices/edges from the bin edges (helpers from Graph.py).
    listofcoordinates = createBinnedCorodinates(xAxis, yAxis)
    dictOfVertexObjects = createVertex(listofcoordinates,xdistance,ydistance,gridSize)
    makeFriends(dictOfVertexObjects)
    grid_vertex_edges = setHighCrimeAreas(crimeRates,dictOfVertexObjects,gridSize,threshold_percentile)
    vertexRef1,vertexRef2=findVertexForInputs(startingPoint,finalPoint,dictOfVertexObjects)
    # vertexKey1,vertexRef1,vertexKey2,vertexRef2 = searchVertexKeys(startingPoint,finalPoint,dictOfVertexObjects)
    # Time the A* search itself.
    tic = time.perf_counter()
    res,touch = astar(dictOfVertexObjects,grid_vertex_edges,vertexRef1,vertexRef2)
    toc = time.perf_counter()
    print(f"Time taken to find the path {toc - tic:0.4f} seconds")
    # Second figure: the grid with the found path overlaid.
    createGridGraph2(X,Y,gridSize,threshold,res,touch)
    print("Done")
# Run the interactive tool only when executed directly.
if __name__ == '__main__':
    main()
| raghavsharda/Rover | index.py | index.py | py | 5,764 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.product",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.histogram2d",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplo... |
25314578639 | import uuid
from config import USR_ORG_MONGO_COLLECTION, USR_MONGO_COLLECTION
import db
from models.response import post_error
import logging
log = logging.getLogger('file')
class OrgUtils:
    """Static helpers for organisation id generation and validation.

    The validate_* methods return None on success and a ``post_error(...)``
    payload on failure.
    """

    def __init__(self):
        pass

    # orgId generation
    @staticmethod
    def generate_org_id():
        """Return a fresh hex UUID used as the org id at registration."""
        return(uuid.uuid4().hex)

    @staticmethod
    def validate_org(org_code):
        """Validating Org

        Org should be registered and active on Anuvaad system.
        Returns None when the org exists and is active.
        """
        try:
            # connecting to mongo instance/collection
            collections = db.get_db()[USR_ORG_MONGO_COLLECTION]
            # searching for active org record; only the 'active' flag is projected
            result = collections.find({"code": org_code}, {"_id": 0, "active": 1})
            # NOTE(review): Cursor.count() is deprecated/removed in newer
            # pymongo versions — confirm the pinned pymongo still supports it.
            if result.count() == 0:
                return post_error("Invalid Organization", "No such registered organization with the given Org Id", None)
            for value in result:
                if value["active"] == False:
                    return post_error("Invalid Organization", "Organization is currently inactive", None)
        except Exception as e:
            log.exception(f"Db connection exception : {e}")
            return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)

    @staticmethod
    def validate_org_upsert(i,org):
        """Org validation on upsert

        deactivation of org allowed only once all the users in the
        corresponding org is inactive.  ``i`` is the record index within the
        upsert batch (unused here); ``org`` is the org payload dict.
        """
        if "code" not in org or not org["code"]:
            return post_error("Data Missing", "code not found", None)
        if "active" not in org:
            return post_error("Data Missing", "active not found", None)
        code = str(org["code"]).upper()
        active = org["active"]
        # NOTE(review): this branch alone returns a (error, 400) tuple while
        # every other branch returns the error payload only — confirm callers
        # handle both shapes.
        if not isinstance(active,bool):
            return post_error("Invalid format", "active should be bool", None), 400
        if active == False:
            try:
                # connecting to mongo instance/collection
                collections = db.get_db()[USR_MONGO_COLLECTION]
                # searching for active users in the org
                result = collections.find({"orgID": code,"is_active":True})
                if result.count()!=0:
                    log.info("Deactivation request for org failed, {} active users with the orgID".format(str(result.count())))
                    return post_error("Deactivation Failed","There exist active users in {} hence this action cannot be performed".format(code),None)
            except Exception as e:
                log.exception(f"Db connection exception : {e}")
                return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "db.get_db",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "config.USR_ORG_MONGO_COLLECTION... |
30129412325 | from pymongo import MongoClient
# MongoDB instance where the database is running (local default port).
connection_string = "mongodb://localhost:27017"
# NOTE: MongoClient connects lazily — no I/O happens until first use.
client = MongoClient(connection_string)
db = client.get_database("idk")
# Collections are MongoDB's equivalent of SQL tables.
collection = db.get_collection("information")
# Sketch of the document shape to insert (name, date, inspection details,
# location, quantity?):
#document = {"name": [name at i], "date": [date at i], "inspection details": [insdets at i], "location": [location at i]}
{
"api_name": "pymongo.MongoClient",
"line_number": 5,
"usage_type": "call"
}
] |
6685906309 | from song import Song
from date_parser import TimeParser
from functools import reduce
import random
from prettytable import PrettyTable
import json
from collections import OrderedDict
class Playlist:
    """An ordered song collection with repeat/shuffle playback and JSON persistence.

    ``repeat`` cycles through songs in order; ``shuffle`` serves songs in
    random order without repeating any until every song has been played once.
    """

    def __init__(self, name='', repeat=False, shuffle=False):
        self.name = name
        self.repeat = repeat
        self.shuffle = shuffle
        self.songs = []            # every song in the playlist
        self.current_songs = []    # songs already served in the current shuffle cycle
        self.songs_played = -1     # index of the last song served in repeat mode

    def add_song(self, song):
        """Append *song*; always returns the current song list."""
        try:
            self.songs.append(song)
        except TypeError as e:
            print(e)
        finally:
            # NOTE: return-in-finally preserves the original "never raise" contract.
            return self.songs

    def remove_song(self, song):
        """Remove *song* if present; always returns the current song list.

        Bug fix: ``list.remove`` raises ValueError (not TypeError) when the
        song is absent — previously that exception escaped and crashed the
        caller; now it is reported and the playlist is left unchanged.
        """
        try:
            self.songs.remove(song)
        except (ValueError, TypeError) as e:
            print(e)
        finally:
            return self.songs

    def add_songs(self, songs):
        """Append every song in *songs*; always returns the current song list."""
        try:
            for song in songs:
                self.songs.append(song)
        except TypeError as e:
            print(e)
        finally:
            return self.songs

    def total_length(self):
        """Sum of all song lengths (delegates to the songs' ``+`` support)."""
        return reduce(lambda x, y: x + y, self.songs)

    def artists(self):
        """Return a histogram {artist: number of songs} for the playlist."""
        art_hist = {}
        for song in self.songs:
            art_hist[song.artist] = art_hist.get(song.artist, 0) + 1
        return art_hist

    def next_song(self):
        """Return the next song per the playback mode (None if neither flag set)."""
        if self.shuffle:
            return self.__shuffle_help()
        if self.repeat:
            return self.__repeat_help()

    def __repeat_help(self):
        # Advance cyclically through the playlist in order.
        self.songs_played += 1
        if self.songs_played == len(self.songs):
            self.songs_played = 0
        return self.songs[self.songs_played]

    def __shuffle_help(self):
        # Pick a random song not yet served this cycle; start a new cycle
        # once every song has been played.
        idx = random.randrange(0, len(self.songs))
        if len(self.current_songs) == len(self.songs):
            self.current_songs = [self.songs[idx]]
            return self.songs[idx]
        while self.songs[idx] in self.current_songs:
            idx = random.randrange(0, len(self.songs))
        self.current_songs.append(self.songs[idx])
        return self.songs[idx]

    def pprint_playlist(self):
        """Return a PrettyTable listing artist, title and length per song."""
        table = PrettyTable(["Artist", "Song", "Length"])
        for song in self.songs:
            table.add_row([song.artist, song.title, song.length])
        return table

    def __dasherize(self):
        # "my playlist" -> "my-playlist" for use as a file name.
        return self.name.replace(' ', '-')

    def save(self):
        """Serialize the playlist to playlist-data/<dasherized-name>.json."""
        json_string = OrderedDict([['name', self.name],
                                   ['repeat', self.repeat],
                                   ['shuffle', self.shuffle]])
        song_list = []
        for song in self.songs:
            song_list.append(OrderedDict([['title', song.title],
                                          ['artist', song.artist],
                                          ['album', song.album],
                                          ['length', str(song.length)]]))
        json_string['songs'] = song_list
        with open('playlist-data/{}.json'
                  .format(self.__dasherize()), 'w') as f:
            json.dump(json_string, f, indent=4)

    def load(self, filename):
        """Build a Playlist from a saved JSON file; return its pretty-printed table."""
        with open(filename) as f:
            data = json.load(f)
        playlist = Playlist(data['name'], data['repeat'], data['shuffle'])
        for song in data['songs']:
            playlist.add_song(Song(song['title'], song['artist'],
                                   song['album'], song['length']))
        return playlist.pprint_playlist()
def main():
    """Demo entry point: load a saved playlist from disk and print its table.

    The commented block below documents how playlists were originally built
    and saved during development.
    """
    # song1 = Song(title="Odin", artist="Manowar",
    #              album="The Sons of Odin", length=TimeParser("3:44"))
    # song2 = Song(title="Odina", artist="Nightwish",
    #              album="The Sons of Odina", length=TimeParser("3:44"))
    # song3 = Song(title="Nothing else matters", artist="Metallica",
    #              album="The Black Album", length=TimeParser('2:45'))
    # playlist = Playlist("my playlist", False, True)  # shuffle is True
    # playlist.add_song(song1)
    # playlist.add_song(song2)
    # playlist.add_song(song3)
    # # print(playlist.next_song())
    # # print(playlist.next_song())
    # # print(playlist.next_song())
    # # print(playlist.next_song())
    # print(playlist.pprint_playlist())
    # playlist.save()
    playlist = Playlist()
    print(playlist.load('playlist-data/my-playlist.json'))
# Run the demo only when executed directly.
if __name__ == '__main__':
    main()
| Nimor111/MusicPlayer | src/playlist.py | playlist.py | py | 4,384 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "functools.reduce",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "song.artist",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "song.artist",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "song.artist",
... |
74934231074 | from tools import ToolsCmd
import time
from rich.console import Console
import re
console = Console()
def init_mysql(port, db_v):
    """Bootstrap the MySQL data directory for the instance on *port*.

    ``db_v`` selects the tool: mysql_install_db for 5.6.x versions,
    ``mysqld --initialize-insecure`` otherwise.  After launching the
    bootstrap, polls ps (up to ~5 minutes) until it exits, then checks the
    tool-specific success marker.  Returns True on success, False otherwise.
    """
    console.print('\n7-开始初始化MySQL', style="bold yellow", highlight=True)
    if '5.6' in db_v:
        init_result = ToolsCmd('/dbs/mysqls/mysql{0}/service/scripts/mysql_install_db --defaults-file=/etc/my{0}.cnf --basedir=/dbs/mysqls/mysql{0}/service --datadir=/dbs/mysqls/mysql{0}/data --user=mysql'.format(port))
        time.sleep(15)
        # Wait for the mysql_install_db process for this instance to finish.
        for t in range(10):
            a = ToolsCmd('ps -ef |grep mysql_install_db |grep my{0} |grep -v grep |wc -l'.format(port))[0].rstrip()
            if a == '0':
                break
            time.sleep(30)
        # A clean 5.6 bootstrap prints "OK" twice in its output.
        if len(re.findall('OK', init_result[0])) == 2:
            console.print('MySQL初始化成功', style="bold green", highlight=True)
            return True
    else:
        ToolsCmd('/dbs/mysqls/mysql{0}/service/bin/mysqld --defaults-file=/etc/my{0}.cnf --initialize-insecure'.format(port))
        time.sleep(15)
        # Wait for the --initialize process for this instance to finish.
        for t in range(10):
            a = ToolsCmd('ps -ef |grep initialize |grep my{0} |grep -v grep |wc -l'.format(port))[0].rstrip()
            if a == '0':
                break
            time.sleep(30)
        # A successful --initialize-insecure ends the error log with an
        # "empty password" notice.
        if ToolsCmd('tail -1 /dbs/mysqls/mysql{0}/log/mysql-error.log |grep "empty password" |wc -l'.format(port))[0].rstrip() == '1':
            console.print('MySQL初始化成功', style="bold green", highlight=True)
            return True
    console.print('MySQL初始化失败', style="bold red", highlight=True)
    return False
| xxyhhd/my_scripts | agent/install_mysql/h_init_mysql.py | h_init_mysql.py | py | 1,581 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rich.console.Console",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tools.ToolsCmd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tools.ToolsCmd",
"l... |
15767671976 | from pyspark import SparkConf, SparkContext
import collections
# Spark job: rank MovieLens movies by number of ratings, most popular first.
conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf=conf)

lines = sc.textFile("file:///SparkCourse/ml-100k/u.data")
# u.data columns: userID movieID rating timestamp -> count 1 per (movieID).
popular_movies = lines.map(lambda x: (int(x.split()[1]), 1)).reduceByKey(lambda x, y: x + y)
# Flip to (count, movieID) so sortByKey orders by popularity.
popular_movies_flipped = popular_movies.map(lambda x: (x[1], x[0]))
# Bug fix: the result was previously bound to `sorted`, shadowing the builtin.
sorted_movies = popular_movies_flipped.sortByKey(ascending=False)
sortedResults = sorted_movies.collect()
for result in sortedResults:
    print(result)
| TalhaAsmal/Taming-Big-Data-Pyspark-Udemy | popular-movies.py | popular-movies.py | py | 520 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyspark.SparkConf",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 5,
"usage_type": "call"
}
] |
7662295610 | import pygame, functions, sys
class Button(pygame.Rect):
    """A clickable rectangular button with a text label.

    NOTE(review): inherits pygame.Rect but never calls super().__init__;
    the rect attributes (x, y, width, height) are assigned manually below —
    confirm nothing relies on Rect's own initialisation.
    """

    # Class-level registry of every Button created; draw_buttons renders it.
    List = []

    def __init__(self, action, x, y, width, height, text, textcolor=(0,0,0), color=(255,255,255)):
        self.text = text              # label drawn on the button
        self.textcolor = textcolor    # RGB colour of the label
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.action = action          # identifier/callback used by click handling
        self.color = color            # RGB fill colour of the rectangle
        Button.List.append(self)

    @staticmethod
    def draw_buttons(screen):
        """Draw every registered button (filled rect + label) onto *screen*."""
        for button in Button.List:
            pygame.draw.rect(screen, button.color, (button.x, button.y, button.width, button.height))
            functions.text_to_screen(screen, button.text, button.x, button.y + 10, color=button.textcolor, size=25)
{
"api_name": "pygame.Rect",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "functions.text_to_s... |
30416561761 | """Identify RC4 compared to random and RC4A output with the ciphertext's second byte"""
import secrets
import RC4
# random
zeroes = 0
for i in range(65536):
secret = secrets.token_bytes(4)
if int(secret[1]) == 0:
zeroes += 1
print("Random")
print(f"Expected: 256")
print(f"Reality: {zeroes}")
print(f"Deviation: {(zeroes - 256)/256:.0%}")
# rc4a
zeroes = 0
for i in range(65536):
secret = secrets.token_hex(16)
rc4a = RC4.RC4A(secret)
ciphertext = bytes.fromhex(rc4a.cipher("Hello World", "plain", "hex"))
if rc4a.keystream_used[1] == 0:
zeroes += 1
print("RC4A")
print(f"Expected: 256")
print(f"Reality: {zeroes}")
print(f"Deviation: {(zeroes - 256)/256:.0%}")
# rc4
zeroes = 0
for i in range(65536):
secret = secrets.token_hex(16)
rc4 = RC4.RC4(secret)
ciphertext = bytes.fromhex(rc4.cipher("Hello World", "plain", "hex"))
if rc4.keystream_used[1] == 0:
zeroes += 1
print("RC4")
print(f"Expected: 256")
print(f"Reality: {zeroes}")
print(f"Deviation: {(zeroes - 256)/256:.0%}")
| slightlyskepticalpotat/rc4-variants | identify.py | identify.py | py | 1,047 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "secrets.token_bytes",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "secrets.token_hex",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "RC4.RC4A",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "secrets.token_hex",
... |
72495892835 | import torch
def drmsd(structure_1, structure_2, mask=None):
def prep_d(structure):
d = structure[..., :, None, :] - structure[..., None, :, :]
d = d ** 2
d = torch.sqrt(torch.sum(d, dim=-1))
return d
d1 = prep_d(structure_1)
d2 = prep_d(structure_2)
drmsd = d1 - d2
drmsd = drmsd ** 2
if(mask is not None):
drmsd = drmsd * (mask[..., None] * mask[..., None, :])
drmsd = torch.sum(drmsd, dim=(-1, -2))
n = d1.shape[-1] if mask is None else torch.min(torch.sum(mask, dim=-1))
drmsd = drmsd * (1 / (n * (n - 1))) if n > 1 else (drmsd * 0.)
drmsd = torch.sqrt(drmsd)
return drmsd
def drmsd_np(structure_1, structure_2, mask=None):
structure_1 = torch.tensor(structure_1)
structure_2 = torch.tensor(structure_2)
if(mask is not None):
mask = torch.tensor(mask)
return drmsd(structure_1, structure_2, mask)
def gdt(p1, p2, mask, cutoffs):
n = torch.sum(mask, dim=-1)
p1 = p1.float()
p2 = p2.float()
distances = torch.sqrt(torch.sum((p1 - p2)**2, dim=-1))
scores = []
for c in cutoffs:
score = torch.sum((distances <= c) * mask, dim=-1) / n
score = torch.mean(score)
scores.append(score)
return sum(scores) / len(scores)
def gdt_ts(p1, p2, mask):
return gdt(p1, p2, mask, [1., 2., 4., 8.])
def gdt_ha(p1, p2, mask):
return gdt(p1, p2, mask, [0.5, 1., 2., 4.])
| aqlaboratory/openfold | openfold/utils/validation_metrics.py | validation_metrics.py | py | 1,442 | python | en | code | 2,165 | github-code | 1 | [
{
"api_name": "torch.sqrt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 19,
"... |
73751347235 | # Recupera las ofertas de la página de Amazon.
# Usamos herramientas de manipulación de strings para no entrar en selenium.
# Al ser una web dinámica y descargarla sin que se ejecute JavaScrip, nos limita solo a
# 8 ofertas, aunqe se puedan recuperar más Ids
#
# EJEMPLO DE USO
#
# app = Amazon()
# data = app.get_data() # diccionario de datos
# app.to_csv() # genera un csv
#
# EJEMPLO RE SALIDA
#
# {
# 'e79a0334' : {
# 'egressUrl': 'https://www.amazon.es/dp/B01ETRGE7M',
# 'primeAccessDuration': '0',
# 'glProductGroup': 'gl_digital_devices_4',
# 'msToEnd': '207110396',
# 'reviewRating': '4.395130244',
# 'primaryImage': 'https://images-na.ssl-images-amazon.com/images/I/415kiMyoKpL.jpg',
# 'maxPercentOff': '25',
# 'msToStart': '-396789604',
# 'reviewAsin': 'B01ETRGE7M',
# 'minPrevPrice': '39.99',
# 'maxListPrice': '39.99',
# 'isMAP': '0',
# 'displayPriority': '0',
# 'isEligibleForFreeShipping': '0',
# 'isPrimeEligible': '1',
# 'dealID': 'e79a0334',
# 'description': 'Ahorra 10€ en Fire TV Stick',
# '*className*': 'dcs.model.DealDetails',
# 'title': 'Ahorra 10€ en Fire TV Stick',
# 'type': 'BEST_DEAL',
# 'maxBAmount': '39.99',
# 'merchantName': 'Amazon.es',
# 'maxCurrentPrice': '29.99',
# 'impressionAsin': 'B01ETRGE7M',
# 'isFulfilledByAmazon': '0',
# 'maxDealPrice': '29.99',
# 'offerID': 'X5ZjOPB%2BAC1u8gwRPlYcXBeWT66QxCFxazdyZ4Mo4UZfsihRlytarHMP59NJF3nuQ%2BDDaUc8',
# '*classHierarchy*': '[\n dcs.model.DealDetails\n ]',
# 'maxPrevPrice': '39.99',
# 'minBAmount': '39.99',
# 'currencyCode': 'EUR',
# 'minListPrice': '39.99',
# 'merchantID': 'A1AT7YVPFBWXBL',
# 'score': '0',
# 'bKind': 'ULP',
# 'msToFeatureEnd': '0',
# 'minCurrentPrice': '29.99',
# 'ingressUrl': 'https://www.amazon.es/gp/goldbox',
# 'isFeatured': '0',
# 'totalReviews': '8166',
# 'minDealPrice': '29.99',
# 'itemType': 'SINGLE_ITEM',
# 'minPercentOff': '25',
# 'items': '[\n\n ]'
# },
# '13afff4c' : { ... },
# ....
# }
import requests
import file_handle
class Amazon(file_handle.FileHandle):
def __init__(self):
self.DEAL_ID = 'sortedDealIDs'
self.DEAL_DETAIL = 'dealDetails'
self.END = 'responseMetadata'
self.url = 'https://www.amazon.es/gp/goldbox/?ref_=nav_cs_npx_todaysdeals'
self.file_name = 'amazon'
self.url_text = self._get_url_data()
super(Amazon, self).__init__(self.file_name)
def get_data(self):
"""
Recupera datos de Ids y texto a buscar y devuelve un diccionario con los datos finales
:return: dict
"""
ids = self._find_ids()
new_text = self._text_deals()
return self._generate_data(new_text, ids)
def _generate_data(self, deals, ids):
"""
Recoge los datos finales y los devuelve en un diccionario
Reestructuración del diccionario para una mejor visión y acceso
:param deals: dict, diccionario con los datos en bruto
:param ids: list, listado de las ids de las ofertas
:return: dict, diccionario reestructurado para mejor acceso
"""
dic = {}
for item in ids:
dic[item] = self._find_deals(deals, item)
return dic
def _get_url_data(self):
"""
Recupera los datos en formato texto de la web
:return: string
"""
return requests.get(self.url).text
def _find_ids(self):
"""
Bisca las ids en el texto
:return: list
"""
text = self._cut_text(self.url_text, self.DEAL_ID, '[', ']')
cleaned = self._clean_text(text)
return cleaned[:8]
def _clean_text(self, text):
"""
Limpia espacios y finales de linea del texto
:param text: string
:return: list
"""
text = text.replace('"', '')
text = text.split(',')
return [item.strip() for item in text]
def _cut_text(self, text, begin, strt='{', stp='}'):
"""
Corta el texto con los datos de inicio y fin indicados
:param text: string, texto a cortar
:param begin: string, punto de partida a cortar
:param ini: string, simbolo de inicio
:param fin: string, simbolo fin
:return: string, texto cortado
"""
base = text.find(begin)
new_text = text[base:]
start = new_text.find(strt) + 1
stop = new_text.find(stp)
return new_text[start:stop]
def _text_deals(self):
"""
Recorta el texto que contiene los datos de todos los deals
:return: string
"""
text_start = self.url_text.find(self.DEAL_DETAIL)
text_stop = self.url_text.find(self.END)
return self.url_text[text_start:text_stop]
def _find_deals(self, text, inicio):
"""
Una vez recortado y limpiado el texto, generamos una lista de listas. Cada lista se separa en dos valores.
Con esa lista generamos un diccionario.
:param text: string, texto de busqueda
:param inicio: string, cadena de inicio de busquesda
:param be: string, simbolo deinicio de busqueda
:param en: string, simbolo de fin de busqueda
:return: dict, diccionario con datos en bruto.
"""
text = self._cut_text(text, inicio)
new_text = self._clean_text(text)
text_list = [element.split(' : ') for element in new_text]
new_list = [item for item in text_list if len(item) == 2]
dic = dict((key, value) for key, value in new_list)
return dic
def to_csv(self):
"""
Crea un archivo csv con los datos.
Producto, Precio, Url, Imagen
"""
data = self.get_data()
new_data = self._prepare_data(data)
self._file_write(new_data)
def _prepare_data(self, data):
"""
Recupera los datos en formato lista y los devuelve como diccionario con las keys
title, minDealPrice, egressUrl, primaryImage
:param data: list
:return: dict
"""
list_data = []
for item in data.keys():
list_data.append([
data[item].get('title'),
data[item].get('minDealPrice'),
data[item].get('egressUrl'),
data[item].get('primaryImage')
])
return list_data
if __name__ == '__main__':
app = Amazon()
print(app.get_data())
app.to_csv()
| alfonsoma75/web_scraping | amazon.py | amazon.py | py | 6,764 | python | es | code | 1 | github-code | 1 | [
{
"api_name": "file_handle.FileHandle",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 106,
"usage_type": "call"
}
] |
71660254754 | #! /usr/bin/python3
import sys
import serial, time
import os, stat
from os.path import exists
from os import access, R_OK, W_OK
import paho.mqtt.client as mqtt
import configparser
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='''control dpm86xx devices using mqtt''')
parser.add_argument('config_file', metavar="<config_file>", help="file with configuration")
args = parser.parse_args()
# read and parse config file
config = configparser.RawConfigParser()
config.read(args.config_file)
# [mqtt]
MQTT_CLIENTNAME = config.get("mqtt", "clientname")
MQTT_HOST = config.get("mqtt", "host")
MQTT_PORT = config.getint("mqtt", "port")
MQTT_LOGIN = config.get("mqtt", "login", fallback=None)
MQTT_PASSWORD = config.get("mqtt", "password", fallback=None)
ROOT_TOPIC = config.get("mqtt", "roottopic")
SET_TOPIC = config.get("mqtt", "settopic")
# [dpm86xx]
dpm86xx_id = config.get("dpm86xx", "id")
dpm86xx_port = config.get("dpm86xx", "port")
VOLTAGE_MAX = int(config.get("dpm86xx", "v_max"))
CURRENT_MAX = int(config.get("dpm86xx", "i_max"))
VOLTAGE_BAT = int(config.get("dpm86xx", "v_bat"))
APPNAME = "dpm86xx2mqtt"
# supported dpm functions -- see the document "dpm86xx-series-power-supply_simple-communication-protocol.odt/pdf" in this repository
F_MAX_VOLTAGE="00" # R/-: maximum output voltage
F_MAX_CURRENT="01" # R/-: maximum output current
F_VOLTAGE_SETTING="10" # R/W: output voltage target
F_CURRENT_SETTING="11" # R/W: output current target
F_OUTPUT="12" # R/W: output on/off
F_VOLTAGE="30" # R/-: output voltage
F_CURRENT="31" # R/-: output current
F_CONST="32" # R/W: constant current or constant voltage status
F_TEMPERATURE="33" # R/-: temperature
VOLTAGE_MIN=0 # 0 Volt
CURRENT_MIN=0 # 0 Ampere
# create serial-object
dpm86xx_serial = serial.Serial(
port=dpm86xx_port,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1, #None
inter_byte_timeout=None
)
# raw-communication-functions
def dpm86xx_value_read(opcode):
opcode = str(opcode)
# sending command
cmd = ":" + dpm86xx_id + "r" + opcode + "=0" + ",,\n"
bcmd = cmd.encode()
written = dpm86xx_serial.write(bcmd)
if written < len(bcmd): return(-999)
# reading response
bresponse = dpm86xx_serial.readline()
response = bresponse.decode(errors='replace')
if response == "": return(-999)
# return corrected response as word
response = response[7:-3]
if response.isdigit(): response = int(response)
return response
def dpm86xx_value_write(opcode, value):
opcode = str(opcode)
value = str(value)
# sending command
cmd =":" + dpm86xx_id + "w" + opcode + "=" + value + ",,\n"
bcmd = cmd.encode()
written = dpm86xx_serial.write(bcmd)
if written < len(bcmd): return(-999)
# reading response
bresponse = dpm86xx_serial.readline()
response = bresponse.decode(errors='replace')
# check and return value
response = response[:-2]
if response == ":" + dpm86xx_id + "ok": return(1)
else: return(-999)
# reading values
def dpm86xx_read_temperature():
return(dpm86xx_value_read(F_TEMPERATURE))
def dpm86xx_read_voltage():
return(float(dpm86xx_value_read(F_VOLTAGE)) / 100)
def dpm86xx_read_voltage_setting():
return(float(dpm86xx_value_read(F_VOLTAGE_SETTING)) / 100)
def dpm86xx_read_voltage_max():
return(float(dpm86xx_value_read(F_MAX_VOLTAGE)) / 100)
def dpm86xx_read_current():
return(float(dpm86xx_value_read(F_CURRENT)) / 1000)
def dpm86xx_read_current_setting():
return(float(dpm86xx_value_read(F_CURRENT_SETTING)) / 1000)
def dpm86xx_read_current_max():
return(float(dpm86xx_value_read(F_MAX_CURRENT)) / 1000)
def dpm86xx_read_power():
voltage = dpm86xx_read_voltage()
if voltage<0: return(-999)
current = dpm86xx_read_current()
if current<0: return(-999)
return(voltage * current)
def dpm86xx_read_power_max():
voltage = dpm86xx_read_voltage()
if voltage<0: return(-999)
current_max = dpm86xx_read_current_max()
if current_max<0: return(-999)
return(voltage * current_max)
def dpm86xx_read_output():
return(dpm86xx_value_read(F_OUTPUT))
def dpm86xx_read_mode():
return(dpm86xx_value_read(F_CONST)) #CV=0 / CC=1
# setting values
def dpm86xx_set_voltage(voltage):
if voltage < VOLTAGE_MIN or voltage > VOLTAGE_MAX: return(-999)
return(dpm86xx_value_write(F_VOLTAGE_SETTING, int(voltage * 100)))
def dpm86xx_set_current(current):
if current < CURRENT_MIN or current > CURRENT_MAX: return(-999)
return(dpm86xx_value_write(F_CURRENT_SETTING, int(current * 1000)))
def dpm86xx_set_power(power):
voltage = dpm86xx_read_voltage()
if (voltage < 5):
voltage = VOLTAGE_BAT
return(dpm86xx_set_current(power / voltage))
def dpm86xx_set_output(state):
if state in [0, 1]: return(dpm86xx_value_write(F_OUTPUT, str(state)))
else: return(-999)
def dpm86xx_set_mode(state): #CV=0 / CC=1
if state in [0, 1]: return(dpm86xx_value_write(F_CONST, str(state)))
else: return(-999)
def mqtt_callback(client, userdata, msg):
#print("got topic: %s" % (str(msg.topic)))
if (msg.topic == SET_TOPIC + "/voltage"):
dpm86xx_set_voltage(float(msg.payload.decode("utf-8")))
elif (msg.topic == SET_TOPIC + "/current"):
dpm86xx_set_current(float(msg.payload.decode("utf-8")))
elif (msg.topic == SET_TOPIC + "/power"):
dpm86xx_set_power(float(msg.payload.decode("utf-8")))
elif (msg.topic == SET_TOPIC + "/output"):
dpm86xx_set_output(int(msg.payload.decode("utf-8")))
elif (msg.topic == SET_TOPIC + "/readdata"):
mqtt_client.publish(ROOT_TOPIC + "/output", str(dpm86xx_read_output()))
mqtt_client.publish(ROOT_TOPIC + "/temperature", str(dpm86xx_read_temperature()))
voltage = dpm86xx_read_voltage()
mqtt_client.publish(ROOT_TOPIC + "/voltage", str(voltage))
current = dpm86xx_read_current()
mqtt_client.publish(ROOT_TOPIC + "/current", str(current))
mqtt_client.publish(ROOT_TOPIC + "/power", str(voltage * current))
# main-function
if __name__ == '__main__':
# MQTT-client
# ================================================================
mqtt_client = mqtt.Client(MQTT_CLIENTNAME)
mqtt_client.connect(MQTT_HOST, MQTT_PORT, 60)
mqtt_client.on_message=mqtt_callback
mqtt_client.subscribe(SET_TOPIC + "/voltage",qos=0)
mqtt_client.subscribe(SET_TOPIC + "/current", qos=0)
mqtt_client.subscribe(SET_TOPIC + "/power", qos=0)
mqtt_client.subscribe(SET_TOPIC + "/output", qos=0)
mqtt_client.subscribe(SET_TOPIC + "/readdata", qos=0)
# initialize the device with some desired values
# set constant-current-mode
#dpm86xx_set_mode(1)
# set voltage to 30V, current to 0A and enable output
#dpm86xx_set_voltage(30)
#dpm86xx_set_current(0)
#dpm86xx_set_output(1)
# wait at least 600ms to let device turnon
#time.sleep(1.0)
# set power to 70W
#dpm86xx_set_power(70)
# start MQTT-client
mqtt_client.loop_start()
# Start Main-Loop
# ================================================================
while(True):
#print("Output-State = " + str(dpm86xx_read_output()))
#print("Temperature = " + str(dpm86xx_read_temperature()) + "°C")
#print("Output-Voltage = " + str(dpm86xx_read_voltage()) + "V")
#print("Output-Current = " + str(dpm86xx_read_current()) + "A")
#print("Output-Power = " + str(dpm86xx_read_power()) + "W")
time.sleep(1)
| xn--nding-jua/pv_mqtt_controller | dpm86xx2mqtt.py | dpm86xx2mqtt.py | py | 7,771 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "configparser.RawConfigParser",
"line_number": 17,
"usage_type": "call"
... |
72387697954 | from collections import defaultdict
import heapq
def solution(gems: list[str]):
answer = []
# 우선순위큐 : 데이터를 담을 temp array 생성
temp = []
collected_gem = []
collected_gem = defaultdict(int)
# 최소 스타트 [0] => 보석이름,[1] => 보석 위치
min_start = ''
for idx,gem in enumerate(gems):
collected_gem[gem] = idx+1
# 연속된 보석중 가장 앞에 있는 보석이 나올경우
if min_start == gem or min_start == '':
# 누가 최소인지 찾기
min_num = 10e9
# 이미 제일 앞에 있는 보석이 제일 뒤로 가서 가장 앞에 있는 보석의 순서를 찾아야 합니다.
for c in collected_gem.items():
if min_num > c[1]:
min_start = c[0]
min_num= c[1]
# print("min_start update",min_start)
# 최소 거리 계산
distance = (idx+1)-collected_gem[min_start]
# 가지고 있는 보석 개수 저장
gems_cnt = len(collected_gem.keys())
# 시작 포인트
start_point = collected_gem[min_start]
# 개수(큰수대로))거리(작은순),시작(작은순) 순으로 우선순위를 두고 우선순위 큐에 저장
group = ((-1)*gems_cnt,distance,start_point)
if not temp:
heapq.heappush(temp,((-1)*gems_cnt,distance,start_point))
elif group != temp[0]:
heapq.heappush(temp,((-1)*gems_cnt,distance,start_point))
#print(temp[0])
_,dist,start = heapq.heappop(temp)
answer = [start,start+dist]
return answer
print(solution(["A","B","B","B","B","B","B","C","B","A"])==[8,10])
print(solution(["A"])==[1,1])
print(solution(["A","A","A","B","B"])== [3,4])
print(solution(["AB","x","y","BD","e","AB","AC"])==[2, 7]);
print(solution(["AB","BD","AB","BD","BD","AB","AC"])==[5, 7]);
print(solution(["A", "A", "B"])==[2, 3]);
print(solution(["DIA", "RUBY", "RUBY", "DIA", "DIA", "EMERALD", "SAPPHIRE", "DIA"])==[3,7])
print(solution(["AA", "AB", "AC", "AA", "AC"])==[1, 3])
print(solution(["XYZ", "XYZ", "XYZ"])==[1, 1])
print(solution(["ZZZ", "YYY", "NNNN", "YYY", "BBB"])==[1, 5])
| cafe-jun/codingTest-Algo | programmers/2020카카오인턴십/보석쇼핑.py | 보석쇼핑.py | py | 2,236 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
... |
11506375548 | import tcod
from random import randint
from random_utils import random_choice_from_dict, from_dungeon_level
from entity import Entity
from map_objects.tile import Tile
from map_objects.rect import Rect
from components.ai import Brute
from components.combatant import Combatant
from components.item import Item
from components.stairs import Stairs
from components.equipment import EquipmentSlots
from components.equippable import Equippable
from render_functions import RenderOrder
from item_functions import heal, dorkbolt, dorkblast, confusodockulus
from game_messages import Message
class GameMap:
def __init__(self, width, height, depth=1):
self.width=width
self.height=height
self.depth=depth
self.tiles=self.init_tiles()
def init_tiles(self):
# True: walls!
tiles=[[Tile(True) for y in range(self.height)] for x in range(self.width)]
return tiles
def make_map(self, max_rooms, room_min_size, room_max_size, map_width, map_height, player, entities,):
rooms=[]
num_rooms=0
last_room_c=(None, None)
for _ in range(max_rooms):
w= randint(room_min_size, room_max_size)
h= randint(room_min_size, room_max_size)
x= randint(0, map_width-w-1)
y= randint(0, map_height-h-1)
new_room=Rect(x, y, w, h)
for ref_room in rooms:
if new_room.intersect(ref_room):
break
# weird python syntax stuff
else:
self.carve_room(new_room)
(new_x, new_y)=new_room.centre()
last_room_c=(new_x, new_y)
if num_rooms==0:
player.x=new_x
player.y=new_y
else:
# tunnel to SPECIFICALLY THE PREVIOUS ROOM, imitating the chaotic nature of dcss
(prev_x, prev_y)=rooms[num_rooms-1].centre()
if randint(0, 1)==0:
self.carve_tunnel_x(prev_x, new_x, prev_y)
self.carve_tunnel_y(prev_y, new_y, new_x)
else:
self.carve_tunnel_y(prev_y, new_y, prev_x)
self.carve_tunnel_x(prev_x, new_x, new_y)
self.spawn_entities(new_room, entities)
rooms.append(new_room)
num_rooms+=1
stairs_x, stairs_y=last_room_c
entities.append(Entity(stairs_x, stairs_y, '>', tcod.white, 'Stairs', render_order=RenderOrder.CORPSE, stairs=Stairs(self.depth+1)))
def carve_room(self, room):
for y in range(room.y1+1, room.y2):
for x in range (room.x1+1, room.x2):
self.tiles[x][y].block_movement=False
self.tiles[x][y].block_sight=False
def carve_tunnel_x(self, x1, x2, y):
for x in range(min(x1, x2), max(x1, x2)+1):
self.tiles[x][y].block_movement=False
self.tiles[x][y].block_sight=False
def carve_tunnel_y(self, y1, y2, x):
for y in range(min(y1, y2), max(y1, y2)+1):
self.tiles[x][y].block_movement=False
self.tiles[x][y].block_sight=False
def is_blocked(self, x, y):
if self.tiles[x][y].block_movement:
return True
return False
def spawn_entities(self, room, entities):
# spawning some baddies
max_monsters_per_room=from_dungeon_level([(2, 1), (3, 4), (5, 6)], self.depth)
number_of_monsters=randint(0, max_monsters_per_room)
monster_chances={
'man': 72,
'orckh': from_dungeon_level([(15, 3), (30, 5), (60, 7)], self.depth)
}
for _ in range(number_of_monsters):
x=randint(room.x1+1, room.x2-1)
y=randint(room.y1+1, room.y2-1)
if not any([entity for entity in entities if entity.x==x and entity.y==y]):
monster_choice=random_choice_from_dict(monster_chances)
if monster_choice=='man':
monster=Entity(x, y, 'm', tcod.white, 'Man', block_movement=True, render_order=RenderOrder.ACTOR, combatant=Combatant(health=15, stamina=40, attack=4, ac=1, xp=100), ai=Brute())
else:
monster=Entity(x, y, 'o', tcod.desaturated_green, 'Orckh', block_movement=True, render_order=RenderOrder.ACTOR, combatant=Combatant(health=50, stamina=50, attack=7, ac=2, xp=150), ai=Brute())
entities.append(monster)
# spawning some items
max_items_per_room=from_dungeon_level([(1, 1), (2, 4)], self.depth)
number_of_items=randint(0, max_items_per_room)
item_chances={
'pot_juju': 24,
'scroll_confuse': from_dungeon_level([(24, 4)], self.depth),
'scroll_dorkbolt': from_dungeon_level([(32, 2)], self.depth),
'scroll_dorkblast': from_dungeon_level([(16, 5)], self.depth),
'axe': from_dungeon_level([(15, 1)], self.depth),
'shield': from_dungeon_level([(12, 1)], self.depth),
'ring': from_dungeon_level([(3, 1)], self.depth)
}
for _ in range(number_of_items):
x=randint(room.x1+1, room.x2-1)
y=randint(room.y1+1, room.y2-1)
if not any([entity for entity in entities if entity.x==x and entity.y==y]):
item_choice=random_choice_from_dict(item_chances)
if item_choice=='scroll_confuse':
item=Entity(x, y, '#', tcod.light_pink, 'Scroll of Confusodockulus', render_order=RenderOrder.ITEM, item=Item(use_function=confusodockulus, targeting=True, targeting_message=Message('Left-click an enemy to confuse it, or right-click to cancel.', tcod.light_cyan)))
elif item_choice=='pot_juju':
item=Entity(x, y, '!', tcod.violet, 'Rejujuvenation Potion', render_order=RenderOrder.ITEM, item=Item(use_function=heal, amount=6))
elif item_choice=='scroll_dorkbolt':
item=Entity(x, y, '#', tcod.yellow, 'Scroll of Dorkbolt', render_order=RenderOrder.ITEM, item=Item(use_function=dorkbolt, damage=23, maximum_range=11))
elif item_choice=='scroll_dorkblast':
item=Entity(x, y, '#', tcod.orange, 'Scroll of Dorkblast', render_order=RenderOrder.ITEM, item=Item(use_function=dorkblast, targeting=True, targeting_message=Message('Use your mouse to fire because facepalm.', tcod.lighter_blue), damage=17, radius=2))
elif item_choice=='axe':
item=Entity(x, y, '/', tcod.sky, 'Scale Axe +{0}'.format(self.depth), render_order=RenderOrder.ITEM, equippable=Equippable(EquipmentSlots.MAIN_HAND, bonus_attack=1+self.depth*2))
elif item_choice=='shield':
item=Entity(x, y, '[', tcod.darker_orange, 'Scale Shield +{0}'.format(self.depth), render_order=RenderOrder.ITEM, equippable=Equippable(EquipmentSlots.OFF_HAND, bonus_ac=-1+self.depth))
elif item_choice=='ring':
item=Entity(x, y, '=', tcod.amber, 'Ring of Bonding +{0}'.format(self.depth), render_order=RenderOrder.ITEM, equippable=Equippable(EquipmentSlots.FINGER, bonus_max_hp=7+self.depth*13))
entities.append(item)
def next_floor(self, player, message_log, constants):
self.depth+=1
entities=[player]
self.tiles=self.init_tiles()
self.make_map(constants['max_rooms'], constants['room_min_size'], constants['room_max_size'], constants['map_width'], constants['map_height'], player, entities)
player.combatant.take_damage(-player.combatant.health//2)
message_log.add_message(Message('You feed on the ground.', tcod.light_violet))
return entities | propfeds/project-regular | map_objects/game_map.py | game_map.py | py | 7,788 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "map_objects.tile.Tile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "random.randint",
... |
71705104033 | #!/usr/bin/env python
import sys
import json
import logging
import pika
from os import environ
from artnet import dmx
from os.path import realpath, dirname, join
class ArtNetTransmitter(object):
FPS = 15
ARTNET_BROADCAST_IP = "2.255.255.255"
RABBIT_HOST = '192.168.0.42'
def __init__(self):
self.logger = logging.getLogger("Artnet")
self.logger.setLevel(logging.INFO)
self.channel = None
self.connection = None
self.frames = {} # A different frame of 512 dmx addresses per universe
self.mapping = None # This is the Art-Net mapping [row, col] -> {"universe": 0, "dmx": 511}
self.dmx = None
self.num_universes = 0
def callback(self, ch, method, properties, body):
raw = json.loads(body.decode('ascii'))
if self.num_universes > 0:
for row in range(len(self.mapping)):
for col in range(len(self.mapping[0])):
matches = self.mapping[row][col]
# matches should be as such: [{"uni": 0, "dmx": 511}, {"uni": 0, "dmx": 500}, ...]
r, g, b = raw[row][col]
for match in matches:
universe = match["universe"]
dmx = match["dmx"]
self.frames[universe][dmx] = min(255, max(0, int(r*255)))
self.frames[universe][dmx+1] = min(255, max(0, int(g*255)))
self.frames[universe][dmx+2] = min(255, max(0, int(b*255)))
for universe in self.frames:
self.dmx.add(iter([self.frames[universe]]), universe)
"""
This will declare all existing universes
"""
def init(self):
path_mapping = join(realpath(dirname(__file__)), "config", "mapping.json")
with open(path_mapping) as f:
self.mapping = json.load(f)
for row in range(len(self.mapping)):
for col in range(len(self.mapping[0])):
matches = self.mapping[row][col]
for match in matches:
universe = match["universe"]
if universe not in self.frames:
self.frames[universe] = [0]*512 # Declare a new universe with 512 DMX addresses = 0
self.num_universes = max(self.frames) + 1 # e.g. universes 4,5 will create universes 0,1,2,3,4,5
self.dmx = dmx.Controller(self.ARTNET_BROADCAST_IP, universes=self.num_universes, fps=self.FPS)
self.dmx.start()
self.logger.info("Created {} DMX universes".format(self.num_universes))
def run(self):
if 'RABBITMQ_DEFAULT_PASS' not in environ or 'RABBITMQ_DEFAULT_USER' not in environ:
raise ValueError("The Art-Net transmitter requires RABBITMQ_DEFAULT_USER and RABBITMQ_DEFAULT_PASS in its environment")
rabbit_host, rabbit_user, rabbit_pwd = self.RABBIT_HOST, environ['RABBITMQ_DEFAULT_USER'], environ['RABBITMQ_DEFAULT_PASS']
credentials = pika.PlainCredentials(rabbit_user, rabbit_pwd)
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbit_host, credentials=credentials, heartbeat = 0))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange='pixels', exchange_type='fanout')
result = self.channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
self.channel.queue_bind(exchange='pixels', queue=queue_name)
self.logger.info('Waiting for pixel data...')
self.channel.basic_consume(self.callback, queue=queue_name)
self.channel.start_consuming()
def close(self):
if self.channel is not None:
self.channel.stop_consuming()
if self.channel is not None:
self.channel.close()
if self.connection is not None:
self.connection.close()
if self.dmx is not None:
self.dmx.stop()
if __name__ == '__main__':
transmitter = ArtNetTransmitter()
try:
transmitter.init()
transmitter.run()
except KeyboardInterrupt:
transmitter.logger.warning("Closing all ArtNet transmissions upon request!")
pass
finally:
transmitter.close() | arbalet-project/frontage-artnet | transmitter.py | transmitter.py | py | 4,257 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "artnet.dmx",
"line... |
31900400729 | # -*- coding: utf-8 -*-
"""
@File : findTargetSumWays.py
@Author : wenhao
@Time : 2023/2/20 15:55
@LC : 494
"""
from typing import List
from functools import cache
class Solution:
# dp 递推写法 优化:使用 1 个数组
# 为了避免覆盖掉前面的数据,要从右向左更新数组
def findTargetSumWays3(self, nums: List[int], target: int) -> int:
target += sum(nums)
if target < 0 or target % 2:
return 0
target //= 2
f = [0] * (target + 1)
f[0] = 1 # 边界条件
for x in nums:
for c in range(target, x - 1, -1):
f[c] = f[c] + f[c - x]
return f[target]
# dp 递推写法 优化:使用 2 个数组
def findTargetSumWays3(self, nums: List[int], target: int) -> int:
target += sum(nums)
if target < 0 or target % 2:
return 0
target //= 2
n = len(nums)
# 使用 2 个数组优化空间复杂度
f = [[0] * (target + 1) for _ in range(2)]
f[0][0] = 1 # 边界条件
for i, x in enumerate(nums):
for c in range(target + 1):
if c < x:
f[(i + 1) % 2][c] = f[i % 2][c]
else:
f[(i + 1) % 2][c] = f[i % 2][c] + f[i % 2][c - x]
return f[n % 2][target]
# dp 递推写法
def findTargetSumWays3(self, nums: List[int], target: int) -> int:
target += sum(nums)
if target < 0 or target % 2:
return 0
target //= 2
n = len(nums)
# 数组的行数代表可选择数的范围 列数代表选择目标值 数组值代表方案数
# 返回值就是 f[n][target] 即用前 n 行组成 target 的目标数
f = [[0] * (target + 1) for _ in range(n + 1)]
f[0][0] = 1 # 边界条件
for i, x in enumerate(nums):
for c in range(target + 1):
if c < x:
f[i + 1][c] = f[i][c]
else:
f[i + 1][c] = f[i][c] + f[i][c - x]
return f[n][target]
# 记忆化搜索
def findTargetSumWays2(self, nums: List[int], target: int) -> int:
# 数字总和是 s
# 假设添加正号的数之和是 p
# 则添加负号的数之和是 s - p
# 则目标等于 p - (s - p) = t
# 也即 p = (s + t) / 2
# 问题变成从 nums 中选出一些数字 和为 p 这就是一个 01背包 的变形
# 也就是:从 nums 中选出一些数 使之和为 p 求选择方案数目
# dfs(i, p) = dfs(i - 1, p) + dfs(i - 1, p - nums[i])
# 函数返回值代表方案数 后者需要判断 p 与 nums[i] 的大小关系
# 边界条件为:if i < 0: return 1 if p == 0 else 0
target += sum(nums)
if target < 0 or target % 2:
return 0
target //= 2
n = len(nums)
@cache # 记忆化搜索
def dfs(i: int, p: int) -> int:
# dfs 边界条件
if i < 0:
return 1 if p == 0 else 0
if nums[i] > p:
return dfs(i - 1, p)
return dfs(i - 1, p) + dfs(i - 1, p - nums[i])
return dfs(n - 1, target) # 从后向前挑数
# dfs 暴力枚举 TLE
def findTargetSumWays1(self, nums: List[int], target: int) -> int:
n = len(nums)
ans = 0
def dfs(i: int, s):
nonlocal ans
# 递归停止条件
if i < 0:
if s == target:
ans += 1
return
dfs(i - 1, s + nums[i]) # +
dfs(i - 1, s - nums[i]) # -
dfs(n - 1, 0) # 从最后一个数开始
return ans
| callmewenhao/leetcode | 基础算法精讲/动态规划/01背包 完全背包 多重背包/findTargetSumWays.py | findTargetSumWays.py | py | 3,772 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": ... |
24618527866 | import os
import sys
import numpy as np
import pandas as pd
pd.options.display.float_format = "{:,.2f}".format
import logging
from datetime import datetime, timedelta
from features import quantize
# from __main__ import logger_name
logger_name = "simple_average"
log = logging.getLogger(logger_name)
class SimpleAverage(object):
    """Makes the predictions based on weekday and time history data"""
    def __init__(self, model_name, horizon, window):
        # model caches fitted statistics under 'train' and 'predict' keys.
        self.model = dict()
        self.name = model_name
        self.horizon = horizon
        self.window = window
    def train_model(self, rawdata):
        """An abstract training function
        retrieves mean and std of all historical values
        """
        # Group by (weekday, time-of-day) so each weekly slot gets its own
        # statistics; assumes rawdata has a DatetimeIndex -- TODO confirm.
        mean_all = (
            rawdata.groupby([rawdata.index.weekday, rawdata.index.time]).mean().round(2)
        )
        std_all = (
            rawdata.groupby([rawdata.index.weekday, rawdata.index.time]).std().round(2)
        )
        self.model["train"] = {"mean": mean_all, "std": std_all}
        return
    def make_predictions(self, rawdata, start, end):
        """An abstract prediction function
        Averages mean and std obtained during training and the same day previous week
        """
        # Per-slot mean of the recent data window supplied by the caller.
        mean_prev = rawdata.groupby([rawdata.index.weekday, rawdata.index.time]).mean()
        # std_prev = rawdata.groupby([rawdata.index.weekday, rawdata.index.time]).std()
        self.model["predict"] = {"mean": mean_prev}  # , 'std':std_prev
        # Blend training means with recent means on the (weekday, time) key.
        df_mean = pd.concat(
            [self.model["train"]["mean"], self.model["predict"]["mean"]]
        )
        df_std = self.model["train"][
            "std"
        ]  # pd.concat([self.model['train']['std'].pow(2), self.model['predict']['std'].pow(2)])
        mean = df_mean.groupby(level=[0, 1]).mean().round(2)
        # std = df_std.groupby(df_std.index).sum().pow(1/2).round(2)
        # Slice the (weekday, time) MultiIndex down to the requested range.
        mean = mean[(start.weekday(), start.time()) : (end.weekday(), end.time())]
        std = self.model["train"]["std"][
            (start.weekday(), start.time()) : (end.weekday(), end.time())
        ]
        # Expand mean/std into 10/50/90 quantile tracks.
        q10, q50, q90 = self.postprocess_data([mean.values, std.values])
        return np.array([q10, q50, q90])
    def postprocess_data(self, data):
        # quantize(mean, std, q) presumably evaluates a distribution quantile
        # at q -- verify against features.quantize. Results are floored at 0
        # (counts cannot be negative) and cast to int.
        q10 = quantize(data[0], data[1], 0.1).astype(int).clip(0)
        q50 = quantize(data[0], data[1], 0.5).astype(int).clip(0)
        q90 = quantize(data[0], data[1], 0.9).astype(int).clip(0)
        return q10, q50, q90
| aleksei-mashlakov/parking-forecast | src/PMV4Cast/simple_average.py | simple_average.py | py | 2,470 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.options",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
7447934892 | # -*- coding: utf-8 -*-
from OFS.Image import Image
from zope.interface import implements
from zope.component import getUtility
from zope.publisher.interfaces import IPublishTraverse, NotFound
from zope.component import getUtilitiesFor
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from souper.interfaces import ICatalogFactory
from genweb.core.utils import get_safe_member_by_id
from ulearn.core import _
from plone import api
class userProfile(BrowserView):
    """ Return an user profile ../profile/{username} """
    implements(IPublishTraverse)

    # Template rendered by __call__.
    index = ViewPageTemplateFile('views_templates/user_profile.pt')

    def __init__(self, context, request):
        super(userProfile, self).__init__(context, request)
        # Filled in by publishTraverse from the trailing URL segment.
        self.username = None
        self.portal = api.portal.get()
        self.portal_url = self.portal.absolute_url()

    def __call__(self):
        return self.index()

    def publishTraverse(self, request, name):
        # Consume exactly one extra path segment as the user id; any
        # further segment is a 404.
        if self.username is None:  # ../profile/username
            self.username = name
            self.user_info = api.user.get(self.username)
            member_info = get_safe_member_by_id(self.user_info.id)
            self.user_fullname = member_info.get('fullname', '')
        else:
            raise NotFound(self, name, request)
        return self

    def get_posts_literal(self):
        # Site-wide registry setting chooses the wording for activity posts.
        literal = api.portal.get_registry_record(name='ulearn.core.controlpanel.IUlearnControlPanelSettings.people_literal')
        if literal == 'thinnkers':
            return 'thinnkins'
        else:
            return 'entrades'

    def has_complete_profile(self):
        """Return True when the user has a fullname (differing from the
        login), an email and a non-default portrait image."""
        if self.user_info:
            pm = api.portal.get_tool('portal_membership')
            portrait = pm.getPersonalPortrait(self.user_info.id)
            member_info = get_safe_member_by_id(self.user_info.id)
            typePortrait = portrait.__class__.__name__
            # 9715 / 4831 are presumably the byte sizes of the two stock
            # default portraits -- a portrait of that size counts as
            # unchanged. TODO confirm against the theme's default images.
            changePortrait = typePortrait == 'Image' and portrait.size != 9715 and portrait.size != 4831
            if member_info.get('fullname', False) \
               and member_info.get('fullname', False) != self.username \
               and member_info.get('email', False) \
               and changePortrait:
                return True
            else:
                return False
        else:
            # The user doesn't have any property information for some weird
            # reason or simply because we are admin
            return False

    def get_user_info_for_display(self):
        """Render the profile properties, honoring a registered extender
        utility when the site configures one."""
        user_properties_utility = getUtility(ICatalogFactory, name='user_properties')
        extender_name = api.portal.get_registry_record('genweb.controlpanel.core.IGenwebCoreControlPanelSettings.user_properties_extender')
        rendered_properties = []
        if extender_name in [a[0] for a in getUtilitiesFor(ICatalogFactory)]:
            extended_user_properties_utility = getUtility(ICatalogFactory, name=extender_name)
            for prop in extended_user_properties_utility.profile_properties:
                rendered_properties.append(dict(
                    name=_(prop),
                    value=self.user_info.getProperty(prop, '')
                ))
            return rendered_properties
        else:
            # If it's not extended, then return the simple set of data we know
            # about the user using also the profile_properties field
            for prop in user_properties_utility.profile_properties:
                rendered_properties.append(dict(
                    name=_(prop),
                    value=self.user_info.getProperty(prop, '')
                ))
            return rendered_properties

    def get_member_data(self):
        # Member object of the currently logged-in user (not the profiled one).
        return api.user.get_current()

    def user_properties(self):
        member_data = self.get_member_data()
        return {'fullname': member_data.getProperty('fullname'),
                'email': member_data.getProperty('email'),
                'language': member_data.getProperty('language'),
                'home_page': member_data.getProperty('home_page'),
                'description': member_data.getProperty('description'),
                'twitter_username': member_data.getProperty('twitter_username'),
                'location': member_data.getProperty('location'),
                'telefon': member_data.getProperty('telefon'),
                'ubicacio': member_data.getProperty('ubicacio'),
                }
| UPCnet/ulearn.theme | ulearn/theme/browser/user_profile.py | user_profile.py | py | 4,468 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Products.Five.BrowserView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "zope.interface.implements",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "zope.publisher.interfaces.IPublishTraverse",
"line_number": 18,
"usage_type": "argume... |
17398511199 | from math import *
from random import *
import pygame
pygame.init()
# Main window: left half (0..900) shows the raycast view, right half the
# top-down map.
win=pygame.display.set_mode((1800,900))
# S1: low-resolution (200x100) raycast canvas, scaled up when presented.
S1=pygame.Surface((200,100))
# Level image; pixel colors encode walls, mirrors and teleports.
S2=pygame.image.load("MAP.xcf")
run=True
font=pygame.font.SysFont("papyrus",20)
# 1px-wide translucent column used to composite mirror reflections onto S1.
Sags=pygame.Surface((1,100))
Sags.set_colorkey((0,0,0))
Sags.set_alpha(100)
# Overlay that fades in while the player is stuck in the "mirror world".
Glassground=pygame.Surface((200,100))
Glassground.set_alpha(1)
Glassground.fill((0,228,255))
Map=[]
light_mode=True
# Build a 50x50 walkability grid from the map image:
# 0 = white pixel (free), 1 = anything else (solid).
# NOTE(review): Map is built but never read below -- presumably legacy.
for i in range(50):
    line=[]
    for i1 in range(50):
        if S2.get_at((i,i1))==(255,255,255):
            line.append(0)
        else:
            line.append(1)
    Map.append(line)
# Player position in map-image coordinates, and the view angle (radians).
x=84
y=897
angle=0
bounced=False
btimer=0
sprint_timer=0
#Inventory Item indexes and descriptions
# 0. Flashlight, (power)
#
#
#
#
inventory=[[0,3000]]
hand_item=0
flashlight_on=False
pygame.mouse.set_visible(False)
t1=False
alive=True
death_reason="Undefined"
hp=100
# Main game loop: raycast render, movement, mirror/teleport handling, HUD.
# NOTE(review): indentation was lost in the source dump and has been
# reconstructed from the statement semantics -- confirm branch nesting
# (especially the sprint block) against the original file.
while run and alive and hp>0:
    for event in pygame.event.get():
        if event.type==pygame.QUIT:
            run=False
    keys=pygame.key.get_pressed()
    if keys[pygame.K_ESCAPE]: run=False
    # Mouse-look: re-center the cursor each frame, turn by the delta.
    mouse_rel=pygame.mouse.get_pos()
    mouse_down=pygame.mouse.get_pressed()
    pygame.mouse.set_pos((900,450))
    angle+=(mouse_rel[0]-900)/100
    S1.fill((0,0,0))
    win.blit(pygame.transform.scale(S2,(900,900)),(900,0))
    sec_rays=[]
    # Cast 200 rays spread over a ~2 radian field of view.
    for i in range(200):
        angle2=angle+i/100-1
        distance=2
        walkdistance=2
        ray_x=x
        ray_y=y
        rayspeed=[cos(angle2)/2,sin(angle2)/2]
        color=(255,255,255)
        difrakcija=4
        cbonus=[0,0,0]
        # March limit; doubled inside the flashlight cone (center rays).
        dislimit=102
        if 50<i<150 and flashlight_on:
            dislimit=204
        # March the ray until a wall color stops it or range runs out.
        # Special pixels: (0,228,255) mirror (ray bounces back),
        # (0,0,1)/(0,0,2) teleport pairs, (0,0,3)/(0,0,4) reflecting
        # teleports.
        while color in [(255,255,255),(0,228,255),(0,0,1),(0,0,2),(0,0,3),(0,0,4)] and distance<dislimit:
            try:
                color=S2.get_at((round(ray_x),round(ray_y)))
            except:
                # Ray left the map image; stop marching.
                break
            if color==(0,228,255):
                # Record the mirror column for translucent compositing,
                # then reverse the ray direction. (The comprehension's
                # 'i' is local to it and does not clobber the ray index.)
                sec_rays.append([i,40-150/distance,(0,228,255)])
                rayspeed=[-i for i in rayspeed]
            if color==(0,0,1):
                ray_x+=38
                ray_y+=14
            if color==(0,0,2):
                ray_x-=38
                ray_y-=14
            if color==(0,0,3):
                ray_x-=4
                ray_y-=209
                rayspeed=[-i for i in rayspeed]
            if color==(0,0,4):
                ray_x+=2
                ray_y+=209
                rayspeed=[-i for i in rayspeed]
            ray_x+=rayspeed[0]
            ray_y+=rayspeed[1]
            distance+=1
            walkdistance+=1
        if color==(255,255,255):
            color=(0,0,0)
        # The center ray (i==100) doubles as the movement/collision probe.
        if i==100 and walkdistance>4:
            if keys[pygame.K_UP] or keys[pygame.K_w]:
                x+=cos(angle)*0.05
                y+=sin(angle)*0.05
            if keys[pygame.K_SPACE] and sprint_timer<1000:
                x+=cos(angle)*0.15
                y+=sin(angle)*0.15
                sprint_timer+=3
            elif sprint_timer>0:
                sprint_timer-=1
        elif walkdistance<=4:
            # Too close to a wall: push the player back.
            x-=cos(angle)*0.05
            y-=sin(angle)*0.05
        # Convert march length into a wall-slice half-height.
        distance=40-150/distance
        color=[min(255,max(0,color[i]+cbonus[i])) for i in range(3)]
        try:
            # Darken with distance; flashlight rays fall off more gently,
            # with extra falloff toward the cone edges (disbonus).
            if light_mode:
                if dislimit==204:
                    disbonus=abs(100-i)/24
                    pygame.draw.line(S1,(max(color[0]-distance*(1.5+disbonus),0),max(color[1]-distance*(1.5+disbonus),0),max(color[2]-distance*(1.5+disbonus),0)),(i,distance),(i,100-distance))
                else:
                    pygame.draw.line(S1,(max(color[0]-distance*3.5,0),max(color[1]-distance*3.5,0),max(color[2]-distance*3.5,0)),(i,distance),(i,100-distance))
            else:
                if dislimit==204:
                    disbonus=abs(100-i)/24
                    pygame.draw.line(S1,(max(color[0]-distance*(3.5+disbonus),0),max(color[1]-distance*(3.5+disbonus),0),max(color[2]-distance*(3.5+disbonus),0)),(i,distance),(i,100-distance))
                else:
                    pygame.draw.line(S1,(max(color[0]-distance*5.5,0),max(color[1]-distance*5.5,0),max(color[2]-distance*5.5,0)),(i,distance),(i,100-distance))
        except:
            pass
    # Composite the translucent mirror-reflection columns onto S1.
    for i in range(len(sec_rays)):
        c2=(min(255,max(sec_rays[i][2][0]-sec_rays[i][1]*5,0)),min(255,max(sec_rays[i][2][1]-sec_rays[i][1]*5,0)),min(255,max(sec_rays[i][2][2]-sec_rays[i][1]*5,0)))
        Sags.fill((0,0,0))
        Sags.set_alpha(int(max(0,30-sec_rays[i][1]))*6)
        pygame.draw.line(Sags,c2,(0,sec_rays[i][1]),(0,100-sec_rays[i][1]))
        S1.blit(Sags,(sec_rays[i][0],0))
    # Apply the same mirror/teleport pixel rules to the player position.
    pcolor=S2.get_at((round(x),round(y)))
    if pcolor==(0,228,255):
        angle+=pi
        bounced=not bounced
        btimer=0
        t1=True
    elif pcolor==(0,0,1):
        x+=38
        y+=14
        t1=True
    elif pcolor==(0,0,2):
        x-=38
        y-=14
        t1=True
    elif pcolor==(0,0,3):
        x-=4
        angle+=pi
        y-=209
        t1=True
    elif pcolor==(0,0,4):
        x+=2
        angle+=pi
        y+=209
        t1=True
    elif t1:
        # One-frame cooldown so a single crossing fires only once.
        t1=False
    else:
        # While inside the mirror world, fade the screen; too long kills.
        if bounced:
            btimer+=1
            if btimer>800:
                alive=False
                death_reason="Mirror"
            Glassground.set_alpha(int(btimer/4))
            S1.blit(Glassground,(0,0))
    # Player marker position in on-screen (map-view) pixels.
    true_x=(x+0.5)*18
    true_y=(y+0.5)*18
    # Held-item handling (only the flashlight, item id 0, exists so far).
    if len(inventory)>0:
        item=inventory[hand_item]
        if item[0]==0: #Flashlight
            if mouse_down[0]:
                item[1]-=1
                flashlight_on=True
            else:
                flashlight_on=False
            if item[1]<=0:
                inventory.remove(item)
                flashlight_on=False
            pygame.draw.rect(S1,(255,255,0),(198,25,1,50*item[1]/3000))
    # HUD bars: sprint stamina (magenta) and mirror timer (cyan).
    pygame.draw.rect(S1,(255,0,255),(199,25,2,50-50*sprint_timer/1000))
    pygame.draw.rect(S1,(0,255,255),(199,75,2,25-25*btimer/800))
    pygame.draw.circle(win,(255,0,0),(true_x+900,true_y),5)
    win.blit(pygame.transform.scale(S1,(1800,900)),(0,0))
    pygame.display.update()
# Death screen: show the message and idle until the window is closed.
if alive==False:
    if death_reason=="Mirror":
        S1.blit(font.render("Tu tiki Spogulots",1,(0,114,125)),(30,30))
        win.blit(pygame.transform.scale(S1,(1800,900)),(0,0))
    while run:
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                run=False
        pygame.display.update()
pygame.quit()
| makazis/Five-Nights-at-AVG | FNAG.py | FNAG.py | py | 6,701 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",... |
40471160368 | import cv2
from cv2 import HOUGH_GRADIENT
import numpy as np
from matplotlib import pyplot as plt
sct_img = cv2.imread('coins.png')
def nothing(x):
    """No-op callback placeholder (e.g. for cv2 trackbars)."""
    return None
# HSV threshold bounds: (Hl, Sl, Vl) lower, (H, S, V) upper.
H = 103
S = 255
V = 255
Hl = 0
Sl = 191
Vl = 119
while True:
    # NOTE(review): cv2.imread yields BGR, so COLOR_RGB2HSV swaps the
    # red/blue channels before the HSV conversion. Kept as-is because the
    # threshold values above may have been tuned against this conversion;
    # confirm and switch to COLOR_BGR2HSV if they were tuned on true HSV.
    hsv = cv2.cvtColor(sct_img, cv2.COLOR_RGB2HSV)
    cv2.imshow('image', hsv)
    lower_blue = np.array([Hl, Sl, Vl])
    upper_blue = np.array([H, S, V])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    # Find the circle blobs on the binary mask:
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Use a list to store the center and radius of the target circles:
    detectedCircles = []
    for i, c in enumerate(contours):
        # Approximate the contour to a circle:
        (x, y), radius = cv2.minEnclosingCircle(c)
        # Compute the center and radius:
        center = (int(x), int(y))
        radius = int(radius)
        # Keep only coin-sized circles.
        if radius > 20 and radius < 40:
            # Draw the circles:
            cv2.circle(sct_img, center, radius, (0, 0, 255), 2)
            cv2.rectangle(sct_img, (center[0] - 5, center[1] - 5), (center[0] + 5, center[1] + 5), (0, 128, 255), -1)
            # Store the center and radius:
            detectedCircles.append([center, radius])
    # Fix: without waitKey, imshow never repaints and the loop could not
    # be left. Pump the HighGUI event loop and exit on Esc.
    if cv2.waitKey(30) & 0xFF == 27:
        break
cv2.destroyAllWindows()
| GitRekton/EETAC-Applied-Image-Processing | otherMethod.py | otherMethod.py | py | 1,289 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2HSV",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_... |
10053148775 | from typing import Any, Dict, Optional, Tuple
import xml.etree.ElementTree as ET
import chc.util.IndexedTable as IT
def has_control_characters(s: str) -> bool:
    """Return True if s contains any character outside printable ASCII (32-126)."""
    # any() short-circuits like the original loop; the original's
    # for/else was redundant since the loop returned on first hit.
    return any(ord(c) < 32 or ord(c) > 126 for c in s)
def byte_to_string(b: int) -> str:
    """Render an integer byte value as two lowercase hex digits."""
    return format(b, "02x")
def value_from_hex(s: str) -> int:
    """Parse a hexadecimal string into an integer."""
    return int(s, base=16)
def hexstring(s: str) -> str:
    """Hex-encode a string: two lowercase hex digits per character."""
    # join builds the result in one pass; the original's string += loop
    # was quadratic in the input length.
    return "".join(byte_to_string(ord(c)) for c in s)
def dehexstring(h: str) -> str:
    """Decode a hex string (two digits per character) back into a string.

    A trailing unpaired digit is silently ignored, matching the original
    pair-count loop. On malformed hex the original behavior is kept:
    report the error and terminate the process.
    """
    try:
        # Step over the string two digits at a time; avoids the original's
        # repeated re-slicing (quadratic) and its quadratic string +=.
        return "".join(
            chr(int(h[i:i + 2], 16)) for i in range(0, len(h) - 1, 2)
        )
    except ValueError:
        # Narrowed from BaseException: only int() can fail here, and a
        # bare catch would also swallow KeyboardInterrupt/SystemExit.
        print("Error in dehexing string: " + h)
        exit(1)
def decode(ishex: bool, h: str) -> str:
    """Undo encode(): hex-decode h when ishex is set, else pass it through."""
    return dehexstring(h) if ishex else h
def encode(s: str) -> Tuple[bool, str]:
    """Return (True, hex-encoding) for strings containing control
    characters, otherwise (False, s) unchanged."""
    if not has_control_characters(s):
        return (False, s)
    return (True, hexstring(s))
class IndexedTableError(Exception):
    """Raised for lookup/indexing failures in the string table."""

    def __init__(self, msg: str) -> None:
        # Initialize Exception properly so e.args, repr() and pickling
        # carry the message (the original left args empty).
        super().__init__(msg)
        self.msg = msg

    def __str__(self) -> str:
        return self.msg
class StringIndexedTable(IT.IndexedTableSuperclass):
    """Bidirectional string <-> integer index table with 1-based indices,
    serializable to/from <n ix=... v=... [hex="yes"]> XML elements."""

    def __init__(self, name: str) -> None:
        IT.IndexedTableSuperclass.__init__(self, name)
        self.stringtable: Dict[str, int] = {}  # string -> index
        self.indextable: Dict[int, str] = {}  # index -> string
        self.next = 1  # next index to hand out

    def reset(self) -> None:
        """Drop all entries and restart index assignment at 1."""
        self.stringtable = {}
        self.indextable = {}
        self.next = 1

    def add(self, s: Optional[str]) -> int:
        """Return the index for s, assigning a fresh one if unseen."""
        if s is None:
            print("Attempt to index None in string table")
            raise IndexedTableError(self.name + ": Attempt to index None")
        if s in self.stringtable:
            return self.stringtable[s]
        else:
            index = self.next
            self.stringtable[s] = index
            self.indextable[index] = s
            self.next += 1
            return index

    def size(self) -> int:
        """Number of strings currently stored."""
        return self.next - 1

    def retrieve(self, index: int) -> str:
        """Return the string at index; raise IndexedTableError if absent."""
        if index in self.indextable:
            return self.indextable[index]
        else:
            msg = (
                "Unable to retrieve item "
                + str(index)
                + " from table "
                + self.name
                + " (size: "
                + str(self.size())
                + ")"
            )
            raise IndexedTableError(
                msg + "\n" + self.name + ", size: " + str(self.size())
            )

    def read_xml(self, node: Optional[ET.Element]) -> None:
        """Populate the table from the <n> children of node."""
        if node is None:
            print("Xml node not present in string table")
            raise IndexedTableError("string table")
        for snode in node.findall("n"):
            xml_ix = snode.get("ix")
            if xml_ix is None:
                raise IndexedTableError("`ix` missing from element")
            index = int(xml_ix)
            ishex = snode.get("hex", "no") == "yes"
            xml_v = snode.get("v")
            if xml_v is None:
                raise IndexedTableError("`v` missing from element")
            s = decode(ishex, xml_v)
            self.stringtable[s] = index
            self.indextable[index] = s
            # Keep self.next ahead of every loaded index.
            if index >= self.next:
                self.next = index + 1

    def write_xml(self, node: ET.Element) -> None:
        """Append one <n> child per entry, hex-encoding strings that
        contain control characters."""
        for index in sorted(self.indextable):
            s = self.indextable[index]
            (ishex, sencoded) = encode(s)
            snode = ET.Element("n")
            snode.set("v", sencoded)
            snode.set("ix", str(index))
            # Fix: mark hex-encoded values so read_xml decodes them again;
            # previously the attribute was never written and hex entries
            # round-tripped as literal hex text.
            if ishex:
                snode.set("hex", "yes")
            node.append(snode)

    def __str__(self) -> str:
        lines = []
        lines.append("\nstring-table")
        for ix in sorted(self.indextable):
            lines.append(str(ix).rjust(4) + " " + str(self.indextable[ix]))
        return "\n".join(lines)
if __name__ == "__main__":
    # Ad-hoc smoke tests for the module-level helpers.
    print(str(has_control_characters("\n")))
    print(str(has_control_characters("string")))
    print(hexstring("\n\n"))
    print(dehexstring("0a0a"))
    # encode/decode must round-trip for both plain and control strings.
    print(decode(*encode("string")))
    print(decode(*encode("\n\n")))
    print(dehexstring("4d4158504154484c454e3d25640a"))
    print(
        dehexstring(
            "496e7075742070617468203d2025732c207374726c656e287061746829203d2025640a"
        )
    )
    print(dehexstring("4d4158504154484c454e203d2025640a"))
| static-analysis-engineering/CodeHawk-C | chc/util/StringIndexedTable.py | StringIndexedTable.py | py | 4,513 | python | en | code | 20 | github-code | 1 | [
{
"api_name": "typing.Tuple",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "chc.util.IndexedTable.IndexedTableSuperclass",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "chc.util.IndexedTable",
"line_number": 66,
"usage_type": "name"
},
{
... |
73501757154 | """
Author: Brian Mascitello
Date: 12/16/2017
Websites: http://adventofcode.com/2017/day/14
Info: --- Day 14: Disk Defragmentation ---
--- Part Two ---
"""
import copy
from functools import reduce
def construct_dense_hash(sparse_hash):
    """Collapse a sparse knot hash into one value per run of 16 numbers.

    Each group of 16 consecutive values is XOR-folded into a single
    number (the knot-hash densifying step from AoC 2017 day 10).
    """
    return [
        reduce(lambda a, b: a ^ b, sparse_hash[index:index + 16])
        for index in range(0, len(sparse_hash), 16)
    ]
def determine_ordinals_list(input_data):
    """Turn each input string into its list of character ordinals with the
    standard knot-hash length suffix [17, 31, 73, 47, 23] appended."""
    suffix = [17, 31, 73, 47, 23]
    # '+ suffix' builds a fresh list per item, so sublists never alias.
    return [[ord(char) for char in item] + suffix for item in input_data]
def find_valid_neighbors(x, y, maximum):
    """Return the 4-connected neighbors of (x, y) inside the square grid
    [0, maximum) x [0, maximum)."""
    candidates = {(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)}
    # Filter while building the set instead of the original's
    # copy-then-remove dance over the candidate set.
    return {
        (nx, ny)
        for nx, ny in candidates
        if 0 <= nx < maximum and 0 <= ny < maximum
    }
def get_data(input_text):
    """Read and return the full contents of the given text file."""
    # 'with' guarantees the handle is closed even if read() raises;
    # the original closed manually and would leak on error.
    with open(input_text) as file:
        return file.read()
def knot_hash(input_list, length_list, rounds):
    """Run the AoC-2017 knot-hash rounds over input_list.

    For each length, a (circular) span of that many elements starting at
    the current position is reversed; the position then advances by the
    span plus a skip size that grows by one per length. Returns a new
    list; the caller's input_list is left untouched.

    NOTE(review): a span whose length equals len(list) wraps to
    end == start and is left unreversed; harmless for the knot hash
    proper, where lengths are always < 256.
    """
    lst = list(input_list)  # work on a copy so the caller's list survives
    current_position = 0
    skip_size = 0
    for _ in range(rounds):
        for length in length_list:
            end_position = (current_position + length) % len(lst)
            if current_position <= end_position:
                # Span does not wrap: reverse it in place.
                lst[current_position:end_position] = list(
                    reversed(lst[current_position:end_position]))
            else:
                # Span wraps past the end: stitch the two pieces together,
                # reverse, then write the halves back.
                sub_list = lst[current_position:]
                sub_list_first_size = len(sub_list)
                sub_list.extend(lst[:end_position])
                sub_list.reverse()
                lst[current_position:] = sub_list[:sub_list_first_size]
                lst[:end_position] = sub_list[sub_list_first_size:]
            current_position = (end_position + skip_size) % len(lst)
            skip_size += 1
    return lst
def list_to_hex_string(list_of_numbers):
    """Concatenate each number as two lowercase hex digits.

    Assumes non-negative inputs (knot-hash XOR values are 0..255).
    """
    # join is linear; the original's string += loop was quadratic.
    return "".join(format(number, "02x") for number in list_of_numbers)
def make_data_list(input_data, rounds):
    """Return ['<input>-0', '<input>-1', ..., '<input>-(rounds-1)']."""
    return [input_data + '-' + str(index) for index in range(0, rounds)]
# Driver: build 128 knot-hash rows, convert to a 128x128 bit grid, then
# count the 4-connected regions of 1-bits (AoC 2017 day 14 part 2).
standard_list = list(range(256))
# data = 'flqrgnkx' # test 8108
data = 'jxqlasbh'
data_list = make_data_list(data, 128)
lengths_list = determine_ordinals_list(data_list)
knots_list = list()
for length in lengths_list:
    knots_list.append(knot_hash(standard_list, length, 64))
dense_list = list()
for knot in knots_list:
    dense_list.append(construct_dense_hash(knot))
hex_list = list()
for dense in dense_list:
    hex_list.append(list_to_hex_string(dense))
# Expand each hex digit into its 4-bit binary string (one grid row each).
binary_list = list()
for hex_value in hex_list:
    temp_bin = list()
    for character in hex_value:
        # https://stackoverflow.com/questions/1425493/convert-hex-to-binary
        temp_bin.append(bin(int(character, 16))[2:].zfill(4))
    binary_list.append(''.join(temp_bin))
# Collect the coordinates of every used (1) square.
ones_locations = set()
for row, line in enumerate(binary_list):
    for column, character in enumerate(line):
        if character == '1':
            ones_locations.add((row, column))
# Flood-fill: pop a seed, then repeatedly absorb neighboring 1-squares
# until the region is exhausted; each seed starts a new region.
regions = 0
while ones_locations:
    new_region = [ones_locations.pop()]
    regions += 1
    while new_region:
        old_locations = new_region.copy()
        new_region = list()
        for row, column in old_locations:
            for pair in find_valid_neighbors(row, column, len(binary_list)):
                if pair in ones_locations:
                    ones_locations.remove(pair)
                    new_region.append(pair)
print(regions)  # 1182
| Brian-Mascitello/Advent-of-Code | Advent of Code 2017/Day 14 2017/Day14Q2 2017.py | Day14Q2 2017.py | py | 4,323 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "functools.reduce",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 56,
"usage_type": "call"
}
] |
75082514912 | import os
from collections import Counter
from string import punctuation
import glob
import re
class Transcript:
    """This class is a representation of a transcript following the norm from
    the CHILDES database. As long as the raw transcript file follows that norm,
    all kinds of informations and extractions from the transcript should be
    obtainable through the class methods and variables."""

    def __init__(self, filepath):
        try:
            # Fix: use a context manager so the file handle is always
            # closed; the original opened the file and never closed it.
            with open(filepath, 'r', encoding='utf-8') as file:
                self.name = os.path.basename(file.name)
                # store the raw transcript, but clean it a little bit
                self.raw_transcript = file.read()
            remove_list = ['\t', '\r']
            for item in remove_list:
                self.raw_transcript = self.raw_transcript.replace(item, '')
            # extract headerlines and transcriptlines
            text = self.raw_transcript.split('\n')
            self.headers = []
            self.lines = []
            for line in text:
                if line.startswith('@'):
                    self.headers.append(line)
                elif line.startswith('*') or line.startswith('%'):
                    self.lines.append(line)
                else:  # continuation of previous line
                    if not self.lines:
                        self.headers[-1] = self.headers[-1] + line
                    else:
                        self.lines[-1] = self.lines[-1] + line
            self.fully_loaded = True  # flag that transcript is fully loaded
        except IOError as e:
            self.fully_loaded = False  # flag that transcript was not loaded
            print('An error occured when loading:', filepath)
            print('Error message:', e)

    def lines_as_tuples(self, speakers='all', annotations=False,
                        as_blocks=False):
        """Return a list of tuples of all utterance lines, where tuple[0] is
        the three letter initials for the speaker and tuple[1] is the line. One
        or more speakers can be specified to retrieve just lines by these and
        one or more flags can be marked to get annotations for the requested
        speaker(s). If as_blocks is flagged, the lines along with their
        annotations are passed in lists."""
        if speakers == 'all':
            speakers = self.speakers()
        if type(speakers) == str:
            speakers = [speakers]
        # check if the requested speakers are present in the transcript
        # and report if they are not
        for speaker in speakers:
            if speaker not in self.speakers():
                print(f'WARNING: The speaker {speaker} is not present ' +
                      f'in the transcript {self.name}.')
        # make list with lines as three part tuples:
        # (marker '*'/'%', 3-letter code, line content after "XXX:")
        tuples = [(line[0], line[1:4], line[5:]) for line in self.lines]
        # divide into blocks of turns with their annotations
        blocks = []
        for line in tuples:
            if line[0] == '*':
                blocks.append([(line[1], line[2])])
            elif line[0] == '%' and annotations == True:
                blocks[-1].append((line[1], line[2]))
        blocks = [block for block in blocks if block[0][0] in speakers]
        if as_blocks:
            return blocks
        # put together the blocks of the requested speakers with annotations
        # if requested
        tuples = []
        for block in blocks:
            if block[0][0] in speakers and not annotations:
                tuples += [line for line in block if line[0] in speakers]
            elif block[0][0] in speakers and annotations:
                tuples += [line for line in block]
        return tuples

    def tokens(self, speakers='all'):
        """Return a list of tokens uttered by the specified speaker(s). If no
        speakers are specified, return tokens for all speakers."""
        if speakers == 'all':
            speakers = self.speakers()
        if type(speakers) == str:
            speakers = [speakers]
        # get tokens from the specified speakers
        tuples = self.lines_as_tuples(speakers)
        tokens = [word.lower() for tpl in tuples for word in tpl[1].split()]
        # clean for punctuation
        tokens = ' '.join(tokens)
        tokens = ''.join(c for c in tokens if c not in punctuation)
        tokens = tokens.split()
        return tokens

    def types(self, speakers='all'):
        """Return a list of types uttered by the specified speaker(s). If no
        speakers are specified, return types for all speakers."""
        return set(self.tokens(speakers=speakers))

    def ttr(self, speakers='all', disregard=()):
        """Return the type-to-token-ratio of the transcript in whole. Pass
        specific speaker(s) to get it for only that/these speaker(s). A list of
        words to be disregarded in the calculation, e.g. function words, can be
        passed if needed."""
        tokens = [word for word in self.tokens(speakers=speakers)
                  if word not in disregard]
        types = set(tokens)
        return len(types) / len(tokens)

    def mlu(self, speaker='CHI', disregard=('www', 'yyy', 'xxx')):
        """Return the MLU (mean length of utterance, in morphemes) for the
        given speaker, the target child as default, in the transcript."""
        blocks = self.lines_as_tuples(speakers=speaker, annotations=True,
                                      as_blocks=True)
        # filter out utterances containing the disregarded words
        # (unintelligible/untranscribed markers by default)
        lines = []
        for block in blocks:
            unclear = False
            for word in disregard:
                if word in block[0][1]:
                    unclear = True
            if not unclear:
                lines += block
        annotation = [clean_line(line[1]) for line in lines if line[0] == 'mor']
        # Split each %mor word on morpheme boundary markers (-, ~, #).
        morphemes = []
        for line in annotation:
            words = line.split()
            for word in words:
                word = re.split('[-~#]', word)
                morphemes += word
        return len(morphemes) / len(annotation)

    def word_freqs(self, speakers='all'):
        """Return a Counter object of tokens uttered by the specified
        speaker(s). If no speakers are specified, return a Counter object for
        all speakers."""
        return Counter(self.tokens(speakers=speakers))

    def prop_word_freqs(self, speakers='all'):
        """Return a dict of words and their proportional frequencies."""
        # get number of tokens and a list of tuples with words and frequencies
        freqs = self.word_freqs(speakers=speakers)
        tokens = sum(freqs.values())
        freqs = freqs.most_common()
        # make a dict with the word as key and prop freq as value
        prop_freqs = {word[0]: word[1] / tokens for word in freqs}
        return prop_freqs

    def speakers(self):
        """Return a list of all speakers that appear in the transcript"""
        return list({line[1:4] for line in self.lines if line.startswith('*')})

    def speaker_details(self):
        """Return a dictionary of dictionaries containing details about the
        given speaker(s). If no info is given in the original transcript file
        on some details, e.g. age or sex, those entries will simply be empty.
        The entries are: lang, corp, name, age, sex, role. As an example, the
        child's age is called by transcript.speaker_details()['CHI']['age']"""
        # find the ID lines from the header lines and split these
        ids = [id_str for id_str in self.headers if id_str.startswith('@ID')]
        ids = [entry[4:].split(sep='|') for entry in ids]
        # assign the values to their respective dict entries
        ids = [{'lang': entry[0], 'corp': entry[1], 'name': entry[2],
                'age': entry[3], 'sex': entry[4], 'role': entry[7]}
               for entry in ids]
        # create a dict with names as keys and the dicts as values
        ids = {entry['name']: entry
               for entry in ids if entry['name'] in self.speakers()}
        return ids

    def children(self):
        """Return a list of the target child(ren) in the transcript."""
        children = [entry['name'] for entry in self.speaker_details().values()
                    if entry['role'] == 'Target_Child']
        return children
def load_all_from_dir(dirname):
    """Return a list of Transcript objects loaded from the given directory
    sorted after file names. The directory name should be stated either as
    relative path from the working directory or as an absolute path."""
    prev_dir = os.getcwd()
    os.chdir(dirname)
    try:
        # load all transcripts from the folder and clean out non-loaded ones
        trans = [Transcript(file) for file in glob.glob('*.cha')]
        trans = [trn for trn in trans if trn.fully_loaded]
        # make sure the list is sorted
        trans.sort(key=lambda x: x.name)
    finally:
        # Fix: always restore the caller's working directory, even if
        # loading raises (the original would leave the cwd changed).
        os.chdir(prev_dir)
    return trans
def age_in_months(age):
    """Return an age passed in the format y;mm.dd as the number of months
    with two decimal numbers. The day part is optional ('y;mm' works) and
    a month is taken to be 30 days.
    """
    # split the passed age string at the specified characters
    y_md = age.split(';')
    m_d = y_md[1].split('.')
    # convert each number to a float
    years = float(y_md[0])
    months = float(m_d[0])
    # in case days is not specified (missing or empty), assign 0;
    # narrowed from a bare except, which also swallowed KeyboardInterrupt
    try:
        days = float(m_d[1])
    except (IndexError, ValueError):
        days = 0
    # calculate number of months
    total = years * 12 + months + days / 30
    return float(f'{total:.2f}')
def clean_line(line):
    """Strip the trailing-punctuation markers ' .', ' ?' and ' !'."""
    for marker in (' .', ' ?', ' !'):
        line = line.replace(marker, '')
    return line
def word_freqs_all(transcripts, speakers='all'):
    """Return one Counter object of all transcripts passed counting only
    utterances from the specified speaker(s)."""
    combined = Counter()
    for trn in transcripts:
        combined.update(trn.word_freqs(speakers=speakers))
    return combined
def basic_stats(transcript: Transcript, speakers='CHI'):
    """Return a tuple (age in months, number of tokens, number of types,
    TTR) for the given speaker(s) -- the target child by default.

    The age is always the target child's, read from the 'CHI' ID header.
    """
    age = age_in_months(transcript.speaker_details()['CHI']['age'])
    # Fix: honor the speakers argument consistently; the original
    # hard-coded 'CHI' for tokens/types while using it only for the TTR,
    # so the tuple mixed speakers when a non-default value was passed.
    # Behavior with the default argument is unchanged.
    tokens = len(transcript.tokens(speakers=speakers))
    types = len(transcript.types(speakers=speakers))
    ttr = transcript.ttr(speakers=speakers)
    return age, tokens, types, ttr
| KasperFyhn/ChildLangAcqui | src/childes_transcripts.py | childes_transcripts.py | py | 10,751 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.basename",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "string.punctuation",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "re.split",
"lin... |
32196464996 | """
Scraper for fire alerts in Los Angeles
http://groups.google.com/group/LAFD_ALERT/
RSS: http://groups.google.com/group/LAFD_ALERT/feed/rss_v2_0_msgs.xml?num=50
"""
from ebdata.retrieval.scrapers.base import ScraperBroken
from ebdata.retrieval.scrapers.list_detail import RssListDetailScraper, SkipRecord
from ebdata.retrieval.scrapers.newsitem_list_detail import NewsItemListDetailScraper
from ebpub.db.models import NewsItem
import datetime
import re
class AlertScraper(NewsItemListDetailScraper, RssListDetailScraper):
schema_slugs = ('fire-alerts',)
has_detail = False
sleep = 4
def list_pages(self):
yield self.get_html('http://groups.google.com/group/LAFD_ALERT/feed/rss_v2_0_msgs.xml?num=50')
def clean_list_record(self, rss_record):
record = {
'pub_date': datetime.date(*rss_record.pop('updated_parsed')[:3]),
'summary': rss_record['summary'].strip(),
}
if re.search(r'^(?i)\*UPDATE:', record['summary']):
m = re.search(r'^\*UPDATE:\s*(?P<location_name>[^\*]*)\*\s*(?P<description>.*)\s*-\s*(?P<reporter>.*?)\#\#\#$', record['summary'])
if not m:
self.logger.warn('Could not parse update %r' % record['summary'])
raise SkipRecord('Could not parse update %r' % record['summary'])
record.update(m.groupdict())
record.update({
'is_update': True,
'incident_type': '',
'fire_station': '',
'radio_channels': '',
'incident_time': '',
})
else: # Not an update
m = re.search(r'^\*(?P<incident_type>[^\*]*)\*\s*(?P<location_name>[^;]*);\s*MAP (?:\d+[- ]\w\d)?;\s*FS (?P<fire_station>\d+); (?P<description>.*?); Ch:(?P<radio_channels>[\d, ]+)\s*@(?P<incident_time>\d\d?:\d\d [AP]M)?\s*-(?P<reporter>.*?)\#\#\#$', record['summary'])
if not m:
raise SkipRecord('Could not parse %r' % record['summary'])
record.update(m.groupdict())
record['incident_type'] = record['incident_type'].upper() # Normalize
record['radio_channels'] = ','.join(record['radio_channels'].split(','))
record['is_update'] = False
record['description'] = record['description'].replace(' ', ' ').replace('"', '"').replace('&', '&').strip()
record['location_name'] = record['location_name'].strip()
# Get the incident ID and message ID from the Google Groups URL.
# We'll use these as unique identifiers.
m = re.search(r'browse_thread/thread/(?P<incident_id>[^/]*)/(?P<message_id>[^\?]*)\?', rss_record['link'])
if not m:
raise ScraperBroken('Got weird URL: %r', rss_record['link'])
record.update(m.groupdict())
record['link'] = rss_record['link']
# I can't figure out why this record is causing errors, so for now
# we'll just skip it.
if record['message_id'] == '0faabeab3aad8492':
raise SkipRecord()
return record
def existing_record(self, record):
    """Return the already-saved NewsItem for this message ID, or None.

    Uniqueness is keyed on the Google Groups message_id attribute within
    this scraper's schema.
    """
    matches = NewsItem.objects.filter(schema__id=self.schema.id)
    matches = matches.by_attribute(self.schema_fields['message_id'], record['message_id'])
    try:
        return matches[0]
    except IndexError:
        return None
def save(self, old_record, list_record, detail_record):
    """Persist one parsed record as a NewsItem.

    old_record comes from existing_record(); when it is not None the
    incident was stored previously and nothing is written (this scraper
    never updates existing items in place).
    """
    if old_record is not None:
        return  # already in the database
    # Resolve (or lazily create) the Lookup rows this item references.
    incident_type = self.get_or_create_lookup('incident_type', list_record['incident_type'], list_record['incident_type'], make_text_slug=False)
    reporter = self.get_or_create_lookup('reporter', list_record['reporter'], list_record['reporter'])
    fire_station = self.get_or_create_lookup('fire_station', list_record['fire_station'], list_record['fire_station'])
    attributes = {
        'incident_type': incident_type.id,
        'description': list_record['summary'],  # raw summary, not the parsed description
        'reporter': reporter.id,
        'fire_station': fire_station.id,
        'incident_time': list_record['incident_time'],
        'incident_id': list_record['incident_id'],
        'message_id': list_record['message_id'],
    }
    if list_record['is_update']:
        title = 'Update' # TODO: Better title that takes into account the incident type.
    else:
        title = incident_type.name
    self.create_newsitem(
        attributes,
        title=title,
        description=list_record['description'],
        url=list_record['link'],
        item_date=list_record['pub_date'],
        location_name=list_record['location_name'],
    )
if __name__ == "__main__":
    # Script entry point: enable debug logging and run one scrape cycle.
    from ebdata.retrieval import log_debug
    AlertScraper().update()
| brosner/everyblock_code | everyblock/everyblock/cities/la/fire_alerts/retrieval.py | retrieval.py | py | 4,793 | python | en | code | 130 | github-code | 1 | [
{
"api_name": "ebdata.retrieval.scrapers.newsitem_list_detail.NewsItemListDetailScraper",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "ebdata.retrieval.scrapers.list_detail.RssListDetailScraper",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "datetime.date... |
33653947346 | # encoding: utf-8
import argparse
import os
import sys
import torch
from torch.backends import cudnn
sys.path.append('.')
from config import cfg
from data import make_data_loader
from engine.trainer import do_train, do_train_with_center
from modeling import build_model
from layers import make_loss, make_loss_with_center
from solver import make_optimizer, make_optimizer_with_center, WarmupMultiStepLR
from utils.logger import setup_logger
def train(cfg):
    """Build loaders, model, losses and optimizers from *cfg* and run the
    training loop (with or without center loss, per cfg.MODEL.IF_WITH_CENTER).

    Raises:
        ValueError: if cfg.MODEL.PRETRAIN_CHOICE is not 'imagenet' (the only
            supported choice for initializing the LR scheduler).
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, clustering_loader = make_data_loader(cfg)
    # prepare model
    model = build_model(cfg, num_classes)
    if cfg.MODEL.IF_WITH_CENTER == 'on':
        loss_func, center_criterion_part, center_criterion_global, center_criterion_fore = make_loss_with_center(cfg, num_classes)
        optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion_part, center_criterion_global, center_criterion_fore)
    else:
        loss_func = make_loss(cfg, num_classes)
        optimizer = make_optimizer(cfg, model)
    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)
    else:
        # BUG FIX: the original only printed a warning here and fell through,
        # which crashed later with a NameError on `scheduler`/`start_epoch`.
        # Fail fast with a clear error instead.
        raise ValueError('Only support pretrain_choice for imagenet, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))
    if cfg.MODEL.IF_WITH_CENTER == 'on':
        do_train_with_center(
            cfg,
            model,
            center_criterion_part,
            center_criterion_global,
            center_criterion_fore,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,       # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,     # add for using self trained model
            clustering_loader
        )
    else:
        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,       # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,     # add for using self trained model
            clustering_loader
        )
def main():
    """Parse CLI arguments, merge them into the global `cfg`, set up logging
    and CUDA device visibility, then launch training."""
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # WORLD_SIZE is set by torch.distributed launchers; default to 1 GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()  # cfg is read-only from here on
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))
    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # new add by gu
    cudnn.benchmark = True
    train(cfg)
if __name__ == '__main__':
    main()  # standard script entry point
| CASIA-IVA-Lab/ISP-reID | tools/train.py | train.py | py | 3,739 | python | en | code | 90 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "data.make_data_loader",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "config.cfg",
... |
30130945625 | from ExtendDT import ext_dt
from Sequence import *
#> Find the optimal sequence for a single node when its children are optimized
def PruneOptimalSingle(xdt, main_seq, root_id):
    """Return (sequence, root_id): the optimal pruning sequence for the
    subtree rooted at `root_id`, using `main_seq` as a memo table.

    Children's sequences are computed recursively if they have not been
    optimized yet.
    """
    current_seq = copy.copy(main_seq[root_id])
    #* if optimized is not available, make it
    if not current_seq.optimized:
        #* Begin of optimization procedure ==============================
        print(f"[{root_id} (d:{xdt.node_depth[root_id]})] | Opting ===>")
        # * check this is a leaf
        if xdt.is_leaf[root_id]:
            # A leaf has nothing below it to prune: start from an empty sequence.
            combined_seq = PruneSequence()
            combined_seq.OptFinished()
            current_seq = combined_seq
        else:
            #* Find children IDs
            left_root = xdt.children[root_id][0]
            right_root = xdt.children[root_id][1]
            #* Get optimal sequence of children (recursive descent)
            lef_seq, node_id = PruneOptimalSingle(xdt, main_seq, left_root)
            right_seq, node_id = PruneOptimalSingle(xdt, main_seq, right_root)
            #* Combine children sequences with each other
            combined_seq = CombineSequence(lef_seq, right_seq, root_id)
            # * Append combined seq to the sequence of root
            current_seq.AppendSequence(combined_seq)
        #+ Tuple of changing root into a leaf: [cost, misclassifications, pruned ids]
        current_tuple = [xdt.abs_cost[root_id], xdt.miss_class[root_id], [root_id]]
        #* Append this to the sequence
        current_seq.AppendTuple(current_tuple)
        #* Set register to finished
        current_seq.OptFinished()
        #* End of optimization procedure ==============================
        print(f"[{root_id}] ===> Ready |")
    return current_seq, root_id
#> Function for finding the optimal sequence of a node recursively
def PruneOptimal(xdt, goal_node):
    """Bottom-up dynamic program over tree depths; returns the optimal
    pruning sequence for `goal_node` (stops early once it is computed)."""
    #* Get depth list
    xdt.getNodeDepth()
    depth_list = xdt.nodes_in_depth
    #* get the cost factors
    xdt.getAbsCostFactor()
    #* Find the children relations
    xdt.getChildren()
    # BUG FIX: `[PruneSequence()] * n` filled the memo table with n references
    # to ONE shared PruneSequence, so mutating/marking any slot affected all
    # of them.  Build one independent empty sequence per node instead.
    main_seq = [PruneSequence() for _ in range(xdt.n_all_nodes)]
    #* Go through all depth from bottom to top
    for depth_row in range(len(depth_list) - 1, -1, -1):
        row_list = depth_list[depth_row]
        #* Go through nodes in this depth
        for node_in_list in row_list:
            current_seq, node_id = PruneOptimalSingle(xdt, main_seq, node_in_list)
            main_seq[node_in_list] = current_seq
            if node_in_list == goal_node:
                break
        # Re-check the last processed node to propagate the inner break.
        if node_in_list == goal_node:
            break
    return main_seq[goal_node]
#> Function for finding the optimal sequence of a node recursively
def PruneOptimalParallel(xdt, goal_node):
    """Parallel variant of PruneOptimal: nodes within one depth level are
    optimized concurrently on a process pool (they are independent)."""
    #* Load multi-processor pckg
    import multiprocessing as mp
    #* Get depth list
    xdt.getNodeDepth()
    depth_list = xdt.nodes_in_depth
    #* get the cost factors
    xdt.getAbsCostFactor()
    #* Find the children relations
    xdt.getChildren()
    # BUG FIX: `[PruneSequence()] * n` aliased ONE shared object n times;
    # build an independent empty sequence per node instead.
    main_seq = [PruneSequence() for _ in range(xdt.n_all_nodes)]
    #+ Is goal reached
    goal_reached = False
    #* Start the pool (use half the logical CPUs)
    num_processor = mp.cpu_count()
    pool = mp.Pool(processes=int(num_processor / 2))
    #* Go through all depth from bottom to top
    for depth_row in range(len(depth_list) - 1, -1, -1):
        #* Go through nodes in this depth
        row_list = depth_list[depth_row]
        #* Run the pool in this depth
        result_async = [pool.apply_async(PruneOptimalSingle, args=(xdt, main_seq, node_in_list)) for node_in_list in row_list]
        #* Get the pool results
        for worker_idx in result_async:
            current_seq = worker_idx.get()[0]
            current_node = worker_idx.get()[1]
            main_seq[current_node] = current_seq
            if current_node == goal_node:
                goal_reached = True
        if goal_reached:
            break
    pool.close()
    # pool.join()
    return main_seq[goal_node]
#> A function to get the complete ORAP pruning sequence of a decision tree
def get_ORAP_sequence(dt_base, **kwargs):
    """Compute the full optimal pruning (ORAP) sequence for `dt_base`.

    Keyword Args:
        use_parallel (bool): run the per-depth optimization on a process
            pool (default False).

    Returns the root's PruneSequence, sorted by cost, with the unpruned
    tree prepended as the first entry.
    """
    # Idiom fix: read the optional flag directly instead of looping over
    # kwargs (behavior unchanged: missing key -> False).
    use_parallel = kwargs.get('use_parallel', False)
    #* Build the extended tree object
    xdt = ext_dt(dt_base)
    #* Calculate the optimal sequence for the root of the whole DT
    goal_node = 0
    #* Decide about number of processors
    if use_parallel:
        goal_seq = PruneOptimalParallel(xdt, goal_node)
    else:
        goal_seq = PruneOptimal(xdt, goal_node)
    #* Get the sequence for root (complete sequence)
    ORAP_base_sequence = copy.copy(goal_seq)
    #* Sort sequence according to the cost
    sort_factor = "cost"
    ORAP_base_sequence.SortSequence(sort_factor)
    #* Add the original (unpruned) tree as the first entry of the sequence
    ORAP_base_sequence.cost.insert(0, 0)
    ORAP_base_sequence.miss.insert(0, 0)
    ORAP_base_sequence.removed.insert(0, [])
    return ORAP_base_sequence
#> Remove a series of branches from a base decision tree, one at a time
def series_prune(base_dt, node_series):
    """Return a deep-copied version of `base_dt` with every branch listed in
    `node_series` removed (the original tree is left untouched)."""
    working = ext_dt(copy.deepcopy(base_dt))
    for node_id in node_series:
        working = ext_dt(working.removeBranch(node_id))
    return working.base_dt
{
"api_name": "multiprocessing.cpu_count",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "ExtendDT.ext_dt",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "Exten... |
27666434875 | import numpy as np
import os
import cv2
from sklearn.feature_extraction import image
import random
import csv
import shutil
def colors_new_train_patches_to_npy_file():
    """Sample one low-variance 32x32 patch per training image of the
    colors_new dataset, convert it to LAB, and dump the flattened patches
    (and labels) to .npy files.

    NOTE(review): `labels` is never appended to inside the loop, so the
    saved labels array is always empty — confirm against the intended
    mapping in `label` before relying on the output files.
    """
    path = '/Users/eloymarinciudad/Downloads/colors_new/train'
    # Intended class-name -> index mapping (currently unused; see NOTE above).
    label = {'black': 0, 'blue': 1, 'brown': 2, 'green': 3, 'grey': 4, 'orange': 5, 'pink': 6,
             'purple': 7, 'red': 8, 'white': 9, 'yellow': 10}
    labels = []
    first = True  # becomes False once `dataset` holds its first row
    classes = os.listdir(path)
    for color in classes:
        subdir_class = path + '/' + color
        print(subdir_class)
        if os.path.isdir(subdir_class):
            for imagen in os.listdir(subdir_class):
                if os.path.isfile(subdir_class + '/' + imagen) and imagen.endswith('.jpg'):
                    bgr = cv2.imread(subdir_class + '/' + imagen)
                    img = bgr[:, :, [2, 1, 0]]  # BGR -> RGB
                    n = img.shape[0]
                    m = img.shape[1]
                    # mid_ima = img[int(n / 2 - n / 4): int(n / 2 + n / 4), int(m / 2 - m / 4): int(m / 2 + m / 4)]
                    patches = image.extract_patches_2d(img, (32, 32))
                    random_index = random.randrange(len(patches))
                    patch = patches[random_index]
                    # Per-channel dynamic range of the candidate patch.
                    r_max = max(patch[:, :, 0].flatten())
                    r_min = min(patch[:, :, 0].flatten())
                    g_max = max(patch[:, :, 1].flatten())
                    g_min = min(patch[:, :, 1].flatten())
                    b_max = max(patch[:, :, 2].flatten())
                    b_min = min(patch[:, :, 2].flatten())
                    count = 0
                    repeat = False
                    # Reject patches where two or more channels span >= 100
                    # intensity levels (too much internal variation), and
                    # resample until an acceptable patch is found.
                    if (r_max - r_min >= 100 and g_max - g_min >= 100) or (
                            r_max - r_min >= 100 and b_max - b_min >= 100) or (
                            g_max - g_min >= 100 and b_max - b_min >= 100):
                        repeat = True
                    while repeat:
                        random_index = random.randrange(len(patches))
                        patch = patches[random_index]
                        r_max = max(patch[:, :, 0].flatten())
                        r_min = min(patch[:, :, 0].flatten())
                        g_max = max(patch[:, :, 1].flatten())
                        g_min = min(patch[:, :, 1].flatten())
                        b_max = max(patch[:, :, 2].flatten())
                        b_min = min(patch[:, :, 2].flatten())
                        count += 1
                        if (r_max - r_min >= 100 and g_max - g_min >= 100) or (
                                r_max - r_min >= 100 and b_max - b_min >= 100) or (
                                g_max - g_min >= 100 and b_max - b_min >= 100):
                            repeat = True
                        else:
                            repeat = False
                        if count == 10000:
                            break  # give up after 10k resamples; keep the last patch
                    # NOTE(review): `count` can never exceed 10000 because of the
                    # break above, so this diagnostic branch looks unreachable.
                    if count > 50000:
                        print(
                            f'Count: {count}\nR: {r_min} to {r_max}\n G: {g_min} to {g_max}\n B: {b_min} to {b_max}\n')
                        cv2.imwrite(f'test_patch/patch_{imagen}', patch[:, :, [2, 1, 0]])
                    patch = cv2.cvtColor(patch, cv2.COLOR_RGB2LAB)
                    patch = patch / 255  # scale to [0, 1]
                    img = np.reshape(patch, 32 * 32 * 3)  # flatten to one row
                    if first:
                        dataset = img
                        first = False
                    else:
                        dataset = np.vstack((dataset, img))
    labels = np.asarray(labels)
    np.save('colors_new_train_patches_data.npy', dataset)
    np.save('colors_new_train_patches_labels.npy', labels)
def split_colors_new_dataset():
    """Flatten the colors_new_original class folders into one directory and
    split the images ~80/10/10 into train/test/validation subfolders.

    NOTE(review): the destination train/test/validation/<color> folders are
    assumed to exist already — shutil.move does not create them.
    """
    path = "/Users/eloymarinciudad/Downloads/colors_new_original"
    classes = os.listdir(path)
    print(classes)
    num_of_images = 0
    # Copy every image into a flat folder, prefixing the class name.
    for color in classes:
        subdir_class = path + '/' + color
        print(subdir_class)
        if os.path.isdir(subdir_class):
            for imagen in os.listdir(subdir_class):
                shutil.copy(subdir_class + '/' + imagen,
                            f'/Users/eloymarinciudad/Downloads/colors_new/{color}_{imagen}')
                num_of_images += 1
    dest_path = '/Users/eloymarinciudad/Downloads/colors_new'
    imagenes = os.listdir(dest_path)
    # Draw 20% of the indices at random: half go to test, half to validation.
    random_index_list = random.sample(range(num_of_images), round(num_of_images * 0.2))
    test_list = random_index_list[:round(len(random_index_list) / 2)]
    val_list = random_index_list[round(len(random_index_list) / 2):]
    ima_index = 0
    for imagen in imagenes:
        if imagen.endswith('.jpg'):
            color = imagen.split(sep='_')[0]  # class name was prefixed above
            if ima_index in test_list:
                shutil.move(dest_path + '/' + imagen, dest_path + f'/test/{color}/' + imagen)
            elif ima_index in val_list:
                shutil.move(dest_path + '/' + imagen, dest_path + f'/validation/{color}/' + imagen)
            else:
                shutil.move(dest_path + '/' + imagen, dest_path + f'/train/{color}/' + imagen)
            ima_index += 1
def load_termisk_reduced():
    """Load a reduced subset of the Termisk dataset as grayscale 64x64 images.

    Classes listed in the exclusion tuple are skipped, and at most 1650
    images per class are kept.  Returns (dataset, labels) where `dataset`
    rows are flattened images scaled to [0, 1].

    NOTE(review): `dataset` is only bound once the first image is read; an
    empty dataset tree would raise NameError at the return statement.
    """
    path = "/content/termisk_dataset"
    # path = "/Users/eloymarinciudad/Downloads/termisk_dataset"
    split_paths = os.listdir(path)
    print(split_paths)
    labels = []
    index_label = 0  # running class index (only advanced for kept classes)
    first = True
    for split in split_paths:
        subdir = path + '/' + split
        print(subdir)
        if os.path.isdir(subdir):
            classes = os.listdir(subdir)
            classes = [s for s in classes if '.' not in s]  # drop stray files
            print(classes)
            for label in classes:
                class_path = subdir + '/' + label
                count = 0
                # Skip the excluded class folders entirely.
                if label not in ('17', '3', '0', '1', '2', '4', '7', '9', '12', '13', '14', '16'):
                    for image in os.listdir(class_path):
                        if os.path.isfile(class_path + '/' + image) and image.endswith('.png'):
                            img = cv2.imread(class_path + '/' + image, cv2.IMREAD_GRAYSCALE)
                            img = cv2.resize(img, (64, 64), interpolation=cv2.INTER_AREA)
                            img = np.reshape(img, 64 * 64)
                            img = img / 255  # scale to [0, 1]
                            labels.append(index_label)
                            if first:
                                dataset = img
                                first = False
                            else:
                                dataset = np.vstack((dataset, img))
                            count += 1
                            if count >= 1650:
                                break  # cap images per class
                    index_label += 1
    labels = np.asarray(labels)
    return dataset, labels
def load_colors_new():
    """Sample one random 32x32 LAB patch from the center crop of every
    colors_new image and log which patch was taken to a CSV file.

    Returns (dataset, labels): flattened patches in [0, 1] and one integer
    label per class folder.
    """
    path = "/content/colors_new"
    classes = os.listdir(path)
    # Audit log: one row per sampled patch (image path, patch index).
    logs_file = open('/content/ClusterGAN/color_patches_logs.csv', 'w')
    writer = csv.writer(logs_file)
    writer.writerow(['ima_path', 'patch_index'])
    print(classes)
    labels = []
    index_label = 0
    first = True
    for color in classes:
        subdir_class = path + '/' + color
        print(subdir_class)
        if os.path.isdir(subdir_class):
            for imagen in os.listdir(subdir_class):
                if os.path.isfile(subdir_class + '/' + imagen):
                    bgr = cv2.imread(subdir_class + '/' + imagen)
                    # bgr = cv2.resize(bgr, (128, 128), interpolation=cv2.INTER_AREA)
                    # img = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
                    img = bgr[:, :, [2, 1, 0]]  # BGR -> RGB
                    n = img.shape[0]
                    m = img.shape[1]
                    # Take the central half of the image before patch sampling.
                    mid_ima = img[int(n / 2 - n / 4): int(n / 2 + n / 4), int(m / 2 - m / 4): int(m / 2 + m / 4)]
                    patches = image.extract_patches_2d(mid_ima, (32, 32))
                    random_index = random.randrange(len(patches))
                    patch = patches[random_index]
                    patch = cv2.cvtColor(patch, cv2.COLOR_RGB2LAB)
                    patch = patch / 255  # scale to [0, 1]
                    img = np.reshape(patch, 32 * 32 * 3)
                    labels.append(index_label)
                    # path, patch_index
                    writer.writerow([subdir_class + '/' + imagen, random_index])
                    if first:
                        dataset = img
                        first = False
                    else:
                        dataset = np.vstack((dataset, img))
            index_label += 1
    labels = np.asarray(labels)
    logs_file.close()
    return dataset, labels
def load_google_colors():
    """Load the 32x32 google_colors images as flattened RGB rows in [0, 1].

    Each subdirectory is one class.  Returns (dataset, labels).
    NOTE(review): `dataset` is only bound once an image is seen; an empty
    directory tree would raise NameError at the return statement.
    """
    path = "/content/ClusterGAN/colors/google_colors"
    dirs = os.listdir(path)
    print(dirs)
    labels = []
    index_label = 0
    first = True
    for item in dirs:
        subdir = path + '/' + item
        print(subdir)
        if os.path.isdir(subdir):
            for image in os.listdir(subdir):
                if os.path.isfile(subdir + '/' + image):
                    bgr = cv2.imread(subdir + '/' + image)
                    # img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
                    img = bgr[:, :, [2, 1, 0]]  # BGR -> RGB
                    img = np.reshape(img, 32 * 32 * 3)
                    img = img / 255  # scale to [0, 1]
                    labels.append(index_label)
                    if first:
                        dataset = img
                        first = False
                    else:
                        dataset = np.vstack((dataset, img))
            index_label += 1
    labels = np.asarray(labels)
    return dataset, labels
def closest_node(node, nodes):
    """Return the indices of `nodes` ordered from nearest to farthest
    from `node` (squared Euclidean distance)."""
    candidates = np.asarray(nodes)
    diffs = candidates - node
    sq_dists = (diffs * diffs).sum(axis=1)  # row-wise squared norms
    return sq_dists.argsort()
def closest(X, p):
    """Return the index of the row of `X` nearest to point `p`
    (squared Euclidean distance)."""
    offsets = X - p
    return np.argmin(np.einsum('ij,ij->i', offsets, offsets))
def sample_Z(batch, z_dim, sampler='one_hot', num_class=10, n_cat=1, label_index=None, save_label=False):
    """Draw a (batch, z_dim) latent matrix using the requested sampler.

    'one_hot'/'mul_cat' append a one-hot class block to Gaussian noise,
    'mix_gauss' shifts Gaussian noise by a per-class mean, 'uniform' and
    'normal' are unstructured.  With save_label=True the 'one_hot' sampler
    also returns the drawn class labels.  Unknown sampler names return None.
    """
    if sampler == 'uniform':
        return np.random.uniform(-1., 1., size=[batch, z_dim])
    if sampler == 'normal':
        return 0.15 * np.random.randn(batch, z_dim)
    if sampler == 'mul_cat':
        if label_index is None:
            label_index = np.random.randint(low=0, high=num_class, size=batch)
        noise = 0.10 * np.random.randn(batch, z_dim - num_class * n_cat)
        one_hot_block = np.tile(np.eye(num_class)[label_index], (1, n_cat))
        return np.hstack((noise, one_hot_block))
    if sampler == 'one_hot':
        if label_index is None:
            label_index = np.random.randint(low=0, high=num_class, size=batch)
        noise = 0.10 * np.random.randn(batch, z_dim - num_class)
        z = np.hstack((noise, np.eye(num_class)[label_index]))
        if save_label:
            return z, label_index
        return z
    if sampler == 'mix_gauss':
        if label_index is None:
            label_index = np.random.randint(low=0, high=num_class, size=batch)
        return 0.1 * np.random.randn(batch, z_dim) + np.eye(num_class, z_dim)[label_index]
    # Unknown sampler: fall through and return None, matching the original.
def sample_labelled_Z(batch, z_dim, sampler='one_hot', num_class=10, n_cat=1, label_index=None):
    """Like sample_Z but always returns (label_index, z) for the structured
    samplers ('mul_cat', 'one_hot', 'mix_gauss').  Labels are drawn uniformly
    when not supplied.  Unknown sampler names return None."""
    if sampler == 'mul_cat':
        if label_index is None:
            label_index = np.random.randint(low=0, high=num_class, size=batch)
        gaussian_part = 0.10 * np.random.randn(batch, z_dim - num_class * n_cat)
        categorical_part = np.tile(np.eye(num_class)[label_index], (1, n_cat))
        return label_index, np.hstack((gaussian_part, categorical_part))
    if sampler == 'one_hot':
        if label_index is None:
            label_index = np.random.randint(low=0, high=num_class, size=batch)
        gaussian_part = 0.10 * np.random.randn(batch, z_dim - num_class)
        return label_index, np.hstack((gaussian_part, np.eye(num_class)[label_index]))
    if sampler == 'mix_gauss':
        if label_index is None:
            label_index = np.random.randint(low=0, high=num_class, size=batch)
        return label_index, 0.1 * np.random.randn(batch, z_dim) + np.eye(num_class, z_dim)[label_index]
def reshape_mnist(X):
    """Reshape flat MNIST rows (batch, 784) into NHWC images (batch, 28, 28, 1)."""
    return np.reshape(X, (X.shape[0], 28, 28, 1))
def clus_sample_Z(batch, dim_gen=20, dim_c=2, num_class=10, label_index=None):
    """Draw latent vectors for ClusterGAN: a (batch, dim_gen) Gaussian part
    followed by a (batch, num_class*dim_c) block where only the dim_c slots
    of each sample's class are filled with N(1.0, 0.05) noise."""
    if label_index is None:
        label_index = np.random.randint(low=0, high=num_class, size=batch)
    cluster_block = np.zeros((batch, num_class * dim_c))
    for row in range(batch):
        start = label_index[row] * dim_c
        cluster_block[row, start:start + dim_c] = np.random.normal(loc=1.0, scale=0.05, size=(1, dim_c))
    gen_noise = 0.10 * np.random.randn(batch, dim_gen)
    return np.hstack((gen_noise, cluster_block))
def clus_sample_labelled_Z(batch, dim_gen=20, dim_c=2, num_class=10, label_index=None):
    """Same construction as clus_sample_Z, but also returns the class labels
    as (label_index, z)."""
    if label_index is None:
        label_index = np.random.randint(low=0, high=num_class, size=batch)
    cluster_block = np.zeros((batch, num_class * dim_c))
    for row in range(batch):
        start = label_index[row] * dim_c
        cluster_block[row, start:start + dim_c] = np.random.normal(loc=1.0, scale=0.05, size=(1, dim_c))
    gen_noise = 0.10 * np.random.randn(batch, dim_gen)
    return label_index, np.hstack((gen_noise, cluster_block))
def sample_info(batch, z_dim, sampler='one_hot', num_class=10, n_cat=1, label_index=None):
    """Draw (label_index, z) with UNSCALED Gaussian noise (unlike sample_Z's
    0.10 factor) plus a one-hot / tiled-one-hot class block.  Only 'one_hot'
    and 'mul_cat' samplers are supported; other names return None."""
    if sampler == 'one_hot':
        if label_index is None:
            label_index = np.random.randint(low=0, high=num_class, size=batch)
        return label_index, np.hstack((np.random.randn(batch, z_dim - num_class), np.eye(num_class)[label_index]))
    if sampler == 'mul_cat':
        if label_index is None:
            label_index = np.random.randint(low=0, high=num_class, size=batch)
        return label_index, np.hstack((np.random.randn(batch, z_dim - num_class * n_cat),
                                       np.tile(np.eye(num_class)[label_index], (1, n_cat))))
if __name__ == '__main__':
    # Ad-hoc smoke run: load the reduced Termisk dataset (result discarded).
    load_termisk_reduced()
| eloymc98/ClusterGAN | util.py | util.py | py | 14,002 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number":... |
19612913911 | import os
from os import listdir
from zipfile import ZipFile
import img2pdf
# Convert every image archive in the current working directory into a PDF:
# for each <name>.zip, extract its images into ./<name>/ and write <name>.pdf
# containing the .jpg/.png pages in sorted (lexicographic) order.
zipfolder = os.getcwd()

for archive in os.listdir(zipfolder):
    # BUG FIX: the original also accepted .rar files but never extracted them
    # (only ZipFile was used), so the later image scan crashed on a missing
    # directory.  Only .zip archives are actually supported; skip the rest.
    if not archive.endswith('.zip'):
        continue
    clean_name = os.path.splitext(archive)[0]
    dirname = os.path.join(zipfolder, clean_name)
    # Create the per-archive folder; exist_ok avoids crashing on re-runs.
    os.makedirs(dirname, exist_ok=True)
    # Unzip the archive's contents into the folder created above.
    with ZipFile(archive, 'r') as zip_obj:
        zip_obj.extractall(dirname)
    # Collect the extracted image pages (skip nested dirs and other files,
    # e.g. macOS .DS_Store entries).
    imgs = []
    for page in os.listdir(dirname):
        if not page.endswith('.jpg'):
            if not page.endswith('.png'):
                continue
        page_path = os.path.join(dirname, page)
        if os.path.isdir(page_path):
            continue
        imgs.append(page_path)
    imgs.sort()  # lexicographic page order, same as the original behaviour
    # img2pdf.convert raises on an empty list, so only write when pages exist.
    if imgs:
        with open(f"{clean_name}.pdf", "wb") as pdf_file:
            pdf_file.write(img2pdf.convert(imgs))
# NOTE: on macOS you can `rm .DS_Store` first to avoid stray metadata files.
| jeddstudio/Qunzip | Qunzip.py | Qunzip.py | py | 1,877 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,... |
19774523735 | '''
LeetCode #701 - Insert Into a Binary Search Tree prompt:
You are given the root node of a binary search tree (BST) and a
value to insert into the tree. Return the root node of the BST
after the insertion. It is guaranteed that the new value does
not exist in the original BST.
Notice that there may exist multiple valid ways for the
insertion, as long as the tree remains a BST after insertion.
You can return any of them.
Example 1:
Input: root = [4,2,7,1,3], val = 5
Output: [4,2,7,1,3,5]
Explanation: Another accepted tree is:
Example 2:
Input: root = [40,20,60,10,30,50,70], val = 25
Output: [40,20,60,10,30,50,70,null,null,25]
Example 3:
Input: root = [4,2,7,1,3,null,null,null,null,null,null], val = 5
Output: [4,2,7,1,3,5]
Constraints:
The number of nodes in the tree will be in the range [0, 10^4].
-10^8 <= Node.val <= 10^8
All the values Node.val are unique.
-10^8 <= val <= 10^8
It's guaranteed that val does not exist in the original BST.
'''
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
    """A binary-search-tree node: keys in `left` < `val` < keys in `right`."""

    def __init__(self, val: int = 0, left=None, right=None):
        self.val = val
        self.left = left    # subtree with keys smaller than val
        self.right = right  # subtree with keys larger than val


class Solution:
    # Time complexity: O(h), h = tree height (O(log n) when balanced)
    # Space complexity: O(1) — iterative descent, no recursion stack
    def insertIntoBST(self, root: "Optional[TreeNode]", val: int) -> "Optional[TreeNode]":
        """Insert `val` as a new leaf and return the (unchanged) root.

        The problem guarantees `val` is not already present, so we only
        need to walk down to the first empty slot on the correct side.
        """
        new_node = TreeNode(val)
        if root is None:
            return new_node  # empty tree: the new node becomes the root
        cursor = root
        while True:
            if val < cursor.val:
                if cursor.left is None:
                    cursor.left = new_node
                    break
                cursor = cursor.left
            else:
                if cursor.right is None:
                    cursor.right = new_node
                    break
                cursor = cursor.right
        return root
# ---- Inline self-checks (execute on import/run; raise AssertionError on failure) ----
# Ex 1:
# Input: root = [4,2,7,1,3], val = 5
# Output: [4,2,7,1,3,5]
root = TreeNode(4)
root.left = TreeNode(2)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)
root.right = TreeNode(7)
val = 5
result = Solution().insertIntoBST(root, val)
assert result.val == 4
assert result.left.val == 2
assert result.left.left.val == 1
assert result.left.right.val == 3
assert result.right.val == 7
assert result.right.left.val == 5
# Ex 2:
# Input: root = [40,20,60,10,30,50,70], val = 25
# Output: [40,20,60,10,30,50,70,null,null,25]
root = TreeNode(40)
root.left = TreeNode(20)
root.left.left = TreeNode(10)
root.left.right = TreeNode(30)
root.right = TreeNode(60)
root.right.left = TreeNode(50)
root.right.right = TreeNode(70)
val = 25
result = Solution().insertIntoBST(root, val)
assert result.val == 40
assert result.left.val == 20
assert result.left.left.val == 10
assert result.left.right.val == 30
assert result.right.val == 60
assert result.right.left.val == 50
assert result.right.right.val == 70
assert result.left.right.left.val == 25
# Ex 3:
# Input: root = [4,2,7,1,3,null,null,null,null,null,null], val = 5
# Output: [4,2,7,1,3,5]
root = TreeNode(4)
root.left = TreeNode(2)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)
root.right = TreeNode(7)
val = 5
result = Solution().insertIntoBST(root, val)
assert result.val == 4
assert result.left.val == 2
assert result.left.left.val == 1
assert result.left.right.val == 3
assert result.right.val == 7
assert result.right.left.val == 5
| Reddimus/LeetCode_Notes | Trees/LeetCode #701 - Insert into a Binary Search Tree.py | LeetCode #701 - Insert into a Binary Search Tree.py | py | 3,225 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 44,
"usage_type": "name"
}
] |
27474072980 | #!/usr/bin/env python3
import pprint, argparse, pickle, json
import maze
def fail(message):
    """Build a zero-score gradescope result dict carrying *message* as output."""
    return dict(score=0, output=message, visibility='visible')
if __name__ == "__main__":
    # Parse CLI flags up front so the later __main__ block can use `arguments`.
    parser = argparse.ArgumentParser(
        description     = 'CS440 MP1 Autograder',
        formatter_class = argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--gradescope', default = False, action = 'store_true',
        help = 'save output in gradescope-readable json file')
    arguments = parser.parse_args()
    # The student's submission must provide a `search` module; fail cleanly
    # (with a gradescope-readable result when requested) if it is missing.
    try:
        import search
    except ImportError:
        message = 'could not find module \'search\', did you upload `search.py`?'
        if arguments.gradescope:
            with open('results.json', 'w') as file:
                file.write(json.dumps(fail(message)))
        else:
            print(message)
        raise SystemExit
def generate_answer_key(path, mazes, solutions):
    """Run each reference solver over its mazes and pickle two answer keys:
    the full paths (instructor key) and just the path lengths (student key).

    `path` maps 'instructor'/'student' to output file names; `mazes` is a
    tuple of {case: maze} dicts aligned with the `solutions` names.
    """
    # Idiom fix: the original generator expression shadowed the `mazes`
    # parameter with its own loop variable; renamed for clarity (behavior
    # is unchanged because the iterable is evaluated before the rebinding).
    key_instructor = tuple(
        {case: getattr(search, solution)(maze_obj) for case, maze_obj in part_mazes.items()}
        for part_mazes, solution in zip(mazes, solutions)
    )
    key_student = tuple({case: len(path) for case, path in part.items()}
                        for part in key_instructor)
    # BUG FIX: the original passed open(...) directly to pickle.dump and
    # leaked the file handles; use context managers so they are closed.
    with open(path['instructor'], 'wb') as key_file:
        pickle.dump(key_instructor, key_file)
    with open(path['student'], 'wb') as key_file:
        pickle.dump(key_student, key_file)
def load_answer_key(path):
    """Load the instructor answer key if present, else fall back to the
    student key (path lengths only).

    `path` maps 'instructor'/'student' to pickle file names.
    """
    # BUG FIX: the original passed open(...) directly to pickle.load and
    # never closed the file handles; use context managers instead.
    try:
        with open(path['instructor'], 'rb') as key_file:
            return pickle.load(key_file)
    except FileNotFoundError:
        print('running in student mode (instructor key unavailable)')
        with open(path['student'], 'rb') as key_file:
            return pickle.load(key_file)
def grade_closed(name, key, mazes, solution, weight = 1):
    """Score a closed-answer part: each maze case is worth `weight` points,
    split evenly between path validity and correct path length.

    `key[case]` is either an int (student key: expected length) or a full
    reference path (instructor key).  `solution` names the student function
    in the `search` module.
    """
    def grade(case, maze):
        y = key[case]
        z = getattr(search, solution)(maze)
        # check that the path is valid
        score_validity = int(maze.validate_path(z) is None)
        # check that the length of the student's path matches
        score_length = int(len(z) == (y if type(y) is int else len(y)))
        # if instructor key available, check that path vertices match
        # NOTE(review): score_vertices is computed but its result entry is
        # commented out below, so it never contributes to the grade.
        score_vertices = int(type(y) is not int and z == y)
        return (
            {
                'name'      : '{0}: `validate_path(_:)` for \'{1}\' maze'.format(name, case),
                'score'     : 0.5 * weight * score_validity,
                'max_score' : 0.5 * weight,
                'visibility': 'visible'
            },
            {
                'name'      : '{0}: correct path length for \'{1}\' maze'.format(name, case),
                'score'     : 0.5 * weight * score_length,
                'max_score' : 0.5 * weight,
                'visibility': 'visible'
            },
            #{
            #    'name'      : '{0}: correct path vertices for \'{1}\' maze'.format(name, case),
            #    'score'     : 0.75 * weight * score_vertices,
            #    'max_score' : 0.75 * weight * (type(y) is not int),
            #    'visibility': 'visible'
            #},
        )
    return tuple(item for case, maze in mazes.items() for item in grade(case, maze))
def grade_open(name, key, mazes, solution, weight = 1):
    """Score an open-ended part: half the points for a valid path, half
    scaled by (reference length / student length), capped at 1.

    Shorter-than-reference student paths therefore earn full length credit.
    """
    def grade(case, maze):
        y = key[case]
        z = getattr(search, solution)(maze)
        # check that the path is valid
        score_validity = int(maze.validate_path(z) is None)
        # score student path by dividing the length of an MST-based solution by
        # the length of the student path (max(..., 1) guards an empty path)
        score_length = min((y if type(y) is int else len(y)) / max(len(z), 1), 1)
        return (
            {
                'name'      : '{0}: `validate_path(_:)` for \'{1}\' maze'.format(name, case),
                'score'     : 0.5 * weight * score_validity,
                'max_score' : 0.5 * weight,
                'visibility': 'visible'
            },
            {
                'name'      : '{0}: path length for \'{1}\' maze'.format(name, case),
                'score'     : 0.5 * weight * score_length,
                'max_score' : 0.5 * weight,
                'visibility': 'visible'
            },
        )
    return tuple(item for case, maze in mazes.items() for item in grade(case, maze))
def main():
    """Grade every assignment part against the answer key and return a
    gradescope-style results dictionary."""
    solutions = ('bfs', 'astar_single', 'astar_corner', 'astar_multiple', 'fast')
    # Sanity-check the student's module before running anything expensive.
    for solution in solutions:
        if not hasattr(search, solution):
            return fail('module \'search\' is missing expected member \'{0}\''.format(solution))
        if not callable(getattr(search, solution)):
            return fail('member \'{0}\' in module \'search\' is not callable'.format(solution))
    mazes = (
        # part 1: 20 points total, 4 points per case
        {case: maze.maze('data/part-1/{0}'.format(case))
         for case in ('tiny', 'small', 'medium', 'large', 'open')},
        # part 2: 20 points total, 4 points per case
        {case: maze.maze('data/part-2/{0}'.format(case))
         for case in ('tiny', 'small', 'medium', 'large', 'open')},
        # part 3: 30 points total, 10 points per case
        {case: maze.maze('data/part-3/{0}'.format(case))
         for case in ('tiny', 'medium', 'large')},
        # part 4: 30 points total, 10 points per case
        {case: maze.maze('data/part-4/{0}'.format(case))
         for case in ('tiny', 'small', 'medium')},
        # part 5: 22 points total, 22 points per case
        #{case: maze.maze('data/part-5/{0}'.format(case))
        # for case in ('large',)},
    )
    #generate_answer_key({'instructor': 'key', 'student': 'key-student'}, mazes, solutions)
    key = load_answer_key({'instructor': 'key', 'student': 'key-student'})
    parts_closed = tuple(item for i, points in zip(range(0, 4), (4, 4, 10, 10))
                         for item in grade_closed('part-{0}'.format(i + 1), key[i], mazes[i], solutions[i],
                                                  weight = points))
    #parts_open = tuple(item for i, points in zip(range(4, 5), (22,))
    #                   for item in grade_open( 'part-{0}'.format(i + 1), key[i], mazes[i], solutions[i],
    #                                           weight = points))
    # construct grade dictionary for gradescope
    return {
        'visibility': 'visible',
        'tests': parts_closed
        #'tests': parts_closed + parts_open
    }
if __name__ == "__main__":
    # Run the grader; write JSON for gradescope or pretty-print locally.
    results = main()
    if arguments.gradescope:
        with open('results.json', 'w') as file:
            file.write(json.dumps(results))
    else:
        pprint.pp(results)
| xxxfzxxx/AI-MPs | maze search/grade.py | grade.py | py | 6,537 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 29,
"usage_type": "call"
},
{
"ap... |
9536429374 | import subprocess
from itertools import product, combinations
import numpy as np
# Build mutation-index CSV inputs for UniRep embedding jobs and generate the
# matching sbatch scripts: a tiny smoke test, all single mutants, then all
# double-mutant position pairs.
# Each CSV row is [pos1, aa1, pos2, aa2]; -1 appears to mark "no second
# mutation" — TODO confirm against embedding_getter.py.
def runner():
# Wild-type sequence; re-encoded below as amino-acid indices 0..19.
SEQ = 'MQYKLILNGKTLKGETTTEAVDAATAEKVFKQYANDNGVDGEWTYDDATKTFTVTE'
AA = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
# Map amino-acid letter -> integer index.
AA_dict = dict((k,v) for v,k in enumerate(AA))
SEQ = list(map(AA_dict.get, list(SEQ)))
# Test
# Smoke test: a handful of substitutions at position 0 only.
mutations = []
for pos1 in range(1):
for mut1 in range(4):
if mut1!=SEQ[pos1]:
mutations.append([pos1, mut1, -1, -1])
mutations = np.array(mutations)
np.savetxt("./assets/mutation_input/test.csv", mutations, delimiter=",")
run_process("test")
# Single mutation
# All 19 substitutions at every position except position 0.
mutations = []
for pos1 in range(1,len(SEQ)):
for mut1 in range(len(AA)):
if mut1!=SEQ[pos1]:
mutations.append([pos1, mut1, -1, -1])
mutations = np.array(mutations)
fnm = 'single_mutations'
np.savetxt('./assets/mutation_input/'+fnm+'.csv', mutations, delimiter=',')
run_process(fnm)
# Double mutation
# One CSV + sbatch script per unordered pair of positions; jobs are also
# recorded in batch launch files via write_files_to_run().
residues_list = combinations(list(range(1,len(SEQ))),2)
for cnt, residues in enumerate(residues_list):
mutations = []
for mut1 in range(20):
for mut2 in range(20):
if mut1!=SEQ[residues[0]] and mut2!=SEQ[residues[1]]:
mutations.append([residues[0],mut1,residues[1],mut2])
mutations = np.array(mutations)
fnm = "double_mutations_{0:d}_{1:d}".format(residues[0],residues[1])
np.savetxt('./assets/mutation_input/'+fnm+'.csv', mutations, delimiter=',')
run_process(fnm)
write_files_to_run(fnm, cnt)
def write_files_to_run(fnm, cnt):
    """Append an sbatch launch line for job *fnm* to one of three batch files.

    Jobs are spread over run_files_1 .. run_files_3 in groups of 495,
    selected by integer-dividing the running job counter *cnt*.
    """
    batch_files = ['run_files_{0:d}'.format(n) for n in range(1, 4)]
    # 'a+' so repeated invocations keep appending to the same launch file.
    with open(batch_files[cnt // 495], 'a+') as out:
        out.write('sbatch ./assets/sbatch_scripts/' + fnm + '.sh \n')
def run_process(fnm):
    """Write an sbatch submission script for mutation file *fnm*.

    The generated script requests 1 node / 2 CPUs for 12 h on a list of
    candidate partitions and runs embedding_getter.py inside a Singularity
    container.

    Bug fixed: the 'cd' line was written without a trailing newline, so it
    fused with the following 'module add' line
    ('cd ...UniRepmodule add ...') and the generated script failed.
    """
    with open('./assets/sbatch_scripts/' + fnm + '.sh', 'w+') as f:
        # Partitions the scheduler may place the job on, in order of preference.
        partitions = ['sched_mit_arupc_long',
                      'sched_mit_arupc',
                      'sched_any',
                      'sched_mit_hill',
                      'newnodes']
        f.write('#!/bin/bash \n')
        f.write('#SBATCH --job-name='+fnm+' \n')
        f.write('#SBATCH --nodes=1 \n')
        f.write('#SBATCH --cpus-per-task=2 \n')
        f.write('#SBATCH --time=12:00:00 \n')
        f.write('#SBATCH --partition='+','.join(partitions)+' \n')
        f.write('#SBATCH --mem-per-cpu=2000 \n')
        f.write('#SBATCH -o /nobackup1c/users/leerang/UniRep/output/output_%j.txt \n')
        f.write('#SBATCH -e /nobackup1c/users/leerang/UniRep/error/error_%j.txt \n\n')
        # Newline added here: previously fused with the 'module add' line below.
        f.write('cd /nobackup1c/users/leerang/UniRep \n')
        f.write('module add /home/software/modulefiles/singularity/3.7.0 \n')
        f.write('singularity exec -B /nobackup1c/users/leerang/UniRep ./UniRep.sif python3 embedding_getter.py '+fnm)
# Entry point: generate all mutation inputs and sbatch scripts.
if __name__ == '__main__':
runner()
# Commented-out local debugging of the embedding subprocess:
#result = subprocess.run(["python", "embedding_getter.py", "test.csv"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#print(result.stdout)
#print(result.stderr)
#print(result.returncode) | leerang77/UniRep_Epistasis_Prediction | Utils/run_main.py | run_main.py | py | 3,208 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_numbe... |
31660008971 | import socket
import os
import multiprocessing
def send_message(s_socket, client_address):
    """Reply to *client_address* over *s_socket* with a greeting that names
    this worker process's PID, then log what was sent."""
    host, port = client_address
    greeting = 'Hi ' + host + ':' + str(port) + '. This is server ' + str(
        os.getpid())
    s_socket.sendto(greeting.encode(), client_address)
    print('Sent to client: ', greeting)
def test(x):
return x * x
# Script entry point: a UDP greeting server that hands each incoming datagram
# to a worker from a process pool.
if __name__ == "__main__":
number_processes = 4
# Initialise the pool
pool = multiprocessing.Pool(number_processes)
# UDP (datagram) socket.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the port
server_address = '127.0.0.1'
server_port = 10001
# Buffer size
buffer_size = 1024
server_socket.bind((server_address, server_port))
print('Server up and running at {}:{}'.format(server_address, server_port))
# Blocking receive loop: each datagram is answered asynchronously by a pool
# worker running send_message().
# NOTE(review): the pool is never closed/joined — acceptable for a demo
# server that runs until killed.
while True:
data, address = server_socket.recvfrom(buffer_size)
print('Received message \'{}\' at {}:{}'.format(data.decode(), address[0], address[1]))
pool.apply_async(send_message, args=(server_socket, address,))
| digitalhhz/DSTutorial_Programmierprojekt | simplepoolserver.py | simplepoolserver.py | py | 1,094 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "os.getpid",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"lin... |
# Compare two images pixel-by-pixel and save the result as new.png.
from PIL import Image
im1 = Image.open("lena.png")
im2 = Image.open("lena_modified.png")
#im.show()
width, height = im1.size
print(width)
print(height)
# Walk every pixel of both images.
for y in range(height):
for x in range(width):
rgba1 = im1.getpixel( (x, y) )
rgba2 = im2.getpixel( (x, y) )
#print(rgba)
# NOTE(review): when the two pixels are EQUAL this difference is always
# (0, 0, 0, 0), i.e. unchanged pixels are blanked to transparent black and
# modified pixels keep their original value — the saved image therefore
# highlights the modified region. Confirm `==` (not `!=`) is intended.
if rgba1 == rgba2 :
rgba2 = (rgba2[0] - rgba1[0], # R
rgba2[1] - rgba1[1], # G
rgba2[2] - rgba1[2], # B
rgba2[3] - rgba1[3]); # A
im1.putpixel((x, y), rgba2)
im1.show()
im1.save("new.png")
| Zrump159/ML2017 | hw0/HW0_Q2.py | HW0_Q2.py | py | 593 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 4... |
2209668303 | from rest_framework import serializers
from . import twitterhelper
from .register import register_social_user
import os
from rest_framework.exceptions import AuthenticationFailed
class TwitterAuthSerializer(serializers.Serializer):
    """Handles serialization of twitter related data.

    Validates a Twitter OAuth token pair and registers (or logs in) the
    corresponding social user.

    Bug fixed: the secret-token variable was misspelled ('second_toekn'),
    so every successful validation crashed with NameError when building
    the register_social_user() call.
    """
    access_token_key = serializers.CharField()
    access_token_secret = serializers.CharField()

    def validate(self, attrs):
        access_token_key = attrs.get('access_token_key')
        access_token_secret = attrs.get('access_token_secret')
        # Ask Twitter to verify the token pair and return the user's profile.
        user_info = twitterhelper.TwitterAuthTokenVerification.validate_twitter_auth_tokens(
            access_token_key, access_token_secret)
        try:
            user_id = user_info['id_str']
            email = user_info['email']
            name = user_info['name']
            provider = 'twitter'
            first_token = access_token_key
            second_token = access_token_secret  # fixed: was misspelled 'second_toekn'
        except (KeyError, TypeError):
            # user_info lacked the expected fields: the tokens did not
            # resolve to a valid Twitter account.
            raise serializers.ValidationError(
                'The tokens are invalid or expired. Please login again.'
            )
        return register_social_user(
            provider=provider, user_id=user_id, email=email, name=name, first_token=first_token, second_token=second_token)
| charlesDavid009/tweety | python_tips/social_auth/serializers.py | serializers.py | py | 1,295 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.CharField",
"line_number": 10,
"usage_type... |
15876514759 |
import transformers
from utils import printf
import copy
class prompt:
    """Base class holding the tokenizer and limits shared by prompt builders.

    Attributes set at construction:
    tokenizer -- callable used to encode prompt text,
    max_len   -- maximum number of tokens per sample,
    add_eos   -- whether training samples keep the trailing EOS token.
    """

    def __init__(self, tokenizer, max_len, add_eos=True):
        self.add_eos = add_eos
        self.max_len = max_len
        self.tokenizer = tokenizer
# Alpaca-style single-instruction prompt builder: wraps user text in the
# "### Instruction / ### Response" template for generation, training and
# chat-output post-processing.
class instruct_prompt(prompt):
# Template when there is only an instruction.
prompt = (
"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
"### Instruction:\n{instruction}\n\n### Response:"
)
# Template when the instruction is paired with an extra input field.
prompt_input = (
"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
"### Instruction:{instruction}\n\n### Input:{input}\n\n### Response:"
)
# One completed dialogue turn / the open turn awaiting the assistant.
prompt_history = "User:{input}\n\nAssistant:{output}\n\n"
prompt_post = "User:{input}\n\nAssistant:"
# Build token ids for inference from either a single instruction or a
# multi-turn history; multi-turn text is tail-truncated to max_len CHARS
# before tokenization (character-level heuristic, not token-level).
def preprocess_gen(self, data_point):
if 'history' not in data_point:
# single instruction format {'instruction':..,'input':..}
if 'input' in data_point:
user_prompt = self.prompt_input.format_map(data_point)
else:
user_prompt = self.prompt.format_map(data_point)
else:
# multi turn format {'history':[..], 'input':[..]}
user_prompt = "\n".join(["User:" + i['input']+"\n"+"Assistant:" + i['output'] for i in data_point['history']]) + "\nUser:" + data_point['input'] + "\nAssistant:"
user_prompt = user_prompt[-self.max_len:]
user_prompt=self.prompt.format_map({'instruction':user_prompt})
input_ids = self.tokenizer(user_prompt)["input_ids"]
return input_ids
# Build a causal-LM training sample: prompt tokens are masked with -100 in
# `labels` so loss is only computed on the response portion.
def preprocess_train(self, data_point):
# single instruction format {'instruction':..,'input':..,'output':..}
if 'instruction' in data_point:
if 'input' in data_point:
user_prompt = self.prompt_input.format_map(data_point)
else:
user_prompt = self.prompt.format_map(data_point)
output = data_point["output"]
# multi turn format {'input':[..], 'output':[..]}
else:
user_prompt = ''
lens = len(data_point['input'])
for i in range(lens-1):
user_prompt += self.prompt_history.format_map({'input':data_point['input'][i],'output':data_point['output'][i]})
user_prompt += self.prompt_post.format_map({'input':data_point['input'][-1]})
user_prompt = self.prompt.format_map({'instruction': user_prompt})
output = data_point['output'][-1]
len_user_prompt_tokens = (len(self.tokenizer(
user_prompt,
truncation=True,
max_length=self.max_len + 1,
)["input_ids"])- 1) # no eos token
full_tokens = self.tokenizer(
user_prompt + output,
truncation=True,
max_length=self.max_len + 1,
padding="max_length",
)["input_ids"][:-1]
return {
"input_ids": full_tokens,
"labels": [-100] * len_user_prompt_tokens
+ full_tokens[len_user_prompt_tokens:],
"attention_mask": [1] * (len(full_tokens)),
}
# Collator for plain causal LM (mlm=False).
def data_collator(self,):
return transformers.DataCollatorForLanguageModeling(self.tokenizer, mlm=False)
# Extract the assistant's reply from generated text and optionally escape
# it for display in a gradio chatbot widget.
def postprocess(self, text, render=True):
#import pdb;pdb.set_trace()
printf(text)
output = text.split("### Response:")[1].strip()
output = output.replace("Belle", "Vicuna")
printf(output)
# Drop anything after a follow-up section marker or a new user turn.
if '###' in output:
output = output.split("###")[0]
if 'User' in output:
output = output.split("User")[0]
output = output.replace('�','').replace('</s>', '')
if render:
# fix gradio chatbot markdown code render bug
lines = output.split("\n")
for i, line in enumerate(lines):
if "```" in line:
if line != "```":
lines[i] = f'<pre><code class="language-{lines[i][3:]}">'
else:
lines[i] = '</code></pre>'
else:
if i > 0:
# NOTE(review): these replace() calls look extraction-garbled —
# presumably they originally escaped '<'/'>' to HTML entities;
# confirm against the upstream source before relying on them.
lines[i] = "<br/>" + line.replace("<", "<").replace(">", ">").replace("__", '\_\_')
output = "".join(lines)
# output = output.replace('<br/><pre>','\n<pre>') work for html; but not for gradio
return output
# Multi-turn chat prompt builder: prepends a fixed system preamble and
# interleaves "User:/Assistant:" turns, with token-budget-aware history
# truncation for generation.
class chat_prompt(prompt):
# System preamble prepended to every conversation.
prompt_pre = (
"The following is a conversation between an AI assistant called Assistant and a human user called User. "
"The assistant is intelligent, knowledgeable and polite to answer questions of user.\n\n"
)
prompt_history = "User:{input}\n\nAssistant:{output}\n\n"
prompt_post = "User:{input}\n\nAssistant:"
# Build inference token ids; older history turns are first halved, then
# dropped entirely, until the whole prompt fits into max_len tokens.
def preprocess_gen(self, data_point):
user_prompt = self.prompt_pre
len_avail = self.max_len - len(self.tokenizer(user_prompt, add_special_tokens=False)['input_ids'])
input_prompt = self.prompt_post.format_map({'input':data_point['input']})
len_avail -= len(self.tokenizer(input_prompt, add_special_tokens=False)['input_ids'])
lens = len(data_point['history'])
tokenized_lens = []
for i in range(lens):
tmp_prompt = self.prompt_history.format_map(data_point['history'][i])
tokenized_lens.append(len(self.tokenizer(tmp_prompt,add_special_tokens=False)["input_ids"]))
# Heuristic: halve turn text (character-wise), preferring earlier turns.
# NOTE(review): this mutates data_point['history'] in place.
i = 0
while sum(tokenized_lens) > len_avail and i < lens:
history = data_point['history'][i]
tmp_len1 = len(history['input'])
tmp_len2 = len(history['output'])
if tmp_len2 > tmp_len1:
history['output'] = history['output'][:tmp_len2//2]
else:
history['input'] = history['input'][:tmp_len1//2]
prompt = self.prompt_history.format_map(history)
single_len =(len(self.tokenizer(prompt,add_special_tokens=False)["input_ids"]))
tokenized_lens[i] = single_len
i += 1
total_len = sum(tokenized_lens)
# If still too long, drop whole leading turns outright.
# NOTE(review): `lens` is not updated after dropping turns, so the final
# assembly loop below presumably relies on the shortened history list —
# verify index bookkeeping against upstream.
while total_len > len_avail and i < lens - 1 :
total_len -= tokenized_lens[i]
data_point['history'] = data_point['history'][1:]
i += 1
# Finally, assemble preamble + surviving history + the open user turn.
for i in range(lens):
user_prompt += self.prompt_history.format_map(data_point['history'][i])
user_prompt += input_prompt
printf({'real_input:':user_prompt})
inputs = self.tokenizer(user_prompt)["input_ids"]
return inputs
# Build a training sample from parallel 'input'/'output' turn lists; the
# prompt portion is masked with -100 so loss covers only the final reply.
def preprocess_train(self, data_point):
user_prompt = self.prompt_pre
lens = len(data_point['input'])
for i in range(lens-1):
user_prompt += self.prompt_history.format_map({'input':data_point['input'][i].strip(),'output':data_point['output'][i].strip()})
user_prompt += self.prompt_post.format_map({'input':data_point['input'][-1].strip()})
len_user_prompt_tokens = len(self.tokenizer(
user_prompt,
truncation=True,
max_length=self.max_len,
)["input_ids"]) - 1 # remove extra eos
if self.add_eos:
full_tokens = self.tokenizer(
user_prompt + data_point["output"][-1].strip(),
truncation=True,
padding=False,
max_length=self.max_len,
)["input_ids"] # need eos
else:
full_tokens = self.tokenizer(
user_prompt + data_point["output"][-1].strip(),
truncation=True,
padding=False,
max_length=self.max_len+1,
)["input_ids"][:-1] # delete eos
return {
"input_ids": full_tokens,
"labels": [-100] * len_user_prompt_tokens + full_tokens[len_user_prompt_tokens:],
"attention_mask": [1] * (len(full_tokens)),
}
# Seq2seq-style collator (dynamic padding of labels and inputs).
def data_collator(self,):
return transformers.DataCollatorForSeq2Seq(self.tokenizer)
# Extract the assistant's last reply; optionally escape it for gradio.
def postprocess(self, text, render=False):
output = text.split("Assistant:")[-1].strip()
if 'User:' in output:
output = output.split("User:")[0]
output = output.replace('�','')
if render:
# fix gradio chatbot markdown code render bug
lines = output.split("\n")
for i, line in enumerate(lines):
if "```" in line:
if line != "```":
lines[i] = f'<pre><code class="language-{lines[i][3:]}">'
else:
lines[i] = '</code></pre>'
else:
if i > 0:
# NOTE(review): these replace() calls look extraction-garbled —
# presumably they originally escaped '<'/'>' to HTML entities.
lines[i] = "<br/>" + line.replace("<", "<").replace(">", ">").replace("__", '\_\_')
output = "".join(lines)
# output = output.replace('<br/><pre>','\n<pre>') work for html; but not for gradio
return output
# Return the default HF data-collator CLASS (not an instance) for causal LM.
def get_data_collator():
return transformers.DataCollatorForLanguageModeling
| Facico/Chinese-Vicuna | prompt.py | prompt.py | py | 9,178 | python | en | code | 4,045 | github-code | 1 | [
{
"api_name": "transformers.DataCollatorForLanguageModeling",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "utils.printf",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "utils.printf",
"line_number": 83,
"usage_type": "call"
},
{
"api_name":... |
33489243945 | from dotenv import load_dotenv
from logging import error
import telegram
import requests
import os
import logging
from telegram.ext import CommandHandler, Updater, Filters, MessageHandler
from telegram.message import Message
from pprint import pprint
# Load .env so TOKEN/CHAT_ID become visible through os.getenv below.
load_dotenv()
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s, %(levelname)s, %(message)s, %(name)s'
)
TOKEN = os.getenv('TOKEN')
CHAT_ID = os.getenv('CHAT_ID')
# NOTE(review): Bot/Updater construction will fail at import time if TOKEN is
# unset — consider validating the env vars first.
bot = telegram.Bot(token=TOKEN)
updater = Updater(token=TOKEN)
# Primary image source (cat API); get_image() falls back to the dog API.
URL = 'https://api.thecatapi.com/v1/images/search'
def start_work(update, context):
    """Handle /start: greet the user by first name, attach the command
    keyboard, and send an initial random image."""
    chat = update.effective_chat
    first_name = update.message.chat.first_name
    # Persistent reply keyboard: the "new cat" command plus an exit row.
    keyboard = telegram.ReplyKeyboardMarkup(
        [
            ['/newcat'],
            ['Выход']
        ], resize_keyboard=True
    )
    greeting = 'спс, от души, брат-{}!'.format(first_name)
    context.bot.send_message(
        chat_id=chat.id,
        text=greeting,
        reply_markup=keyboard
    )
    context.bot.send_photo(chat.id, get_image())
def get_image():
    """Return the URL of a random cat image; fall back to the dog API.

    Bug fixed: the except branch previously logged the imported
    ``logging.error`` *function object* inside an f-string instead of the
    actual exception; ``logging.exception`` now records the real traceback.
    """
    try:
        response = requests.get(URL)
    except Exception:
        # Primary (cat) API failed — log the traceback and fall back to dogs.
        logging.exception('Ошибка при запросе к основному API')
        NEW_URL = 'https://api.thedogapi.com/v1/images/search'
        response = requests.get(NEW_URL)
    rnd_cat = response.json()[0]['url']
    return rnd_cat
def put_image(update, context):
    """Handle /newcat: send a fresh random image to the current chat."""
    target_chat = update.effective_chat
    context.bot.send_photo(target_chat.id, get_image())
def say_hi(update, context):
    """Fallback text handler: answer any plain text with the literal
    string 'message'."""
    target_id = update.effective_chat.id
    context.bot.send_message(chat_id=target_id, text='message')
# Wire up command and text handlers on the module-level updater, then poll
# Telegram until the process is interrupted.
def main():
updater.dispatcher.add_handler(CommandHandler('start', start_work))
updater.dispatcher.add_handler(CommandHandler('newcat', put_image))
# Any plain text message falls through to say_hi.
updater.dispatcher.add_handler(MessageHandler(Filters.text, say_hi))
updater.start_polling(poll_interval=3.0)
# Block until the bot is stopped (Ctrl-C / signal).
updater.idle()
# Run the bot when executed as a script.
if __name__ == '__main__':
main()
# Push a text message to the fixed CHAT_ID using the module-level bot.
def send_message(message):
bot.send_message(chat_id=CHAT_ID, text=message)
# NOTE(review): these statements run at import time; when executed as a
# script they are only reached after main() returns (updater.idle() blocks
# until the bot is stopped).
text = 'Вам телеграмма!'
send_message(text)
| Mikhail-Kushnerev/kittybot | kittybot.py | kittybot.py | py | 2,076 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
... |
5037552319 | import xml.etree.ElementTree as ET
import subprocess
import asyncio
import sys
# ANSI escape sequences for colored/styled terminal output.
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'  # reset all attributes
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Fetch the TLS certificate fingerprint for "host:port" string *line* via
# openssl and record it in the module-level dict `f`, keyed by *line*.
# NOTE(review): *line* is interpolated into a shell command — only safe
# because the input comes from a trusted nmap XML file; never feed untrusted
# strings here.
async def execute11(line):
cmd = "openssl s_client -connect %s < /dev/null 2>/dev/null | openssl x509 -fingerprint -noout -in /dev/stdin" % line
fp_ = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
stdout, stderr = await fp_.communicate()
# Store the raw fingerprint output in the shared module-level dict.
f[line] = stdout
return f
# Banner (f-string spans multiple lines; do not edit inside the literal).
print(f"""{bcolors.HEADER}
 _____ _ _ ____ ____ _
| ___(_)_ __ __| / ___| __ _ _ __ ___ ___ / ___|___ _ __| |_
| |_ | | '_ \ / _` \___ \ / _` | '_ ` _ \ / _ \ | / _ \ '__| __|
| _| | | | | | (_| |___) | (_| | | | | | | __/ |__| __/ | | |_
|_| |_|_| |_|\__,_|____/ \__,_|_| |_| |_|\___|\____\___|_| \__|
{bcolors.ENDC}"""
)
# Require exactly two arguments: the reference host:port and an nmap XML file.
argc = len(sys.argv)
if argc == 1 or argc == 2:
print('Usage:\n\tpython3 FindSameCert.py domain:port nmap.xml')
print('For example:\n\tpython3 FindSameCert.py kkk.com:443 nmap.xml')
sys.exit()
else:
url = sys.argv[1]
nmap = sys.argv[2]
# Parse the nmap XML report and collect, per hostname, the ports whose
# service entry carries a "tunnel" attribute (i.e. TLS-wrapped services).
tree = ET.parse(nmap)
root = tree.getroot()
SslPorts = []
ListDomCheck = {}
hosts = root.findall('host')
for host in hosts:
try:
HostNameElem = host.findall('hostnames')
HostName = HostNameElem[0].findall('hostname')[0].attrib['name']
PortElement = host.findall('ports')
ports = PortElement[0].findall('port')
for port in ports:
service = port.findall('service')[0].attrib['name']
if "tunnel" in port.findall('service')[0].attrib:
_port = port.attrib['portid']
SslPorts.append(_port)
SslPortsCopy = SslPorts.copy()
ListDomCheck[HostName] = SslPortsCopy
SslPorts.clear()
# NOTE(review): bare except silently skips hosts with missing hostname/port
# elements — intentional best-effort parsing, but it also hides real bugs.
except:
pass
# Flatten into "host:port" strings for the async fingerprint checks.
DomainList = []
for i in ListDomCheck:
for j in range(len(ListDomCheck[i])):
line = i+":"+ListDomCheck[i][j]
DomainList.append(line)
# `f` is the shared results dict filled in by execute11().
f = {}
# Fingerprint of the reference endpoint, fetched synchronously.
cmd = "openssl s_client -connect %s < /dev/null 2>/dev/null | openssl x509 -fingerprint -noout -in /dev/stdin" %url
fp = subprocess.Popen([cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0]
# The f-string is rendered first, then %-formatting inserts the url.
print (f"{bcolors.HEADER}TLS certificate fingerprint for %s:{bcolors.ENDC}" %url)
print(fp.decode("utf-8"))
# Fingerprint every discovered TLS endpoint concurrently.
futures = [execute11(domain) for domain in DomainList]
loop = asyncio.get_event_loop()
fp_ = loop.run_until_complete(asyncio.wait(futures))
# Report every endpoint whose fingerprint matches the reference.
print(f"{bcolors.HEADER}The same TLS certificate is used on:{bcolors.ENDC}")
for k, v in f.items():
if v == fp:
print(f"{bcolors.BOLD}%s{bcolors.ENDC}" %k)
| delyura/FindSameCert | FindSameCert.py | FindSameCert.py | py | 2,909 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "asyncio.create_subprocess_shell",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "asyncio.subprocess",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": ... |
37080385953 | from flask import request
from flask.json import jsonify
from . import habits
from .models import Habit
from .dbmodel import Habitdb
import json
from bson import json_util
from flask_jwt_extended import get_jwt_identity,jwt_required
@habits.route('/',methods=['POST'])
@jwt_required()
def create():
current_user = get_jwt_identity()
habit = Habit(**request.json)
new_habit=Habitdb().create_habit(habit,current_user)
if(new_habit):
return jsonify(request.json),200
return 'Error',409
@habits.route('/',methods=['GET'])
@jwt_required()
def get():
current_user = get_jwt_identity()
habit_db=Habitdb().get_all_habits(current_user)
response=json.loads(json_util.dumps(habit_db))
return jsonify(response),200
@habits.route('/<string:_id>',methods=['DELETE'])
@jwt_required()
def delete(_id):
habit_db=Habitdb().delete_habit(_id)
if(habit_db):
return 'Sucess',200
return 'Cant delete it',409
@habits.route('/<string:_id>',methods=['PUT'])
@jwt_required()
def edit(_id):
habit=Habitdb().get_habit_Object(_id)
habit.changes(**request.json)
habit_db=Habitdb().update_habit(habit)
if(habit_db):
return jsonify(request.json),200
return 'Cant edit anything',405
@habits.route('/today/<string:_id>',methods=['POST'])
@jwt_required()
def marktoday(_id):
r=request.json
habit=Habitdb().get_habit_Object(_id)
try:
if(r['mark']):
habit.date.mark_today()
else:
habit.date.unmark_today()
update_habit=Habitdb().update_habit(habit)
if(update_habit):
return 'Sucess',200
except KeyError:
return 'error in date',405
| isaac152/APIHabitsTracker | app/habits/views.py | views.py | py | 1,684 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask_jwt_extended.get_jwt_identity",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.Habit",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name... |
24879624643 | from pydantic import ValidationError
from saltapi.web.schema.common import ProposalCode
import pytest
@pytest.mark.parametrize("code", ["2021-1-SCI-001", "2022-2-ORP-007", "2023-1-DDT-001"])
def test_valid_proposal_code(code: str):
assert ProposalCode.validate(code) == code
@pytest.mark.parametrize(
"invalid_code",
[
"20a1-1-ABC-123", # Invalid year
"2021-3-ABC-123", # Invalid semester
"2021-1-_ABC-123", # Underscore can't start the letter sequence
"2021-1-ABC_-123", # Underscore can't end the letter sequence
"2021-1-ABC-12", # Invalid number of digits
],
)
def test_invalid_proposal_code(invalid_code: str):
with pytest.raises(ValueError, match="incorrect proposal code"):
ProposalCode.validate(invalid_code)
def test_non_string_input():
with pytest.raises(TypeError, match="string required"):
ProposalCode.validate(123)
| saltastroops/salt-api | tests/schema/test_proposal_code_input.py | test_proposal_code_input.py | py | 920 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "saltapi.web.schema.common.ProposalCode.validate",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "saltapi.web.schema.common.ProposalCode",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 7,
"usage_t... |
4185758536 | from PIL import Image, ImageTk
import tkinter
from . import battleship
from .battleshipconfig import *
from .errortypes import BadLocationError, PlacementError
from .gamecanvas import GameCanvas
from .targetgridcanvas import TargetGridCanvas
from .startmenucanvas import StartMenuCanvas
from .imageloader import instance as gImageLoader
from .pegsprite import PegSprite
# Canvas for setup portion of the game (placing ships). Also maintains
# visual representation of a player's own board state.
class OceanGridCanvas(GameCanvas):
def __init__(self, parent: tkinter.Tk, player: battleship.ClassicPlayer, width: int, height: int, on_ships_are_placed: 'func'):
super().__init__(parent, bg='blue', width=width, height=height)
self._parent = parent
self._width = width
self._height = height
self._player = player
self._on_ships_are_placed = on_ships_are_placed
### load images of the board and ships ###
ocean_grid = gImageLoader.load(OCEAN_GRID_PATH)
self._ocean_grid_img = ocean_grid
self._ocean_grid_photo = ImageTk.PhotoImage(self._ocean_grid_img)
self._ocean_grid_id = None
carrier_img = gImageLoader.load(CARRIER_PATH)
self._carrier_img = carrier_img
battleship_img = gImageLoader.load(BATTLESHIP_PATH)
self._battleship_img = battleship_img
destroyer_img = gImageLoader.load(DESTROYER_PATH)
self._destroyer_img = destroyer_img
patrolboat_img = gImageLoader.load(PATROLBOAT_PATH)
self._patrolboat_img = patrolboat_img
submarine_img = gImageLoader.load(SUBMARINE_PATH)
self._submarine_img = submarine_img
# Pegs to show enemy moves / hits
self._pegs = [[PegSprite() for _ in range(battleship.COLUMNS)] for _ in range(battleship.ROWS)]
# Images to be manipulated and turned into Photos
self._ship_imgs = {battleship.Carrier.__name__: {0: self._carrier_img, 90: None, 180: None, 270: None, 360: None},
battleship.BattleShip.__name__: {0: self._battleship_img, 90: None, 180: None, 270: None, 360: None},
battleship.Destroyer.__name__: {0: self._destroyer_img, 90: None, 180: None, 270: None, 360: None},
battleship.Submarine.__name__: {0: self._submarine_img, 90: None, 180: None, 270: None, 360: None},
battleship.PatrolBoat.__name__: {0: self._patrolboat_img, 90: None, 180: None, 270: None, 360: None}}
# Photos passed into create_image functions
self._ship_photos = {battleship.Carrier.__name__: {0:ImageTk.PhotoImage(self._carrier_img),90: None, 180: None, 270: None, 360: None},
battleship.BattleShip.__name__: {0: ImageTk.PhotoImage(self._battleship_img), 90: None, 180: None, 270: None, 360: None},
battleship.Destroyer.__name__: {0: ImageTk.PhotoImage(self._destroyer_img), 90: None, 180: None, 270: None, 360: None},
battleship.Submarine.__name__: {0: ImageTk.PhotoImage(self._submarine_img), 90: None, 180: None, 270: None, 360: None},
battleship.PatrolBoat.__name__: {0:ImageTk.PhotoImage(self._patrolboat_img), 90: None, 180: None, 270: None, 360: None}}
self._mouse_btn1_down = False
self.bind('<Button-1>', self._on_mouse_btn1_down)
self.bind('<ButtonRelease-1>', self._on_mouse_btn1_release)
self._ships_are_placed = False # true if all ships placed
self._is_placement_phase = True
self._confirm_text_id = None
self._confirm_text = "Press enter to confirm ship placement"
self._confirm_x = 0
self._confirm_y = 0
self._confirm_width = 0
self._has_ship_selected = False
self._selected_ship = None
self._selected_ship_type = None
self._selected_ship_is_vertical = True
self._selected_ship_degree = 0
def display(self) -> None:
print('Ocean should display')
self._start()
self.focus_set()
self.pack()
def hide(self) -> None:
self.pack_forget()
# sets peg colors depending on board state and redraws
def on_hit(self) -> None:
x_spacing = DEFAULT_PEG_AREA_WIDTH / battleship.COLUMNS
y_spacing = DEFAULT_PEG_AREA_WIDTH / battleship.ROWS
# set the PegSprite location, color for every peg
for r in range(battleship.ROWS):
for c in range(battleship.COLUMNS):
peg = self._pegs[r][c]
if self._player.ocean_grid.pegs[r][c] == battleship.RED:
peg.color = PEG_HIT
else:
peg.color = PEG_MISS
peg.x1 = c * x_spacing + DEFAULT_HEADER_WIDTH + DEFAULT_PEG_SIZE
peg.y1 = r * y_spacing + DEFAULT_HEADER_HEIGHT + DEFAULT_PEG_SIZE
peg.x2 = (c + 1) * x_spacing + DEFAULT_HEADER_WIDTH - DEFAULT_PEG_SIZE
peg.y2 = (r + 1) * y_spacing + DEFAULT_HEADER_HEIGHT - DEFAULT_PEG_SIZE
peg.id = self.create_oval((peg.x1, peg.y1), (peg.x2, peg.y2), fill=peg.color)
self._pegs[r][c] = peg
self._redraw()
def _start(self) -> None:
#self.delete(self._click_text_id)
self._redraw()
self.bind('<Motion>', self._on_mouse_moved)
self.bind('<Shift_L>', self._on_shift_down)
self.bind('<Return>', self._on_return_down)
def _on_return_down(self, event: tkinter.Event) -> None:
if self._ships_are_placed:
self._on_ships_are_placed()
if self._confirm_text_id:
self.delete(self._confirm_text_id)
self.unbind('<Return>')
self._is_placement_phase = False
def _redraw(self) -> None:
# Redraw ships ( and later pegs )
if self._ocean_grid_id:
self.delete(self._ocean_grid_id)
self._ocean_grid_id = self.create_image((round(self._width/2), round(self._height/2)),
image=self._ocean_grid_photo, anchor='center')
if self._confirm_text_id:
self.delete(self._confirm_text_id)
if self._ships_are_placed and self._is_placement_phase:
self._confirm_text_id = self.create_text((self._confirm_x, self._confirm_y), width=self._confirm_width,
text=self._confirm_text, fill='white', anchor='w')
for ship in self._player.ships.values():
if ship:
sprite = ship.sprite
if sprite.id:
self.delete(sprite.id)
sprite.id = self.create_image(
(sprite.x, sprite.y),
image=sprite.photo,
anchor="center")
for r in range(battleship.ROWS):
for c in range(battleship.COLUMNS):
if self._player.ocean_grid.pegs[r][c] != None:
peg = self._pegs[r][c]
if peg.id:
self.delete(peg.id)
peg.id = self.create_oval((peg.x1, peg.y1), (peg.x2, peg.y2), fill=peg.color)
def _on_mouse_btn1_down(self, event: tkinter.Event) -> None:
self._mouse_btn1_down = True
## if not self._ships_are_placed:
x_ratio = event.x / self.winfo_width() # ratio of click in relation to entire window
bay_x_ratio = DEFAULT_SHIPBAY_X1 / DEFAULT_WIDTH
print(event.x, event.y)
print(x_ratio, bay_x_ratio)
if self._is_placement_phase:
if x_ratio <=1 and x_ratio >= bay_x_ratio:
self._select_ship_from_bay(event.y)
if self._selected_ship_type:
existing_ship = self._player.ships[self._selected_ship_type]
if existing_ship:
if existing_ship.is_placed:
self._deselect_ship(False)
return
print(self._selected_ship_type, existing_ship.is_placed)
ROTATION_DEGREE = 0
selected_sprite = self._selected_ship.sprite
selected_sprite.x = event.x
selected_sprite.y = event.y
selected_sprite.degree = self._selected_ship_degree
selected_sprite.image = self._ship_imgs[self._selected_ship_type][ROTATION_DEGREE]
selected_sprite.photo = self._ship_photos[self._selected_ship_type][ROTATION_DEGREE]
selected_sprite.id = self.create_image(
(selected_sprite.x, selected_sprite.y),
image=selected_sprite.photo,
anchor="center")
print(selected_sprite.id)
else:
x_ratio = DEFAULT_PEG_AREA_WIDTH / battleship.COLUMNS
y_ratio = DEFAULT_PEG_AREA_WIDTH / battleship.ROWS
click_col = int(event.x/x_ratio)
col = click_col - 1
click_row = int(event.y/y_ratio)
row = click_row - 1
row_str = None
try:
row_str = battleship.ROW_LETTERS[row]
except KeyError as e:
print(e)
else:
try:
ship_at_click = self._player.ocean_grid.at((row_str, col))
except BadLocationError as e:
print(e)
else:
if ship_at_click:
self._select_ship(ship_at_click)
self._player.ocean_grid.unplace(ship_at_click)
def _on_mouse_btn1_release(self, event: tkinter.Event) -> None:
self._mouse_btn1_down = False
if self._has_ship_selected:
col_width = DEFAULT_PEG_AREA_WIDTH / battleship.COLUMNS
row_height = DEFAULT_PEG_AREA_WIDTH / battleship.ROWS
print(int(event.y/row_height))
click_col = int(event.x/col_width)
col = click_col - 1
click_row = int(event.y/row_height)
row = click_row - 1
start_row = end_row = start_col = end_col = None
if self._selected_ship_is_vertical:
print('Vertical placement')
if self._selected_ship.length > 2:
if self._selected_ship.length % 2 != 0:
start_row = row - int(self._selected_ship.length/2)
end_row = row + int(self._selected_ship.length/2)
else:
start_row = row - self._selected_ship.length % 3
end_row = row + int(self._selected_ship.length/2)
else:
start_row = row
end_row = row + 1
start_col = end_col = col
else:
print('Horizontal placement')
if self._selected_ship.length > 2:
if self._selected_ship.length % 2 != 0:
start_col = col - int(self._selected_ship.length/2)
end_col = col + int(self._selected_ship.length/2)
print('col: {}, click_col: {}, start_col: {}, end_col: {}'.format(col, click_col, start_col, end_col))
else:
start_col = col - self._selected_ship.length % 3
end_col = col + int(self._selected_ship.length/2)
else:
start_col = col
end_col = col + 1
start_row = end_row = row
try:
start_row_str = battleship.ROW_LETTERS[start_row]
end_row_str = battleship.ROW_LETTERS[end_row]
print('Attempting place. Degree: {}'.format(self._selected_ship_degree))
self._player.ocean_grid.place((start_row_str, start_col), (end_row_str, end_col), self._selected_ship)
except PlacementError as place_error:
print(place_error)
print('Failed place, x: {} y: {}, row1: {} col1: {}, row2: {} col2: {}'.format(
event.x, event.y, start_row_str, start_col+1, end_row_str, end_col+1))
self._deselect_ship(True)
except KeyError as key_error:
print('Failed place. Numeric key is out of expected range (ship will be placed out of board).')
print('start_row: {}, end_row: {}'.format(start_row, end_row))
print('start_col: {}, end_col: {}'.format(start_col, end_col))
self._deselect_ship(True)
else:
# Player's ship should now be placed
print(self._selected_ship.is_placed)
selected_sprite = self._selected_ship.sprite
if selected_sprite.id:
self.delete(selected_sprite.id)
x = y = 0
if self._selected_ship_is_vertical:
if self._selected_ship.length % 2 != 0:
x = (click_col * DEFAULT_COL_WIDTH) + (DEFAULT_COL_WIDTH/3)
y = (click_row * DEFAULT_ROW_HEIGHT) + (DEFAULT_ROW_HEIGHT/3)
else:
x = (click_col * DEFAULT_COL_WIDTH) + (DEFAULT_COL_WIDTH/3)
y = (click_row * DEFAULT_ROW_HEIGHT) + (DEFAULT_ROW_HEIGHT * 5/6)
else:
if self._selected_ship.length % 2 != 0:
x = (click_col * DEFAULT_COL_WIDTH) + (DEFAULT_COL_WIDTH /3)
y = (click_row * DEFAULT_ROW_HEIGHT) + (DEFAULT_ROW_HEIGHT/3)
else:
x = (click_col * DEFAULT_COL_WIDTH) + (DEFAULT_COL_WIDTH * 5/6)
y = (click_row * DEFAULT_ROW_HEIGHT) + (DEFAULT_ROW_HEIGHT/3)
print('Click: {}, {}. Placed at: {},{}'.format(event.x, event.y, x, y))
print('row1: {} col1: {}, row2: {} col2: {}'.format(start_row_str, start_col+1, end_row_str, end_col+1))
print('Degrees: {}'.format(self._selected_ship_degree))
selected_sprite.x = x
selected_sprite.y = y
selected_sprite.degree = self._selected_ship_degree
selected_sprite.image = self._ship_imgs[self._selected_ship_type][self._selected_ship_degree]
selected_sprite.photo = self._ship_photos[self._selected_ship_type][self._selected_ship_degree]
selected_sprite.id = self.create_image(
(selected_sprite.x, selected_sprite.y),
image=selected_sprite.photo,
anchor="center")
self._player.ships[self._selected_ship_type] = self._selected_ship
print(self._player.ships)
self._deselect_ship(False)
print('Checking if all placed')
self._ships_are_placed = self._player.ships_are_placed()
if self._ships_are_placed:
print('all placed: {}'.format(self._ships_are_placed))
print('All ships placed')
## self._on_ships_are_placed()
self._draw_confirm_text()
if self._is_placement_phase and not self._ships_are_placed and self._confirm_text_id:
self.delete(self._confirm_text_id)
def _draw_confirm_text(self) -> None:
    """Draw the placement-confirmation prompt below the ship bay.

    Any previously drawn prompt is deleted first so at most one
    confirm text item exists on the canvas.
    """
    self._confirm_x = DEFAULT_SHIPBAY_X1
    self._confirm_y = DEFAULT_SHIPBAY_Y2 + DEFAULT_ROW_HEIGHT
    self._confirm_width = DEFAULT_WIDTH - self._confirm_x
    if self._confirm_text_id:
        self.delete(self._confirm_text_id)
    self._confirm_text_id = self.create_text(
        (self._confirm_x, self._confirm_y),
        width=self._confirm_width,
        fill='white',
        text=self._confirm_text,
        anchor='w',
    )
def _select_ship(self, ship: battleship.Ship) -> None:
    """Mark *ship* as the actively selected ship and cache its orientation."""
    self._has_ship_selected = True
    self._selected_ship = ship
    self._selected_ship_type = type(ship).__name__
    print(self._selected_ship_type)
    self._selected_ship_degree = ship.sprite.degree
    # Vertical orientations are 0, 180 and 360 degrees.
    self._selected_ship_is_vertical = self._selected_ship_degree in (0, 180, 360)
    self._ships_are_placed = False
def _select_ship_from_bay(self, y: int) -> None:
    """Select a freshly constructed ship based on the bay row at height *y*.

    Does nothing when *y* falls outside every ship band in the bay.
    """
    ship_cls = self._get_bay_ship_type(y)
    if not ship_cls:
        return
    self._has_ship_selected = True
    self._selected_ship = ship_cls()
    self._selected_ship_type = ship_cls.__name__
    print(self._selected_ship_type)
    # A bay ship always starts upright.
    self._selected_ship_degree = 0
    self._selected_ship_is_vertical = True
def _deselect_ship(self, should_delete: bool) -> None:
    """Clear the current ship selection.

    When *should_delete* is true, the ship's canvas image is removed and
    the player's slot for that ship type is emptied.
    """
    sprite = self._selected_ship.sprite
    if sprite.id and should_delete:
        self.delete(sprite.id)
        self._player.ships[self._selected_ship_type] = None
    # Reset all selection-tracking state to its defaults.
    self._has_ship_selected = False
    self._selected_ship = None
    self._selected_ship_type = None
    self._selected_ship_degree = 0
    self._selected_ship_is_vertical = True
def _on_mouse_moved(self, event: tkinter.Event) -> None:
    """Drag handler: redraws the selected ship image under the cursor.

    Only active while mouse button 1 is held and the game is in the
    placement phase.
    """
    if self._mouse_btn1_down:
        if self._has_ship_selected and self._is_placement_phase:
            # Hide the confirm prompt while the player is still moving ships.
            if self._is_placement_phase and not self._ships_are_placed and self._confirm_text_id:
                self.delete(self._confirm_text_id)
            selected_sprite = self._selected_ship.sprite
            if selected_sprite.id:
                # Remove the previous canvas image before drawing a new one.
                self.delete(selected_sprite.id)
                # NOTE(review): these resets are immediately overwritten below.
                selected_sprite.x = 0
                selected_sprite.y = 0
                selected_sprite.degree = 0
                selected_sprite.photo = None
                selected_sprite.image = None
            # Re-anchor the sprite at the current cursor position with the
            # cached image/photo for the current rotation.
            selected_sprite.x = event.x
            selected_sprite.y = event.y
            selected_sprite.degree = self._selected_ship_degree
            selected_sprite.image = self._ship_imgs[self._selected_ship_type][self._selected_ship_degree]
            selected_sprite.photo = self._ship_photos[self._selected_ship_type][self._selected_ship_degree]
            selected_sprite.id = self.create_image(
                (event.x, event.y),
                image=selected_sprite.photo,
                anchor="center")
def _on_shift_down(self, event: tkinter.Event) -> None:
    """Rotate the currently dragged ship by 90 degrees.

    Rotated images/photos are created lazily and cached per ship type and
    degree, then the sprite is redrawn at its last position.
    """
    if self._mouse_btn1_down and self._has_ship_selected:
        ROTATION_DEGREE = 90
        # NOTE(review): the wrap check runs BEFORE the increment, so the
        # degree cycles 90-180-270-360 and never revisits 0 after the
        # first rotation — confirm this is intended.
        if self._selected_ship_degree >= 360:
            self._selected_ship_degree = 0
        self._selected_ship_degree += ROTATION_DEGREE
        print('[SHIFT] Degree: {}'.format(self._selected_ship_degree))
        self._selected_ship_is_vertical = (self._selected_ship_degree == 0
                                           or self._selected_ship_degree == 180
                                           or self._selected_ship_degree == 360)
        # Lazily build and cache the rotated image for this orientation.
        if not self._ship_photos[self._selected_ship_type][self._selected_ship_degree]:
            BASE_DEGREE = 0
            self._ship_imgs[self._selected_ship_type][self._selected_ship_degree] = (
                self._ship_imgs[self._selected_ship_type][BASE_DEGREE].rotate(self._selected_ship_degree, expand=True))
            self._ship_photos[self._selected_ship_type][self._selected_ship_degree] = (
                ImageTk.PhotoImage(self._ship_imgs[self._selected_ship_type][self._selected_ship_degree]))
        selected_sprite = self._selected_ship.sprite
        if selected_sprite.id:
            # Remove the previous canvas image before drawing a new one.
            self.delete(selected_sprite.id)
            # NOTE(review): these resets are immediately overwritten below.
            selected_sprite.x = 0
            selected_sprite.y = 0
            selected_sprite.degree = 0
            selected_sprite.photo = None
            selected_sprite.image = None
        selected_sprite.x = event.x
        selected_sprite.y = event.y
        selected_sprite.degree = self._selected_ship_degree
        selected_sprite.image = self._ship_imgs[self._selected_ship_type][self._selected_ship_degree]
        selected_sprite.photo = self._ship_photos[self._selected_ship_type][self._selected_ship_degree]
        selected_sprite.id = self.create_image(
            (selected_sprite.x, selected_sprite.y),
            image=selected_sprite.photo,
            anchor="center")
def _get_bay_ship_type(self, y: int) -> battleship.Ship:
    """Map a click height *y* inside the ship bay to a ship class.

    Returns the matching ship class, or None when *y* falls between bands.
    """
    y_ratio = y / self.winfo_height()
    height = DEFAULT_HEIGHT
    # (top, bottom, ship class) bands in bay-design coordinates.
    bands = (
        (0, DEFAULT_PATROLBOAT_Y2, battleship.PatrolBoat),
        (DEFAULT_BATTLESHIP_Y1, DEFAULT_BATTLESHIP_Y2, battleship.BattleShip),
        (DEFAULT_SUBMARINE_Y1, DEFAULT_SUBMARINE_Y2, battleship.Submarine),
        (DEFAULT_CARRIER_Y1, DEFAULT_CARRIER_Y2, battleship.Carrier),
        (DEFAULT_DESTROYER_Y1, DEFAULT_DESTROYER_Y2, battleship.Destroyer),
    )
    for y1, y2, ship_cls in bands:
        # Compare as ratios so the lookup survives window resizing.
        if y1 / height <= y_ratio <= y2 / height:
            return ship_cls
    return None
| jtaylorsoftware/pyships | pyships/oceangridcanvas.py | oceangridcanvas.py | py | 22,728 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gamecanvas.GameCanvas",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "tkinter.Tk",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "imageloader.instance.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "image... |
4407500695 | import scrapy
import re
from competition.items import CompetitionItem
class CiasiSpider(scrapy.Spider):
    """Spider for C-IASI (China Insurance Automotive Safety Index) ratings.

    Crawls the listing page, then follows each vehicle's detail page to
    collect safety equipment, crash-prevention index and evaluation tables.
    """
    name = 'ciasi'
    allowed_domains = ['ciasi.org.cn']
    # start_urls = ['http://www.ciasi.org.cn/Home/safety/index?sid=15&bid=&cid=&sss=1&year=51,50']
    start_urls = ['http://www.ciasi.org.cn/Home/safety/index?sid=15&bid=&cid=&sss=1&year=52,51,50,49']

    def parse(self, response):
        """Parse the listing page and schedule one detail request per vehicle."""
        list_item = response.xpath("//div[@class='eval_by_item']")
        for item in list_item:
            items = CompetitionItem()
            next_url = item.xpath("./a/@href").extract_first()
            brand = item.xpath("./a/div[@class='ev_i_brand']/p/text()").extract_first()
            menu = item.xpath("./a/div[@class='ev_i_manu']/div/p/text()").extract_first()
            models = item.xpath("./a/div[@class='ev_i_models']/div/p/text()").extract_first()
            level = item.xpath("./a/div[@class='ev_i_level']/div/p/text()").extract_first()
            model = item.xpath("./a/div[@class='ev_i_model']/div/p/text()").extract_first()
            items['brand'] = brand
            items['menu'] = menu
            items['models'] = models
            items['level'] = level
            items['model'] = model
            # Placeholders; parse_detail fills these from the detail page.
            items['l_safe'] = ''
            items['r_pro'] = ''
            items['b_eval'] = ''
            # yield items
            if next_url is not None:
                next_url = 'http://www.ciasi.org.cn/' + next_url
                yield scrapy.Request(
                    next_url,
                    callback= self.parse_detail,
                    meta = {'item': items}
                )

    def parse_detail(self,response):
        """Parse one vehicle detail page and yield the completed item."""
        # TODO: item carried over from the listing page
        main_item = response.meta['item']
        # TODO: left-hand column (safety-equipment availability)
        pur_list = response.xpath("//div[@class='pur_le_item']")
        safe_pur_list = {}
        for pur in pur_list:
            title_txt = pur.xpath("./div[1]/p/text()").extract_first()
            if title_txt is not None:
                img_src = pur.xpath("./div[2]/div/img/@src").extract_first()
                img_stat = str(img_src).split('/')[-1]
                # Map the status-icon filename to a human-readable label
                # (standard / not equipped / optional).
                if img_stat == 'icon-greenDi.png':
                    img_stat = '车辆标配'
                elif img_stat == 'icon-radCa.png':
                    img_stat = '车辆未配备'
                elif img_stat == 'icon-yellowQu.png':
                    img_stat = '车辆选配'
                safe_pur_list[title_txt] = img_stat
        main_item['l_safe'] = safe_pur_list
        # TODO: right-hand column (crash-prevention index)
        pro_index_list = []
        main_list = response.xpath("//tr[contains(@class,'pur_hd_')]")
        for i in main_list:
            right_title = i.xpath(".//div[@class='pr_e_lt']/p/text()").extract_first()
            right_result = i.xpath(".//div[@class='ev_i_bs']/span/text()").extract_first()
            pro_index_list.append({right_title:right_result,"detail": {}})
        if len(pro_index_list) > 0:
            count = 0
            # One CSS class per detail sub-table in this page layout.
            for cls in ["co_p_one","co_p_two","co_p_san","co_p_si"]:
                one_item = {}
                for one in response.xpath("//tr[@class='{}']".format(cls)):
                    one_title = one.xpath(".//div[@class='pr_t_xt']/p/text()").extract_first()
                    one_result = one.xpath(".//div[@class='co_pu_s']/span/text()").extract_first()
                    one_item[one_title] = one_result
                pro_index_list[count]["detail"] = one_item
                count += 1
        elif len(pro_index_list) == 0:
            # Fallback for the alternate layout without 'pur_hd_' rows.
            main_title_list = response.xpath("//div[@class='pr_e_lt']/p/text()").extract()
            main_price_list = response.xpath("//div[@class='ev_i_bs']/span/text()").extract()
            item_title_list = response.xpath("//div[@class='pr_t_xt']/p/text()").extract()
            item_price_list = response.xpath("//div[@class='co_pu_s']/span/text()").extract()
            pro_list = []
            for ti,pr in zip(main_title_list,main_price_list):
                _item = {ti: pr,'detail':{}}
                pro_list.append(_item)
            # NOTE(review): the fixed index slices below (0, 1:4, 5, 6)
            # assume an exact table shape — verify against the live page.
            pro_list[0]['detail'] = {item_title_list[0]:item_price_list[0]}
            pro_list[1]['detail'] = {t:p for t,p in zip(item_title_list[1:4],item_price_list[1:4])}
            pro_list[2]['detail'] = {item_title_list[5]:item_price_list[5]}
            pro_list[3]['detail'] = {item_title_list[6]:item_price_list[6]}
            pro_index_list += pro_list
        main_item['r_pro'] = pro_index_list
        # TODO: lower section (detailed crash-test evaluation tables)
        evaluation_details = response.xpath("//div[@class='pa_t_ds']/p/text() | //td[@class='p_h_les']/div//p/text()").extract()
        evaluation_price = response.xpath("//div[@class='pa_t_bz']/span/text() | //div[@class='pa_t_bz']/div[@class='pa_i_rg']/span/text()").extract()
        evaluation = [{de:pr} for de,pr in zip(evaluation_details,evaluation_price)]
        table_list = response.xpath("//div[@class='par_p_block']//table")
        eval_table_data = []
        price_image = table_list[0].xpath(".//div[@class='pa_i_rg']//img/@src").extract_first()
        for index,table in enumerate(table_list):
            title_list = table.xpath(".//div[@class='pa_i_le' or @class='pa_i_xl']/p/text()").extract()
            price_list = table.xpath(".//div[@class='pa_i_rg' or @class='co_pu_s']/span/text()").extract()
            price_image = str(price_image).split('/')[-1]
            # Translate the airbag icon into its deployment state.
            if price_image == 'icon-greenDi.png':
                price_image = '气囊未起爆'
            elif price_image == 'icon-radCa.png':
                price_image = '气囊起爆'
            if title_list[0] == '正面碰撞得分(满分30分)':
                price_list.append(price_image)
            _table_data = [{title:price} for title,price in zip(title_list,price_list)]
            eval_table_data += _table_data
        price_car = response.xpath("//div[@class='pa_bt_le']/span/text()").extract_first()
        evaluation = evaluation + eval_table_data
        evaluation.append({'price':price_car})
        main_item['b_eval'] = evaluation
        price = response.xpath("//div[@class='pa_t_bt']/div[@class='pa_bt_le']/span/text()").extract_first()
        # Keep only the numeric part of the price string.
        price = re.findall(r"\d+\.?\d*",price)
        main_item['price'] = price
        yield main_item
# //div[@class="par_p_block"]//table//div[@class='pa_i_le' or @class='pa_i_xl']/p
| caojikeai/spider | competition/competition/spiders/ciasi.py | ciasi.py | py | 6,386 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "competition.items.CompetitionItem",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re... |
42165658616 | #!/usr/bin/python3
#
# Script for dumping/programming SPI flash chips with Hydrabus.
# Based on HydraBus BBIO documentation: https://github.com/hydrabus/hydrafw/wiki/HydraFW-Binary-SPI-mode-guide
#
# Author: MrKalach [https://github.com/MrKalach]
# License: GPLv3 (https://choosealicense.com/licenses/gpl-3.0/)
#
import hexdump
import serial
import sys
import argparse
class Params():
    """Configuration holder for the HydraBus serial link and the SPI bus.

    Class attributes are the defaults; main() overwrites them on an
    instance from the parsed command-line options.
    """
    serial_port = 'COM4'    # serial device the HydraBus is attached to
    serial_speed = 115200   # baud rate of the HydraBus serial link
    block_size = 0x1000 # max_buffer (HydraBus limitation)
    spi = 1                 # 1 -> SPI1, 0 -> SPI2
    polarity = 0            # SPI clock polarity (CPOL)
    clock_phase = 0         # SPI clock phase (CPHA)

    # BBIO speed-setting command bytes, keyed by SPI port then by the
    # human-readable speed string used on the command line.
    SPI_SPEED_TABLE = {
        "SPI1" : {
            "320k" : 0b01100000,
            "650k" : 0b01100001,
            "1.31m" : 0b01100010,
            "2.62m" : 0b01100011,
            "5.25m" : 0b01100100,
            "10.5m" : 0b01100101,
            "21m" : 0b01100110,
            "42m" : 0b01100111,
        },
        "SPI2" : {
            "160k" : 0b01100000,
            "320k" : 0b01100001,
            "650k" : 0b01100010,
            "1.31m" : 0b01100011,
            "2.62m" : 0b01100100,
            "5.25m" : 0b01100101,
            "10.5m" : 0b01100110,
            "21m" : 0b01100111,
        }
    }
    SPI_speed = 0b01100110  # default speed command (21m on SPI1)
def check_result(result, expected, error_str):
    """Verify that *expected* occurs within the device response *result*.

    Raises:
        AssertionError: with *error_str* plus both values when the
            response does not contain the expected byte sequence.
    """
    # Explicit raise instead of a bare `assert` so the check still runs
    # when Python is started with optimizations (-O strips asserts).
    if expected not in result:
        raise AssertionError(f'{error_str}: expected={expected} != result={result}')
class SPI_hydra:
    """Context manager wrapping a HydraBus in BBIO binary SPI mode.

    On enter: opens the serial port, switches the HydraBus into binary
    SPI mode and configures port, polarity, phase and speed.  On exit:
    returns the HydraBus to its normal terminal mode.
    """

    def __init__(self, params):
        # The serial handle is only opened in __enter__.
        self.serial = None
        self.params = params

    def __enter__(self):
        self.serial = serial.Serial(self.params.serial_port, self.params.serial_speed)
        # Switching HydraBus to binary mode (20 zero bytes per protocol)
        for _ in range(20):
            self.serial.write(b'\x00')
        check_result(b"BBIO1", self.serial.read(5), 'Could not switch into binary mode!')
        self.serial.reset_input_buffer()
        # Switching to SPI mode
        self.write_bytes(0b00000001)
        check_result(b"SPI1", self.serial.read(4), 'Could not switch to SPI mode')
        # Configuring SPI: 0b1000_0000 | CPOL<<3 | CPHA<<2 | port
        cfg = 0b10000000 | self.params.polarity << 3 | self.params.clock_phase << 2 | self.params.spi
        self.write_bytes(cfg) #polarity => 0, phase => 0, SPI1
        check_result(b'\x01', self.serial.read(1),'Could not setup SPI port!')
        # Setting up SPI speed
        self.write_bytes(self.params.SPI_speed)
        check_result(b'\x01', self.serial.read(1), 'Could not setup SPI speed!')
        self.cs_off()
        return self

    def __exit__(self, type, value, traceback):
        # Switch HydraBus back to terminal mode
        self.serial.write(b'\x00')
        self.serial.write(b'\x0F')

    def write_bytes(self, data):
        """Write raw bytes; a scalar is wrapped into a one-element list."""
        if not isinstance(data, list) and not isinstance(data, bytes):
            data = [data]
        self.serial.write(data)

    def read(self, num):
        """Read exactly *num* bytes from the HydraBus."""
        return self.serial.read(num)

    def cs_on(self):
        """Assert the SPI chip-select line."""
        self.write_bytes(0b00000011)
        check_result(b'\x01', self.serial.read(1), 'Cannot switch CS to on!')

    def cs_off(self):
        """De-assert the SPI chip-select line."""
        self.write_bytes(0b00000010)
        check_result(b'\x01', self.serial.read(1), 'Cannot switch CS to off!')
##############################################################################################################################
def num_to_bytes(num, padding):
    """Encode *num* as a big-endian byte string exactly *padding* bytes long."""
    encoded = num.to_bytes(padding, byteorder='big')
    return encoded
def dump_chip_id(params):
    """Read and print the 3-byte JEDEC id via the RDID (0x9F) command."""
    with SPI_hydra(params) as hydra:
        print("Getting chip manufacturer id ...")
        # 0b00000100 - write then read (with CS pinning)
        # header: 1 byte out (0x0001), 3 bytes expected back (0x0003)
        hydra.write_bytes([0b00000100, 0x00, 0x01, 0x00, 0x03])
        # send RDID command
        hydra.write_bytes(0x9f)
        check_result(b'\x01', hydra.read(1), 'Error occuried while getting chip id!')
        result = hydra.read(3)
        print(f'Chip manufacturer id: {hexdump.dump(result)}')
def dump_flash(params, filename, blocks, start_addr_hex):
    """Dump *blocks* blocks of params.block_size bytes to *filename*.

    Args:
        params: Params instance describing the HydraBus/SPI setup.
        filename: Output file the raw dump is written to.
        blocks: Number of blocks to read.
        start_addr_hex: Flash start address as a HEX string.
    """
    start_addr = int(start_addr_hex, 16)
    with SPI_hydra(params) as hydra:
        print('Start dumping ...')
        print(f'Reading blocks: ', end='')
        block = 0
        buf = bytearray()
        while block < blocks:
            # write-then-read: \x00\x04 - input size
            hydra.write_bytes(b'\x04\x00\x04' + num_to_bytes(params.block_size, 2))
            # read command (\x03) and 3 bytes of address
            hydra.write_bytes(b'\x03' + num_to_bytes(start_addr, 3))
            check_result(b'\x01', hydra.read(1), 'Error occurred while dumping!')
            buf += hydra.read(params.block_size)
            # Progress: print the block number every 10th block, else a dot.
            if (block % 10) == 0:
                print(block, end='')
            else:
                print('.', end='')
            block += 1
            start_addr += params.block_size
        with open(filename, 'wb+') as f:
            f.write(buf)
        print('\nDone')
# This code has been proofed && worked with Macronix MX25L12845E chip
# But somekind of generic commands is used, so it should work for most of other chips
def program_flash(params, filename, start_addr_hex, erase_before_program):
    """Program the contents of *filename* into the SPI flash.

    Data is written in 256-byte pages starting at *start_addr_hex* (HEX
    string).  When *erase_before_program* is set, each 4 KiB sector is
    erased just before its first page is written.

    Raises:
        RuntimeError: on chip-side failures (write protection,
            P_FAIL/E_FAIL, or status polling that never completes).
    """
    MAX_ATTEMPTS = 30

    def enable_write():
        # Send WREN and poll RDSR until WEL is set and WIP is clear (0b10).
        attempts = 0
        status = 0
        while status & 0b11 != 0b10:
            # WREN
            hydra.write_bytes(b'\x04\x00\x01\x00\x00')
            hydra.write_bytes(0x06)
            check_result(b'\x01', hydra.read(1), 'Error occurred while sending WREN!')
            # RDSR
            hydra.write_bytes(b'\x04\x00\x01\x00\x01')
            hydra.write_bytes(0x05)
            check_result(b'\x01', hydra.read(1), 'Error occurred while sending RDSR!')
            status = hydra.read(1)[0]
            if status & (1 << 7):
                # Fixed: the original `assert '...'` statements were no-ops
                # (a non-empty string is truthy), so failures were ignored.
                raise RuntimeError('Chip has a write protection!')
            attempts += 1
            if attempts > MAX_ATTEMPTS:
                raise RuntimeError('Cannot set WREN!')

    def check_fail():
        # RDSCUR to check P_FAIL and E_FAIL
        hydra.write_bytes(b'\x04\x00\x01\x00\x01')
        hydra.write_bytes(0x28)
        check_result(b'\x01', hydra.read(1), 'Error occurred while sending RDSCUR!')
        status = hydra.read(1)[0]
        # Fixed: mask was written as hex 0x01100000 instead of the intended
        # binary 0b01100000 (P_FAIL bit 6 | E_FAIL bit 5).
        if status & 0b01100000 != 0:
            # CLSR - clear SR Fail Register
            hydra.write_bytes(b'\x04\x00\x01\x00\x01')
            hydra.write_bytes(0x30)
            check_result(b'\x01', hydra.read(1), 'Error occurred while sending RDSCUR!')
            raise RuntimeError('P_FAIL or E_FAIL: register={}'.format(bin(status)))

    def wait_write_complete():
        # Poll RDSR until the write-in-progress bit clears, then validate.
        status = 1
        attempts = 0
        while status & 0b1 != 0:  # write-in-progress bit check
            # RDSR
            hydra.write_bytes(b'\x04\x00\x01\x00\x01')
            hydra.write_bytes(0x05)
            check_result(b'\x01', hydra.read(1), 'Error occurred while sending RDSR!')
            status = hydra.read(1)[0]
            attempts += 1
            if attempts > MAX_ATTEMPTS:
                raise RuntimeError('Cannot read status RDSR!')
        check_fail()

    def erase_sector_4k(addr):
        enable_write()
        # SE - sector erase, 0x20 + 3 bytes of address
        hydra.write_bytes(b'\x04\x00\x04\x00\x00')
        # Fixed: the original erased the closure variable `start_addr`
        # instead of the `addr` argument.
        hydra.write_bytes(b'\x20' + num_to_bytes(addr, 3))  # CE - chip erase, can be used here
        check_result(b'\x01', hydra.read(1), 'Error occurred while sending SE!')
        wait_write_complete()

    def write_page(page, data):
        # Pad a final partial page up to a full 256 bytes.
        if len(data) != 0x100:
            data = data + b'\x00' * (0x100 - len(data))
        assert len(data) == 0x100, f'Somthing goes wrong, data size is not 0x100 => {len(data)}'
        enable_write()
        # PP - page program, 0x02 + 3 bytes of address + data
        # low byte of address is always 0x00
        hydra.write_bytes(b'\x04\x01\x04\x00\x00')
        hydra.write_bytes(b'\x02' + num_to_bytes(page, 2) + b'\x00')
        hydra.write_bytes(data)
        check_result(b'\x01', hydra.read(1), 'Error occurred while sending PP!')
        wait_write_complete()

    start_addr = int(start_addr_hex, 16)
    assert start_addr & 0x1FF == 0, f'In program mode start address has to be aligned (0x1FF bytes boundary). start address = {start_addr}'
    print('Starting programming...')
    print('Blocks:', end='')
    # Fixed: pages were previously numbered from 0 regardless of
    # start_addr, so data always landed at address 0; derive the first
    # page index from the requested start address instead.
    page = start_addr // 0x100
    with SPI_hydra(params) as hydra:
        with open(filename, 'rb') as f:
            while True:
                data = f.read(0x100)
                if len(data) == 0:
                    break
                # A 4 KiB sector holds 16 (0x10) pages of 256 bytes; the
                # original tested `page % 0xF` (15) and drifted out of
                # sector alignment after the first sector.
                if page % 0x10 == 0:
                    if erase_before_program:
                        erase_sector_4k(page * 0x100)
                    block_number = page // 0x10
                    if block_number % 10 == 0:
                        print(block_number, end='')
                    else:
                        print('.', end='')
                write_page(page, data)
                page += 1
    print('\nDone.')
def main():
    """Parse command-line options and dispatch to the requested action."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-hsp', '--hydra-serial-port',
        type=str, default=Params.serial_port,
        help='Hydra COM port (eg. COM4 or /dev/ttyS0)'
    )
    parser.add_argument(
        '-hss', '--hydra-serial-speed',
        type=int, default=Params.serial_speed,
        help='Hydra COM port speed'
    )
    parser.add_argument(
        '-bs', '--block-size',
        type=int, default=Params.block_size,
        help='Max size of data block for SPI, value should be in range (0, 4096]'
    )
    parser.add_argument(
        '-spi', '--spi-port',
        type=str, default='SPI2' if Params.spi == 0 else 'SPI1',
        help='SPI port',
        choices=['SPI1', 'SPI2']
    )
    parser.add_argument(
        '-ss', '--spi-speed',
        type=str, default='21m',
        help=f'''
            SPI speed (
                SPI1: {",".join(Params.SPI_SPEED_TABLE["SPI1"].keys())}
                SPI2: {",".join(Params.SPI_SPEED_TABLE["SPI2"].keys())}
            )
            ''',
    )
    parser.add_argument(
        '-sp', '--spi-polarity',
        type=int, default=Params.polarity,
        help=f'SPI polarity',
        choices=[0, 1]
    )
    parser.add_argument(
        '-scph', '--spi-clock-phase',
        type=int, default=Params.clock_phase,
        help=f'SPI clock phase',
        choices=[0, 1]
    )
    # Sub-commands: chip-id, dump, program.
    subparsers = parser.add_subparsers(dest='action', required=True)
    subparsers.add_parser('chip-id', help='Get manufacturer chip id')
    dump_parser = subparsers.add_parser('dump', help='Dump flash into file')
    dump_parser.add_argument('filename', type=str, help='File to store dump')
    dump_parser.add_argument('blocks', type=int, help='Blocks to read')
    dump_parser.add_argument('start_address', type=str, help='Start address (HEX)')
    burn_parser = subparsers.add_parser('program', help='Program img into flash')
    burn_parser.add_argument('filename', type=str, help='File to program')
    burn_parser.add_argument('start_address', type=str, help='Start address (HEX)')
    burn_parser.add_argument('-ebw', '--erase_before_program', action='store_true', help='Erase data before program')
    try:
        options = parser.parse_args()
        # Validate the CLI options and copy them into a Params instance.
        params = Params()
        params.serial_port = options.hydra_serial_port
        params.serial_speed = options.hydra_serial_speed
        if options.block_size < 1 or options.block_size > 0x1000:
            print(f'Invalid block size {options.block_size}! Value should be in range (1, 4096]')
            exit()
        params.block_size = options.block_size
        params.spi = {"SPI1" : 1, "SPI2" : 0}[options.spi_port]
        if options.spi_speed not in Params.SPI_SPEED_TABLE[options.spi_port]:
            print(f'Wrong speed({options.spi_speed}) for this({options.spi_port}) port!')
            exit()
        params.SPI_speed = Params.SPI_SPEED_TABLE[options.spi_port][options.spi_speed]
        params.polarity = options.spi_polarity
        params.clock_phase = options.spi_clock_phase
        if options.action == 'chip-id':
            dump_chip_id(params)
        elif options.action == 'dump':
            dump_flash(params, options.filename, options.blocks, options.start_address)
        elif options.action == 'program':
            program_flash(params, options.filename, options.start_address, options.erase_before_program)
    except SystemExit:
        # Reached on argparse errors and the exit() calls above: show usage.
        print()
        parser.print_help()
        sys.exit(0)
# Script entry point: only run the CLI when executed directly.
if __name__ == '__main__':
    main()
| hydrabus/hydrafw | contrib/SPI_flasher/HydraSPI.py | HydraSPI.py | py | 12,969 | python | en | code | 305 | github-code | 1 | [
{
"api_name": "serial.Serial",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "hexdump.dump",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "argparse.Argumen... |
1620437423 | import spacy
import re
import pickle
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD
from gensim.models import Word2Vec
import numpy as np
nlp = spacy.load("en_core_web_sm")
def lematizar_documento(ruta_doc, nombre_archivo_salida):
    """Lemmatize every document of a pickled DataFrame and save the result.

    Args:
        ruta_doc: Path to a pickle holding a DataFrame with a 'doc' column.
        nombre_archivo_salida: Path the lemmatized DataFrame is pickled to.
    """
    try:
        # Context managers close the files even if processing raises;
        # the original leaked both handles on any mid-loop exception.
        with open(ruta_doc, 'rb') as archivo_df:
            df = pickle.load(archivo_df)
        for i in range(len(df)):
            tokens = nlp(df.loc[i, 'doc'])
            # Replace each document by the space-joined lemmas of its tokens.
            df.loc[i, 'doc'] = " ".join(t.lemma_ for t in tokens)
        with open(nombre_archivo_salida, 'wb') as salida:
            pickle.dump(df, salida)
    except FileNotFoundError:
        print("No se encontro el archivo")
def quitar_sw_documento(ruta_doc, nombre_archivo_salida):
    """Remove stop words from every document of a pickled DataFrame.

    Args:
        ruta_doc: Path to a pickle holding a DataFrame with a 'doc' column.
        nombre_archivo_salida: Path the filtered DataFrame is pickled to.
    """
    try:
        # Context managers close the files even if processing raises;
        # the original leaked both handles on any mid-loop exception.
        with open(ruta_doc, 'rb') as archivo_df:
            df = pickle.load(archivo_df)
        for i in range(len(df)):
            tokens = nlp(df.loc[i, 'doc'])
            # Keep only non-stop-word tokens.  (A POS-based filter for
            # DET/ADP/CONJ/PRON was previously considered here.)
            lista_palabras = [t.text for t in tokens if not t.is_stop]
            texto = " ".join(lista_palabras)
            # Raw string fixes the invalid '\s' escape DeprecationWarning.
            texto = re.sub(r'\s+', ' ', texto)
            df.loc[i, 'doc'] = texto.strip()
        with open(nombre_archivo_salida, 'wb') as salida:
            pickle.dump(df, salida)
        print(df.loc[0, 'doc'])
    except FileNotFoundError:
        print("No se encontro el archivo")
def limpia_texto(ruta_doc, nombre_archivo_salida):
    """Strip punctuation from every document of a pickled DataFrame.

    Args:
        ruta_doc: Path to a pickle holding a DataFrame with a 'doc' column.
        nombre_archivo_salida: Path the cleaned DataFrame is pickled to.
    """
    try:
        # Context managers close the files even if processing raises;
        # the original leaked both handles on any mid-loop exception.
        with open(ruta_doc, 'rb') as archivo_df:
            df = pickle.load(archivo_df)
        for i in range(len(df)):
            tokens = nlp(df.loc[i, 'doc'])
            # Keep only tokens that are not punctuation.
            lista_palabras = [t.text for t in tokens if not t.is_punct]
            texto = " ".join(lista_palabras)
            # Raw string fixes the invalid '\s' escape DeprecationWarning.
            texto = re.sub(r'\s+', ' ', texto)
            df.loc[i, 'doc'] = texto.strip()
        with open(nombre_archivo_salida, 'wb') as salida:
            pickle.dump(df, salida)
        print(df.loc[0, 'doc'])
    except FileNotFoundError:
        print("No se encontro el archivo")
# Normalization combinations — preprocessing variants used to build the .pkl files:
# quitar_sw_documento("test.pkl", "test_nsw.pkl")
# quitar_sw_documento("train.pkl", "train_nsw.pkl")
# lematizar_documento("test.pkl", "test_lemma.pkl")
# lematizar_documento("train.pkl", "train_lemma.pkl")
# limpia_texto('test.pkl','test_limp.pkl')
# limpia_texto('train.pkl','train_limp.pkl')
# lematizar_documento("test_nsw.pkl","test_nsw_lemma.pkl")
# lematizar_documento("train_nsw.pkl","train_nsw_lemma.pkl")
# quitar_sw_documento("test_lemma.pkl","test_lemma_nsw.pkl")
# quitar_sw_documento("train_lemma.pkl","train_lemma_nsw.pkl")
# limpia_texto('test_nsw.pkl','test_nsw_limp.pkl')
# limpia_texto('train_nsw.pkl','train_nsw_limp.pkl')
# lematizar_documento("test_limp.pkl","test_limp_lemma.pkl")
# lematizar_documento("train_limp.pkl","train_limp_lemma.pkl")
# lematizar_documento("test_nsw_limp.pkl","test_nsw_limp_lemma.pkl")
# lematizar_documento("train_nsw_limp.pkl","train_nsw_limp_lemma.pkl")
def get_x_y_data(ruta_doc):
    """Load a pickled DataFrame and return its 'doc' and 'label' columns.

    Args:
        ruta_doc: Path to a pickle holding a DataFrame with 'doc' and
            'label' columns.

    Returns:
        (data, target) on success; None when the file is missing (note
        that callers unpacking the result will then raise TypeError).
    """
    try:
        # `with` closes the handle even if unpickling fails; the original
        # leaked it on any error raised after open().
        with open(ruta_doc, 'rb') as archivo_df:
            df = pickle.load(archivo_df)
        return df['doc'], df['label']
    except FileNotFoundError:
        print("No se encontro el archivo")
def rep_vec(data_train, data_test, vectorizer):
    """Fit *vectorizer* on the training docs and transform both splits.

    Prints the learned vocabulary and its size, then returns the
    (train, test) document-term matrices.
    """
    train_matrix = vectorizer.fit_transform(data_train)
    test_matrix = vectorizer.transform(data_test)
    vocabulary = vectorizer.get_feature_names_out()
    print(vocabulary)
    print(len(vocabulary))
    return train_matrix, test_matrix
def get_model(route_train):
    """Train a Word2Vec model on the lower-cased training documents.

    Args:
        route_train: Path to the pickled training DataFrame.

    Returns:
        A gensim Word2Vec model with 300-dimensional vectors.
    """
    X_train, y_train = get_x_y_data(route_train)
    # Word2Vec expects a list of token lists.
    sentences = [sentence.lower().split() for sentence in X_train]
    model = Word2Vec(sentences, vector_size = 300, window = 16, min_count = 5, workers = 4)
    # workers = number of threads
    return model
def vectorize_embedding(sentence, w2v_model):
    """Average the word vectors of *sentence*'s in-vocabulary tokens.

    Returns a 300-dimensional zero vector when no token is known to
    the model.
    """
    vocab = w2v_model.wv
    vectors = [vocab[token] for token in sentence.split() if token in vocab]
    if not vectors:
        return np.zeros(300)
    return np.array(vectors).mean(axis=0)
def classify(clf, route_train, route_test, vectorizer=None):
    """Train *clf* and print a classification report for the test split.

    Args:
        clf: scikit-learn style estimator with fit/predict.
        route_train: Path to the pickled training DataFrame.
        route_test: Path to the pickled test DataFrame.
        vectorizer: Optional sklearn vectorizer.  When given, documents
            are vectorized with it; otherwise a Word2Vec model is trained
            on the training split and each document becomes the mean of
            its word embeddings.
    """
    X_train, y_train = get_x_y_data(route_train)
    X_test, y_test = get_x_y_data(route_test)
    # Identity comparison is the idiomatic None test (was `!= None`).
    if vectorizer is not None:
        vectors_train, vectors_test = rep_vec(X_train, X_test, vectorizer)
        clf.fit(vectors_train, y_train)
        y_pred = clf.predict(vectors_test)
    else:
        model = get_model(route_train)
        X_train = np.array([vectorize_embedding(sentence.lower(), model) for sentence in X_train])
        X_test = np.array([vectorize_embedding(sentence.lower(), model) for sentence in X_test])
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
    print(y_pred)
    print(classification_report(y_test, y_pred))
def classify_svd(clf, route_train, route_test, vectorizer):
    """Train/evaluate *clf* on vectorized docs reduced with TruncatedSVD.

    NOTE(review): the pipeline is built with TruncatedSVD(300) but
    set_params immediately overrides n_components to 1000, so 1000
    components are actually used.
    """
    X_train, y_train = get_x_y_data(route_train)
    X_test, y_test = get_x_y_data(route_test)
    pipe = Pipeline([('text_representation', vectorizer), ('dimensionality_reduction', TruncatedSVD(300)),
                     ('classifier', clf)])
    pipe.set_params(dimensionality_reduction__n_components = 1000)
    print(pipe)
    pipe.fit(X_train, y_train)
    y_pred = pipe.predict(X_test)
    print(classification_report(y_test, y_pred))
# Text-representation vectorizers: binary presence, term counts, tf-idf.
vectorizer_bin = CountVectorizer(binary=True)
vectorizer_freq = CountVectorizer()
vectorizer_tfidf = TfidfVectorizer()
# Candidate classifiers; only the SGD classifier is exercised below.
clf_NB = MultinomialNB()
clf_LR = LogisticRegression(max_iter = 300)
clf_MLPC = MLPClassifier()
clf_SGD = SGDClassifier()
# Experiments with the classifiers:
print()
print()
print("No Stop Words + lemmatization : tfidf , SGD")
classify(clf_SGD, 'train_nsw_lemma.pkl', 'test_nsw_lemma.pkl', vectorizer_tfidf)
print()
print("No Stop Words + clean text + lemmatization : tfidf , SGD")
classify(clf_SGD, 'train_nsw_limp_lemma.pkl', 'test_nsw_limp_lemma.pkl', vectorizer_tfidf)
print()
print()
print("No Stop Words + lemmatization : tfidf , SGD , SVD")
classify_svd(clf_SGD, 'train_nsw_lemma.pkl', 'test_nsw_lemma.pkl', vectorizer_tfidf)
print()
print("No Stop Words + clean text + lemmatization : tfidf , SGD , SVD")
classify_svd(clf_SGD, 'train_nsw_limp_lemma.pkl', 'test_nsw_limp_lemma.pkl', vectorizer_tfidf)
| EdiZ935/Clasificaci-n-de-20-noticias | clasification.py | clasification.py | py | 7,032 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "spacy.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 4... |
11037745361 | """Script that starts a P300 Speller. Run with 'python p300_speller' to show config window.
Default configuration loaded at startup is stored in /config_files/default.cfg. Creates an LSL stream of type
'P300_Marker' with one channel of 'int8' sending a marker every time an image flashes.
The script consists of three main classes.
ConfigParams: Stores all config parameters that are set in the configuration GUI
MainWindow: The main configuration window, launched at startup
P300Window: All logic concerning the Image flashing window. E.g. the flash-sequence is generated here.
"""
import configparser
import glob
import os
from collections import deque
from tkinter import (
EW,
IntVar,
Radiobutton,
StringVar,
Text,
Tk,
Toplevel,
W,
filedialog,
)
from tkinter.ttk import Button, Entry, Frame, Label
import numpy as np
from PIL import Image, ImageTk
from pylsl import IRREGULAR_RATE, StreamInfo, StreamOutlet, local_clock
MAX_FLASHES = 10000 # Maximum number of flashed images. Window will stop afterwards
class ConfigParams(object):
    """Stores all parameters that can be set in the MainWindow. Acts as model in MVC pattern."""

    def __init__(self):
        # Backing parser used for loading/saving the .cfg file.
        self.config_parser = configparser.RawConfigParser()
        # GUI Parameters
        # Tk variables so widgets can observe and edit the values directly.
        self.imagesize = IntVar()
        self.config_file_path = StringVar()
        self.images_folder_path = StringVar()
        self.flash_image_path = StringVar()
        self.number_of_rows = IntVar()
        self.number_of_columns = IntVar()
        self.lsl_streamname = StringVar()
        self.flash_mode = IntVar()
        self.flash_duration = IntVar()
        self.break_duration = IntVar()
        # Default values
        self.config_file_path.set(os.path.join(os.path.dirname(__file__), "conf_files/default.cfg"))
        try:
            self.read_from_file()
        except configparser.NoSectionError:
            # Missing/invalid default config is non-fatal; the user can
            # load another file from the GUI.
            print("Config file {} not found".format(self.config_file_path.get()))

    def save_to_file(self):
        """Saves the current configuration to self.config_file_path"""
        try:
            self.config_parser.add_section("Parameters")
        except configparser.DuplicateSectionError:
            # Section already exists (e.g. a config was loaded earlier).
            pass
        self.config_parser.set("Parameters", "Break Duration", self.break_duration.get())
        self.config_parser.set("Parameters", "Flash Duration", self.flash_duration.get())
        self.config_parser.set("Parameters", "Imagesize", self.imagesize.get())
        self.config_parser.set("Parameters", "Flash Mode", self.flash_mode.get())
        self.config_parser.set("Parameters", "LSL streamname", self.lsl_streamname.get())
        self.config_parser.set("Parameters", "Number of Rows", self.number_of_rows.get())
        self.config_parser.set("Parameters", "Number of Columns", self.number_of_columns.get())
        self.config_parser.set("Parameters", "Flash image path", self.flash_image_path.get())
        self.config_parser.set("Parameters", "Images Folder Path", self.images_folder_path.get())
        # Writing our configuration file to 'example.cfg'
        with open(self.config_file_path.get(), "w") as configfile:
            self.config_parser.write(configfile)

    def read_from_file(self):
        """Loads all configuration parameters from self.config_file_path"""
        self.config_parser.read(self.config_file_path.get())
        # Fallback assets shipped with the package, used when the config
        # file does not specify the image locations.
        images_folder_fallback = os.path.join(os.path.dirname(__file__), "number_images")
        flash_image_fallback = os.path.join(os.path.dirname(__file__), "flash_images", "einstein.jpg")
        self.imagesize.set(self.config_parser.getint("Parameters", "Imagesize"))
        self.images_folder_path.set(
            self.config_parser.get("Parameters", "Images Folder Path", fallback=images_folder_fallback)
        )
        self.flash_image_path.set(
            self.config_parser.get("Parameters", "Flash image path", fallback=flash_image_fallback)
        )
        self.number_of_rows.set(self.config_parser.getint("Parameters", "Number of Rows"))
        self.number_of_columns.set(self.config_parser.getint("Parameters", "Number of Columns"))
        self.lsl_streamname.set(self.config_parser.get("Parameters", "LSL Streamname"))
        self.flash_mode.set(self.config_parser.getint("Parameters", "Flash Mode"))
        self.flash_duration.set(self.config_parser.getint("Parameters", "Flash duration"))
        self.break_duration.set(self.config_parser.getint("Parameters", "Break duration"))
class MainWindow(object):
    """Handles all the configuration and starts flashing window.

    Builds one Tk form row per ConfigParams field, wires the config
    load/save buttons, and owns the read-only console used for status
    output while the speller runs.

    Args:
        master: Tkinter root window
    """
    def __init__(self, master: Tk):
        self.master = master
        master.title("P300 speller configuration")
        self.p300_window = None  # set when open_p300_window() is called
        # Variables
        self.usable_images = []
        self.image_labels = []
        self.flash_sequence = []
        self.flash_image = None
        self.sequence_number = 0
        self.lsl_output = None
        self.config = ConfigParams()
        # Widget definition
        # Widgets in this list are disabled while the flashing window is open.
        self.changeable_widgets = []
        # Row 0: config file path plus open / apply / save buttons.
        self.config_file_label = Label(self.master, text="Config File:")
        self.config_file_label.grid(row=0, column=0)
        self.config_file_entry = Entry(self.master, textvariable=self.config.config_file_path)
        self.config_file_entry.grid(row=0, column=1, sticky=EW)
        self.changeable_widgets.append(self.config_file_entry)
        self.open_conf_btn = Button(
            self.master,
            text="Open config file",
            command=lambda: self.open_file_update_entry(self.config.config_file_path),
        )
        self.open_conf_btn.grid(row=0, column=2, sticky=EW)
        self.changeable_widgets.append(self.open_conf_btn)
        self.use_conf_btn = Button(self.master, text="Apply", command=self.config.read_from_file)
        self.use_conf_btn.grid(row=0, column=3)
        self.changeable_widgets.append(self.use_conf_btn)
        self.save_settings_btn = Button(self.master, text="Save", command=self.config.save_to_file)
        self.save_settings_btn.grid(row=0, column=4)
        self.changeable_widgets.append(self.save_settings_btn)
        # Row 1: folder containing the stimulus images.
        self.images_folder_label = Label(self.master, text="Images folder:")
        self.images_folder_label.grid(row=1, column=0)
        self.images_folder_entry = Entry(self.master, textvariable=self.config.images_folder_path)
        self.images_folder_entry.grid(row=1, column=1, sticky=EW)
        self.changeable_widgets.append(self.images_folder_entry)
        self.open_images_dir_btn = Button(
            self.master,
            text="Open image folder",
            command=lambda: self.open_folder_update_entry(self.config.images_folder_path),
        )
        self.open_images_dir_btn.grid(row=1, column=2, sticky=EW)
        self.changeable_widgets.append(self.open_images_dir_btn)
        # Row 2: the image shown as the flash overlay.
        self.flash_image_label = Label(self.master, text="Flash image:")
        self.flash_image_label.grid(row=2, column=0)
        self.flash_image_file_entry = Entry(self.master, textvariable=self.config.flash_image_path)
        self.flash_image_file_entry.grid(row=2, column=1, sticky=EW)
        self.changeable_widgets.append(self.flash_image_file_entry)
        self.open_flash_dir_btn = Button(
            self.master, text="Open image", command=lambda: self.open_file_update_entry(self.config.flash_image_path)
        )
        self.open_flash_dir_btn.grid(row=2, column=2, sticky=EW)
        self.changeable_widgets.append(self.open_flash_dir_btn)
        # Rows 3-8: numeric and text parameters.
        self.imagesize_label = Label(self.master, text="Imagesize (px):")
        self.imagesize_label.grid(row=3, column=0)
        self.imagesize_entry = Entry(self.master, textvariable=self.config.imagesize)
        self.imagesize_entry.grid(row=3, column=1, sticky=W)
        self.changeable_widgets.append(self.imagesize_entry)
        self.number_of_rows_label = Label(self.master, text="Number of rows:")
        self.number_of_rows_label.grid(row=4, column=0)
        self.number_of_rows_entry = Entry(self.master, textvariable=self.config.number_of_rows)
        self.number_of_rows_entry.grid(row=4, column=1, sticky=W)
        self.changeable_widgets.append(self.number_of_rows_entry)
        self.number_of_columns_label = Label(self.master, text="Number of columns:")
        self.number_of_columns_label.grid(row=5, column=0)
        self.number_of_columns_entry = Entry(self.master, textvariable=self.config.number_of_columns)
        self.number_of_columns_entry.grid(row=5, column=1, sticky=W)
        self.changeable_widgets.append(self.number_of_columns_entry)
        self.lsl_streamname_label = Label(self.master, text="LSL Streamname:")
        self.lsl_streamname_label.grid(row=6, column=0)
        self.lsl_streamname_entry = Entry(self.master, textvariable=self.config.lsl_streamname)
        self.lsl_streamname_entry.grid(row=6, column=1, sticky=W)
        self.changeable_widgets.append(self.lsl_streamname_entry)
        self.flash_duration_label = Label(self.master, text="Flash duration (ms):")
        self.flash_duration_label.grid(row=7, column=0)
        self.flash_duration_entry = Entry(self.master, textvariable=self.config.flash_duration)
        self.flash_duration_entry.grid(row=7, column=1, sticky=W)
        self.changeable_widgets.append(self.flash_duration_entry)
        self.break_duration_label = Label(self.master, text="Break duration (ms):")
        self.break_duration_label.grid(row=8, column=0)
        self.break_duration_entry = Entry(self.master, textvariable=self.config.break_duration)
        self.break_duration_entry.grid(row=8, column=1, sticky=W)
        self.changeable_widgets.append(self.break_duration_entry)
        # Rows 9-10: flash mode radio buttons (1 = rows/columns, 2 = single image).
        self.flash_mode_label = Label(self.master, text="Flashmode:")
        self.flash_mode_label.grid(row=9, column=0)
        self.flash_mode_1_rb = Radiobutton(
            self.master,
            text="Rows and Columns (Sequence not pseudorandom yet!)",
            variable=self.config.flash_mode,
            value=1,
        )
        self.flash_mode_1_rb.grid(row=9, column=1, sticky=W)
        self.changeable_widgets.append(self.flash_mode_1_rb)
        self.flash_mode_2_rb = Radiobutton(self.master, text="Single images", variable=self.config.flash_mode, value=2)
        self.flash_mode_2_rb.grid(row=10, column=1, sticky=W)
        self.changeable_widgets.append(self.flash_mode_2_rb)
        self.set_flash_mode_rbs()
        # Read-only status console; print_to_console() temporarily re-enables it.
        self.text_console = Text(self.master)
        self.text_console.grid(row=11, column=0, rowspan=4, columnspan=5)
        self.text_console.configure(state="disabled")
        self.close_button = Button(self.master, text="Close", command=master.quit)
        self.close_button.grid(row=15, column=0)
        self.open_button = Button(self.master, text="Open", command=self.open_p300_window)
        self.open_button.grid(row=15, column=3)
        self.changeable_widgets.append(self.open_button)

    def set_flash_mode_rbs(self):
        """Select the radio button matching the configured flash mode."""
        if self.config.flash_mode.get() == 1:
            self.flash_mode_1_rb.select()
        else:
            self.flash_mode_2_rb.select()

    def open_folder_update_entry(self, entry_var):
        """Ask the user for a directory and store it in *entry_var* (no-op on cancel)."""
        new_path = filedialog.askdirectory()
        if new_path != "":
            entry_var.set(new_path)

    def open_file_update_entry(self, entry_var):
        """Ask the user for a file and store it in *entry_var* (no-op on cancel)."""
        new_path = filedialog.askopenfilename()
        if new_path != "":
            entry_var.set(new_path)

    def print_to_console(self, text_to_print):
        """Append a line of text to the read-only console widget."""
        if not isinstance(text_to_print, str):
            text_to_print = str(text_to_print)
        # A disabled Text widget rejects inserts, so enable it around the write.
        self.text_console.configure(state="normal")
        self.text_console.insert("end", text_to_print + "\n")
        self.text_console.configure(state="disabled")

    def disable_all_widgets(self):
        """Grey out all configuration widgets and minimise the window."""
        for widget in self.changeable_widgets:
            widget.configure(state="disabled")
        self.master.iconify()

    def enable_all_widgets(self):
        """Re-enable all configuration widgets and restore the window."""
        for widget in self.changeable_widgets:
            widget.configure(state="normal")
        self.master.deiconify()

    def open_p300_window(self):
        """Open the flashing window as a Toplevel and lock the config UI."""
        p300_window_master = Toplevel(self.master)
        self.p300_window = P300Window(p300_window_master, self, self.config)
        self.disable_all_widgets()
class P300Window(object):
    """All logic for the image flashing window.

    Loads the stimulus grid, builds a flash sequence, opens an LSL marker
    outlet, and drives the flash/unflash cycle via Tk ``after`` callbacks.

    Args:
        master: Tkinter Toplevel element
        parent: Parent that opened the window
        config: ConfigParams instance
    """
    def __init__(self, master: Toplevel, parent: MainWindow, config: ConfigParams):
        self.master = master
        self.parent = parent
        self.master.protocol("WM_DELETE_WINDOW", self.close_window)
        self.config = config
        self.running = 0  # 1 while the flashing loop is active
        self.image_labels = []
        self.sequence_number = 0  # index of the next element in flash_sequence
        self.lsl_output = None
        self.usable_images = []
        self.flash_sequence = []
        # Fix: initialise flash_image so a failed open_images() cannot leave
        # the attribute undefined (AttributeError on first flash).
        self.flash_image = None
        self.image_frame = Frame(self.master)
        self.image_frame.grid(
            row=0, column=0, rowspan=self.config.number_of_rows.get(), columnspan=self.config.number_of_columns.get()
        )
        self.start_btn_text = StringVar()
        self.start_btn_text.set("Start")
        self.start_btn = Button(self.master, textvariable=self.start_btn_text, command=self.start)
        self.start_btn.grid(row=self.config.number_of_rows.get() + 1, column=self.config.number_of_columns.get() - 1)
        self.pause_btn = Button(self.master, text="Pause", command=self.pause)
        self.pause_btn.grid(row=self.config.number_of_rows.get() + 1, column=self.config.number_of_columns.get() - 2)
        self.pause_btn.configure(state="disabled")
        self.close_btn = Button(self.master, text="Close", command=self.close_window)
        self.close_btn.grid(row=self.config.number_of_rows.get() + 1, column=0)
        # Initialization
        self.show_images()
        self.create_flash_sequence()
        self.lsl_output = self.create_lsl_output()

    def open_images(self):
        """Load and resize all stimulus images plus the flash overlay image."""
        self.usable_images = []
        image_paths = glob.glob(os.path.join(self.config.images_folder_path.get(), "*.jpg"))
        image_paths.extend(glob.glob(os.path.join(self.config.images_folder_path.get(), "*.png")))
        min_number_of_images = self.config.number_of_columns.get() * self.config.number_of_rows.get()
        if len(image_paths) < min_number_of_images:
            self.parent.print_to_console("To few images in folder: " + self.config.images_folder_path.get())
            return
        # Convert and resize images to the configured square size.
        size = (self.config.imagesize.get(), self.config.imagesize.get())
        for image_path in image_paths:
            image = Image.open(image_path)
            # Fix: Image.ANTIALIAS was an alias for LANCZOS and was removed in
            # Pillow 10; LANCZOS produces identical output.
            resized = image.resize(size, Image.LANCZOS)
            self.usable_images.append(ImageTk.PhotoImage(resized))
        flash_img = Image.open(self.config.flash_image_path.get())
        self.flash_image = ImageTk.PhotoImage(flash_img.resize(size, Image.LANCZOS))

    def show_images(self):
        """Arrange the loaded images in a rows x columns grid of Labels."""
        self.open_images()
        if not self.usable_images:
            self.parent.print_to_console("No images opened")
            return
        num_rows = self.config.number_of_rows.get()
        num_cols = self.config.number_of_columns.get()
        # Arrange images row-major; label index == r * num_cols + c.
        for r in range(0, num_rows):
            for c in range(0, num_cols):
                current_image = self.usable_images[r * num_cols + c]
                label = Label(self.image_frame, image=current_image)
                # Keep an explicit reference so Tk does not garbage-collect it.
                label.image = current_image
                label.grid(row=r, column=c)
                self.image_labels.append(label)

    def create_lsl_output(self):
        """Creates an LSL Stream outlet"""
        info = StreamInfo(
            name=self.config.lsl_streamname.get(),
            type="P300_Marker",
            channel_count=1,
            channel_format="int8",
            nominal_srate=IRREGULAR_RATE,
            source_id="marker_stream",
            handle=None,
        )
        # Fix: flash_mode is an IntVar; comparing the variable object itself to
        # an int was always False, so the flash_mode metadata was never written.
        if self.config.flash_mode.get() == 1:
            info.desc().append_child_value("flash_mode", "Row and Column")
        elif self.config.flash_mode.get() == 2:
            info.desc().append_child_value("flash_mode", "Single Value")
        info.desc().append_child_value("num_rows", str(self.config.number_of_rows.get()))
        info.desc().append_child_value("num_cols", str(self.config.number_of_columns.get()))
        return StreamOutlet(info)

    def create_flash_sequence(self):
        """Build the sequence of element indices to flash.

        Mode 1 flashes whole rows/columns (currently plain random samples);
        mode 2 flashes single images while guaranteeing that the same element
        does not repeat within roughly ``sequence_length`` milliseconds.
        """
        self.flash_sequence = []
        num_rows = self.config.number_of_rows.get()
        num_cols = self.config.number_of_columns.get()
        sequence_length = 700  # ms within which an element must not repeat
        distance_between_similar_elements = int((sequence_length / self.config.break_duration.get()) + 1)
        if self.config.flash_mode.get() == 1:
            self.parent.print_to_console("CAUTION: Row and Column flash mode currently uses only random samples!")
            self.flash_sequence = np.random.randint(0, num_rows + num_cols, 3000)
        elif self.config.flash_mode.get() == 2:
            flash_sequence = []
            maximum_number = num_rows * num_cols
            if maximum_number * 0.7 < distance_between_similar_elements:
                self.parent.print_to_console("No sequence could be created because the break duration is too short")
                return
            # Ring buffer of the most recently flashed elements; a candidate
            # is rejected while it is still inside this window.
            number_buffer = deque(maxlen=distance_between_similar_elements)
            for _ in range(0, MAX_FLASHES):
                while True:
                    new_number = int(np.random.randint(0, maximum_number))
                    if new_number not in number_buffer:
                        number_buffer.append(new_number)
                        flash_sequence.append(new_number)
                        break
            self.flash_sequence = flash_sequence

    def start(self):
        """Start (or resume) the flashing loop."""
        self.running = 1
        self.start_flashing()
        self.start_btn.configure(state="disabled")
        self.pause_btn.configure(state="normal")

    def pause(self):
        """Pause the flashing loop; position in the sequence is kept."""
        self.running = 0
        self.start_btn_text.set("Resume")
        self.start_btn.configure(state="normal")
        self.pause_btn.configure(state="disabled")

    def start_flashing(self):
        """Flash the next element and push its marker to LSL, then re-schedule."""
        if self.sequence_number == len(self.flash_sequence):
            self.parent.print_to_console("All elements flashed")
            self.running = 0
            self.sequence_number = 0
            return
        if self.running == 0:
            self.parent.print_to_console("Flashing paused at sequence number " + str(self.sequence_number))
            return
        element_to_flash = self.flash_sequence[self.sequence_number]
        self.sequence_number = self.sequence_number + 1
        timestamp = local_clock()
        print([element_to_flash + 1], timestamp)
        self.lsl_output.push_sample([element_to_flash + 1], timestamp)  # add 1 to prevent 0 in markers
        if self.config.flash_mode.get() == 1:
            self.flash_row_or_col(element_to_flash)
        elif self.config.flash_mode.get() == 2:
            self.flash_single_element(element_to_flash)
        # Schedule the next flash after the configured break.
        self.master.after(self.config.break_duration.get(), self.start_flashing)

    def change_image(self, label, img):
        """Swap the image shown by *label*, keeping a reference for Tk."""
        label.configure(image=img)
        label.image = img

    def flash_row_or_col(self, rc_number):
        """Flash one whole row (rc_number < num_rows) or column (otherwise)."""
        num_rows = self.config.number_of_rows.get()
        num_cols = self.config.number_of_columns.get()
        if rc_number < num_rows:
            for c in range(0, num_cols):
                cur_idx = rc_number * num_cols + c
                self.change_image(self.image_labels[cur_idx], self.flash_image)
        else:
            current_column = rc_number - num_rows
            for r in range(0, num_rows):
                cur_idx = current_column + r * num_cols
                self.change_image(self.image_labels[cur_idx], self.flash_image)
        self.master.after(self.config.flash_duration.get(), self.unflash_row_or_col, rc_number)

    def unflash_row_or_col(self, rc_number):
        """Restore the original images of the flashed row or column."""
        num_rows = self.config.number_of_rows.get()
        num_cols = self.config.number_of_columns.get()
        if rc_number < num_rows:
            for c in range(0, num_cols):
                cur_idx = rc_number * num_cols + c
                self.change_image(self.image_labels[cur_idx], self.usable_images[cur_idx])
        else:
            current_column = rc_number - num_rows
            for r in range(0, num_rows):
                cur_idx = current_column + r * num_cols
                self.change_image(self.image_labels[cur_idx], self.usable_images[cur_idx])

    def flash_single_element(self, element_no):
        """Flash a single image and schedule its restoration."""
        self.change_image(self.image_labels[element_no], self.flash_image)
        self.master.after(self.config.flash_duration.get(), self.unflash_single_element, element_no)

    def unflash_single_element(self, element_no):
        """Restore the original image of a single flashed element."""
        self.change_image(self.image_labels[element_no], self.usable_images[element_no])

    def close_window(self):
        """Unlock the configuration window and destroy this one."""
        self.parent.enable_all_widgets()
        self.master.destroy()
def main():
    """Launch the configuration window and enter the Tk event loop."""
    from tkinter import Tk

    root_window = Tk()
    MainWindow(root_window)
    root_window.mainloop()
| bstadlbauer/lsl-p300-speller | src/bstadlbauer/p300speller/p300_speller.py | p300_speller.py | py | 21,451 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "configparser.RawConfigParser",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tkinter.IntVar",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tkinter.StringVar",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tkinter... |
25160213168 | ## objectives ##
# objective - build dashboard using 'OldFaithful.csv'
# display as a scatterplot
# D = data of recordings in month (August)
# X duration of current eruptions in minutes
# Y waiting time until the next eruption
import dash
import dash_html_components as html  # NOTE(review): superseded by dash.html in Dash >= 2
import dash_core_components as dcc  # NOTE(review): superseded by dash.dcc in Dash >= 2
import plotly.graph_objs as go
import numpy as np
import pandas as pd

# Old Faithful recordings: X = eruption duration (minutes), Y = wait to next
# eruption (plotted divided by 60 below).
df = pd.read_csv('C:/Users/eugen/OneDrive/Main_Env/udemy_dash_course/Data/OldFaithful.csv')
# 'background' is currently unused; only 'text' is applied to the heading.
colors = {'background': '#111111', 'text': '#7fdbff'}
app = dash.Dash()
# Single-page layout: a heading plus one scatter plot whose marker size and
# colour are both driven by the data columns.
app.layout = html.Div(children=[
    html.H1('Hello Dash!',
            style={'textAlign': 'center', 'color': colors['text']}),
    dcc.Graph(id='example',
              figure={'data': [go.Scatter(x=df['X'], y=df['Y'] / 60, mode='markers',
                                          marker=dict(size=df['X'], color=df['Y'],
                                                      showscale=True, colorbar=dict(title='Time to Next Eruption')))],
                      'layout': go.Layout(title='Old Faithful Eruptions',
                                          xaxis={'title': 'Duration of Eruption'},
                                          yaxis={'title': 'Interval'})})])
if __name__ == '__main__':
app.run_server() | eugeniosp3/udemy_plotly_course | simple_dash exercise.py | simple_dash exercise.py | py | 1,476 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dash.Dash",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "dash_html_componen... |
8999728817 | from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
from data_processing.helpers.check_if_nans_exist import check_if_nans_exist
from data_processing.helpers.columns_that_contain_lists import columns_that_contain_lists
class RemoveNan(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer that drops rows with NaN or empty-list values.

    Columns listed in *keep_nans_in_columns* are exempt from the check.
    """
    def __init__(self, keep_nans_in_columns, suppress_output):
        # Columns whose NaNs are preserved instead of triggering row removal.
        self.keep_nans_in_columns = keep_nans_in_columns
        # Forwarded to check_if_nans_exist() to control its diagnostic output.
        self.suppress_output = suppress_output
    def fit(self, dataset: pd.DataFrame, y=None):
        """Stateless transformer: fitting is a no-op."""
        return self
def transform(self, dataset: pd.DataFrame, y=None):
""" This function removes all instances that have a NaN or None in all attributes that are not listed in the keep_nans_in_columns variable """
print('Removing nans...')
nans_exist = check_if_nans_exist(dataset, self.suppress_output)
if nans_exist:
all_columns = dataset.columns
columns_with_lists = columns_that_contain_lists(dataset)
columns_to_remove_nans_from = list(set(all_columns) - set(self.keep_nans_in_columns))
# drop items with nan values
dataset = dataset.dropna(subset=columns_to_remove_nans_from)
# drop items with empty lists
for column_with_lists in columns_with_lists:
if column_with_lists in columns_to_remove_nans_from:
# drop empty items (empty e.g. length of list is zero)
dataset = dataset[dataset[column_with_lists].map(lambda d: len(d)) > 0]
return dataset | arctic-source/game_recommendation | data_processing/transformers/RemoveNan.py | RemoveNan.py | py | 1,550 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_... |
from LoadData import *
import statsmodels.api as sm
import pandas as pd
import numpy as np
from diagnosticTests import *

# OLS model of per-capita CO2 emissions on per-capita GDP with one lag of
# each series and impulse dummies for 1989 and 2003.
data = loadAll()  # just returns the database
data['GDP_lag1'] = data['GDP_perCap'].shift(1)  # lag column for GDP variable
data['CO2_lag1'] = data['Annual_CO2_emissions_TperCap'].shift(1)  # lag column for co2 variable
y = data[['Annual_CO2_emissions_TperCap']]  # dependent variable
x = data[['GDP_perCap', 'GDP_lag1', 'CO2_lag1']]  # regressors
x['dummy1989'] = 0  # dummy column
x['dummy2003'] = 0  # dummy columns
# NOTE(review): assumes the DataFrame index contains labels '1989' and '2003'.
x.loc['1989', 'dummy1989'] = 1  # setting 1 for specific years....
x.loc['2003', 'dummy2003'] = 1
#x['GDP_lag1']=x['GDP_lag1'].fillna(0) #fixing na values
#x['CO2_lag1']=x['CO2_lag1'].fillna(0)
x = sm.add_constant(x)
# HC3 gives heteroskedasticity-robust standard errors; missing='drop' removes
# the rows lost to the one-period lag shift.
reg_ols = sm.OLS(y, x.astype(float), missing='drop').fit(cov_type='HC3')
#print(reg_ols.summary())
cusumTest(reg_ols.resid)
#normalityTest(reg_ols)
#LMtestAutocorrelation(reg_ols)
#ramseyResetTest(reg_ols)
#whiteTest(reg_ols)
| SalFin99/CO2_GPD | OLSmodels/LinearOLS_fixed.py | LinearOLS_fixed.py | py | 964 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "statsmodels.api.add_constant",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "statsmodels.api",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "statsmodels.api.OLS",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "stat... |
5169905792 | from PIL import Image
from django.shortcuts import render
import os
def _count_foreground_pixels(image_path):
    """Count pixels that are not pure black (RGB (0, 0, 0)) in *image_path*.

    Black pixels are treated as masked-out background; everything else is
    assumed to belong to the segmented papaya region.
    """
    img = Image.open(image_path)
    foreground = 0
    for pixel in img.getdata():
        # NOTE(review): assumes an RGB image; an RGBA image would represent
        # black as (0, 0, 0, 255) -- confirm the upstream segmentation output.
        if pixel != (0, 0, 0):
            foreground += 1
    return foreground


def papaya_find_ripe_raw():
    """Classify the papaya as raw or ripe.

    Compares the foreground pixel counts of the two segmented images
    ('raw_papaya.png' vs 'ripe_papaya.png') and returns one of
    'papaya is raw', 'papaya is ripe' or 'Not image in range'.
    """
    path = 'webs/color/papaya/'
    directory = os.listdir(path)
    # Fix: previously `data` was only assigned inside the `if`, so any other
    # directory content raised UnboundLocalError on `return data`.
    data = 'Not image in range'
    if len(directory) == 2:
        papaya_raw = _count_foreground_pixels('webs/color/papaya/raw_papaya.png')
        papaya_ripe = _count_foreground_pixels('webs/color/papaya/ripe_papaya.png')
        if papaya_raw > papaya_ripe:
            data = 'papaya is raw'
        elif papaya_raw < papaya_ripe:
            data = 'papaya is ripe'
        # Equal counts fall through to 'Not image in range', as before.
    return data
| LoneWolf1999-Th/Fruit_Detect | webs/controller/papaya_ripe_or_raw.py | papaya_ripe_or_raw.py | py | 1,228 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": ... |
8337571690 | import time
import os
import hashlib
from absl import app, flags, logging
from absl.flags import FLAGS
import tensorflow as tf
import lxml.etree
import tqdm
# Command-line flags for converting a PASCAL-VOC-style dataset to a TFRecord.
flags.DEFINE_string('data_dir', './my_data/voc2020_raw/VOCdevkit/VOC2020/',
                    'path to raw PASCAL VOC dataset')
flags.DEFINE_enum('split', 'train', [
    'train', 'val'], 'specify train or val spit')
flags.DEFINE_string('output_file', './my_data/voc2020_train.tfrecord', 'output dataset')
flags.DEFINE_string('classes', './my_data/classes.names', 'classes file')
flags.DEFINE_string('emphasized_class', 'smoke', 'Select in which class to emphasize the dataset. \nOne of \'smoke\', \'fire\', \'all\' ')
flags.DEFINE_string('excluded_name', 'captures', 'define a name that it`s included in _train|_val .txt file in order to exclude it from dataset.')
def build_example(annotation, class_map):
    """Convert one parsed VOC annotation into a ``tf.train.Example``.

    Args:
        annotation: dict produced by ``parse_xml(...)['annotation']``.
        class_map: mapping from class name to integer label.

    Returns:
        A ``tf.train.Example`` holding the encoded image bytes and its
        bounding boxes normalised to [0, 1].
    """
    img_path = os.path.join(
        FLAGS.data_dir, 'JPEGImages', annotation['filename'])
    with open(img_path, 'rb') as img:
        img_raw = img.read()
    # sha256 of the raw bytes identifies the image content.
    key = hashlib.sha256(img_raw).hexdigest()
    width = int(annotation['size']['width'])
    height = int(annotation['size']['height'])
    xmin = []
    ymin = []
    xmax = []
    ymax = []
    classes = []
    classes_text = []
    truncated = []
    views = []
    difficult_obj = []
    # Fix: the excluded-name check does not depend on the object, so it is
    # hoisted out of the loop. As before, excluded files still produce an
    # example, just with no boxes.
    exclude_objects = FLAGS.excluded_name in annotation['filename']
    if 'object' in annotation and not exclude_objects:
        for obj in annotation['object']:
            difficult = bool(int(obj['difficult']))
            difficult_obj.append(int(difficult))
            # Normalise pixel coordinates to [0, 1].
            xmin.append(float(obj['bndbox']['xmin']) / width)
            ymin.append(float(obj['bndbox']['ymin']) / height)
            xmax.append(float(obj['bndbox']['xmax']) / width)
            ymax.append(float(obj['bndbox']['ymax']) / height)
            classes_text.append(obj['name'].encode('utf8'))
            classes.append(class_map[obj['name']])
            truncated.append(int(obj['truncated']))
            views.append(obj['pose'].encode('utf8'))
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),
        'image/width': tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),
        'image/filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[
            annotation['filename'].encode('utf8')])),
        'image/source_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[
            annotation['filename'].encode('utf8')])),
        'image/key/sha256': tf.train.Feature(bytes_list=tf.train.BytesList(value=[key.encode('utf8')])),
        'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
        'image/format': tf.train.Feature(bytes_list=tf.train.BytesList(value=['jpeg'.encode('utf8')])),
        'image/object/bbox/xmin': tf.train.Feature(float_list=tf.train.FloatList(value=xmin)),
        'image/object/bbox/xmax': tf.train.Feature(float_list=tf.train.FloatList(value=xmax)),
        'image/object/bbox/ymin': tf.train.Feature(float_list=tf.train.FloatList(value=ymin)),
        'image/object/bbox/ymax': tf.train.Feature(float_list=tf.train.FloatList(value=ymax)),
        'image/object/class/text': tf.train.Feature(bytes_list=tf.train.BytesList(value=classes_text)),
        'image/object/class/label': tf.train.Feature(int64_list=tf.train.Int64List(value=classes)),
        'image/object/difficult': tf.train.Feature(int64_list=tf.train.Int64List(value=difficult_obj)),
        'image/object/truncated': tf.train.Feature(int64_list=tf.train.Int64List(value=truncated)),
        'image/object/view': tf.train.Feature(bytes_list=tf.train.BytesList(value=views)),
    }))
    return example
def parse_xml(xml):
    """Recursively convert an XML element tree into nested dicts.

    Leaf elements map tag -> text. Non-leaf elements map tag -> dict of
    children, except that repeated ``object`` children are collected into a
    list under the ``object`` key (a VOC annotation may contain several
    objects per image).
    """
    if len(xml) == 0:
        return {xml.tag: xml.text}
    parsed = {}
    for element in xml:
        subtree = parse_xml(element)[element.tag]
        if element.tag == 'object':
            parsed.setdefault(element.tag, []).append(subtree)
        else:
            parsed[element.tag] = subtree
    return {xml.tag: parsed}
def main(_argv):
    """Read the split's image list and write one TFRecord per annotation."""
    with open(FLAGS.classes) as f:
        # Class name -> integer label, in file order.
        class_map = {name: idx for idx, name in enumerate(
            f.read().splitlines())}
    logging.info("Class mapping loaded: {}".format(class_map))
    writer = tf.io.TFRecordWriter(FLAGS.output_file)
    # VOC-style list file, e.g. Main/smoke_train.txt: "<name> <flag>" per line.
    with open(os.path.join(FLAGS.data_dir, 'ImageSets', 'Main', '%s_%s.txt' % (FLAGS.emphasized_class, FLAGS.split))) as f:
        image_list = f.read().splitlines()
    logging.info("Image list loaded: %d", len(image_list))
    for image in tqdm.tqdm(image_list):
        name, _ = image.split()
        annotation_xml = os.path.join(
            FLAGS.data_dir, 'Annotations', name + '.xml')
        with open(annotation_xml) as f:
            annotation_xml = lxml.etree.fromstring(f.read())
        annotation = parse_xml(annotation_xml)['annotation']
        tf_example = build_example(annotation, class_map)
        writer.write(tf_example.SerializeToString())
    writer.close()
    logging.info("Done")
if __name__ == '__main__':
app.run(main)
| christos-vasileiou/yolov3tiny-edgetpu | tools/fire_smoke_tfrecord.py | fire_smoke_tfrecord.py | py | 5,359 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "absl.flags.DEFINE_string",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_enum",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "absl.flags... |
38782305735 | import logging
from datetime import datetime, timedelta
import random
import pytest
import pytz
from show_my_solutions.dbmanager import Submission
OJS = ['POJ', 'LeetCode', 'Codeforces', 'TopCoder', 'HackerRank', 'ACM']
MAX_ROW = 100
LOGGER = logging.getLogger(__name__)
def setup_module(module):
    """pytest hook: point the db manager at a dedicated test database."""
    from show_my_solutions.dbmanager import start_database
    start_database(name='sms_test.db', echo=False)
def gen_sub(oj, problem_id, seconddelta=None):
    """Build a random Submission for *oj*/*problem_id*.

    *seconddelta* shifts the submit time (seconds from now, UTC); a random
    offset in [-10**6, 10**6] is used when omitted.
    """
    if seconddelta is None:
        # Fix: random.randint requires integer bounds; the previous float
        # arguments (-1e6, 1e6) are deprecated since Python 3.10 and raise
        # a TypeError on Python >= 3.12.
        seconddelta = random.randint(-10 ** 6, 10 ** 6)
    return Submission(oj,
                      problem_id,
                      'Test {}'.format(random.randint(0, MAX_ROW)),
                      '{}.com/test_{}.html'.format(oj.lower(), problem_id),
                      datetime.now(tz=pytz.utc) + timedelta(seconds=seconddelta))
def gen_rand_subs(n=MAX_ROW, sort=False):
    """Generate *n* submissions for random judges, optionally time-sorted."""
    subs = [gen_sub(random.choice(OJS), str(index)) for index in range(n)]
    return sorted(subs, key=lambda sub: sub.submit_time) if sort else subs
def refill_submissions(data=None):
    """Reset all tables and record *data* (random submissions by default).

    Returns the list that was recorded so tests can compare against it.
    """
    from show_my_solutions.dbmanager import record_submissions, _reset_tables
    _reset_tables()
    if data is None:
        data = gen_rand_subs()
    record_submissions(data)
    return data
def test_record():
    """record_submissions deduplicates on (oj, problem_id) and persists."""
    from show_my_solutions.dbmanager import (fetch_submissions, record_submissions)
    def compare(data):
        # Expected view of the database: unique per (oj, problem_id), sorted
        # by submit time, with 1-based pids and lower-cased OJ names.
        result = fetch_submissions()
        data = {(d.oj, d.problem_id): d.clone() for d in data}
        data = sorted(data.values(), key=lambda x: x.submit_time)
        for i, d in enumerate(data):
            d.pid = i + 1
            d.oj = d.oj.lower()
        assert data == result
    data = refill_submissions()
    compare(data)
    # Test duplicated (oj, problem_id)
    new_sub = gen_sub(OJS[0], '-1')
    data.extend([new_sub, new_sub])
    refill_submissions(data)
    compare(data)
    # Record empty list
    refill_submissions([])
    compare([])
def test_milestone():
    """Milestones hide all submissions up to and including the given pid."""
    from sqlalchemy.exc import IntegrityError
    from show_my_solutions.dbmanager import fetch_submissions, add_milestone
    # Strictly increasing submit times so pids are deterministic (1..MAX_ROW).
    data = [gen_sub(OJS[0], str(i), i) for i in range(MAX_ROW)]
    refill_submissions(data)
    ms = MAX_ROW / 2
    add_milestone('rand_tester', ms)
    result = fetch_submissions('rand_tester')
    pids = [x.pid for x in result]
    assert min(pids) == ms + 1
    assert len(set(pids)) == MAX_ROW - ms
    assert max(pids) == MAX_ROW
    # Out-of-bound milestone should not take effect because of foreign key
    with pytest.raises(IntegrityError):
        add_milestone('rand_tester', MAX_ROW + 1)
    add_milestone('rand_tester', MAX_ROW)
    assert fetch_submissions('rand_tester') == []
def test_latest_problem_id():
    """get_lastest_problem_id returns the most recently submitted id per OJ."""
    from show_my_solutions.dbmanager import (record_submissions, get_lastest_problem_id)
    refill_submissions([])
    # Empty database: no judge has any submissions yet.
    for oj in OJS:
        assert get_lastest_problem_id(oj) is None
    latest = {}
    data = []
    for i in range(MAX_ROW):
        oj = random.choice(OJS)
        # Fix: random.randint requires integer bounds; 1e5 is a float and
        # raises a TypeError on Python >= 3.12.
        problem_id = str(random.randint(0, 10 ** 5))
        latest[oj] = problem_id
        data.append(gen_sub(oj, problem_id, i))
    record_submissions(data)
    # Fix: '\n'.format(...) has no placeholder and silently ignored its
    # argument; join the rows so they actually appear in the debug log.
    LOGGER.debug('\n'.join(map(str, data)))
    for oj in OJS:
        assert get_lastest_problem_id(oj) == latest.get(oj, None)
@pytest.fixture
def reactor():
    """Provide a Reactor built from the default application config."""
    from show_my_solutions.app import get_config, Reactor
    return Reactor(get_config())
def test_trello_handler(reactor):
    """Smoke test: the trello handler accepts a time-sorted batch of submissions."""
    from show_my_solutions.handlers import build_handler
    handler = build_handler('trello', reactor)
    handler.upload(gen_rand_subs(10, True))
def test_leetcode_scraper(reactor):
    """Smoke test: the leetcode scraper can fetch without raising."""
    from show_my_solutions.scrapers import build_scraper
    from bs4 import BeautifulSoup  # NOTE(review): appears unused here
    # _reset_tables()
    lcs = build_scraper('leetcode', reactor)
    lcs.fetch()
| yehzhang/Show-My-Solutions | tests.py | tests.py | py | 3,816 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "show_my_solutions.dbmanager.start_database",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 21,
"usage_type": "call"
},
{
"api_n... |
19707782223 | from setuptools import setup, find_packages
import os
# Long description comes from the README next to this setup.py.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as README:
    DESCRIPTION = README.read()
# Runtime dependencies are listed one per line in requirements.txt.
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(
name='OpenReader',
version='0.1',
packages = find_packages(),
include_package_data = True,
author = "Not Google",
license='LICENSE.md',
long_description=DESCRIPTION,
description="Reading RSS feeds like it's 2012",
url="https://github.com/craigsonoffergus/OpenReader",
install_requires=required,
) | craigsonoffergus/OpenReader | setup.py | setup.py | py | 580 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_n... |
34468081224 | import pyarrow.compute as pc
import pyarrow as pa
import numpy as np
from typing import Iterator, Tuple, List
from ..model.types import (
SubjectSymbol,
SourceSymbol,
ExchangeSymbol,
InstrumentSymbol,
MarketSymbol,
Timestamp,
FileId,
)
DAY_NANOSECONDS = 24 * 60 * 60 * 10 ** 9
PARTITION_COLUMNS = ["subject", "source", "exchange", "instrument"]
def _hash_array(array: pa.ChunkedArray) -> Tuple[int, pa.ChunkedArray]:
    """Dictionary-encode one column and return (cardinality, int64 codes).

    Already-dictionary-encoded columns are used as-is; the cardinality is
    read from the first chunk's dictionary.
    """
    if isinstance(array.type, pa.DictionaryType):
        encoded = array
    else:
        encoded = array.dictionary_encode()
    cardinality = len(encoded.chunk(0).dictionary)
    code_chunks = [piece.indices.cast(pa.int64()) for piece in encoded.iterchunks()]
    return cardinality, pa.chunked_array(code_chunks)
def _hash_arrays(arrays: List[pa.ChunkedArray]) -> Tuple[int, pa.ChunkedArray]:
    """Combine several columns into one hash column via mixed-radix encoding.

    Returns (product of cardinalities, combined int64 codes). A leading
    column with a single distinct value cannot split any run and is skipped.
    """
    head, *tail = arrays
    head_max, head_hashes = _hash_array(head)
    if not tail:
        return head_max, head_hashes
    tail_max, tail_hashes = _hash_arrays(tail)
    if head_max == 1:
        return tail_max, tail_hashes
    combined = pc.add(pc.multiply(head_hashes, tail_max), tail_hashes)
    return head_max * tail_max, combined
def _run_length_encoding(arrays: List[pa.ChunkedArray]) -> Iterator[Tuple[int, int]]:
    """Yield (start, stop) index pairs for runs of identical combined keys."""
    _, hashes = _hash_arrays(arrays)
    values = hashes.to_numpy()
    # Inspired by https://gist.github.com/nvictus/66627b580c13068589957d6ab0919e66
    # Run boundaries: position 0 plus every index where the value changes.
    starts = np.r_[0, np.flatnonzero(np.diff(values)) + 1]
    stops = np.r_[starts[1:], len(values)]
    return zip(starts.tolist(), stops.tolist())
def _fileid_from_chunk(chunk: pa.Table) -> FileId:
    """Build the FileId for a homogeneous chunk from its first row.

    All rows in *chunk* share subject/source/exchange/instrument (it comes
    from one run of partition_records), so reading index 0 is sufficient.
    """
    timestamp = Timestamp(chunk["time"][0].as_py())
    subject = SubjectSymbol(chunk["subject"][0].as_py())
    source = SourceSymbol(chunk["source"][0].as_py())
    exchange = ExchangeSymbol(chunk["exchange"][0].as_py())
    instrument = InstrumentSymbol.from_symbol(chunk["instrument"][0].as_py())
    market = MarketSymbol(exchange, instrument)
    file = FileId(subject, source, market, timestamp)
    return file
def partition_records(records: pa.Table) -> Iterator[Tuple[FileId, pa.Table]]:
time = records["time"].cast(pa.int64())
assert records["time"].type.tz == "UTC", "time column must be in UTC"
assert np.all(np.diff(time.to_numpy()) >= 0), "time column must be monotonic"
date = pc.divide(time, DAY_NANOSECONDS)
partition_columns = [records[name] for name in PARTITION_COLUMNS] + [date]
for start, stop in _run_length_encoding(partition_columns):
chunk = records[start:stop]
yield _fileid_from_chunk(chunk), chunk.drop(PARTITION_COLUMNS)
| walling/trading | lib/datatool/write/partitioning.py | partitioning.py | py | 2,619 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pyarrow.ChunkedArray",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pyarrow.DictionaryType",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pyarrow.chunked_array",
"line_number": 26,
"usage_type": "call"
},
{
"api_n... |
3215553627 | from django.db import models
from django.contrib.auth import get_user_model
from django.utils import timezone
from datetime import datetime
from django.utils.translation import gettext_lazy as _
from service.models import Skill
User = get_user_model()
class Expert(models.Model):
user = models.OneToOneField(User, verbose_name='کاربر', on_delete=models.CASCADE)
national_code = models.CharField(_('کد ملی'), max_length=10)
father_name = models.CharField(_('نام پدر'), max_length=26)
expert_province = models.CharField(_('استان'), max_length=100)
expert_city = models.CharField(_('شهر'), max_length=100)
MALE = 1
FEMALE = 2
GENDER_CHOICE = [
(MALE, 'MALE'),
(FEMALE, 'FEMALE')
]
gender = models.PositiveSmallIntegerField(_('جنسیت'), choices=GENDER_CHOICE)
WOMEN = 1
DONE = 2
EXEMPT = 3
SUBJECT = 4
MILITARY_SERVICE_CHOICES = [
(WOMEN, 'WOMEN'),
(DONE, 'DONE'),
(EXEMPT, 'EXEMPT'),
(SUBJECT, 'SUBJECT')
]
military_service = models.PositiveSmallIntegerField(
_('سربازی'), choices=MILITARY_SERVICE_CHOICES
)
SINGLE = 1
MARRIED = 2
MARRIED_CHOICES = [
(SINGLE, 'SINGLE'),
(MARRIED, 'MARRIED')
]
married_status = models.PositiveSmallIntegerField(_('وضعیت تاهل'), choices=MARRIED_CHOICES)
is_active_expert_expire_time = models.DateTimeField(_('تاریخ غیرفعال شدن اکانت'), default=timezone.now)
expert_lat = models.DecimalField(_('expert lat'), max_digits=9, decimal_places=6)
expert_long = models.DecimalField(_('expert long'), max_digits=9, decimal_places=6)
skills = models.ManyToManyField(
Skill, verbose_name='مهارت ها', related_name='experts', through='Skillship'
)
created_at = models.DateTimeField(_('created_at'), auto_now_add=True)
updated_at = models.DateTimeField(_('updated_at'), auto_now=True)
def __str__(self):
return self.user.get_full_name()
def count_skills(self):
pass
def skills_list(self):
pass
@property
def is_active(self):
now = timezone.now()
return self.is_active_expert_expire_time > now
class Meta:
verbose_name = _('متخصص')
verbose_name_plural = _('متخصصان')
class Skillship(models.Model):
expert = models.ForeignKey(Expert, verbose_name='متخصص', on_delete=models.CASCADE)
skill = models.ForeignKey(Skill, verbose_name='مهارت', on_delete=models.CASCADE)
image_of_evidence = models.ImageField(_('تصویر مدرک'), upload_to='evidences')
description = models.TextField(_('توضیحات'))
class Meta:
verbose_name = _('تخصص')
verbose_name_plural = _('تخصص ها')
| amoo-sajad/arp-project | expert/models.py | models.py | py | 2,825 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"ap... |
6900603378 | import os
import jinja2
import yaml
def load_config(config_directory, env_file, config_env_overrides=None):
if config_env_overrides is None:
config_env_overrides = {}
with open(os.path.join(config_directory, env_file)) as env_fd:
config_env = yaml.load(env_fd)
for key, value in config_env_overrides.items():
config_env[key] = value
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(config_directory),
trim_blocks=True,
lstrip_blocks=True,
)
sources = (
entry
for entry in os.listdir(config_directory)
if entry != env_file
and os.path.isfile(os.path.join(config_directory, entry))
)
result = config_env.copy()
for source in sources:
template = jinja_env.get_template(source)
yaml_config = template.render(config_env)
result[source.split(".")[0]] = yaml.load(yaml_config)
return result
| vvgolubev/blabbermouth | blabbermouth/util/config.py | config.py | py | 952 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_nu... |
25888909951 | from django import forms
from django.shortcuts import render, redirect
from .forms import DetailForm
from datetime import datetime
import string
import csv
# Create your views here.
def index(request):
print("HELLO")
if request.method == 'POST':
form = DetailForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
email = form.cleaned_data['email']
str = form.cleaned_data['str']
data = detailsfunc(name, email, str)
return render(request, 'cards.html', {'data' : data})
else:
print("ERROR")
else:
form = DetailForm(request.POST)
msg = "ERROR"
return render(request, 'index.html', {'data' : msg})
def detailsfunc(name, email, str):
#main extractor
problem = str
data=dict()
txts = problem.split(" ")
duration = ""
try:
for i in range(len(txts)):
if txts[i].lower() == "since" or txts[i].lower() == "from":
duration = txts[i+1]+" "+txts[i+2]
break
except:
duration=""
data['duration'] = duration.translate(str.maketrans('', '', string.punctuation))
symptom = ""
try:
z=0
txts = problem.split(" ")
for i in range(len(txts)):
if txts[i].lower() in ("have","having","feel","feeling"):
si = i
if txts[i] == "since" or txts[i] == "from":
sii = i
z=1
break
if z == 1:
for x in txts[si:sii]:
symptom += x+" "
z=0
except:
z=0
symptom = problem
data['symptom'] = symptom.translate(str.maketrans('', '', string.punctuation))
aggrv=""
txt = problem.lower()
for i in ("during","while","when","increases","decreases"):
k=-1;s=''
try:
k = txt.find(i)
if k >= 0:
s = txt[k:txt.index('.',k)]
aggrv=s
break
except:
aggrv=""
pass
data['name'] = name
dt = datetime.now()
strg = dt.strftime('%d %B %Y')
data['date']=strg
problem = problem.replace('\n','')
data['aggrv']=aggrv.translate(str.maketrans('', '', string.punctuation))
data['story']=problem.translate(str.maketrans('', '', string.punctuation))
print(data['symptom'])
with open("doctor.csv",'a') as f:
f.write("\n"+data['name'] + "," +data['story'] + "," +data['duration'] + "," +data['symptom'] + "," +data['aggrv'] + "," +data['date'] )
return data
def datatable(request):
n,st,dr,sp,dt,af = [],[],[],[],[],[]
with open('E:\hackathon\doctor.csv', 'r') as file:
reader = csv.reader(file)
k = 0
print("-")
for row in reader:
if k != 0:
print(row[0])
print(row[0]+"-")
n.append(row[0])
st.append(row[1])
dr.append(row[2])
sp.append(row[3])
af.append(row[4])
dt.append(row[5])
else:
k+=1
data = zip(n,st,dr,sp,af,dt)
print(dt)
return render(request, 'table.html',{'data':data})
| rupenchitroda/hackathon | DocOnGo/views.py | views.py | py | 3,266 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "forms.DetailForm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "forms.DetailForm",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.shor... |
32497373603 | # https://leetcode.com/problems/middle-of-the-linked-list/
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def middleNode(self, head: Optional[ListNode]) -> Optional[ListNode]:
first, second = head, head
while first and first.next:
first = first.next.next
second = second.next
return second
| augini/algorithms_ds | Leetcode/876.py | 876.py | py | 452 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 12,
"usage_type": "name"
}
] |
70601213475 |
from typing import Any
from django import http
from django.shortcuts import render, redirect
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from common.models import Insumo, Proveedor
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.http.response import JsonResponse
from django.utils.decorators import method_decorator
import json
# Create your views here.
class InsumoView(View):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def get(self, request, id=0):
if id > 0:
insumos = list(Insumo.objects.filter(idinsumo=id).values())
if len(insumos) > 0:
insumo = insumos[0]
datos = {'mensaje': 'exito', 'insumo': insumo}
else:
datos = {'mensaje': 'No se encontró el insumo'}
return JsonResponse(datos)
else:
insumos = list(Insumo.objects.values())
if len(insumos) > 0:
datos = {'mensaje': 'exito', 'cantidad': len(insumos), 'insumos': insumos}
else:
datos = {'mensaje': 'No se encontraron insumos'}
return JsonResponse(datos)
def post(self, request):
jd = json.loads(request.body)
proveedor_id=jd['proveedor_id']
try:
prov = Proveedor.objects.get(nombre=proveedor_id)
except Proveedor.DoesNotExist:
prov = None
if prov:
insumo = Insumo.objects.create(descripcion=jd['descripcion'], precio_unitario=jd['precio_unitario'], cantidad_disponible=jd['cantidad_disponible'], tipo_medida=jd['tipo_medida'], categoria=jd['categoria'], proveedor=prov)
datos = {'mensaje': 'success'}
else:
datos = {'mensaje': 'El rubro no existe'}
return JsonResponse(datos)
def patch(self, request, id):
jd = json.loads(request.body)
insumos = list(Insumo.objects.filter(idinsumo=id).values())
if len(insumos) > 0:
insumo = Insumo.objects.get(idinsumo=id)
insumo.descripcion = jd['descripcion']
insumo.cantidad_disponible = jd['cantidad_disponible']
insumo.tipo_medida = jd['tipo_medida']
insumo.categoria = jd['categoria']
insumo.precio_unitario = jd['precio_unitario']
insumo.save()
datos = {'mensaje': 'Insumo actualizado correctamente'}
else:
datos = {'mensaje': 'No se encontró el insumo'}
return JsonResponse(datos) | leanmsan/proyecto-ITSE | NeumaticosSgo/insumos/views.py | views.py | py | 2,674 | python | es | code | 2 | github-code | 1 | [
{
"api_name": "django.views.View",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.utils.decorators.method_decorator",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 17,
"usage_type": "a... |
72919968353 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import (
IndexView,
AutorListCreateAPIView,
AutorRetrieveUpdateDestroyAPIView,
CategoriaListCreateAPIView,
CategoriaRetrieveUpdateDestroyAPIView,
EditorialListCreateAPIView,
EditorialRetrieveUpdateDestroyAPIView,
LibroViewSet,
)
# Configurar el enrutador
router = DefaultRouter()
router.register(r'libros', LibroViewSet)
urlpatterns = [
path('', IndexView.as_view(), name='api-index'),
path('autores/', AutorListCreateAPIView.as_view(), name='api-autor-list'),
path('autores/<int:autor_id>/', AutorRetrieveUpdateDestroyAPIView.as_view(), name='api-autor-detail'),
path('categorias/', CategoriaListCreateAPIView.as_view(), name='api-categoria-list'),
path('categorias/<int:categoria_id>/', CategoriaRetrieveUpdateDestroyAPIView.as_view(), name='api-categoria-detail'),
path('editoriales/', EditorialListCreateAPIView.as_view(), name='api-editorial-list'),
path('editoriales/<int:editorial_id>/', EditorialRetrieveUpdateDestroyAPIView.as_view(), name='api-editorial-detail'),
path('libros/', LibroViewSet.as_view({'post': 'create'}), name='api-libro-create'), # Agregar esta línea
path('admin/', include(router.urls)),
]
| aaronbarra040998/biblioteca-v | proyecto/api/urls.py | urls.py | py | 1,291 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.LibroViewSet",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_... |
5542013576 | from django.shortcuts import redirect, render
from core.models import Jornada
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from django.core.serializers import serialize
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse, HttpResponseRedirect
from core.forms.jornada.forms import JornadaForm
from django.urls import reverse_lazy
class JornadaListView(ListView):
model = Jornada
form_class = JornadaForm
template_name = "jornada/list.html"
def post(self, request, *args, **kwargs):
data = {}
try:
action = request.POST['action']
if action == 'searchdata':
data = []
for i in Jornada.objects.filter(jo_estado=True):
data.append(i.to_json())
else:
data["error"] = "Ha ocurrido un error"
except Exception as e:
data["error"] = str(e)
return JsonResponse(data, safe=False)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["create_url"] = reverse_lazy("jornada:create")
context["title"] = 'Listado de jornadas'
return context
class JornadaCreateView(CreateView):
model = Jornada
form_class = JornadaForm
template_name = "crud/form.html"
success_url = reverse_lazy("jornada:list")
def post(self, request, *args, **kwargs):
data = {}
try:
form = self.get_form()#CategoryForm(request.POST))
print(form)
data = form.save()
except Exception as e:
data["error"] = str(e)
return JsonResponse(data)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["list_url"] = reverse_lazy("jornada:list")
context["title"] = 'Crear jornada'
return context
class JornadaUpdateView(UpdateView):
model = Jornada
form_class = JornadaForm
template_name = "crud/form.html"
def dispatch(self, request, *args, **kwargs):
self.object = self.get_object()
return super().dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = {}
try:
form = self.get_form()#CategoryForm(request.POST)
data = form.save()
except Exception as e:
data["error"] = str(e)
return JsonResponse(data)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["list_url"] = reverse_lazy("jornada:list")
context["title"] = 'Modificar jornada'
return context
class JornadaDeleteView(DeleteView):
model = Jornada
form_class = JornadaForm
template_name = "crud/delete.html"
def dispatch(self, request, *args, **kwargs):
self.object = self.get_object()
return super().dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = {}
try:
self.object.delete()
except Exception as e:
data["error"] = str(e)
return JsonResponse(data)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["list_url"] = reverse_lazy("jornada:list")
context["title"] = 'Eliminar jornada'
context["item"] = self.object.__str__()
return context
| AdolfoCunquero/Colegio | core/views/jornada/views.py | views.py | py | 3,662 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "core.models.Jornada",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "core.forms.jornada.forms.JornadaForm",
"line_number": 14,
"usage_type": "name"
},
{... |
5593742103 | tags = set()
fo = open("a_example.txt","r")
photos = []
for i in fo:
photos.append(i)
photos = [photos[0]]+[photos[1]]+[photos[2]+photos[3]]+[photos[4]]
photos[2] = "H 3 selfie smile garden\n"
# print(photos)
for i in range(len(photos)):
x = len(photos[i])
photos[i] = photos[i][:x-1]
n = int(photos[0])
n = n-1
photos = photos[1:]
for i in range(len(photos)):
photos[i] = list(photos[i].split(" "))
for j in range(2,len(photos[i])):
tags.add(photos[i][j])
tags = list(tags)
t = len(tags)
print(tags)
matrix = [[False for x in range(t)] for y in range(n)]
print(photos)
for i in range(n):
for j in range(t):
if tags[j] in photos[i]:
matrix[i][j] = True
print(matrix)
from itertools import combinations
Slides = []
Slides.append(0)
used = [False]*n
used[0] = True
for i in range(n):
if i!=0:
if used[i]:
continue
else:
Slides.append(i)
used[i]=True
present = []
for j in range(len(matrix[i])):
if matrix[i][j]:
present.append(j)
# print(present)
prcombs = list(combinations(present,int(len(present)/2)))
# print(prcombs,"\n")
for j in prcombs:
cl = len(j)
grandflag = 0
for k in range(i+1,n):
if used[k]:
continue
flag = 0
for l in range(cl):
if matrix[k][j[l]]==False:
flag = 1
if flag==0:
Slides.append(k)
used[k] = True
grandflag = 1
break
if grandflag==1:
break
print(Slides) | mayank-kumar-giri/Competitive-Coding | HashCode/test.py | test.py | py | 1,649 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.combinations",
"line_number": 55,
"usage_type": "call"
}
] |
21245458777 | from models import *
from dataset import *
import argparse
import os
import glob
import tqdm
from torchvision.utils import make_grid
from PIL import Image, ImageDraw
import skvideo.io
import ssl
import cv2
import json
import matplotlib.pyplot as plt
import numpy as np
import av
def extract_frames(video_path):
frames = []
video = av.open(video_path)
for frame in video.decode(0):
yield frame.to_image()
if __name__ == "__main__":
ssl._create_default_https_context = ssl._create_unverified_context
parser = argparse.ArgumentParser()
parser.add_argument("--video_path", type=str, default="test/test_videos", help="Path to video")
parser.add_argument("--save_path", type=str, default="test/test_results", help="Path to save results")
parser.add_argument("--image_dim", type=int, default=112, help="Height / width dimension")
parser.add_argument("--channels", type=int, default=3, help="Number of image channels")
parser.add_argument("--latent_dim", type=int, default=512, help="Dimensionality of the latent representation")
parser.add_argument("--checkpoint_model1", type=str, default="model_checkpoints/ConvLSTM_20.pth", help="Optional path to checkpoint model")
parser.add_argument("--checkpoint_model2", type=str, default="model_checkpoints/ConvLSTM_Flow.pth", help="Optional path to checkpoint model")
opt = parser.parse_args()
print(opt)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_shape = (opt.channels, opt.image_dim, opt.image_dim)
transform = transforms.Compose(
[
transforms.Resize(input_shape[-2:], Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
labels = ['Phoning','ApplyingMakeUpOnLips','BrushingTeeth','CleaningFloor','CleaningWindows','Drinking','FoldingTextile','Ironing','PlayingHarmonica','TakingPhotosOrVideos']
if not os.path.exists(opt.save_path):
os.mkdir(opt.save_path)
model1 = ConvLSTM(num_classes=len(labels), latent_dim=opt.latent_dim)
model1.to(device)
model1.load_state_dict(torch.load(opt.checkpoint_model2, map_location=torch.device('cpu')))
model1.eval()
# Define model and load model checkpoint
model2 = ConvLSTM(num_classes=len(labels), latent_dim=opt.latent_dim)
model2.to(device)
model2.load_state_dict(torch.load(opt.checkpoint_model1, map_location=torch.device('cpu')))
model2.eval()
# Extract predictions
for video in glob.glob(os.path.join(opt.video_path,'*.mp4')):
output_frames = []
y = []
record = []
cap=cv2.VideoCapture(video)
fps = cap.get(5)
video_name = video.split('.mp4')[0].split('/')[-1]
for j, frame in enumerate(tqdm.tqdm(extract_frames(video), desc="Processing frames")):
if j == 0:
first_frame = np.array(frame)
resize_dim = 600
max_dim = max(first_frame.shape)
scale = resize_dim/max_dim
first_frame = cv2.resize(first_frame, None, fx=scale, fy=scale)
prev_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)
mask = np.zeros_like(first_frame)
mask[..., 1] = 255
else:
cur_frame = np.array(frame)
gray = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)
gray = cv2.resize(gray, None, fx=scale, fy=scale)
flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None, pyr_scale = 0.5, levels = 5, winsize = 11, iterations = 5, poly_n = 5, poly_sigma = 1.1, flags = 0)
magnitude, angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
mask[..., 0] = angle * 180 / np.pi / 2
mask[..., 2] = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)
cur_frame = cv2.resize(cur_frame, None, fx=scale, fy=scale)
dense_flow = cv2.addWeighted(cur_frame, 1,rgb, 2, 0)
dense_flow = Image.fromarray(dense_flow.astype('uint8')).convert('RGB')
image_tensor1 = Variable(transform(dense_flow)).to(device)
image_tensor1 = image_tensor1.view(1, 1, *image_tensor1.shape)
prev_gray = gray
image_tensor2 = Variable(transform(frame)).to(device)
image_tensor2 = image_tensor2.view(1, 1, *image_tensor2.shape)
# Get label prediction for frame
with torch.no_grad():
prediction = model2(image_tensor2)
if j != 0:
prediction1 = model1(image_tensor1)
prediction = (prediction+prediction1)/2
label_id = prediction.argmax(1).item()
if label_id == 0:
predicted_label = 'Phoning'
record.append([j/fps, torch.max(prediction).item()])
y.append('Phoning')
else:
predicted_label = ''
y.append('Other')
# Draw label on frame
d = ImageDraw.Draw(frame)
d.text(xy=(10, 10), text=predicted_label, fill=(255, 255, 255))
output_frames += [frame]
# Create video from frames
writer = skvideo.io.FFmpegWriter(os.path.join(opt.save_path,video_name+'.avi'))
for frame in tqdm.tqdm(output_frames, desc="Writing to video"):
writer.writeFrame(np.array(frame))
writer.close()
record = {'Phoning': record}
json_str = json.dumps(record)
with open(opt.save_path+'/timeLabel_'+video_name+'.json', 'w') as json_file:
json_file.write(json_str)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title('Plot '+video_name)
plt.xlabel('Time')
plt.ylabel('Label')
x = np.arange(0,len(y))/fps
ax1.scatter(x,y,c = 'r',marker = 'o')
plt.savefig(opt.save_path+'/'+video_name+'_plot.png',bbox_inches='tight')
| LeoYin/Course-Project-Action-Recognition | test_on_video.py | test_on_video.py | py | 6,161 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "av.open",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ssl._create_default_https_context",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_unverified_context",
"line_number": 25,
"usage_type": "attribute"
},
{
... |
3649536709 | #!/bin/python
"""Stastics of GC information, GCskew / GCratio / Nratio"""
__date__ = "2023-4-11"
__author__ = "Junbo Yang"
__email__ = "yang_junbo_hi@126.com"
__license__ = "MIT"
"""
The MIT License (MIT)
Copyright (c) 2022 Junbo Yang <yang_junbo_hi@126.com> <1806389316@pku.edu.cn>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Manager
from optparse import OptionParser
import random
from collections import defaultdict
import re
import os
import sys
from pathlib import Path
def argsParse():
parser = OptionParser('Usage: %prog -i genome.fa -o GC')
parser.add_option('-i', '--input',
dest='input',
help='Input file: genome.fa')
parser.add_option('-w', '--window',
dest='window',
default=200,
type="int",
help='Window length. Default: 200')
parser.add_option('-o', '--out',
dest='out',
default="GC",
type="str",
help='Prefix of output. Default: GC.')
(options, args) = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
elif options.input is None:
parser.print_help()
print("Input file must be specified !!!")
sys.exit(1)
count = 3 # times for the packages install
while count:
try:
import pyfaidx #
print('Dependent package pyfaidx is OK.\nDpendent module pyfaidx is OK.')
break
except:
print('Dependent package pyfaidx is not found!!! \n Start intalling ....')
os.system('pip install pyfaidx')
count -= 1
continue
return parser.parse_args()
def windows_dict(Input):
window_dict = defaultdict(str)
with open(Input, 'r') as f:
for i in f:
if i.startswith(">"):
i = i.lstrip(">").strip()
key = i.replace(":", "-")
else:
value = i.strip()
window_dict[key] += value
return window_dict
def windows_GC(Input, Output):
GC_ratio_file = Output + ".GC_ratio"
GC_skew_file = Output + ".GC_skew"
N_file = Output + ".N_ratio"
with open(GC_ratio_file, "w") as out:
with open(GC_skew_file, "w") as out2:
with open(N_file, "w") as Nout:
for i in Input.keys():
bed = i.split("-")
GC_ratio = (list(Input[i]).count("G") + list(Input[i]).count("C"))/len(list(Input[i]))
GC_skew = (list(Input[i]).count("G") - list(Input[i]).count("C"))/\
(list(Input[i]).count("G") + list(Input[i]).count("C"))
N_ratio = list(Input[i]).count("N")/len(list(Input[i]))
out.write("\t".join(map(str, bed)) + "\t" + str(GC_ratio) + "\n")
out2.write("\t".join(map(str, bed)) + "\t" + str(GC_skew) + "\n")
Nout.write("\t".join(map(str, bed)) + "\t" + str(N_ratio) + "\n")
if __name__ == "__main__":
options, args = argsParse()
e1 = time.time()
genome_size = options.input.split("/")[-1] + ".size"
os.system("faidx {} -i chromsizes > {}".format(options.input, genome_size))
genome_window = options.input.split("/")[-1] + ".window"
os.system("bedtools makewindows -g {} -w {} > {}".format(genome_size, options.window, genome_window))
genome_window_fa = options.input.split("/")[-1] + ".window.fa"
os.system("bedtools getfasta -fi {} -bed {} -fo {}".format(options.input, genome_window, genome_window_fa))
genome_window_dict = windows_dict(genome_window_fa)
windows_GC(genome_window_dict, options.out)
e2 = time.time()
print("INFO {} Total times: {}".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())),
round(float(e2 - e1), 2)))
| joybio/custom_scripts | GC_information_for_circos/GC_infor_for_circos.py | GC_infor_for_circos.py | py | 5,210 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "optparse.OptionParser",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_num... |
43110373172 | """
Contains functions for common support adjustments.
Created on Thu May 11 16:30:11 2023
@author: MLechner
# -*- coding: utf-8 -*-
"""
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from mcf import mcf_data_functions as mcf_data
from mcf import mcf_general as gp
from mcf import mcf_general_sys as mcf_sys
from mcf import mcf_print_stats_functions as ps
from mcf import mcf_variable_importance_functions as vi
def common_support(mcf_, tree_df, fill_y_df, train=True):
"""Remove observations from data files that are off-support."""
gen_dic = mcf_.gen_dict
lc_dic, var_x_type, cs_dic = mcf_.lc_dict, mcf_.var_x_type, mcf_.cs_dict
data_train_dic = mcf_.data_train_dict
d_name, _, no_of_treat = mcf_data.get_treat_info(mcf_)
x_name, x_type = gp.get_key_values_in_list(var_x_type)
names_unordered = [x_name[j] for j, val in enumerate(x_type) if val > 0]
if gen_dic['with_output'] and gen_dic['verbose']:
ps.print_mcf(gen_dic, '\n' + '=' * 100 + '\nCommon support analysis',
summary=True)
if train:
if lc_dic['cs_cv']: # Crossvalidate ... only tree data is used
tree_mcf_df, fill_y_mcf_df = tree_df.copy(), fill_y_df.copy()
d_tree_mcf_np = tree_mcf_df[d_name].to_numpy().ravel()
else: # Use lc_dic['cs_share'] of data for common support estim. only
# Take the same share of obs. from both input samples
tree_mcf_df, tree_cs_df = train_test_split(
tree_df, test_size=lc_dic['cs_share'], random_state=42)
fill_y_mcf_df, fill_y_cs_df = train_test_split(
fill_y_df, test_size=lc_dic['cs_share'], random_state=42)
data_cs_df = pd.concat([tree_cs_df, fill_y_cs_df], axis=0)
x_cs_df, _ = mcf_data.get_x_data(data_cs_df, x_name)
d_cs_np = data_cs_df[d_name].to_numpy().ravel()
x_fy_df, _ = mcf_data.get_x_data(fill_y_mcf_df, x_name)
else:
tree_mcf_df, fill_y_mcf_df = tree_df, None
x_mcf_df, obs_mcf = mcf_data.get_x_data(tree_mcf_df, x_name) # train,adj.
txt = ''
if train:
if names_unordered: # List is not empty
x_fy_df, _ = mcf_data.dummies_for_unord(
x_fy_df, names_unordered, data_train_dict=data_train_dic)
x_mcf_df, dummy_names = mcf_data.dummies_for_unord(
x_mcf_df, names_unordered, data_train_dict=data_train_dic)
if not lc_dic['cs_cv']:
x_cs_df, _ = mcf_data.dummies_for_unord(
x_cs_df, names_unordered, data_train_dict=data_train_dic)
else:
dummy_names = None
if not lc_dic['cs_cv']:
x_cs_np = x_cs_df.to_numpy()
x_fy_np, x_mcf_np = x_fy_df.to_numpy(), x_mcf_df.to_numpy()
if gen_dic['with_output'] and gen_dic['verbose']:
txt += '\n' + '-' * 100 + '\n'
txt += 'Computing random forest based common support\n'
ps.print_mcf(gen_dic, txt, summary=False)
max_workers = 1 if gen_dic['replication'] else gen_dic['mp_parallel']
classif = RandomForestClassifier(
n_estimators=mcf_.cf_dict['boot'], max_features='sqrt',
bootstrap=True, oob_score=False, n_jobs=max_workers,
random_state=42, verbose=False, min_samples_split=5)
if lc_dic['cs_cv']: # Crossvalidate
index = np.arange(obs_mcf) # indices
rng = np.random.default_rng(seed=9324561)
rng.shuffle(index)
index_folds = np.array_split(index, lc_dic['cs_cv_k'])
pred_mcf_np = np.empty((len(index), no_of_treat))
pred_fy_np_fold = np.zeros((len(x_fy_np), no_of_treat))
forests = []
for fold_pred in range(lc_dic['cs_cv_k']):
fold_train = [x for idx, x in enumerate(index_folds)
if idx != fold_pred]
index_train = np.hstack(fold_train)
index_pred = index_folds[fold_pred]
x_pred, x_train = x_mcf_np[index_pred], x_mcf_np[index_train]
d_train = d_tree_mcf_np[index_train]
classif.fit(x_train, d_train)
forests.append(deepcopy(classif))
pred_mcf_np[index_pred, :] = classif.predict_proba(x_pred)
pred_fy_np_fold += classif.predict_proba(x_fy_np)
pred_cs_np, d_cs_np = pred_mcf_np, d_tree_mcf_np # To get cut-offs
pred_fy_np = pred_fy_np_fold / lc_dic['cs_cv_k']
else:
x_train, x_test, d_train, d_test = train_test_split(
x_cs_np, d_cs_np, test_size=0.25, random_state=42)
classif.fit(x_train, d_train)
pred_cs_np = classif.predict_proba(x_test) # -> determine cut-offs
d_cs_np = d_test
pred_mcf_np = classif.predict_proba(x_mcf_np) # cut and return
pred_fy_np = classif.predict_proba(x_fy_np) # cut and return
forests = [classif]
cs_dic['forests'] = forests
if gen_dic['with_output']:
vi.print_variable_importance(
deepcopy(classif), x_mcf_df, tree_mcf_df[d_name], x_name,
names_unordered, dummy_names, gen_dic, summary=False)
# Normalize estimated probabilities to add up to 1
pred_cs_np_sum = pred_cs_np.sum(axis=1, keepdims=True)
pred_mcf_np_sum = pred_mcf_np.sum(axis=1, keepdims=True)
pred_fy_np_sum = pred_fy_np.sum(axis=1, keepdims=True)
pred_cs_np /= pred_cs_np_sum
pred_mcf_np /= pred_mcf_np_sum
pred_fy_np /= pred_fy_np_sum
# Determine cut-offs nased on pred_cs_np
cs_dic['cut_offs'] = get_cut_off_probs(mcf_, pred_cs_np, d_cs_np)
mcf_.cs_dict = cs_dic # Update instance with cut-off prob's
# Descriptive stats
if gen_dic['with_output']:
plot_support(mcf_, pred_cs_np, d_cs_np)
descriptive_stats_on_off_support(mcf_, pred_fy_np, fill_y_mcf_df,
'Training - fill mcf with y data')
# Reduce samples
fill_y_mcf_df, _ = on_off_support_df(mcf_, pred_fy_np, fill_y_mcf_df)
else: # Reduce prediction sample
# Predict treatment probabilities
if names_unordered: # List is not empty
x_mcf_df, _ = mcf_data.dummies_for_unord(
x_mcf_df, names_unordered, data_train_dict=data_train_dic)
pred_mcf_np = np.zeros((len(x_mcf_df), no_of_treat))
# If cross-validation, take average of forests in folds
for forest in cs_dic['forests']:
pred_mcf_np += forest.predict_proba(x_mcf_df.to_numpy())
pred_mcf_np /= len(cs_dic['forests'])
# Normalize estimated probabilities to add up to 1
pred_mcf_np /= pred_mcf_np.sum(axis=1, keepdims=True)
# Delete observation off support
if gen_dic['with_output']:
titel = 'Training - build mcf data' if train else 'Prediction data'
descriptive_stats_on_off_support(mcf_, pred_mcf_np, tree_mcf_df, titel)
tree_mcf_df, _ = on_off_support_df(mcf_, pred_mcf_np, tree_mcf_df)
return tree_mcf_df, fill_y_mcf_df
def check_if_too_many_deleted(mcf_, obs_keep, obs_del):
    """Check if too many obs are deleted and raise Exception if so.

    Parameters
    ----------
    mcf_ : object
        Instance providing ``cs_dict['max_del_train']``, the maximum share
        of training observations that may be removed by the support check.
    obs_keep : int
        Number of observations retained.
    obs_del : int
        Number of observations deleted.

    Raises
    ------
    ValueError
        If the deleted share exceeds the allowed maximum.
    """
    max_del_train = mcf_.cs_dict['max_del_train']
    total_obs = obs_keep + obs_del
    if total_obs == 0:
        # Nothing was checked and nothing deleted: trivially within bounds.
        # (Previously this raised ZeroDivisionError.)
        return
    share_del = obs_del / total_obs
    if share_del > max_del_train:
        err_str = (
            f'{share_del:3.1%} observation deleted in common support, but only'
            f' {max_del_train:3.1%} observations of training data are allowed'
            ' to be deleted in support check. Programme is terminated. Improve'
            ' balance of input data or change share allowed to be deleted.')
        raise ValueError(err_str)
def descriptive_stats_on_off_support(mcf_, probs_np, data_df, titel=''):
    """Compute descriptive stats for deleted and retained observations.

    Splits ``data_df`` into on-support / off-support parts (via
    :func:`on_off_support_df`), prints counts, treatment shares and
    covariate statistics for the full / kept / deleted samples, and finally
    checks that not too many observations were removed.

    Parameters
    ----------
    mcf_ : object
        Instance carrying ``gen_dict``, ``var_dict`` and ``cs_dict``.
    probs_np : numpy.ndarray
        Estimated treatment probabilities, one row per observation.
    data_df : pandas.DataFrame
        Data investigated for common support.
    titel : str, optional
        Title printed with the output.
    """
    keep_df, delete_df = on_off_support_df(mcf_, probs_np, data_df)
    gen_dic, var_dic = mcf_.gen_dict, mcf_.var_dict
    if delete_df.empty:
        # NOTE(review): this `txt` is built but never printed (the string
        # below is printed instead). Looks like dead code -- confirm intent.
        txt = (f'\n\nData investigated for common support: {titel}\n'
               + '-' * 100)
        ps.print_mcf(gen_dic, '\nNo observations deleted in common support '
                     'check', summary=True)
    else:
        d_name, _, _ = mcf_data.get_treat_info(mcf_)
        x_name = var_dic['x_name']
        obs_del, obs_keep = len(delete_df), len(keep_df)
        obs = obs_del + obs_keep
        # Header with number and share of deleted observations.
        txt = '\n' + '-' * 100
        txt += f'\nData investigated for common support: {titel}\n' + '-' * 100
        txt += f'\nObservations deleted: {obs_del:4} ({obs_del/obs:.2%})'
        txt += '\n' + '-' * 100
        ps.print_mcf(gen_dic, txt, summary=True)
        txt = ''
        # Widen pandas display options so full tables are printed.
        with pd.option_context(
                'display.max_rows', 500, 'display.max_columns', 500,
                'display.expand_frame_repr', True, 'display.width', 150,
                'chop_threshold', 1e-13):
            all_var_names = [name.upper() for name in data_df.columns]
            if d_name[0].upper() in all_var_names:
                # Treatment observed: tabulate kept/deleted obs by treatment.
                d_keep = keep_df[d_name]
                d_delete = delete_df[d_name]
                d_keep_count = d_keep.value_counts(sort=False)
                d_delete_count = d_delete.value_counts(sort=False)
                # Append shares (in %) next to the raw counts.
                d_keep_count = pd.concat(
                    [d_keep_count, np.round(d_keep_count / obs_keep * 100, 2)],
                    axis=1)
                d_delete_count = pd.concat(
                    [d_delete_count,
                     np.round(d_delete_count / obs_del * 100, 2)], axis=1)
                d_keep_count.columns = ['Obs.', 'Share in %']
                d_delete_count.columns = ['Obs.', 'Share in %']
                if gen_dic['panel_data']:
                    # Panel data: also count affected panel units (clusters).
                    cluster_id = data_df[var_dic['cluster_name']].squeeze()
                    cluster_keep = keep_df[var_dic['cluster_name']].squeeze()
                    cluster_delete = delete_df[var_dic['cluster_name']
                                               ].squeeze()
                k_str = '\nObservations kept, by treatment\n '
                d_str = '\nObservations deleted, by treatment\n '
                k_str += d_keep_count.to_string()
                d_str += d_delete_count.to_string()
                txt += k_str + '\n' + '- ' * 20 + d_str
                if gen_dic['panel_data']:
                    txt += '- ' * 20
                    txt += '\nTotal number of panel units:'
                    txt += f'{len(cluster_id.unique())}'
                    txt += '\nObservations belonging to '
                    txt += f'{len(cluster_keep.unique())} panel units that are'
                    txt += ' ON support\nObservations belonging to '
                    txt += f'{len(cluster_delete.unique())} panel units are'
                    txt += ' OFF support'
                ps.print_mcf(gen_dic, txt, summary=True)
            else:
                # Treatment not available (e.g. pure prediction data).
                txt = f'\nData investigated for common support: {titel}\n'
                txt += '-' * 100 + '\nTreatment not in prediction data.\n'
                txt += '-' * 100
                ps.print_mcf(gen_dic, txt, summary=False)
            # Covariate statistics: full sample, ON support, OFF support.
            txt = '\n' + '-' * 100
            txt += '\nFull sample (Data ON and OFF support)' + '\n' + '-' * 100
            ps.print_mcf(gen_dic, txt, summary=False)
            ps.print_mcf(gen_dic, data_df[x_name].describe().transpose(),
                         summary=False)
            if d_name[0].upper() in all_var_names:
                mean_by_treatment(data_df[d_name], data_df[x_name], gen_dic,
                                  summary=False)
            txt = '\n' + '-' * 100 + '\nData ON support' + '\n' + '-' * 100
            ps.print_mcf(gen_dic, txt, summary=False)
            ps.print_mcf(gen_dic, keep_df[x_name].describe().transpose(),
                         summary=False)
            # Per-treatment means only for minimally sized samples (> 5 obs).
            if d_name[0].upper() in all_var_names and len(keep_df) > 5:
                mean_by_treatment(keep_df[d_name], keep_df[x_name], gen_dic,
                                  summary=False)
            txt = '\n' + '-' * 100 + '\nData OFF support' + '\n' + '-' * 100
            ps.print_mcf(gen_dic, txt, summary=False)
            ps.print_mcf(gen_dic, delete_df[x_name].describe().transpose(),
                         summary=False)
            if d_name[0].upper() in all_var_names and len(delete_df) > 5:
                mean_by_treatment(delete_df[d_name], delete_df[x_name],
                                  gen_dic, summary=False)
        # Raise if the share of deleted observations exceeds the allowed
        # maximum (only reachable when something was actually deleted).
        check_if_too_many_deleted(mcf_, obs_keep, obs_del)
def mean_by_treatment(treat_df, data_df, gen_dic, summary=False):
    """Compute mean by treatment status.

    Parameters
    ----------
    treat_df : pandas.DataFrame or pandas.Series
        Treatment indicator (squeezed to a Series below).
    data_df : pandas.DataFrame
        Variables averaged within each treatment group.
    gen_dic : dict
        General parameters, forwarded to the print helper.
    summary : bool, optional
        Forwarded to ``ps.print_mcf``.
    """
    treat_df = treat_df.squeeze()
    treat_vals = pd.unique(treat_df)
    txt = '\n------------------ Mean by treatment status ---------------------'
    ps.print_mcf(gen_dic, txt, summary=summary)
    # NOTE(review): with `> 0` the else branch is only reachable for EMPTY
    # input, yet its message says 'All obs have same treatment'. Possibly
    # `> 1` was intended -- confirm before changing (alters printed output).
    if len(treat_vals) > 0:
        mean = data_df.groupby(treat_df).mean(numeric_only=True)
        ps.print_mcf(gen_dic, mean.transpose(), summary=summary)
    else:
        txt = f'\nAll obs have same treatment: {treat_vals}'
        ps.print_mcf(gen_dic, txt, summary=summary)
def on_off_support_df(mcf_, probs_np, data_df):
    """Split DataFrame into retained and deleted part.

    An observation is OFF support if ANY of its treatment probabilities
    lies above the upper or below the lower cut-off of that treatment.

    Parameters
    ----------
    mcf_ : object
        Instance providing ``cs_dict['cut_offs']`` with per-treatment
        ``'lower'`` / ``'upper'`` bound arrays.
    probs_np : numpy.ndarray
        Treatment probabilities, shape (obs, no_of_treat).
    data_df : pandas.DataFrame
        Data aligned row-wise with ``probs_np``.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        Copies of the on-support and off-support rows, in that order.
    """
    cut_offs = mcf_.cs_dict['cut_offs']
    lower, upper = cut_offs['lower'], cut_offs['upper']
    # Vectorised replacement of the previous per-row Python loop: bounds
    # broadcast over rows, np.any collapses across treatments.
    # (An unused mcf_data.get_treat_info lookup was removed here.)
    off_support = np.any((probs_np > upper) | (probs_np < lower), axis=1)
    data_on_df = data_df[~off_support].copy()
    data_off_df = data_df[off_support].copy()
    return data_on_df, data_off_df
def plot_support(mcf_, probs_np, d_np):
    """Histogrammes for distribution of treatment probabilities for overlap.

    For every treatment, draws (i) a count histogram and (ii) a density
    histogram with a normal approximation of that treatment's estimated
    probability within each observed-treatment subsample. Common support
    cut-offs are shown as vertical lines. Figures are saved as jpeg/pdf,
    the underlying data as csv.

    Parameters
    ----------
    mcf_ : object
        Instance carrying ``cs_dict`` (cut-offs, output paths) and
        ``int_dict`` (plot options such as dpi and legend location).
    probs_np : numpy.ndarray
        Treatment probabilities, shape (obs, no_of_treat).
    d_np : numpy.ndarray
        Observed treatment of each observation.
    """
    cs_dic, int_dic = mcf_.cs_dict, mcf_.int_dict
    lower, upper = cs_dic['cut_offs']['lower'], cs_dic['cut_offs']['upper']
    _, d_values, _ = mcf_data.get_treat_info(mcf_)
    # Fix: 'aguamarine' is not a valid matplotlib colour name; using it
    # (>= 14 treatments) raised ValueError. Correct name is 'aquamarine'.
    color_list = ['red', 'blue', 'green', 'violet', 'magenta', 'crimson',
                  'yellow', 'darkorange', 'khaki', 'skyblue', 'darkgreen',
                  'olive', 'greenyellow', 'aquamarine', 'deeppink',
                  'royalblue', 'navy', 'blueviolet', 'purple']
    # Recycle colours if there are more treatments than colours.
    if len(color_list) < len(d_values):
        color_list = color_list * len(d_values)
    color_list = color_list[:len(d_values)]
    for idx_p, ival_p in enumerate(d_values):  # iterate treatment probs
        treat_prob = probs_np[:, idx_p]
        titel = f'Probability of treatment {ival_p} in different subsamples'
        f_titel = f'common_support_pr_treat{ival_p}'
        # Output paths: plain histogram and '_d' density variants.
        file_name_csv = (cs_dic['common_support_fig_pfad_csv']
                         + '/' + f_titel + '.csv')
        file_name_jpeg = (cs_dic['common_support_fig_pfad_jpeg']
                          + '/' + f_titel + '.jpeg')
        file_name_pdf = (cs_dic['common_support_fig_pfad_pdf']
                         + '/' + f_titel + '.pdf')
        file_name_csv_d = (cs_dic['common_support_fig_pfad_csv']
                           + '/' + f_titel + '_d.csv')
        file_name_jpeg_d = (cs_dic['common_support_fig_pfad_jpeg']
                            + '/' + f_titel + '_d.jpeg')
        file_name_pdf_d = (cs_dic['common_support_fig_pfad_pdf']
                           + '/' + f_titel + '_d.pdf')
        # One histogram series per observed-treatment subsample.
        data_hist = [treat_prob[d_np == val] for val in d_values]
        fig, axs = plt.subplots()
        fig_d, axs_d = plt.subplots()
        labels = ['Treat ' + str(d) for d in d_values]
        fit_line_all, bins_all = [], []
        for idx, dat in enumerate(data_hist):
            axs.hist(dat, bins='auto', histtype='bar', label=labels[idx],
                     color=color_list[idx], alpha=0.5, density=False)
            _, bins, _ = axs_d.hist(dat, bins='auto', histtype='bar',
                                    label=labels[idx], color=color_list[idx],
                                    alpha=0.5, density=True)
            bins_all.append(bins)
            # Overlay a normal density with the subsample's mean and std.
            # NOTE(review): a constant subsample (sigma == 0) produces
            # inf/nan values here -- confirm whether that can occur.
            sigma = np.std(dat)
            fit_line = ((1 / (np.sqrt(2 * np.pi) * sigma))
                        * np.exp(-0.5 * (1 / sigma
                                         * (bins - np.mean(dat)))**2))
            axs_d.plot(bins, fit_line, '--', color=color_list[idx],
                       label='Smoothed ' + labels[idx])
            fit_line_all.append(fit_line)
        # Count-histogram figure: annotations, cut-off lines, legend.
        axs.set_title(titel)
        axs.set_xlabel('Treatment probability')
        axs.set_ylabel('Observations')
        axs.set_xlim([0, 1])
        axs.axvline(lower[idx_p], color='blue', linewidth=0.7,
                    linestyle="--", label='min')
        axs.axvline(upper[idx_p], color='black', linewidth=0.7,
                    linestyle="--", label='max')
        axs.legend(loc=int_dic['legend_loc'], shadow=True,
                   fontsize=int_dic['fontsize'])
        # Replace any pre-existing output files, then save figure and data.
        mcf_sys.delete_file_if_exists(file_name_jpeg)
        mcf_sys.delete_file_if_exists(file_name_pdf)
        mcf_sys.delete_file_if_exists(file_name_csv)
        fig.savefig(file_name_jpeg, dpi=int_dic['dpi'])
        fig.savefig(file_name_pdf, dpi=int_dic['dpi'])
        save_list = [data_hist, labels]
        save_df = pd.DataFrame(save_list)
        save_df = save_df.fillna(value='NaN')
        save_df.to_csv(file_name_csv, index=False)
        # Density-histogram figure: same annotations, then save.
        axs_d.set_title(titel)
        axs_d.set_xlabel('Treatment probability')
        axs_d.set_ylabel('Density')
        axs_d.set_xlim([0, 1])
        axs_d.axvline(lower[idx_p], color='blue', linewidth=0.7,
                      linestyle="--", label='min')
        axs_d.axvline(upper[idx_p], color='black', linewidth=0.7,
                      linestyle="--", label='max')
        axs_d.legend(loc=int_dic['legend_loc'], shadow=True,
                     fontsize=int_dic['fontsize'])
        mcf_sys.delete_file_if_exists(file_name_jpeg_d)
        mcf_sys.delete_file_if_exists(file_name_pdf_d)
        mcf_sys.delete_file_if_exists(file_name_csv_d)
        save_list = [fit_line_all, bins_all, labels]
        save_df = pd.DataFrame(save_list)
        save_df = save_df.fillna(value='NaN')
        save_df.to_csv(file_name_csv_d, index=False)
        fig_d.savefig(file_name_jpeg_d, dpi=int_dic['dpi'])
        fig_d.savefig(file_name_pdf_d, dpi=int_dic['dpi'])
        if int_dic['show_plots']:
            plt.show()
        else:
            # NOTE(review): plt.close() closes only the *current* figure;
            # the other figure of the pair may stay open -- confirm.
            plt.close()
def get_cut_off_probs(mcf_, probs_np, d_np):
    """Compute the cut-offs for common support for training only.

    Two support rules:
    * ``cs_dict['type'] == 1``: quantile-based bounds computed per
      observed-treatment subsample and then aggregated (tightest bounds).
    * otherwise: fixed symmetric bounds ``min_p`` / ``1 - min_p``.

    Parameters
    ----------
    mcf_ : object
        Instance carrying ``cs_dict`` and ``gen_dict``.
    probs_np : numpy.ndarray
        Treatment probabilities, shape (obs, no_of_treat).
    d_np : numpy.ndarray
        Observed treatment per observation.

    Returns
    -------
    dict
        ``{'upper': ndarray, 'lower': ndarray}`` per-treatment cut-offs.
    """
    cs_dic, gen_dic = mcf_.cs_dict, mcf_.gen_dict
    _, d_values, no_of_treat = mcf_data.get_treat_info(mcf_)
    if cs_dic['type'] == 1:
        q_s = cs_dic['quantil']
        # Row idx: subsample with observed treatment d_values[idx];
        # column j: bounds on the probability of treatment j.
        upper_limit = np.empty((no_of_treat, no_of_treat))
        lower_limit = np.empty_like(upper_limit)
        for idx, ival in enumerate(d_values):
            probs = probs_np[d_np == ival]
            if q_s == 1:
                # Quantile of 1 degenerates to min/max.
                upper_limit[idx, :] = np.max(probs, axis=0)
                lower_limit[idx, :] = np.min(probs, axis=0)
            else:
                upper_limit[idx, :] = np.quantile(probs, q_s, axis=0)
                lower_limit[idx, :] = np.quantile(probs, 1 - q_s, axis=0)
            # First treatment is ignored (bounds 0/1): probabilities add
            # up to 1, so its support is implied by the others.
            upper_limit[idx, 0] = 1
            lower_limit[idx, 0] = 0
        txt = ''
        if cs_dic['adjust_limits'] != 0:
            # Widen (or tighten) the bounds by the requested share,
            # clipped back into [0, 1].
            upper_limit *= 1 + cs_dic['adjust_limits']
            lower_limit *= 1 - cs_dic['adjust_limits']
            lower_limit = np.clip(lower_limit, a_min=0, a_max=1)
            upper_limit = np.clip(upper_limit, a_min=0, a_max=1)
            if gen_dic['with_output']:
                txt += '\n' + '-' * 100 + '\nCommon support bounds adjusted by'
                txt += f' {cs_dic["adjust_limits"]:5.2%}-points\n' + '-' * 100
        if gen_dic['with_output']:
            # Print the full per-subsample limit matrices.
            txt += '\nTreatment sample Treatment probabilities in %'
            txt += '\n--------------------- Upper limits ----------------'
            for idx, ival in enumerate(d_values):
                txt += f'\nD = {ival:9} '
                for jdx in range(no_of_treat):
                    txt += f'{upper_limit[idx, jdx]:>7.2%} '
            txt += '\n--------------------- Lower limits ----------------'
            for idx, ival in enumerate(d_values):
                txt += f'\nD = {ival:9} '
                for jdx in range(no_of_treat):
                    txt += f'{lower_limit[idx, jdx]:>7.2%} '
            txt += '\n' + 100 * '-'
            txt += '\nFirst treatment is set to 1 and 0 (ignored) due to'
            txt += ' additivity.' + '\n' + 100 * '-'
        # Aggregate across subsamples: take the tightest bounds.
        upper, lower = np.min(upper_limit, axis=0), np.max(lower_limit, axis=0)
        if gen_dic['with_output']:
            upper_str = [f'{x:>7.2%}' for x in upper]
            lower_str = [f'{x:>7.2%}' for x in lower]
            txt += '\nUpper limits used: ' + ' '.join(upper_str)
            txt += '\nLower limits used: ' + ' '.join(lower_str)
            txt += '\n' + 100 * '-'
            ps.print_mcf(gen_dic, txt, summary=True)
    else:
        # Fixed min-probability rule: bounds min_p and 1 - min_p for every
        # treatment (original comment about normalisation was misleading).
        upper = np.ones(no_of_treat) * (1 - cs_dic['min_p'])
        lower = np.ones(no_of_treat) * cs_dic['min_p']
    cut_offs = {'upper': upper, 'lower': lower}
    return cut_offs
| MCFpy/mcf | mcf/mcf_common_support_functions.py | mcf_common_support_functions.py | py | 21,725 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "mcf.mcf_data_functions.get_treat_info",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "mcf.mcf_data_functions",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "mcf.mcf_general.get_key_values_in_list",
"line_number": 30,
"usage_type": "c... |
36307774911 | from twilio.rest import Client
import os
def send(body='Some body', to=''):
    """Send an SMS via Twilio and print the resulting message SID.

    Parameters
    ----------
    body : str
        Message text.
    to : str
        Destination number. If empty, falls back to the "to" environment
        variable (previous behaviour).
    """
    # Credentials come from the environment.
    # DANGER! This is insecure. See http://twil.io/secure
    account_sid = os.getenv("sid")
    auth_token = os.getenv("token")
    sender = os.getenv("from_")
    # Bug fix: the `to` argument was silently ignored in favour of the
    # environment variable. Honour the argument when given; fall back to
    # the env var so existing no-argument callers keep working.
    recipient = to or os.getenv("to")

    client = Client(account_sid, auth_token)

    message = client.messages.create(
        body=body,
        from_=sender,
        to=recipient
    )

    print(message.sid)
| gmihov001/Queue-Mgmt-API-Flask | src/sms.py | sms.py | py | 514 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"us... |
15436207408 | from flask import Flask, jsonify, request
import cv2
import tensorflow as tf
import keras
import numpy as np
import requests
from labels.dog_label import dog_label
app = Flask(__name__)
MODEL_PATH = "./models/loaded_2.h5"
app.run(host='0.0.0.0', port=5000)
def load_model():
loaded_model = keras.models.load_model(MODEL_PATH)
return loaded_model
def preprocess_image(image_path):
image_nparray = np.asarray(
bytearray(requests.get(image_path).content), dtype=np.uint8)
image = cv2.imdecode(image_nparray, cv2.IMREAD_COLOR)
# img = cv2.imread(image)
img = cv2.resize(image, dsize=(224, 224))
img = img / 255.0
img = np.expand_dims(img, axis=0)
return img
@app.route('/')
def photos():
return "Hello World!"
@app.route('/photos', methods=['POST'])
def findSpeices():
params = request.get_json()
imageURL = params["imageURL"]
# image_path = request.files['image'].read()
# image_path = "./test_img/mal.jpg"
# image_path = "./test_img/Silky-Terrier.jpg"
# image_path = "./test_img/toy_poddle.jpg"
loaded_model = load_model()
img = preprocess_image(imageURL)
predictions = loaded_model.predict(img)
score = tf.nn.softmax(predictions[0])
temp = np.argmax(score)
breed = dog_label[temp]
return breed
| devRangers/smg-ana-prototype | data/app.py | app.py | py | 1,309 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",... |
73789734115 | import numpy as np
import cv2
import dlib
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import models
from PIL import Image
import pygame
device = torch.device('cpu')
num_classes = 4
path = 'epoch-99.pt'
classes = {'closed': 0, 'normal': 1, 'side': 2, 'yawn': 3}
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)
model = model.to(device)
model.load_state_dict(torch.load(path, map_location=device))
model.eval()
detector = dlib.get_frontal_face_detector()
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Resize((224, 224)),
transforms.Normalize((0.5, 0.5, 0.5), (1, 1, 1))
])
cap = cv2.VideoCapture(0)
counter = 0
score = {label: 0 for label in classes.values()}
frames_count = {label: 0 for label in classes.values()}
required_frames = 10
pygame.mixer.init()
show_text = False
def play_alarm_sound():
global show_text
pygame.mixer.music.load('alarm.wav')
pygame.mixer.music.play(-1)
show_text = True
def stop_alarm_sound():
pygame.mixer.music.stop()
global show_text
show_text = False
while True:
ret, frame = cap.read()
faces = detector(frame)
try:
for face in faces:
x1, y1, x2, y2 = face.left(), face.top(), face.right(), face.bottom()
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
im = frame[y1:y2, x1:x2]
pil_image = Image.fromarray(im)
dl_frame = transform(pil_image)
dl_frame = torch.unsqueeze(dl_frame, axis=0)
prediction = model(dl_frame).squeeze(0).softmax(0)
pred_label = list(classes.keys())[torch.argmax(prediction)]
print(pred_label)
cv2.putText(frame, 'Prediction: ' + pred_label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
# Update scores and check for alarm trigger
for label in classes.values():
if label in [classes['closed'], classes['side'], classes['yawn']]:
if pred_label == list(classes.keys())[label]:
frames_count[label] += 1
if frames_count[label] >= required_frames:
score[label] += 1
if score[label] == 1:
play_alarm_sound()
else:
frames_count[label] = 0
score[label] = 0
else:
frames_count[label] = 0
score[label] = 0
if all(score[label] == 0 for label in score):
stop_alarm_sound()
except Exception as e:
print(f"Error: {str(e)}")
if show_text:
cv2.putText(frame, "ALERT: Drowsiness Detected!", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.imshow("Drowsiness Detection", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
cap.release()
cv2.destroyAllWindows() | Nithya-Satheesh/Drowsiness-And-Distraction-Detection-System | real-time.py | real-time.py | py | 3,070 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torchvision.models.resnet18",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.... |
32271389388 | import os
import azure.cognitiveservices.speech as speechsdk
def recognize_from_microphone():
    """Recognise one utterance (es-MX) from the default microphone.

    Credentials are read from the SPEECH_KEY / SPEECH_REGION environment
    variables (as the original comment intended); the previous hard-coded
    values remain only as a fallback for backwards compatibility.
    """
    # SECURITY: a subscription key was committed here in plain text.
    # Rotate it and rely on the environment variables instead.
    speech_key = os.environ.get('SPEECH_KEY', 'bb8bd625ed4e4a4ba60392152e02eb7c')
    speech_region = os.environ.get('SPEECH_REGION', 'eastus')

    speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=speech_region)
    speech_config.speech_recognition_language = "es-MX"

    audio_config = speechsdk.audio.AudioConfig(use_default_microphone=True)
    speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)

    print("Habla a tu microfono.")
    speech_recognition_result = speech_recognizer.recognize_once_async().get()

    if speech_recognition_result.reason == speechsdk.ResultReason.RecognizedSpeech:
        print("Escuche: {}".format(speech_recognition_result.text))
    elif speech_recognition_result.reason == speechsdk.ResultReason.NoMatch:
        print("No pude reconocer: {}".format(speech_recognition_result.no_match_details))
    elif speech_recognition_result.reason == speechsdk.ResultReason.Canceled:
        cancellation_details = speech_recognition_result.cancellation_details
        print("Cancelado: {}".format(cancellation_details.reason))
        if cancellation_details.reason == speechsdk.CancellationReason.Error:
            print("Error: {}".format(cancellation_details.error_details))
            print("Tas bien mano?")
recognize_from_microphone() | AlexOlivaresP/Transcriptor-RPC | voz.py | voz.py | py | 1,468 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "azure.cognitiveservices.speech.SpeechConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "azure.cognitiveservices.speech",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "azure.cognitiveservices.speech.audio.AudioConfig",
"line_number": 12... |
16565100780 | from os import path
from setuptools import setup, find_packages
from powerdataclass.VERSION import __VERSION__
package_name = 'powerdataclass'
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name=package_name,
version=__VERSION__,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
url=f'http://pypi.org/simple/{package_name}',
install_requires=['setuptools',
'toposort'
],
python_requires='>=3.11',
license='MIT',
author='Arish Pyne',
author_email='arishpyne@gmail.com',
description='Power Dataclass: dataclasses with auto typecasting and other power features',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.11',
'Topic :: Software Development :: Libraries :: Python Modules',
'Typing :: Typed',
]
)
| arishpyne/powerdataclass | setup.py | setup.py | py | 1,210 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
71631120355 | from utils import mock_fairseq
mock_fairseq() # noqa: E402
from tseval.qats import get_qats_train_data, evaluate_scoring_method_on_qats
from tseval.feature_extraction import get_all_vectorizers
def test_get_qats_train_data():
sentences, labels = get_qats_train_data(aspect='simplicity')
assert sentences.shape == (505, 2)
assert labels.shape == (505,)
def test_evaluate_scoring_method():
vectorizer = get_all_vectorizers()[0]
metrics = evaluate_scoring_method_on_qats('simplicity', vectorizer)
assert abs(metrics['valid_pearson']) > 0.2
| facebookresearch/text-simplification-evaluation | tests/test_qats.py | test_qats.py | py | 566 | python | en | code | 46 | github-code | 1 | [
{
"api_name": "utils.mock_fairseq",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "tseval.qats.get_qats_train_data",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tseval.feature_extraction.get_all_vectorizers",
"line_number": 14,
"usage_type": "call"
... |
18972801594 | import telegram
import telegram.ext
import time
import threading
# custom library
from handler import SuperHandler
import callback
import exrates
# token of the bot. For individual use, you should enter yous
TOKEN = open('token.txt', mode='r').read()
# add functionality to the bot
def create_updater(token) -> telegram.ext.Updater:
    """Build the bot Updater and register all command/callback handlers.

    Parameters
    ----------
    token : str
        Telegram bot token.

    Returns
    -------
    telegram.ext.Updater
        Updater with all handlers attached, ready for ``start_polling``.
    """
    updater = telegram.ext.Updater(token)
    dispatcher = updater.dispatcher
    # Command handlers come from the project-level SuperHandler factory.
    dispatcher.add_handler(SuperHandler.get_handler('start'))
    dispatcher.add_handler(SuperHandler.get_handler('help'))
    dispatcher.add_handler(SuperHandler.get_handler('main_menu'))
    dispatcher.add_error_handler(SuperHandler.error)
    # callbacks are handled separately from other handlers
    dispatcher.add_handler(telegram.ext.CallbackQueryHandler(callback.callback_handler))
    return updater
def do_refreshes():
    """Refresh exchange rates forever, once per hour (run in a daemon thread)."""
    while True:
        exrates.refresh()
        # Sleep one hour between refreshes.
        time.sleep(60 * 60)
# main body
if __name__ == '__main__':
updater = create_updater(TOKEN)
threading.Thread(target=do_refreshes).start()
updater.start_polling()
updater.idle() | AndrewChmutov/TelegramWallet | main.py | main.py | py | 1,115 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "telegram.ext.Updater",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "telegram.ext",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "handler.SuperHandler.get_handler",
"line_number": 19,
"usage_type": "call"
},
{
"api_name"... |
38684301063 | import cv2
import numpy as np
from PIL import Image, ImageOps, ImageTk, ImageFilter
def apply_median_filter(img, ksize):
    """Return *img* smoothed with an OpenCV median filter.

    Parameters
    ----------
    img : numpy.ndarray
        Input image (as read by ``cv2.imread``).
    ksize : int
        Aperture size for the median filter (odd, > 1).
    """
    return cv2.medianBlur(img, ksize)
def apply_sharpen_filter(image):
    """Sharpen *image* using Pillow's built-in SHARPEN kernel.

    Parameters
    ----------
    image : numpy.ndarray
        Input image array.

    Returns
    -------
    PIL.Image.Image
        The sharpened image (note: a PIL image, not a NumPy array).
    """
    # Pillow's ImageFilter only operates on PIL images, so convert first.
    as_pil = Image.fromarray(image)
    return as_pil.filter(ImageFilter.SHARPEN)
# # Image path
# image_path = "../../images/ctisus/ctisusBmp/adrenal_1-01.bmp"
# # Load the image
# img = cv2.imread(image_path)
#
# # Apply the median filter
# median_filtered_img = apply_median_filter(img, 7)
# # pil_img = Image.fromarray(median_filtered_img)
# # Apply the sharpen filter
# # sharpen_filtered_img = apply_sharpen_filter(pil_img)
# sharpen_filtered_img = apply_sharpen_filter(median_filtered_img)
#
#
# # Convert the PIL images to NumPy arrays for displaying with OpenCV
# median_filtered_array = np.array(median_filtered_img)
# sharpen_filtered_array = np.array(sharpen_filtered_img)
#
# # Display the original and filtered images
# cv2.imshow('Original Image', cv2.imread(image_path))
# cv2.imshow('Median Filtered Image', median_filtered_array)
# cv2.imshow('Sharpen Filtered Image', sharpen_filtered_array)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
| nicole-kozhuharova/bachelorArbeit | venv/bachelorArbeit/algorithm/functions/medianFilterFunc.py | medianFilterFunc.py | py | 1,449 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.medianBlur",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFilter.SHARPEN",... |
20741888168 | from typing import Annotated, List, Optional
from mypy_extensions import TypedDict
# Raider record: on-chain address plus ranking position.
RaiderDictionary = TypedDict("RaiderDictionary", {"address": str, "ranking": int})
# Raider record extended with the controlling agent's address.
RaiderWithAgentDictionary = TypedDict(
    "RaiderWithAgentDictionary", {"address": str, "ranking": int, "agent_address": str}
)
# Currency description; `minters` may be None when the currency has no
# restricted minter list.
CurrencyDictionary = TypedDict(
    "CurrencyDictionary",
    {"ticker": str, "decimalPlaces": int, "minters": Optional[List[str]]},
)
# A single reward: amount (as string, presumably to avoid precision loss --
# confirm) in a given currency.
RewardDictionary = TypedDict(
    "RewardDictionary", {"quantity": str, "currency": CurrencyDictionary}
)
# Rewards assigned to one ranked raider.
RankingRewardDictionary = TypedDict(
    "RankingRewardDictionary",
    {"raider": RaiderDictionary, "rewards": List[RewardDictionary]},
)
# Same as above, with the raider's agent address included.
RankingRewardWithAgentDictionary = TypedDict(
    "RankingRewardWithAgentDictionary",
    {"raider": RaiderWithAgentDictionary, "rewards": List[RewardDictionary]},
)
# One CSV-style recipient row: exactly 8 string fields (the 8 is metadata
# carried via Annotated, not enforced at runtime).
RecipientRow = Annotated[List[str], 8]
# Transfer amount: integer quantity in the smallest unit of `ticker`.
AmountDictionary = TypedDict(
    "AmountDictionary",
    {
        "ticker": str,
        "decimalPlaces": int,
        "quantity": int,
    },
)
# A single transfer target: recipient address and the amount to send.
Recipient = TypedDict("Recipient", {"recipient": str, "amount": AmountDictionary})
| planetarium/world-boss-service | world_boss/app/stubs.py | stubs.py | py | 1,112 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "mypy_extensions.TypedDict",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "mypy_extensions.TypedDict",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "mypy_extensions.TypedDict",
"line_number": 9,
"usage_type": "call"
},
{
"api_na... |
24328717943 | """Read NestedSamples from UltraNest results."""
import os
import json
from anesthetic.samples import NestedSamples
def read_ultranest(root, *args, **kwargs):
    """Read UltraNest files.

    Parameters
    ----------
    root : str
        root name for reading files in UltraNest format, i.e. the files
        ``<root>/info/results.json`` and ``<root>/results/points.hdf5``.

    Returns
    -------
    NestedSamples
        Samples with log-likelihoods and birth contours attached.
    """
    # Parameter labels come from the json summary written by UltraNest.
    with open(os.path.join(root, 'info', 'results.json')) as infofile:
        labels = json.load(infofile)['paramnames']
    num_params = len(labels)

    filepath = os.path.join(root, 'results', 'points.hdf5')
    try:
        # h5py is an optional dependency, imported lazily only here.
        import h5py
    except ImportError:
        raise ImportError('h5py is required to read UltraNest results')
    with h5py.File(filepath, 'r') as fileobj:
        points = fileobj['points']
        _, ncols = points.shape
        # Assumes column layout [logL_birth, logL, <1 col>, x_dim unit-cube
        # columns, num_params physical parameters] -- confirm against the
        # UltraNest points.hdf5 format.
        x_dim = ncols - 3 - num_params
        logL_birth = points[:, 0]
        logL = points[:, 1]
        samples = points[:, 3+x_dim:3+x_dim+num_params]

    # Default plot label is the run's directory name; callers may override
    # label/columns/labels via kwargs.
    kwargs['label'] = kwargs.get('label', os.path.basename(root))
    columns = kwargs.pop('columns', labels)
    labels = kwargs.pop('labels', labels)
    data = samples
    return NestedSamples(data=data, logL=logL, logL_birth=logL_birth,
                         columns=columns, labels=labels, *args, **kwargs)
| handley-lab/anesthetic | anesthetic/read/ultranest.py | ultranest.py | py | 1,314 | python | en | code | 51 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number":... |
38940687657 |
import stripe
from stripe.error import AuthenticationError, InvalidRequestError
from django.conf import settings
# from .models import StripeConfig, SillyStripeConfig
# color parameters: style;background (30 is none);foreground
color = {
"end": "\x1b[0m",
"info": "\x1b[0;30;36m",
"success": "\x1b[0;30;32m",
"warning": "\x1b[0;30;33m",
"danger": "\x1b[0;30;31m",
}
DSS_CONFIG_ERROR = (
f"{color['warning']}DJANGO-SILLY-STRIPE IS NOT CONFIGURED PROPERLY."
"\nCheck the configuration in the admin panel."
f"{color['end']}"
)
SILLY_STRIPE = {
# Basic settings
'AUTO_SET': 'CLASSIC', # 'SPA' or 'CLASSIC'
'DSS_SECRET_KEY': 'sk_xxxxxx',
'DSS_PUBLIC_KEY': 'pk_xxxxxx',
'DSS_RESTRICTED_KEY': 'rk_xxxxxx', # optionnal
'DSS_WEBHOOK_SECRET': 'wk_xxxxxx',
'DSS_PREFIX': 'dss/',
# Django Silly Stripe Endpoints
'USE_CHECKOUT': True,
'USE_SUBSCRIPTIONS_CANCEL': True,
'USE_WEBHOOK': True,
'USE_PORTAL': True,
# Checkout settings
'SUCCESS_URL': 'https://example.com/checkout_success',
'CANCEL_URL': 'https://example.com/checkout_cancel',
# Subscriptions settings
'SUBSCRIPTION_CANCEL': 'PERIOD', # 'PERIOD' or 'NOW' (beware with 'NOW': no refund)
'SUBSCRIBE_ONLY_ONCE': True,
# Portal settings
'PORTAL_BACK_URL': 'https://example.com/back_from_portal',
# Misc
'PRINT_DEV_LOGS': False,
}
for key in settings.SILLY_STRIPE:
SILLY_STRIPE[key] = settings.SILLY_STRIPE[key]
| byoso/django-silly-stripe | django_silly_stripe/conf.py | conf.py | py | 1,495 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.settings.SILLY_STRIPE",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.SILLY_STRIPE",
"line_number": 52,
"usage_type": "attribu... |
23718020693 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 15 08:02:43 2018
@author: mfromano
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 2 10:31:21 2018
@author: mfromano
"""
# run hua-an's motion correction
import motion_correction2
import scipy.io as sio
import numpy as np
fi = sio.loadmat('/fastdata/ca-imaging-ed/metadata_raw_gcamp.mat')
fi = fi['metaout']
for i in np.arange(0,1):
if fi[0][i]:
currfiles = [a[0] for a in fi[0][i][1][0]]
suffix = fi[0][i][0][0]
motion_correction2.motion_correction(filename_list=currfiles[0:4])
| HanLabBU/micro-control-final | matlab_src/bin/hua-an_code/test_motion_correct_results.py | test_motion_correct_results.py | py | 614 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "scipy.io.loadmat",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "motion_correction2.motion_corr... |
41898385777 | import os
from astropy.io import fits
import numpy as np
from . import db_indicies as dbi
from . import load_fits
# Indicies of fits file arrays2018/180625/101501_180625.1129.fits
FLG_IND = 4
TEL_DIV_IND = 5
def write_back_tellurics(spectrum_path, model, pwv, order_wv_ranges, shards):
# 1: Open file
f = fits.open(spectrum_path, do_not_scale_image_data=True)
# 2: Write telluric divided spectrum
for order in range(0, 86):
if order in shards:
#only one spectrum in shard
spectrum = next(iter(shards[order].spectra.values()))
# 2(a) Create tellurics array filled with nans
tellurics = np.zeros(len(f[1].data.tellurics[order]))
tellurics.fill(np.nan)
# 2(b) Find the pixel at which the calculated telluric spectrum starts/
# ends, and paste it into array
l_cutoff_wv, r_cutoff_wv = order_wv_ranges[order]
l_cutoff_px = np.argmax(f[1].data.wavelength[order] > l_cutoff_wv)
r_cutoff_px = np.argmax(f[1].data.wavelength[order] > r_cutoff_wv)
good_px=np.where(np.logical_not(np.isnan(f[1].data.spectrum[order])))[0]
l_cutoff_px = l_cutoff_px if l_cutoff_px > good_px[0] else good_px[0]
r_cutoff_px = r_cutoff_px if r_cutoff_px < good_px[-1] else good_px[-1]
tellurics[l_cutoff_px:r_cutoff_px] = spectrum.tel_lin_y
# 2(c) Set telluric array in file
f[1].data.tellurics[order] = tellurics
else:
f[1].data.tellurics[order] = np.ones(len(f[1].data.tellurics[order]))
# 3: Write PWV to header
pwv_msg = "Calculated telluric intensity at 6940.18A"
f[1].header.set("PWV", pwv, pwv_msg)
# 4: Overwrite
f.writeto(spectrum_path, overwrite=True)
f.close()
def write_PWV_header(spectra_folder, out_folder, calibrators):
"""
Writes CRYSTAL's PWV metric to calibration spectra headers
"""
w_calibrator, z_calibrator, f_order = calibrators
for PWV, filename in zip(w_calibrator, f_order):
spectrum_path = os.path.join(spectra_folder, filename)
f = fits.open(spectrum_path, do_not_scale_image_data=True)
f[0].header["SEL_PWV"] = abs(PWV)
f.writeto(os.path.join(out_folder, filename))
f.close()
#def write_tellurics_to_fits(spectrum_path, out_folder, model, mu, shards):
# # 1: Open file
# f = fits.open(spectrum_path, do_not_scale_image_data=True)
# # 2: Write telluric flags
# order_shift = 0
# if load_fits.find_epoch_from_date(f[0].header["DATE-OBS"]) == 2:
# order_shift = 1
# for row in model:
# f[0].data[int(row[dbi.ORD_IND]) + order_shift, int(row[dbi.PX_IND]), FLG_IND] = 2
# # 3: Write telluric divided spectrum
# for shard in shards.values():
# spectrum = next(iter(shard.spectra.values())) #only one spectrum in shard
# f[0].data[shard.order + order_shift, shard.lo_px:shard.hi_px, TEL_DIV_IND] = spectrum.tel_lin_y
# # 4: Write PWV to header
# #f[1].header["PWV"] = str(np.exp(mu))
# f[0].header["CALC-PWV"] = str(np.exp(mu))
# #print("write_fits: CALC-PWV:{}".format(f[0].header["CALC-PWV"]))
#
# # 5: Open destination and write
# out_filename = os.path.basename(os.path.splitext(spectrum_path)[0]) + "t.fits"
# out_path = os.path.join(out_folder, out_filename)
# f.writeto(out_path, overwrite=True)
# f.close()
| chrisleet/selenite | selenite/load_store/write_fits.py | write_fits.py | py | 3,256 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "astropy.io.fits.open",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"lin... |
31944377025 | from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QColor
from qgis.PyQt.Qt import QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsFeature,
QgsDataSourceUri,
QgsProcessingOutputVectorLayer,
QgsProcessingParameterVectorLayer,
QgsWkbTypes,
QgsAction,
QgsProcessingParameterBoolean,
QgsProcessingParameterEnum,
QgsProcessingParameterNumber,
QgsProcessingParameterMultipleLayers,
QgsProcessingUtils,
QgsSpatialIndex,
QgsGeometry,
QgsProcessingParameterField,
QgsProcessingMultiStepFeedback,
QgsProcessingParameterFile,
QgsProcessingParameterExpression,
QgsProcessingException,
QgsProcessingParameterString,
QgsProcessingParameterDefinition,
QgsProcessingParameterType,
QgsProcessingParameterCrs,
QgsCoordinateTransform,
QgsProject,
QgsCoordinateReferenceSystem,
QgsField,
QgsFields,
QgsProcessingOutputMultipleLayers,
QgsProcessingParameterString,
QgsConditionalStyle,
QgsVectorLayer)
import os
from qgis import core
from qgis.utils import iface
from Ferramentas_Producao.modules.qgis.processingAlgs.processingAlg import ProcessingAlg
from Ferramentas_Producao.modules.spellchecker.spellCheckerCtrl import SpellCheckerCtrl
import re
class SpellCheckerAlg(ProcessingAlg):
    """QGIS processing algorithm that spell-checks a text attribute (pt-BR).

    For every input layer carrying the chosen attribute, misspelled words are
    recorded in an auxiliary-storage field named ``<attribute>_erro`` so they
    can be reviewed without modifying the source data.
    """

    # Identifiers used by the QGIS processing framework for parameters/output.
    INPUT_LAYERS = 'INPUT_LAYERS'
    ATTRIBUTE_NAME = 'ATTRIBUTE_NAME'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super(SpellCheckerAlg, self).__init__()

    def initAlgorithm(self, config):
        """Declare the input layers, attribute name and flag sink parameters."""
        self.addParameter(
            QgsProcessingParameterMultipleLayers(
                self.INPUT_LAYERS,
                self.tr('Camadas'),
                QgsProcessing.TypeVectorAnyGeometry
            )
        )
        self.addParameter(
            QgsProcessingParameterString(
                self.ATTRIBUTE_NAME,
                description = self.tr('Nome do Atributo'),
            )
        )
        self.addParameter(
            QgsProcessingParameterFeatureSink(
                self.OUTPUT,
                self.tr('digitacao_flags')
            )
        )

    def processAlgorithm(self, parameters, context, feedback):
        """Spell-check the target attribute on every input layer.

        Misspellings are written to an auxiliary layer attached to each input
        layer, keyed by the feature's ``id`` field. Returns the (empty)
        OUTPUT sink id.
        """
        inputLyrList = self.parameterAsLayerList(
            parameters,
            self.INPUT_LAYERS,
            context
        )
        # NOTE(review): parameterAsFile is used to read a plain string
        # parameter; parameterAsString would be the conventional accessor --
        # confirm they behave identically here.
        attributeName = self.parameterAsFile(
            parameters,
            self.ATTRIBUTE_NAME,
            context
        )
        spellchecker = SpellCheckerCtrl('pt-BR')
        errors = []
        output_dest_id = ''
        listSize = len(inputLyrList)
        progressStep = 100/listSize if listSize else 0
        errorFieldName = '{}_erro'.format(attributeName)
        #field = core.QgsField('{}_erro'.format(attributeName))
        # Relation field linking auxiliary rows back to source features.
        fieldRelation = core.QgsField('id', QVariant.Double)
        for step, layer in enumerate(inputLyrList):
            if not layer.isEditable():
                raise Exception('Todas as camadas de entrada devem está com a edição ativa!')
            attributeIndex = self.getAttributeIndex(attributeName, layer)
            if attributeIndex < 0:
                # Layer does not carry the target attribute; skip it.
                continue
            # Attach auxiliary storage so results live outside the layer data.
            auxlayer = core.QgsAuxiliaryStorage().createAuxiliaryLayer(fieldRelation, layer)
            layer.setAuxiliaryLayer(auxlayer)
            auxLayer = layer.auxiliaryLayer()
            vdef = core.QgsPropertyDefinition(
                errorFieldName,
                core.QgsPropertyDefinition.DataType.DataTypeString,
                "",
                "",
                ""
            )
            auxLayer.addAuxiliaryField(vdef)
            # Alias the generated auxiliary column to the friendly error name.
            idx = layer.fields().indexOf('auxiliary_storage__{}'.format(errorFieldName))
            layer.setFieldAlias(idx, errorFieldName)
            auxFields = auxLayer.fields()
            for feature in layer.getFeatures():
                if feedback.isCanceled():
                    return {self.OUTPUT: output_dest_id}
                attributeValue = feature[attributeIndex]
                if not attributeValue:
                    continue
                # Strip punctuation and digits before tokenizing on ' ' or '/'.
                attributeValue = ''.join(e for e in attributeValue if not(e in [',', ';', '&', '.'] or e.isdigit()))
                wordlist = re.split(' |/', attributeValue)
                wordlist = [ w for w in wordlist if not w in ['-'] ]
                wrongWords = [ word for word in wordlist if not spellchecker.hasWord(word.lower())]
                if len(wrongWords) == 0:
                    continue
                # Record the misspellings in the auxiliary layer, keyed by 'id'.
                auxFeature = QgsFeature(auxFields)
                auxFeature['ASPK'] = feature['id']
                auxFeature['_{}'.format(errorFieldName)] = ';'.join(wrongWords)
                auxLayer.addFeature(auxFeature)
            feedback.setProgress(step*progressStep)
        return {self.OUTPUT: ''}

    def getFlagWkbType(self):
        """Geometry type used for flag output features."""
        return QgsWkbTypes.Point

    def getFlagFields(self):
        """Field schema for the flag sink: the error word and suggestions."""
        sinkFields = QgsFields()
        sinkFields.append(QgsField('erro', QVariant.String))
        sinkFields.append(QgsField('correcao', QVariant.String))
        sinkFields.append(QgsField('outras_opcoes', QVariant.String))
        return sinkFields

    def name(self):
        # Unique algorithm id within the provider.
        return 'spellchecker'

    def displayName(self):
        return self.tr('Verificador de Palavras')

    def group(self):
        return self.tr('Outros')

    def groupId(self):
        return 'FP: Outros'

    def tr(self, string):
        # Standard QGIS translation hook for this algorithm's strings.
        return QCoreApplication.translate('SpellCheckerAlg', string)

    def createInstance(self):
        return SpellCheckerAlg()
{
"api_name": "Ferramentas_Producao.modules.qgis.processingAlgs.processingAlg.ProcessingAlg",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "qgis.core.QgsProcessingParameterMultipleLayers",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "qgis.core.QgsProcessi... |
from sklearn.decomposition import KernelPCA
from matplotlib import pyplot as plt
import pandas as pd, numpy as np, os
import pywt

base_path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(base_path, "../data/12053002165")
output_path = os.path.join(base_path, "../data/solar")

if not os.path.exists(output_path):
    os.makedirs(output_path, exist_ok=True)

train_data_raw = pd.read_excel(os.path.join(data_path, "train_in.xlsx"), header=None).values
test_data_raw = pd.read_excel(os.path.join(data_path, "test_in.xlsx"), header=None).values
train_target_raw = pd.read_excel(os.path.join(data_path, "train_out.xlsx"), header=None).values
test_target_raw = pd.read_excel(os.path.join(data_path, "test_out.xlsx"), header=None).values

timesteps = 24        # history window length (rows looked back per sample)
num_input = 6         # raw feature columns coming from the input sheets
reduction_size = 15   # KernelPCA output dimensionality
scales = 3            # CWT uses scales 1..scales-1

"""
Column 0: history power
Column 3: B0062T
"""

pca = KernelPCA(n_components=reduction_size, kernel="rbf")


def _build_features(data_raw):
    """Build feature rows and wavelet-expanded rows from a raw data array.

    For every row past the warm-up window this:
      * takes the previous `timesteps` power readings (column 0) and expands
        them with a Morlet continuous wavelet transform;
      * copies the raw feature columns, replacing column 3 (B0062T) with its
        delta when the cumulative value increased from a non-zero reading.

    Returns (features, wavelet_features) as two 2-D numpy arrays with one
    row per sample.
    """
    rows, wavelet_rows = list(), list()
    for i in range(timesteps, len(data_raw)):
        history = np.array([data_raw[i - j][0] for j in range(0, timesteps)])
        coeffs, _freqs = pywt.cwt(history, range(1, scales), wavelet='morl')
        temp_row = data_raw[i].tolist()
        if data_raw[i][3] >= data_raw[i - 1][3] and data_raw[i - 1][3] != 0:
            temp_row[3] = data_raw[i][3] - data_raw[i - 1][3]
        wavelet_rows.append(coeffs.flatten().tolist())
        rows.append(temp_row)
    return np.array(rows), np.array(wavelet_rows)


# Append the PCA-reduced wavelet features to the raw features.
# fit_transform refits the estimator, so no separate fit() call is needed.
train_data, fre_data = _build_features(train_data_raw)
fre_data = pca.fit_transform(fre_data)
train_data = np.append(train_data, fre_data, axis=1)

test_data, fre_data = _build_features(test_data_raw)
# NOTE(review): the PCA is refit on the test set instead of reusing the
# train-set fit; kept as-is to preserve existing behaviour -- confirm intent.
fre_data = pca.fit_transform(fre_data)
test_data = np.append(test_data, fre_data, axis=1)

# Targets lose the same warm-up window the features did.
train_target = train_target_raw[timesteps:]
test_target = test_target_raw[timesteps:]

train_data_df = pd.DataFrame(train_data)
train_target_df = pd.DataFrame(train_target)
test_data_df = pd.DataFrame(test_data)
test_target_df = pd.DataFrame(test_target)

### Normalization: min-max scale the PCA feature columns using the combined
### range of train and test so both splits share one scale.
for i in range(num_input, reduction_size + num_input):
    train_max, train_min = train_data_df[i].max(), train_data_df[i].min()
    test_max, test_min = test_data_df[i].max(), test_data_df[i].min()
    _max, _min = max(train_max, test_max), min(train_min, test_min)
    if _max - _min != 0:
        train_data_df[i] = (train_data_df[i] - _min) / (_max - _min)
        test_data_df[i] = (test_data_df[i] - _min) / (_max - _min)

train_data_df.to_excel(os.path.join(output_path, "train_in.xlsx"), index=False, header=None)
test_data_df.to_excel(os.path.join(output_path, "test_in.xlsx"), index=False, header=None)
train_target_df.to_excel(os.path.join(output_path, "train_out.xlsx"), index=False, header=None)
test_target_df.to_excel(os.path.join(output_path, "test_out.xlsx"), index=False, header=None)
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
12194770935 | import nextcord
from nextcord.ext import commands, tasks
import openai
import os
import re
from langdetect import detect
from config import TRANSLATE_CHANNEL
# Initialize the OpenAI API
openai.api_key = os.environ['Key_OpenAI']
def is_english(text):
    """Return True if langdetect classifies *text* as English.

    Detection failures (e.g. empty or undetectable text) are treated as
    "not English" rather than propagating. The except clause is narrowed
    from a bare ``except`` so KeyboardInterrupt/SystemExit still propagate.
    """
    try:
        return detect(text) == 'en'
    except Exception:
        return False
def preprocess_message(text):
    """Strip @mention tokens from *text* and trim surrounding whitespace."""
    without_mentions = re.sub(r'@\w+', '', text)
    return without_mentions.strip()
def should_translate(text):
    """Decide whether a Discord message should be auto-translated.

    Heuristics, applied in order to the mention-stripped text:
      * empty text is never translated;
      * any non-Latin script is always translated;
      * fewer than three words, URLs, purely alphanumeric tokens, and text
        already detected as English are skipped.
    """
    cleaned = preprocess_message(text)

    # Nothing left after stripping mentions -> nothing to translate.
    if not cleaned:
        return False

    # Messages in non-Latin scripts are always translated.
    non_latin_scripts = (
        r'[\u0600-\u06FF]',  # Arabic
        r'[\u0980-\u09FF]',  # Bengali
        r'[\u4E00-\u9FFF\U00020000-\U0002A6DF]',  # Chinese
        r'[\u0400-\u04FF]',  # Cyrillic
        r'[\u0900-\u097F]',  # Devanagari
        r'[\u0370-\u03FF]',  # Greek
        r'[\u0A80-\u0AFF]',  # Gujarati
        r'[\u0A00-\u0A7F]',  # Gurmukhi
        r'[\u0590-\u05FF]',  # Hebrew
        r'[\u3040-\u30ff\u3400-\u4DBF]',  # Japanese
        r'[\u0C80-\u0CFF]',  # Kannada
        r'[\uAC00-\uD7AF]',  # Korean
        r'[\u0D00-\u0D7F]',  # Malayalam
        r'[\u0B00-\u0B7F]',  # Oriya (Odia)
        r'[\u0D80-\u0DFF]',  # Sinhala
        r'[\u0B80-\u0BFF]',  # Tamil
        r'[\u0C00-\u0C7F]',  # Telugu
        r'[\u0E00-\u0E7F]',  # Thai
        r'[\u0F00-\u0FFF]',  # Tibetan
    )
    if any(re.search(pattern, cleaned) for pattern in non_latin_scripts):
        return True

    # Too short to be worth translating.
    if len(cleaned.split()) < 3:
        return False

    # Skip messages containing links.
    url_regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
    if re.search(url_regex, cleaned):
        return False

    # Skip content that is a single purely alphanumeric token.
    if re.search(r"^[a-zA-Z0-9]+$", cleaned):
        return False

    # Skip text already detected as English; translate everything else.
    return not is_english(cleaned)
class TranslationButton(nextcord.ui.Button):
    """Button shown under a message; reveals its cached translation on click."""

    def __init__(self, message_id, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.message_id = message_id # Store the original message's ID

    async def callback(self, interaction: nextcord.Interaction):
        """Reply to the clicking user with the translation, visible only to them."""
        # Fetch the translation using the message ID; the owning view's cog
        # holds the translation cache.
        translation = self.view.cog.translations.get(self.message_id, "Translation not found.")
        # Send the translation as an ephemeral message
        await interaction.response.send_message(translation, ephemeral=True)
class TranslationView(nextcord.ui.View):
    """View holding a single "TR" button that reveals a message's translation."""

    def __init__(self, cog, message_id, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cog = cog  # cog instance that owns the translations cache
        self.add_item(TranslationButton(message_id, label="TR", style=nextcord.ButtonStyle.primary))
class TranslationCog(commands.Cog):
    """Auto-translates qualifying messages in TRANSLATE_CHANNEL via OpenAI.

    Translations are cached in memory keyed by message id and exposed to
    users through a "TR" button attached below the original message.
    """

    def __init__(self, bot):
        self.bot = bot
        self.translations = {}  # message_id -> translated text; cleared hourly
        self.cleanup_task = self.clean_translations.start()

    @tasks.loop(hours=1)
    async def clean_translations(self):
        """Cleanup task to remove translations older than 1 hour."""
        # NOTE(review): this drops the whole cache each tick, so entries live
        # anywhere from 0 to 1 hour -- not strictly "older than 1 hour".
        self.translations = {}

    @commands.Cog.listener()
    async def on_message(self, message):
        """Translate non-bot messages in the watched channel and attach a TR button."""
        if message.channel.id == TRANSLATE_CHANNEL and not message.author.bot:
            if should_translate(message.content):
                chat_message = [{"role": "user", "content": f"Translate the following to English: '{message.content}'"}]
                response = openai.ChatCompletion.create(
                    model="gpt-4",
                    messages=chat_message,
                    temperature=0.2,
                    max_tokens=1000,
                    frequency_penalty=0.0
                )
                translation = response['choices'][0]['message']['content'].strip()
                self.translations[message.id] = translation
                # Send an empty message with just the 'Translation' button below the original message
                view = TranslationView(self, message_id=message.id, timeout=None)
                await message.channel.send("", view=view)

    def cog_unload(self):
        """Stop the hourly cleanup loop when the cog is unloaded."""
        self.cleanup_task.cancel()
def setup(bot):
    """Extension entry point: register the translation cog with the bot."""
    cog = TranslationCog(bot)
    bot.add_cog(cog)
    print("Translation cog loaded")
| CryptoAutistic80/Nextcord-Cog-Bot | cogs/translator.py | translator.py | py | 4,573 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "openai.api_key",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "langdetect.detect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.sub",
"l... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.