seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73021062754 | from tkinter import *
from PIL import ImageTk, Image
# Build the image-viewer window: a back / exit / forward button row.
root = Tk()
root.title('leer coderen bij Codemy.com')
root.iconbitmap('c:/gui/ ')  # NOTE(review): path ends with a space and no .ico file — likely broken; verify
my_img1 = ImageTk.PhotoImage(Image.open("IMG_1136.png"))
my_img2 = ImageTk.PhotoImage(Image.open("IMG_2004.png"))
#image_list = [my_img1, my_img2]
#my_label = Label(image=my_img1)
#my_label.grid(row=0, column=0, columnspan=3)
#my_label.pack()
# Bug fix: "<<" must be passed as text=...; as a bare second positional
# argument it is treated as the Tk option dict (cnf) and Button raises
# AttributeError ('str' object has no attribute 'items') at construction.
button_back = Button(root, text="<<")
button_exit = Button(root, text="exit program", command=root.quit)
button_forward = Button(root, text=">>")
button_back.grid(row=1, column=0)
button_exit.grid(row=1, column=1)
button_forward.grid(row=1, column=2)
#button_quit = Button(root, text="exit program", command=root.quit)
#button_quit.pack()
root.mainloop() | herucara/python-2021 | kijken.py | kijken.py | py | 743 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_... |
959238114 | import argparse
import os
import torch
import torchvision as tv
from at_learner_core.utils import transforms
from at_learner_core.configs import dict_to_namespace
from at_learner_core.utils import transforms as transforms
from at_learner_core.utils import joint_transforms as j_transforms
from at_learner_core.utils import sequence_transforms as s_transforms
from PIL import Image
# Sequence length sampled from each video clip and the square side (pixels)
# every cropped frame is resized to.
L = 16
image_size = 112
# Output keys produced by rank pooling (stat images) and by optical flow.
modality_list = ['stat_r1000', 'stat_r1']
of_modality_list = ['optical_flow', 'optical_flow_start']
# Test time: deterministically pick L evenly spaced frames from 'data'.
test_seq_transform = tv.transforms.Compose([
    s_transforms.LinspaceTransform(L, key_list=['data']),
])
# Train time: optionally duplicate elements (augmentation), then sample L frames.
train_seq_transform = tv.transforms.Compose([
    tv.transforms.RandomApply([
        s_transforms.DuplicateElements(1, False, ['data'], 'target', 1, True)
    ], p=0.5),
    s_transforms.LinspaceTransform(L, key_list=['data'], max_start_index=0),
])
# Per-frame cleanup applied before any augmentation: strip black borders,
# pad to a square, resize to image_size.
preprocess_transform = transforms.Transform4EachElement([
    transforms.RemoveBlackBorders(),
    transforms.SquarePad(),
    tv.transforms.Resize(image_size),
])
# After augmentation: derive the rank-pooled stat images from 'data', drop the
# raw frames, then tensorize/normalize each modality group separately.
postprocess_transform = tv.transforms.Compose([
    transforms.CreateNewItem(transforms.RankPooling(C=1000), 'data', 'stat_r1000'),
    transforms.CreateNewItem(transforms.RankPooling(C=1), 'data', 'stat_r1'),
    transforms.DeleteKeys(['data']),
    transforms.Transform4EachKey([
        transforms.Transform4EachElement([
            # tv.transforms.Resize(112),
            tv.transforms.ToTensor(),
        ]),
        transforms.StackTensors(squeeze=True),
        # Optical flow has 2 channels (x/y displacement), hence 2 mean/std values.
        tv.transforms.Normalize(mean=[0.5, 0.5], std=[0.5, 0.5])
    ], key_list=of_modality_list),
    transforms.Transform4EachKey([
        tv.transforms.Resize(112),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize(mean=[0.5], std=[0.5])],
        key_list=modality_list)
])
# Train-time image pipeline: preprocess, jitter jointly, then per-frame random
# rotation / crop / color jitter, compute optical flow between clip ends, and
# run the shared postprocessing.
train_image_transform = tv.transforms.Compose([
    transforms.Transform4EachKey([
        preprocess_transform,
        tv.transforms.RandomApply([j_transforms.ColorJitter(0.2, 0.2, 0.2, 0.2)], p=0.5),
    ], key_list=['data']),
    transforms.Transform4EachKey([
        tv.transforms.RandomApply([
            transforms.Transform4EachElement([
                tv.transforms.RandomApply([
                    tv.transforms.RandomRotation(5)
                ], p=0.5)
            ])], p=0.5),
        tv.transforms.RandomApply([
            transforms.Transform4EachElement([
                tv.transforms.RandomApply([
                    tv.transforms.RandomCrop(image_size, padding=5, pad_if_needed=True)
                ], p=0.5)
            ])
        ], p=0.5),
        tv.transforms.RandomApply([
            transforms.Transform4EachElement([
                tv.transforms.RandomApply([
                    tv.transforms.ColorJitter(0.05, 0.05, 0.05, 0.00)
                ], p=0.5)
            ])
        ], p=0.5),
    ], key_list=['data']),
    transforms.CreateNewItem(transforms.LiuOpticalFlowTransform((0, 4), (L - 4, L)), 'data', 'optical_flow'),
    transforms.CreateNewItem(transforms.LiuOpticalFlowTransform((0, 1), (2, 4)), 'data', 'optical_flow_start'),
    postprocess_transform
])
# Test-time image pipeline: no augmentation, deterministic flow endpoints.
test_image_transform = tv.transforms.Compose([
    transforms.Transform4EachKey([
        preprocess_transform,
    ], key_list=['data']),
    transforms.CreateNewItem(transforms.LiuOpticalFlowTransform(0, L-1), 'data', 'optical_flow'),
    transforms.CreateNewItem(transforms.LiuOpticalFlowTransform(0, 1), 'data', 'optical_flow_start'),
    postprocess_transform
])
def get_config(protocol_name):
    """Build the full experiment configuration for one evaluation protocol.

    Args:
        protocol_name: Protocol identifier (e.g. ``'protocol_4_1'``); it is
            embedded in the experiment name and forwarded to the train/test
            datalist configs.

    Returns:
        argparse.Namespace: the nested configuration produced by
        ``dict_to_namespace``.
    """
    config = {
        'head_config': {
            'task_name': 'rgb_track',
            'exp_name': f'exp1_{protocol_name}',
            'text_comment': '',
        },
        'checkpoint_config': {
            # out_path is filled in later by the __main__ driver if left None.
            'out_path': None,
            'save_frequency': 1,
        },
        'datalist_config': {
            'trainlist_config': {
                'dataset_name': 'VideoDataset',
                'datalist_path': '../data/train_list.txt',
                'protocol_name': protocol_name,
                'data_columns': [('rgb_path', 'data')],
                'target_columns': ('label', 'target'),
                'group_column': 'video_id',
                'sampler_config': {
                    'name': 'NumElements',
                    'class_column': 'label',
                    'num_elem_per_epoch': 20.0,
                },
                # Module-level transform pipelines defined above in this file.
                'sequence_transforms': train_seq_transform,
                'transforms': train_image_transform,
            },
            'testlist_configs': {
                'dataset_name': 'VideoDataset',
                'datalist_path': '../data/dev_list.txt',
                'protocol_name': protocol_name,
                'data_columns': [('rgb_path', 'data')],
                'target_columns': ('label', 'target'),
                'group_column': 'video_id',
                'sequence_transforms': test_seq_transform,
                'transforms': test_image_transform,
            }
        },
        'train_process_config': {
            'nthreads': 8,
            'ngpu': 1,
            'batchsize': 32,
            'nepochs': 5,
            'resume': None,
            'optimizer_config': {
                'name': 'Adam',
                'lr_config': {
                    'lr_type': 'StepLR',
                    'lr': 0.0001,
                    'lr_decay_period': 5,
                    'lr_decay_lvl': 0.5,
                },
                'weight_decay': 1e-05,
            },
        },
        'test_process_config': {
            'run_frequency': 1,
            'metric': {
                'name': 'acer',
                'target_column': 'target',
            }
        },
        'wrapper_config': {
            'wrapper_name': 'MultiModalWrapper',
            'input_modalities': modality_list + of_modality_list,
            'backbone': 'simplenet112',
            'nclasses': 1,
            'loss': 'BCE',
            'pretrained': None,
        },
        'logger_config': {
            'logger_type': 'log_combiner',
            'loggers': [
                {'logger_type': 'terminal',
                 'log_batch_interval': 5,
                 'show_metrics': {
                     'name': 'acer',
                     'fpr': 0.01,
                 }},
            ]
        },
        'manual_seed': 42,
        'resume': None,
    }

    # Convert the nested dict into attribute-style access for downstream code.
    ns_conf = argparse.Namespace()
    dict_to_namespace(ns_conf, config)
    return ns_conf
if __name__ == '__main__':
    # Generate and serialize one config file per protocol variant 4_1..4_3.
    parser = argparse.ArgumentParser(description='Options')
    parser.add_argument('--savepath',
                        type=str,
                        default='experiments/',
                        help='Path to save options')
    args = parser.parse_args()
    for idx in range(1, 4):
        configs = get_config(f'protocol_4_{idx}')
        # Output directory: <savepath>/<task_name>/<exp_name>/
        out_path = os.path.join(args.savepath,
                                configs.head_config.task_name,
                                configs.head_config.exp_name)
        os.makedirs(out_path, exist_ok=True)
        if configs.checkpoint_config.out_path is None:
            configs.checkpoint_config.out_path = out_path
        filename = os.path.join(out_path,
                                configs.head_config.task_name + '_' + configs.head_config.exp_name + '.config')
        # Persist the whole namespace with torch.save for later training runs.
        torch.save(configs, filename)
        print('Options file was saved to ' + filename)
print('Options file was saved to ' + filename) | AlexanderParkin/CASIA-SURF_CeFA | rgb_track/configs_final_exp.py | configs_final_exp.py | py | 7,365 | python | en | code | 149 | github-code | 1 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "at_learner_core.utils.sequence_transforms.LinspaceTransform",
"line_number": 19,
... |
25515683214 | """Convert prediction of Network to .tei file and correct the _predict.txt file"""
from lxml import etree
import lxml.etree as ET
import time
import argparse
#file_network_input = open("/home/svogel/projects/textimaging/rnng-master/Franz_Kafka_Das_Urteil_graminput.txt", "r") #Thomas_Mann_Der_Tod_in_Venedig_Neu_graminput.txt", "r") #
#file_pred = open("/home/svogel/projects/textimaging/rnng-master/Franz_Kafka_Das_Urteil_predict.txt", "r") #Thomas_Mann_Der_Tod_in_Venedig_Neu_predict.txt","r") #
#file_pred = open("C:/Users/pasca/Dropbox/PaktikumTextimaging/raw/predicted_gram/Franz_Kafka_Der_Gruftwaechter_Quelle_DigBib_predict.txt", "r") #Thomas_Mann_Der_Tod_in_Venedig_Neu_predict.txt","r") #
# Command line: a single positional base path; '_graminput.txt' and
# '_predict.txt' suffixes are appended to locate the two input files.
p = argparse.ArgumentParser()
p.add_argument('file')
if __name__ == "__main__":
    args = p.parse_args()
    # NOTE(review): both files are opened here and never explicitly closed;
    # the module-level code below depends on them being open.
    file_network_input = open(args.file + '_graminput.txt', 'r')
    file_pred = open(args.file + '_predict.txt', 'r')
def get_clips(string):
    """Return one ')' per closing parenthesis in *string* beyond the first.

    The first ')' is considered the token's own terminator; every additional
    one closes an enclosing bracket and is echoed back.
    """
    closing = string.count(")")
    return ")" * max(closing - 1, 0)
# Flatten the gram-input file into one token list wrapped in a single "(t".
network_input = ["(t"]
lines = file_network_input.read().splitlines()
for line in lines:
    # correctly working with multiple (t: Convert all t into only an outer t
    # (strip each line's "(t " prefix and trailing char, keep only the tokens)
    network_input += line[3:-1].split(" ")
network_input[-1] += ")"
# Same flattening for the prediction file; here the "(t" marker may not be the
# first token, so scan for it and keep only the tokens after it.
pred = ["(t"]
lines = file_pred.read().splitlines()
for line in lines:
    split = line[:-1].split(" ")
    help_ = 0 #for finding "(t ... "
    for elem in split:
        if("(t" == elem):
            break
        help_ += 1
    pred += split[help_+1:]
    pred[-1] += ")"
def get_full_prediction(network_input, pred):
    """Network outputs some weird changes, i.e. (XX ) instead of (w ) and some words are not correctly written
    This method corrects the output

    Walks the predicted token stream and, wherever the network emitted an
    "(XX <word>" pair, substitutes the next original "(w/(c <token>" pair
    from *network_input*, keeping the predicted closing parentheses.
    """
    # Collect the original "(w token" / "(c token" pairs in file order; they
    # are consumed one by one each time an (XX placeholder is replaced.
    w_and_c = []
    #print("Network: ", network_input[:100])
    #print("pred: ", pred[:100])
    for i in range(len(network_input)):
        if("(w" in network_input[i] or "(c" in network_input[i]):
            w_and_c.append(network_input[i] + " " + network_input[i+1])
    counter = 0
    helpBool = False #for let the words from prediction out
    full_pred = ""
    for i in range(len(pred)):
        if(helpBool):
            # Previous iteration consumed pred[i] as the word after "(XX".
            helpBool = False
        elif("(XX" in pred[i]):
            # Replace the placeholder pair with the original token, preserving
            # any extra closing parens the prediction carried on the word.
            full_pred += w_and_c[counter] + get_clips(pred[i+1]) + " "
            counter +=1
            helpBool = True
        else:
            full_pred += pred[i] + " "
    # NOTE(review): [:-2] removes the trailing space plus the final character
    # of the last token — presumably trimming an extra ')'; verify.
    return full_pred[:-2]
def create_xml(full_pred):
    """Convert the corrected predicted output to a complete .tei file

    Parses the bracketed token stream in *full_pred*, rebuilding the sentence
    (s) / segment (seg) / word (w) / punctuation (c) hierarchy as TEI XML,
    fills in the statistics header, and writes '<args.file>.tei' to disk.
    """
    # Labels for the <idno> statistics entries in the TEI publicationStmt.
    publication_stmt = ["Timestamp", "Number of tokens", "Number of unknown tokens",
                        "Number of word forms", "TTR", "Guiraud", "MTLD",
                        "Number of punctuation marks", "Number of lemmata", "Number of segments",
                        "Number of level-1 segments", "Number of level-2 segments",
                        "Number of level-3 segments", "Number of level-4 segments",
                        "Number of level-5 segments", "Number of level-6 segments",
                        "Number of level-7 segments", "Maximum segment level", "Number of quotes",
                        "Number of sentences", "Number of level-1 sentences",
                        "Number of level-2 sentences", "Number of level-3 sentences",
                        "Maximum sentence level", "Number of paragraphs", "Number of divisions",
                        "Number of captions", "Number of tables","Number of named entities",
                        "Number of nouns", "Number of verbs", "Number of adjectives",
                        "Number of adverbs"]
    # NOTE(review): 'elements' is addressed by hard-coded indices below
    # (elements[4], elements[38], elements[5]...); these depend on the exact
    # append order and on publication_stmt having 33 entries.
    elements = []
    preds = full_pred.split(" ")
    s_count = 0
    s_count_deep = [0,0,0,0,0,0]
    s_deep = 1
    seg_count = 0
    seg_count_deep = [0,0,0,0,0,0,0,0,0]
    seg_deep = 1
    w_count = 0
    c_count = 0
    # Build the TEI header skeleton.
    tei = ET.Element("TEI", id="TEI1")
    elements.append(ET.SubElement(tei, "teiHeader"))
    elements.append(ET.SubElement(elements[0],"fileDesc"))
    elements.append(ET.SubElement(elements[1],"titleStmt"))
    elements.append(ET.SubElement(elements[2],"title"))
    elements[3].text = "PlainText"
    elements.append(ET.SubElement(elements[1],"publicationStmt"))
    for stmt in publication_stmt:
        elements.append(ET.SubElement(elements[4],"idno", type=stmt))
        elements[-1].text = "0"
    elements.append(ET.SubElement(elements[1],"sourceDesc"))
    elements.append(ET.SubElement(elements[38],"p", Name= "TTLab-Corpus; tagging by RNNG prediction from Fabian Vogel and Pascal Fischer"))
    # 'stack' tracks the currently open XML elements while replaying tokens.
    stack = [tei]
    elements.append(ET.SubElement(tei, "text", id="text1"))
    stack.append(elements[-1])
    elements.append(ET.SubElement(stack[-1], "body", id="body1"))
    stack.append(elements[-1])
    for pred in preds[1:]:
        if(pred == "(s"):
            elements.append(ET.SubElement(stack[-1], "s", id="s"+str(s_count), n=str(s_deep)))
            stack.append(elements[-1])
            s_count += 1
            s_count_deep[s_deep -1] += 1
            s_deep += 1
            #if(s_deep == 4):
            #    print(s_deep)
        elif(pred == "(seg"):
            # NOTE(review): segments are numbered n=seg_deep+1 while sentences
            # use n=s_deep — confirm the off-by-one is intentional.
            elements.append(ET.SubElement(stack[-1], "seg", id="seg"+str(seg_count), n=str(seg_deep+1)))
            stack.append(elements[-1])
            seg_count += 1
            seg_count_deep[seg_deep -1] += 1
            seg_deep += 1
        elif(pred == "(w"):
            elements.append(ET.SubElement(stack[-1], "w", id="w"+str(w_count), lemma="unknown", type="unknown", ana="unknown"))
            w_count += 1
        elif(pred == "(c"):
            elements.append(ET.SubElement(stack[-1], "c", type="PUN"))
            c_count += 1
        else:
            # Plain token: its text fills the last opened element, and each
            # extra ')' beyond the first closes one enclosing s/seg element.
            txt= ""
            clips = -1
            for char in pred:
                if(char == ")"):
                    clips += 1
                else:
                    txt += char
            elements[-1].text = txt
            for i in range(clips):
                if(stack[-1].tag == "s"):
                    s_deep -= 1
                else:
                    seg_deep -= 1
                stack.pop()
            # Insert a whitespace <c> element between tokens.
            elements.append(ET.SubElement(stack[-1], "c",))
            elements[-1].text= " "
    #print("Satztiefen: ", s_count_deep)
    #print("Segmenttiefen: ", seg_count_deep)
    # Fill the statistics header (indices follow the publication_stmt order).
    elements[5].text= time.strftime("%d.%m.%Y")
    elements[6].text= str(w_count)
    elements[7].text= str(w_count)
    words = []
    quotes = 0
    for i in range(len(preds)-1):
        if(preds[i] == "(w"):
            word = ""
            for char in preds[i+1]:
                if(char != ")"):
                    word += char
            words.append(word)
        if('»' in preds[i]):
            quotes += 1
    elements[8].text = str(len(list(set(words))))
    elements[12].text = str(c_count)
    elements[14].text = str(seg_count)
    max_seg_lvl = 0
    for i in range(7):
        elements[15+i].text = str(seg_count_deep[i])
        if(seg_count_deep[i] > 0):
            max_seg_lvl += 1
    elements[22].text = str(max_seg_lvl)
    elements[23].text = str(quotes)
    elements[24].text = str(s_count)
    max_s_lvl = 0
    for i in range(3):
        elements[25+i].text = str(s_count_deep[i])
        if(s_count_deep[i] > 0):
            max_s_lvl += 1
    elements[28].text = str(max_s_lvl)
    # Serialize the finished tree next to the input files.
    tree = ET.ElementTree(tei)
    with open(args.file+".tei", "wb") as writter:
        writter.write(etree.tostring(tree, pretty_print=True,\
                                     xml_declaration=True,encoding='UTF-8'))
full_pred = get_full_prediction(network_input, pred)
# Save the corrected version of the prediction back to the _predict.txt file.
with open(args.file + '_predict.txt',"w") as pred_file:
    pred_file.write(full_pred)
create_xml(full_pred)
| Psarpei/Recognition-of-logical-document-structures | RNNG/scripts/prediction_to_XML.py | prediction_to_XML.py | py | 8,612 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lxml.etree.Element",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "lxml.etree.SubE... |
29980155080 | import csv
import logging
import multiprocessing
import os
import pickle
import sys
import traceback
import cv2
import imutils
import imutils.contours
import numpy as np
from imutils.perspective import four_point_transform
import register_image
base_template = None
base_tables = None
DESIRED_HEIGHT = 779
DESIRED_WIDTH = 566
MINIMUM_TABLE_WIDTH = 80
MAXIMUM_TABLE_WIDTH = 133
MINIMUM_TABLE_HEIGHT = 133
MAXIMUM_TABLE_HEIGHT = 600
BASE_TEMPLATE = 'base_template.pkl'
BASE_TABLE_CONTOURS = 'tables.pkl'
def load_grayscale_image(image_path):
    """Read the image at *image_path* and return it as grayscale.

    Args:
        image_path: Path to image file.

    Returns:
        A single-channel grayscale image.

    Raises:
        IOError: An error occurred loading the image.
    """
    bgr_image = cv2.imread(image_path)
    return cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
def resize_image(image):
    """Resizes a given image.

    This function scales an image up or down to 779x566 resolution.

    Args:
        image: An image object of answer sheet.

    Returns:
        Resized image with a resolution of 779x566
    """
    height, width = image.shape
    height_ratio = height / DESIRED_HEIGHT
    width_ratio = width / DESIRED_WIDTH
    # INTER_AREA is the recommended interpolation for shrinking,
    # INTER_CUBIC for enlarging.
    if height_ratio > 1 and width_ratio > 1:
        image = cv2.resize(image, (DESIRED_WIDTH, DESIRED_HEIGHT), interpolation=cv2.INTER_AREA)
    elif height_ratio < 1 and width_ratio < 1:
        image = cv2.resize(image, (DESIRED_WIDTH, DESIRED_HEIGHT), interpolation=cv2.INTER_CUBIC)
    # NOTE(review): if one dimension is larger and the other smaller than the
    # target, the image is returned unresized — confirm this is intended.
    return image
def get_table_contours(image):
    """Gets contours of tables.

    This function applies canny edge detector to find borders for each table in a given image.

    Args:
        image: An image object of answer sheet.

    Returns:
        An array of tables containing each table's contours.
    """
    # Derive Canny thresholds from Otsu's threshold (high = Otsu, low = half).
    high_thresh, _ = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    low_thresh = 0.5 * high_thresh
    edged = cv2.Canny(image, low_thresh, high_thresh)
    contours = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns a different tuple shape in OpenCV 2 vs 3.
    contours = contours[0] if imutils.is_cv2() else contours[1]
    contours = sorted(contours, key=cv2.boundingRect)
    tables = []
    for c in contours:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        _, _, width, height = cv2.boundingRect(c)
        # Keep only quadrilaterals whose size fits the expected table bounds.
        if MINIMUM_TABLE_WIDTH < width < MAXIMUM_TABLE_WIDTH and MINIMUM_TABLE_HEIGHT < height < MAXIMUM_TABLE_HEIGHT \
                and len(approx) == 4:
            tables.append(approx)
    # Order tables left-to-right (sort_contours default).
    table_contours = imutils.contours.sort_contours(tables)[0]
    return table_contours
def apply_template_matching_algorithm(image):
    """Applies template matching algorithm.

    This function applies Discrete Fourier Transform based image registration algorithm to
    pixel to pixel alignment of the answer sheet image to base template image.
    Base Image is deserialized from "base_template.pkl" file.

    Args:
        image: An image object of answer sheet.

    Returns:
        Skew and Rotation corrected image of answer sheet.

    Raises:
        ValueError: Image must be based on template answer sheet.
    """
    # Lazily load and cache the base template in the module-level global so
    # each worker process deserializes it only once.
    global base_template
    if base_template is None:
        with open(BASE_TEMPLATE, 'rb') as f:
            base_template = pickle.load(f)
    similarity = register_image.similarity(base_template, image)
    # Convert the registered result back to an 8-bit image.
    image = np.array(similarity, dtype=np.uint8)
    return image
def get_base_table_contours():
    """Return the cached table contours of the base template.

    On first use the contour points are deserialized from the
    ``BASE_TABLE_CONTOURS`` pickle file and cached in the module-level
    ``base_tables``; subsequent calls reuse the cache.

    Returns:
        An array of tables containing each table's contours.

    Raises:
        IOError: An error occurred accessing the base template.
    """
    global base_tables, base_template
    if base_tables is not None:
        return base_tables
    with open(BASE_TABLE_CONTOURS, 'rb') as handle:
        base_tables = pickle.load(handle)
    return base_tables
def recognize_roll_number(roll_table, dilate=False):
    """Recognizes roll number from roll numbers table.

    Given a table of roll number, this function finds roll number by identifying filled bubbles.

    Args:
        roll_table: An image of roll numbers table.
        dilate: boolean value indicating whether to dilate the table or not.

    Returns:
        String representing roll number.
    """
    if dilate is True:
        # NOTE(review): the boolean parameter is rebound here to the dilated
        # image; it is not used as a flag afterwards, so this is harmless.
        dilate = cv2.dilate(roll_table, (1, 1), iterations=2)
        thresh = cv2.threshold(dilate, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    else:
        thresh = cv2.threshold(roll_table, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns a different tuple shape in OpenCV 2 vs 3.
    contours = contours[0] if imutils.is_cv2() else contours[1]
    contours = imutils.contours.sort_contours(contours, method='left-to-right')[0]
    # Keep bubble-like contours: width >= 11, height in 11..16 (the chained
    # comparison `16 >= 11 <= h <= 16` reduces to 11 <= h <= 16; the leading
    # `16 >= 11` is always true) and a roughly square aspect ratio.
    contours = [(contour, cv2.boundingRect(contour)) for contour in contours if cv2.boundingRect(contour)[2] >= 11
                and 16 >= 11 <= cv2.boundingRect(contour)[3] <= 16 and 0.7 <= float(
        cv2.boundingRect(contour)[2] / cv2.boundingRect(contour)[3]) <= 1.3]
    roll_number = get_roll_number(contours, thresh)
    return roll_number
def get_roll_number(bubbles, image):
    """Retrieves roll number by their co-ordinates.

    Given an image of roll numbers table and contours of bubbles, recognizes filled bubbles based on
    number of pixels, it's x and y co-ordinates.

    Args:
        bubbles: Sequence of (contour, bounding_rect) pairs for candidate bubbles.
        image: An image of roll numbers table (thresholded).

    Returns:
        String representing roll number.
    """
    # Inclusive y-coordinate band of each digit row in the roll-number table;
    # replaces the former ten-branch elif ladder with identical bands.
    digit_bands = (
        ((20, 30), '0'), ((35, 45), '1'), ((55, 65), '2'), ((72, 82), '3'),
        ((90, 100), '4'), ((108, 118), '5'), ((125, 135), '6'),
        ((140, 150), '7'), ((160, 170), '8'), ((175, 185), '9'),
    )
    roll_number = ''
    for bubble, (x, y, w, h) in bubbles:
        # Count the lit pixels inside this contour only.
        mask = np.zeros(image.shape, dtype="uint8")
        cv2.drawContours(mask, [bubble], 0, 255, -1)
        mask = cv2.bitwise_and(image, image, mask=mask)
        total = cv2.countNonZero(mask)
        if total > 100:  # enough filled pixels -> the bubble is marked
            for (low, high), digit in digit_bands:
                if low <= y <= high:
                    roll_number += digit
                    break
    return roll_number
def recognize_selected_answers(tables, dilate=False):
    """Recognizes selected answers from questions tables.

    Given images containing tables of questions (tables[1:]; tables[0] is the
    roll-number table), this function finds all filled bubbles from each table
    and records each marked bubble per question.

    Args:
        tables: Images of tables of questions.
        dilate: boolean value indicating whether to dilate the table or not.

    Returns:
        An array of strings, each index representing question number and
        each value at each index representing the selected answers.

    Note:
        Indexes start from 0 and index 0 represents the question number 1.
    """
    table_number = 0
    selections = [''] * 100
    for table in tables[1:]:
        q_num = 0
        if dilate:
            dilated = cv2.dilate(table.copy(), (2, 2), iterations=4)
            thresh = cv2.threshold(dilated, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        else:
            thresh = cv2.threshold(table, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        contours = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # findContours returns a different tuple shape in OpenCV 2 vs 3.
        contours = contours[0] if imutils.is_cv2() else contours[1]
        contours = imutils.contours.sort_contours(contours, method='top-to-bottom')[0]
        # Vertical band [start_y, end_y) of each of the 25 questions per table.
        cropped_question = [[ind, int(ind * (table.shape[0] / 25)),
                             int((ind + 1) * (table.shape[0] / 25))] for ind in range(25)]
        # Keep bubble-like contours: 8..50 px per side, near-square aspect.
        contours = [contour for contour in contours
                    if 8 <= cv2.boundingRect(contour)[2] <= 50
                    and 8 <= cv2.boundingRect(contour)[3] <= 50 and
                    0.5 <= float(cv2.boundingRect(contour)[2] / cv2.boundingRect(contour)[3]) <= 1.5]
        shape = thresh.shape
        for contour in contours:
            # Count lit pixels inside this contour only.
            mask = np.zeros(shape, dtype="uint8")
            cv2.drawContours(mask, [contour], -1, 255, -1)
            mask = cv2.bitwise_and(thresh, thresh, mask=mask)
            total = cv2.countNonZero(mask)
            # Dilated pass uses a lower fill threshold for faint marks.
            threshold_value = 95
            if dilate:
                threshold_value = 55
            if total > threshold_value:
                question_number, bubble = get_selected_answers(contour, cropped_question, q_num, table_number)
                if question_number is None:
                    continue
                q_num = question_number - (25 * table_number)
                selections[question_number - 1] += bubble
        table_number += 1
    return selections
def get_selected_answers(bubbles, cropped_question, question_number, table_number):
    """Retrieves roll number by their co-ordinates.

    Given a cropped section of a question and contours of bubbles , recognizes the filled bubbles based on
    number of pixels, it's x and y co-ordinates.

    Args:
        bubbles: Contours of possible bubbles in roll numbers table.
        cropped_question: An image containing the cropped section of the question.
        question_number: Number representing the question.
        table_number: Number representing the questions table

    Returns:
        A tuple containing question number and selected answers.
    """
    # Centroid of the bubble contour from its image moments.
    M = cv2.moments(bubbles)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    # NOTE(review): the incoming ``question_number`` argument is discarded
    # here, so the range below always starts at -1; the r == -1 iteration
    # never matches (−1 >= cropped_question[-1][0] == 24 is false), so this
    # is harmless — but confirm the parameter was meant to seed the search.
    question_number = 0
    for r in range(question_number - 1, len(cropped_question)):
        if r >= cropped_question[r][0]:
            min_height = cropped_question[r][1]
            max_height = cropped_question[r][2]
            # Centroid falls inside this question's vertical band.
            if min_height < cY < max_height:
                question_number = (r + 1) + (table_number * 25)
                break
    if question_number == 0:
        return None, None
    selected_anwers = ''
    # Map the centroid's x position to an answer column (A..D).
    if 25 <= cX < 40:
        selected_anwers = 'A'
    elif 45 <= cX < 60:
        selected_anwers = 'B'
    elif 60 <= cX < 78:
        selected_anwers = 'C'
    elif cX > 78:
        selected_anwers = 'D'
    return question_number, selected_anwers
def crop_skew_corrected_table(image, center, theta, width, height):
    """Crops a skew corrected table from an image of the answer sheet.

    Crops a table of the given height, width and center point, than performs skew correction
    Finally crops the table from the image.

    Args:
        image: Contours of possible bubbles in roll numbers table.
        center: Tuple (x,y) for the centre point of the rectangle.
        theta: Angle of the rectangle representing the table.
        width: Width of the rectangle representing the table.
        height: Height of the rectangle representing the table.

    Returns:
        Crops a skew corrected table from answer sheet image.
    """
    if 45 < theta <= 90:
        # minAreaRect reports the rectangle rotated by 90°: normalize the
        # angle and swap the side lengths to match.
        theta -= 90
        width, height = height, width
    # NOTE(review): function-local import kept as-is; consider moving to the
    # module imports at the top of the file.
    import math
    theta *= math.pi / 180  # convert to rad
    # Basis vectors of the rotated frame and the top-left corner of the crop.
    v_x = (math.cos(theta), math.sin(theta))
    v_y = (-math.sin(theta), math.cos(theta))
    s_x = center[0] - v_x[0] * (width / 2) - v_y[0] * (height / 2)
    s_y = center[1] - v_x[1] * (width / 2) - v_y[1] * (height / 2)
    mapping = np.array([[v_x[0], v_y[0], s_x], [v_x[1], v_y[1], s_y]])
    # WARP_INVERSE_MAP applies the inverse affine transform while cropping.
    return cv2.warpAffine(image, mapping, (width, height), flags=cv2.WARP_INVERSE_MAP, borderMode=cv2.BORDER_REPLICATE)
def get_cropped_tables(image, table_contours, reverse):
    """Crops tables from an image of the answer sheet.

    Crops each table from an image of the answer sheet: the first contour
    (roll-number table) with a plain perspective warp, the question tables
    with a rotation-corrected crop.

    Args:
        image: An image of the answer sheet
        table_contours: Contours of the tables.
        reverse: A boolean value representing whether to reverse the image or not.

    Returns:
        An array of cropped tables.
    """
    tables = []
    for contour_number in range(len(table_contours)):
        table_contour = table_contours[contour_number]
        if contour_number == 0:
            table = four_point_transform(image, table_contour.reshape(len(table_contour), 2))
        else:
            # Compute the minimum-area (rotated) rectangle once and reuse it
            # for both the side lengths and the corner box (the original
            # called cv2.minAreaRect twice and ignored the first result).
            rect = cv2.minAreaRect(table_contour)
            _, dimensions, theta = rect
            width = int(dimensions[0])
            height = int(dimensions[1])
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            # Centre of mass of the corner box is the crop's rotation centre.
            M = cv2.moments(box)
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            table = crop_skew_corrected_table(image, (cx, cy), theta + 90, height, width)
        if reverse:
            # Sheet was scanned upside down: rotate the crop by 180 degrees.
            table = np.rot90(table, 2)
        # Trim the drawn table border, then normalize the crop size.
        table = table[3:-1, 3:-1]
        if contour_number == 0:
            table = cv2.resize(table, (80, 201))
        else:
            table = cv2.resize(table, (98, 538))
        tables.append(table)
    return tables
def get_result(selections, selections2, answers):
    """Compute the final grade for one answer sheet.

    Each question is graded from the normal reading (*selections*) with the
    dilated reading (*selections2*) as a fallback. A question whose readings
    are ambiguous (multiple marks, or no usable single mark in either pass)
    counts as missing.

    Args:
        selections: Per-question marks from the normal reading.
        selections2: Per-question marks from the dilated reading.
        answers: Correct answer per question.

    Returns:
        A tuple containing amount of correct, wrong and missed answers
    """
    correct = 0
    wrong = 0
    missing = 0
    for idx in range(len(selections)):
        primary = selections[idx]
        fallback = selections2[idx]
        ambiguous = (
            len(primary) > 1
            or (len(primary) == 1 and len(fallback) > 1)
            or (len(fallback) != 1 and len(primary) < 1)
        )
        if ambiguous:
            missing += 1
            continue
        if len(primary) == 0 and len(fallback) == 1:
            # No mark in the normal pass: trust the dilated reading.
            if str(fallback) == answers[idx]:
                correct += 1
            else:
                wrong += 1
        if len(primary) == 1:
            if str(primary) == answers[idx]:
                correct += 1
            else:
                wrong += 1
    return correct, wrong, missing
def main(data):
    """Grade one answer-sheet image and print the result line.

    Args:
        data: Dict with keys "Image" (path to the sheet image) and
            "Answers" (list of correct answers).
    """
    img_path = data["Image"]
    answers = data["Answers"]
    try:
        img = load_grayscale_image(img_path)
        img = resize_image(img)
        reverse = False
        table_contours = []
    except Exception:
        print('Error:' + img_path)
        return
    try:
        table_contours = get_table_contours(img)
    except:
        # NOTE(review): bare except — intentionally best-effort here (the
        # template-matching fallback below handles failure), but narrow if possible.
        traceback.print_exc()
    if len(table_contours) == 5:
        # All five tables detected directly; a tall first table means the
        # sheet was scanned upside down.
        _, _, w, h = cv2.boundingRect(table_contours[0])
        if h > 250:
            reverse = True
            table_contours = imutils.contours.sort_contours(table_contours, method='right-to-left')[0]
    else:
        # Fall back to registering the sheet against the base template and
        # using the template's known table positions.
        try:
            img = apply_template_matching_algorithm(img)
            table_contours = get_base_table_contours()
        except Exception:
            traceback.print_exc()
            print('Error:' + img_path)
            return
    tables = get_cropped_tables(img, table_contours, reverse)
    roll_number = recognize_roll_number(tables[0])
    if len(roll_number) != 3:
        # Retry with dilation for faint marks.
        roll_number = recognize_roll_number(tables[0], True)
    selections = recognize_selected_answers(tables)
    selections2 = recognize_selected_answers(tables, True)
    correct, wrong, missing = get_result(selections, selections2, answers)
    if len(roll_number.strip()) != 3:
        print('Error:' + img_path)
    else:
        pass
    # NOTE(review): the result line is printed even after the Error line above,
    # and 'Total' echoes only the correct count (not correct+wrong+missing) —
    # confirm both are intended.
    print('RollNo:' + str(roll_number) + ',Correct:' + str(correct) + ',Wrong:' + str(wrong) +
          ',Missing:' + str(missing) + ',Total:' + str(correct))
    sys.stdout.flush()
def load_answers():
    """Fetches correct answers.

    Reads a csv file containing correct answers in first column.
    Relies on the module-level ``answers_path`` assigned in ``__main__``.

    Returns:
        A list containing correct answers.
    """
    with open(answers_path, 'r') as f:
        lines = csv.reader(f)
        # First character of the first column, upper-cased (e.g. "a " -> "A").
        return [l[0].strip()[0].upper() for l in lines]
def get_path_of_answer_sheet_images():
    """Fetches paths of answer sheet images.

    Iterates the directory named by the module-level ``base_path`` and
    returns the absolute path of every regular file in it.

    Returns:
        An array of containing paths of each answer sheet.
    """
    return [os.path.join(base_path, entry)
            for entry in os.listdir(base_path)
            if os.path.isfile(os.path.join(base_path, entry))]
if __name__ == '__main__':
    # Required for frozen (e.g. PyInstaller) Windows executables.
    multiprocessing.freeze_support()
    try:
        # argv[1]: directory of sheet images; argv[2]: CSV of correct answers.
        base_path = sys.argv[1]
        answers_path = sys.argv[2]
        answers = load_answers()
        files = get_path_of_answer_sheet_images()
        data = []
        for file in files:
            # Only grade jpg/bmp/jpeg files.
            if file[-3:] == 'jpg' or file[-3:] == 'bmp' or file[-4:] == 'jpeg':
                data.append({"Image": file, "Answers": answers})
        from multiprocessing import Pool
        concurrency = multiprocessing.cpu_count()
        p = Pool(concurrency)
        # Grade all sheets in parallel, one process per CPU core.
        p.map(main, data)
    except Exception as e:
        # NOTE(review): traceback.print_exc() writes the trace itself and
        # returns None, so this prints "None" to stderr — confirm intent.
        print(traceback.print_exc(), file=sys.stderr)
| tauseefahmed600/bubble-sheet-reader | app/bubble_sheet_reader.py | bubble_sheet_reader.py | py | 17,872 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"lin... |
16711232336 | from helium import *
from bs4 import BeautifulSoup
import pandas as pd
from sqlalchemy import create_engine
import time
# Module-level accumulators shared by lin() and links_scrape().
links = []
data = []
start = time.time()  # wall-clock start; elapsed time is printed at the end
def lin():
    """Scrape the share-price listing page and collect company detail links.

    Opens the listing in a headless browser, extracts every 'scrip green'
    anchor href from the left-hand cells and appends it to the module-level
    ``links`` list.
    """
    url = 'https://www.amarstock.com/latest-share-price'
    browser = start_firefox(url, headless=True)
    s = BeautifulSoup(browser.page_source, 'lxml')
    heads = s.find_all('td', class_='ob-left')
    for head in heads:
        try:
            link = head.find('a', class_='scrip green')['href']
            links.append(link)
        except:
            # NOTE(review): bare except swallowing the missing-anchor case;
            # the 'None' assignment below is never used — consider narrowing
            # to TypeError/KeyError and dropping the assignment.
            link = 'None'
    kill_browser()
# Collect the detail-page links up front, then scrape a slice of them below.
lin()
print(links)
def _span_text_or_zero(soup, data_key):
    """Return the text of the <span data-key=...> element, or '0' if absent."""
    tag = soup.find('span', {'data-key': data_key})
    return tag.text if tag is not None else '0'


def links_scrape():
    """Visit scraped company pages and collect price/EPS/contact records.

    Iterates a slice of the module-level ``links`` list, loads each page in a
    headless browser and appends one dict per company to the module-level
    ``data`` list. Stops early if a None link is encountered.
    """
    for l in links[21:40]:
        if l is None:
            return
        url = l
        browser = start_firefox(url, headless=True)
        s = BeautifulSoup(browser.page_source, 'lxml')
        # Each figure lives in a <span data-key=...>; missing spans become '0'
        # (the original repeated this find/None-check stanza six times and
        # called s.find twice per field).
        Open = _span_text_or_zero(s, 'OpenPrice')
        Close = _span_text_or_zero(s, 'YCp')
        Q1EPS = _span_text_or_zero(s, 'Q1Eps')
        Q2EPS = _span_text_or_zero(s, 'Q2Eps')
        Q3EPS = _span_text_or_zero(s, 'Q3Eps')
        Q4EPS = _span_text_or_zero(s, 'Q4Eps')
        # NOTE(review): these three raise AttributeError if the element is
        # missing — same as the original; add guards if pages vary.
        address = s.find('div', {'data-key': 'Address'}).text.strip()
        email = s.find('div', {'data-key': 'Email'}).text
        name = s.find('h1', class_='h2 title').text.strip()
        dat = {'Name': name, 'Open': Open, 'Close': Close,
               'Q1 EPS': Q1EPS, 'Q2 EPS': Q2EPS, 'Q3 EPS': Q3EPS, 'Q4 EPS': Q4EPS,
               'Email': email, 'Address': address}
        data.append(dat)
        print(dat)
        kill_browser()
        time.sleep(2)  # throttle requests between pages
# Persist the scraped records into the MySQL 'stock' database.
df = pd.DataFrame(data)
print(df)
# NOTE(review): hard-coded database credentials in source — move to
# environment variables or a config file.
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}"
                       .format(user="root",
                               pw="123456",
                               db="stock"))
df.to_sql('stock_data', con=engine, if_exists='append', chunksize=1000)
end = time.time()
print(end- start) | Nadimul2/Stock-prices | stock.py | stock.py | py | 3,099 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_... |
36974547726 | """add auto-votes
Revision ID: 4c8b06ae0ef5
Revises: c3d959bce883
Create Date: 2023-05-20 18:22:06.916821
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4c8b06ae0ef5'
down_revision = 'c3d959bce883'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Apply the migration: drop the legacy ``postss``/``products``/``userss`` tables.

    ``postss`` is dropped first because it carries a foreign key into
    ``userss``; ``products`` is standalone.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    for table in ('postss', 'products', 'userss'):
        op.drop_table(table)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Revert the migration: recreate the three dropped tables.

    ``userss`` is created first because ``postss`` declares a foreign key
    (``owner_id``) against it.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('userss',
    sa.Column('id', sa.INTEGER(), server_default=sa.text("nextval('userss_id_seq'::regclass)"), autoincrement=True, nullable=False),
    sa.Column('email', sa.VARCHAR(), autoincrement=False, nullable=False),
    sa.Column('password', sa.VARCHAR(), autoincrement=False, nullable=False),
    sa.Column('created_at', postgresql.TIMESTAMP(timezone=True), server_default=sa.text('now()'), autoincrement=False, nullable=False),
    sa.PrimaryKeyConstraint('id', name='userss_pkey'),
    sa.UniqueConstraint('email', name='userss_email_key'),
    # keep userss visible on the search path so the postss FK below resolves
    postgresql_ignore_search_path=False
    )
    op.create_table('products',
    sa.Column('name', sa.VARCHAR(), autoincrement=False, nullable=False),
    sa.Column('price', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
    sa.Column('is_sale', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=True),
    sa.Column('inventory', sa.INTEGER(), server_default=sa.text('0'), autoincrement=False, nullable=False),
    sa.Column('created', postgresql.TIMESTAMP(timezone=True), server_default=sa.text('now()'), autoincrement=False, nullable=False),
    sa.PrimaryKeyConstraint('id', name='products_pkey')
    )
    op.create_table('postss',
    sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
    sa.Column('title', sa.VARCHAR(), autoincrement=False, nullable=False),
    sa.Column('content', sa.VARCHAR(), autoincrement=False, nullable=False),
    sa.Column('owner_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('published', sa.BOOLEAN(), server_default=sa.text('true'), autoincrement=False, nullable=False),
    sa.Column('created_at', postgresql.TIMESTAMP(timezone=True), server_default=sa.text('now()'), autoincrement=False, nullable=False),
    # deleting a user cascades to their posts
    sa.ForeignKeyConstraint(['owner_id'], ['userss.id'], name='postss_userss_fk', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id', name='postss_pkey')
    )
    # ### end Alembic commands ###
| jperrino/example_fastapi | alembic/versions/4c8b06ae0ef5_add_auto_votes.py | 4c8b06ae0ef5_add_auto_votes.py | py | 2,697 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.drop_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
... |
35773688788 | from protobuf.message import Message
from protobuf.property import Property
from protobuf.typed import TYPES, WIRE_TYPES
from protobuf.ProtobufSyntaxError import ProtobufSyntaxError
PRIORITIES = {
'required',
'optional',
'repeated'
}
def _read_file(filename):
with open(filename) as f:
code = f.read()
return code
def parse(filename):
    """Parse a proto2 ``.proto`` file into a tree of Message objects.

    Tokenisation is crude: the raw text is split into statements at every
    '{', '}' and ';'.  ``message``/``enum`` headers push a new Message
    node, '}' pops back to the parent, and field statements become
    Property objects attached to the current node.  Returns the root
    Message.
    """
    code = _read_file(filename)
    lines = []
    start = 0
    # Split into statements; each chunk keeps its delimiter ('{', '}', ';')
    # as its final character, which drives the dispatch below.
    for i in range(len(code)):
        if code[i] == '{' or code[i] == '}' or code[i] == ';':
            lines.append(code[start:i + 1].strip())
            start = i + 1
    message = None  # current Message node while walking the statements
    for line in lines:
        if line[-1] == '{':
            # Block opener: "message Name {" or "enum Name {".
            string = line[:-1].split()
            if len(string) != 2:
                raise ProtobufSyntaxError(f'unexpected string: {line}')
            if string[0] == 'message':
                if message is None:
                    message = Message(line.split()[1], None)
                else:
                    message.classes.append(Message(line.split()[1], message))
                    message = message.classes[-1]
                # Register the new type so later fields may reference it.
                TYPES[message.name] = type(message.name, (), {})
                WIRE_TYPES[message.name] = 2
            elif string[0] == 'enum':
                if message is None:
                    message = Message(line.split()[1], None, True)
                else:
                    message.enums.append(
                        Message(line.split()[1], message, True))
                    message = message.enums[-1]
                TYPES[message.name] = type(message.name, (), {})
                # enums are encoded as varints (wire type 0)
                WIRE_TYPES[message.name] = 0
            else:
                raise ProtobufSyntaxError(f'unexpected string: {line}')
        if line[-1] == ';':
            if line.strip()[:6] == 'syntax':
                # Only "proto2" syntax declarations are accepted.
                line = line[:-1].split('=')
                if len(line) != 2:
                    raise ProtobufSyntaxError(f'unexpected string: {line}')
                line = line[1].strip()
                if eval(line) != 'proto2':
                    raise ProtobufSyntaxError(f'unexpected syntax, expected proto2')
                continue
            line = line[:-1]
            # Extract an optional "[default = X]" clause, if present.
            default = line.split('default')
            if len(default) < 2:
                default = None
            elif len(default) == 2:
                if len(default[0].split('[')) != 2 or len(default[1].split(']')) != 2:
                    raise ProtobufSyntaxError(f'Incorrect string {line}')
                default = line.split('[')[1].split('=')[1].split(']')[0].strip()
                line = line.split('[')[0]
            else:
                raise ProtobufSyntaxError(f'Incorrect string {line}')
            w = line.split('=')
            if len(w) != 2:
                raise ProtobufSyntaxError(f'Incorrect string {line}')
            line = ' '.join([w[0], w[1]])
            words = line.split()
            if not message.is_enum:
                # Regular field: "<priority> <type> <name> <number>".
                if len(words) != 4 or words[1] not in TYPES or words[0] not in PRIORITIES:
                    raise ProtobufSyntaxError(f'unexpected string: {line}')
                if words[1] == 'string' and default is not None:
                    try:
                        # NOTE(review): eval on file content — confirm .proto
                        # inputs are trusted before parsing untrusted files.
                        default = eval(default)
                    except Exception:
                        raise Exception('Something went wrong')
                field_number = int(words[3])
                prop = Property(
                    words[2], field_number, default, words[1], words[0], WIRE_TYPES[words[1]])
            else:
                # Enum member: "<name> <number>".
                if len(words) != 2:
                    raise ProtobufSyntaxError(f'unexpected string: {line}')
                field_number = int(words[1])
                prop = Property(words[0], field_number)
            # Bucket the property by its priority for later serialization.
            if prop.priority == 'optional':
                message.optional_properties.append(prop)
            elif prop.priority == 'required':
                if prop.default is None:
                    message.required_properties.append(prop)
                else:
                    message.req_def_properties.append(prop)
            message.properties.append(prop)
            message.properties_dict[field_number] = prop
        if line[-1] == '}':
            # Block close: pop back to the enclosing message, if any.
            if message.parent is not None:
                message = message.parent
    return message
| vtarasovaaa/protobuf | protobuf/pb_parser.py | pb_parser.py | py | 4,316 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "protobuf.ProtobufSyntaxError.ProtobufSyntaxError",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "protobuf.message.Message",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "protobuf.message.Message",
"line_number": 38,
"usage_type": "ca... |
32816595902 | from account.models import Account, EmailAddress
from mozilla_django_oidc.auth import OIDCAuthenticationBackend, default_username_algo
class PinaxOIDCAuthenticationBackend(OIDCAuthenticationBackend):
    """OIDC authentication backend integrated with Pinax/django-user-accounts.

    Users are matched and created through the ``EmailAddress`` table rather
    than ``User.email`` so only verified email addresses can authenticate.
    """

    def create_user(self, claims):
        """
        Create a user account for the given claims.
        This method is overridden to ensure we create a user account
        which will work in the DUA world.
        """
        username = claims.get("preferred_username", default_username_algo(claims["email"]))
        user = self.UserModel(username=username, email=claims["email"])
        # Suppress automatic Account creation by signal handlers; the
        # Account is created explicitly below with timezone/language extras.
        user._disable_account_creation = True
        # Password auth is delegated to the identity provider.
        user.set_unusable_password()
        user.save()
        extra = {}
        if claims.get("zoneinfo"):
            extra["timezone"] = claims["zoneinfo"]
        if claims.get("locale"):
            extra["language"] = claims["locale"]
        Account.create(**{
            "request": self.request,
            "user": user,
            "create_email": False,
            **extra,
        })
        if claims.get("email_verified", False):
            # NOTE(review): no ``user=`` kwarg is passed here — confirm
            # EmailAddress.objects.create does not require it.
            EmailAddress.objects.create(
                email=user.email,
                verified=True,
                primary=True,
            )
        else:
            # Unverified at the IdP: store the address and send a
            # confirmation email.
            EmailAddress.objects.add_email(user, user.email, confirm=True)
        return user

    def verify_claims(self, claims):
        """Return True when the claims may authenticate (email absent or verified)."""
        checks = set()
        email = claims.get("email")
        if email:
            try:
                email_address = EmailAddress.objects.get(email__iexact=email)
            except EmailAddress.DoesNotExist:
                checks.add(True)
            else:
                # check if the found email address is verified.
                # we need this because if the user has an unverified
                # email address we never get to fail the authentication.
                # however, this is being overly protective because all
                # users who authenticate with OIDC will have a verified
                # email address.
                # @@@ consider creating a django.contrib.messages message
                checks.add(email_address.verified)
        return all(checks)

    def filter_users_by_claims(self, claims):
        """Return the queryset of users owning a *verified* address matching the claim email."""
        email = claims.get("email")
        if not email:
            return self.UserModel.objects.none()
        try:
            email_address = EmailAddress.objects.get(
                email__iexact=email,
                verified=True,
            )
        except EmailAddress.DoesNotExist:
            return self.UserModel.objects.none()
        else:
            return self.UserModel.objects.filter(pk=email_address.user_id)
| deep-philology/DeepVocabulary | deep_vocabulary/auth.py | auth.py | py | 2,668 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "mozilla_django_oidc.auth.OIDCAuthenticationBackend",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "mozilla_django_oidc.auth.default_username_algo",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "account.models.Account.create",
"line_number... |
19953368465 | import streamlit as st
from streamlit_extras.switch_page_button import switch_page
def add_eyewitness():
    """Render the "add an eyewitness" form and persist submissions.

    On submit, stores the witness record (name, description, photo bytes,
    interview context, audio bytes) in ``st.session_state['witnesses']``
    under a sequential ``"Witness N"`` key and bumps the counter.
    """
    st.header("Add an eyewitness")
    name = st.text_input("Enter the name of the eyewitness", key='name')
    desc = st.text_input("Enter a description of the witness", key='desc')
    pic = st.file_uploader("Upload the picture of the eyewitness", type=['.jpg', '.jpeg', '.png'], key='picture')
    pic_bytes = None
    if pic is not None:
        pic_bytes = pic.read()
        st.image(pic_bytes)
    context = st.text_input("Enter the context of the interview", help='Enter what the subject being discussed is. This helps the model create an accurate summary of the text', key=f'context')
    audio_file = st.file_uploader("Upload the recording transcript: ", type=['.wav', '.ogg', '.mp3', '.wave'], accept_multiple_files=False, key=f'uploader')
    # Bug fix: audio_bytes was only bound when a file had been uploaded, so
    # pressing the button without a recording raised UnboundLocalError.
    audio_bytes = None
    if audio_file is not None:
        audio_bytes = audio_file.read()
    if st.button("Add Eyewitness:"):
        st.session_state["witnesses"][f"Witness {st.session_state['count']}"] = {
            "Name": name,
            "desc": desc,
            # simplified from `pic_bytes if pic_bytes is not None else
            # pic_bytes`, which always evaluated to pic_bytes anyway
            "pic": pic_bytes,
            "context": context,
            "audio_bytes": audio_bytes,
        }
        st.success("Eyewitness added successfully")
        st.session_state['count'] += 1
        if st.session_state['refresh'] == 0:
            st.session_state['refresh'] = 1
def display(data: dict):
    """Render one stored eyewitness record and offer to run analysis.

    *data* is a single entry of ``st.session_state['witnesses']`` as built
    by ``add_eyewitness`` (keys: Name, desc, pic, context, audio_bytes).
    """
    st.header(data["Name"])
    try:
        st.image(data["pic"], width=200)
    except AttributeError:
        # pic is None when no photo was uploaded; the author expects
        # st.image to raise AttributeError in that case.
        st.write("No picture provided")
    audio_bytes = data['audio_bytes']
    context = data["context"]
    desc = data["desc"]
    st.subheader("Description")
    if desc:
        st.write(desc)
    else:
        st.write("No description provided for the witness")
    st.subheader("Context")
    if context:
        st.write(context)
    else:
        st.write("No context provided for the transcription")
    st.write("## Listen to transcript: ")
    st.audio(audio_bytes, format='audio/wav')
    analysis_button = st.button("Perform analysis")
    if analysis_button:
        # Stash the record for the analysis page, set the flag, navigate.
        st.session_state["curr_data"]=data
        st.session_state['analyze']=1
        switch_page("analysis")
| mopasha1/interrogAIte | functions.py | functions.py | py | 2,674 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.header",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "streamlit.fil... |
5606702695 | from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
from future.utils import with_metaclass
import copy
import types
import wizzat.decorators
from wizzat.pghelper import *
from wizzat.util import set_defaults
__all__ = [
'DBTable',
'DBTableError',
'DBTableConfigError',
'DBTableImmutableFieldError',
]
class DBTableError(Exception):
    """Base class for all DBTable-related errors."""


class DBTableConfigError(DBTableError):
    """Raised when a DBTable subclass is declared with invalid metadata."""


class DBTableImmutableFieldError(DBTableError):
    """Raised when an immutable (id/key) field is modified."""
class DBTableMeta(type):
    """Metaclass validating DBTable subclass declarations.

    Checks the types of ``table_name``/``fields``/``id_field``/``key_fields``,
    builds the optional LRU caches when ``memoize`` is set, and collects
    ``default_<field>`` callables into ``default_funcs``.
    """
    def __init__(cls, name, bases, dct):
        super(DBTableMeta, cls).__init__(name, bases, dct)
        if 'table_name' not in dct or not isinstance(dct['table_name'], str):
            raise DBTableConfigError("table_name is required, and should be a string")
        if 'fields' not in dct or not isinstance(dct['fields'], (list, tuple)):
            raise DBTableConfigError("fields is required, and should be a list or tuple")
        if 'id_field' in dct:
            if not isinstance(dct['id_field'], (type(None), str)):
                raise DBTableConfigError('id_field is not required, but should be a string or None')
        else:
            cls.id_field = None
        if 'key_fields' in dct:
            if not isinstance(dct['key_fields'], (list, tuple)):
                raise DBTableConfigError('key_fields is not required, but should be a list of strings or None')
            for field in dct['key_fields']:
                if not isinstance(field, str):
                    raise DBTableConfigError('key_fields is not required, but should be a list of strings or None')
        else:
            cls.key_fields = []
        # id/key fields must be a subset of the declared fields.
        if dct.get('id_field') and dct['id_field'] not in dct['fields']:
            raise DBTableConfigError('id field {} not in fields'.format(dct['id_field']))
        for field in dct.get('key_fields', []):
            if field not in dct['fields']:
                raise DBTableConfigError('key field {} not in fields'.format(field))
        if dct.get('memoize'):
            # Two caches: by surrogate id and by composite key.  Entries are
            # shared object references between the two.
            cls.id_cache = wizzat.decorators.create_cache_obj(
                max_size = dct.get('memoize_size', 0),
                max_bytes = dct.get('memoize_bytes', 0),
            )
            cls.key_cache = wizzat.decorators.create_cache_obj(
                max_size = dct.get('memoize_size', 0),
                max_bytes = dct.get('memoize_bytes', 0),
            )
        # Per-class shared connection slot, exposed via the `conn` attribute.
        cls._conn = None
        # Collect default_<field> callables in field declaration order.
        cls.default_funcs = {}
        for field in dct['fields']:
            func_name = 'default_{}'.format(field)
            if func_name in dct:
                cls.default_funcs[field] = dct[func_name]
class _SharedConn(object):
    """Data descriptor backing ``DBTable.conn``.

    Bug fix: the original declared ``conn`` as an instance ``property``
    whose setter function was misnamed ``get_conn``, so ``obj.conn = c``
    raised AttributeError, and ``cls.conn`` inside the classmethods
    (``find_by``, ``find_by_sql``, ...) evaluated to the property object
    itself instead of the connection.  A data descriptor that serves both
    class and instance access fixes both call sites without renaming the
    attribute.
    """
    def __get__(self, obj, owner=None):
        return (owner if owner is not None else type(obj))._conn

    def __set__(self, obj, new_conn):
        # Rebind the per-class ``_conn`` slot created by DBTableMeta so the
        # connection stays shared by all instances of the table class.
        type(obj)._conn = new_conn


class DBTable(with_metaclass(DBTableMeta)):
    """
    This is a micro-ORM for the purposes of not having dependencies on Django or SQLAlchemy.
    Philosophically, it also supports merely the object abstraction and super simple sql generation.
    It requires full knowledge of SQL.
    Params:
        table_name: string, the name of the table to query
        id_field: string, the name of the id field (generally a surrogate key)
        key_fields: list[string], the names of the key fields (generally primary or unique key)
        fields: list[string], the names of all fields on the object
        --
        memoize: bool, caches objects from the database locally
        memoize_size: int, maximum number of objects to cache from the database (LRU ejection)
        memoize_bytes: int, maximum size objects to cache from the database (LRU ejection).
                       Note that there are two caches, and while references are shared the
                       cache size here is not absolute.
        default_{field}: func, define functions for default behaviors.  These functions are executed
                       in order of definition in the fields array.
    """
    memoize = False
    table_name = ''
    id_field = ''
    key_fields = []
    fields = []

    # Shared database connection for the class; see _SharedConn above.
    conn = _SharedConn()

    def __init__(self, _is_in_db = False, **kwargs):
        # db_fields mirrors the row as it currently exists in the database;
        # it is empty for objects that have not been persisted yet.
        self.db_fields = kwargs if _is_in_db else {}

        for field in self.fields:
            if field in kwargs:
                field_value = kwargs[field]
            elif field in self.default_funcs:
                field_value = self.default_funcs[field](self)
            else:
                field_value = None
            # Deep copy so shared/mutable defaults cannot leak between rows.
            setattr(self, field, copy.deepcopy(field_value))

        self.on_init()
        self.cache_obj(self)

    def on_init(self):
        """Hook invoked after construction; override in subclasses."""
        pass

    @classmethod
    def check_key_cache(cls, key_fields):
        """Return the cached object for this composite key, if memoized."""
        if cls.memoize:
            cache_key = tuple(key_fields)
            return cls.key_cache.get(cache_key, None)

    @classmethod
    def check_id_cache(cls, id):
        """Return the cached object for this surrogate id, if memoized."""
        if cls.memoize:
            return cls.id_cache.get(id, None)

    @classmethod
    def cache_obj(cls, obj):
        """Store obj in the id and key caches (no-op unless memoized)."""
        if cls.memoize:
            if obj and cls.id_field:
                cache_key = getattr(obj, cls.id_field)
                cls.id_cache[cache_key] = obj

            if obj and cls.key_fields:
                cache_key = tuple(getattr(obj, field) for field in cls.key_fields)
                cls.key_cache[cache_key] = obj

    @classmethod
    def clear_cache(cls):
        """Drop every cached object (no-op unless memoized)."""
        if cls.memoize:
            cls.id_cache.clear()
            cls.key_cache.clear()

    @classmethod
    def uncache_obj(cls, obj):
        """Remove obj from both caches.

        Bug fix: the original accessed ``cls.id_cache``/``cls.key_cache``
        unconditionally, which raises AttributeError for classes declared
        without ``memoize = True`` (the caches only exist when memoized).
        """
        if not cls.memoize:
            return
        if cls.id_field:
            cache_key = getattr(obj, cls.id_field)
            cls.id_cache.pop(cache_key, None)

        if cls.key_fields:
            cache_key = tuple(getattr(obj, field) for field in cls.key_fields)
            cls.key_cache.pop(cache_key, None)

    @classmethod
    def find_by_id(cls, id):
        """Fetch a row by surrogate id, consulting the cache first."""
        obj = cls.check_id_cache(id)
        if obj:
            return obj

        return cls.find_one(**{ cls.id_field : id })

    @classmethod
    def find_by_key(cls, *keys):
        """Fetch a row by composite key, consulting the cache first."""
        obj = cls.check_key_cache(keys)
        if obj:
            return obj

        return cls.find_one(**{ field : value for field, value in zip(cls.key_fields, keys) })

    @classmethod
    def find_one(cls, **kwargs):
        """Return the single row matching kwargs, or None; asserts uniqueness."""
        found = list(cls.find_by(**kwargs))
        if not found:
            return None

        assert len(found) == 1
        return found[0]

    @classmethod
    def create(cls, *keys, **kwargs):
        """Insert and return a new row; positional args fill key_fields."""
        kwargs = set_defaults(kwargs, { field : value for field, value in zip(cls.key_fields, keys) })
        return cls(**kwargs).update()

    @classmethod
    def find_or_create(cls, *args, **kwargs):
        """Return the row for the given key, inserting it when absent."""
        return cls.find_by_key(*args) or cls.create(*args, **kwargs)

    @classmethod
    def find_or_create_many(cls, *rows):
        """find_or_create every row and return the resulting objects.

        Bug fix: the original ``return``ed inside the loop, so only the
        first row was ever processed and the rest were silently ignored.
        """
        return [ cls.find_or_create(*row) for row in rows ]

    @classmethod
    def find_by(cls, for_update = False, nowait = False, **kwargs):
        """
        Returns rows which match all key/value pairs
        Additionally, accepts for_update = True/False, nowait = True/False
        """
        for_update = 'for update' if for_update else ''
        nowait = 'nowait' if nowait else ''

        sql = """
            SELECT *
            FROM {table_name}
            where {where_clause}
            {for_update} {nowait}
        """.format(
            table_name = cls.table_name,
            where_clause = sql_where_from_params(**kwargs),
            for_update = for_update,
            nowait = nowait,
        )
        return cls.find_by_sql(sql, **kwargs)

    @classmethod
    def find_by_sql(cls, sql, **bind_params):
        """Yield one object per row returned by the given SQL."""
        for row in iter_results(cls.conn, sql, **bind_params):
            yield cls(_is_in_db = True, **row)

    def rowlock(self, nowait = False):
        """
        Locks a row in the database for update.  Requires a primary key.
        """
        nowait = "nowait" if nowait else ""

        # Prefer the narrowest unique filter available.
        if self.id_field:
            fields = [ self.id_field ]
        elif self.key_fields:
            fields = self.key_fields
        else:
            fields = self.fields

        filter_clause = ' and '.join([ '{0} = %(orig_{0})s'.format(field) for field in fields ])
        bind_params = { 'orig_{}'.format(x) : self.db_fields[x] for x in fields }

        sql = """
            select *
            from {table_name}
            where {filter_clause}
            for update
            {nowait}
        """.format(
            table_name = self.table_name,
            filter_clause = filter_clause,
            nowait = nowait
        )

        execute(self.conn, sql, **bind_params)
        return self

    def should_update(self):
        """True when any in-memory field differs from the persisted row."""
        curr_values = self.to_dict()
        return any(curr_values[field] != self.db_fields[field] for field in self.fields)

    def update(self, force = False):
        """
        Ensures the row exists is serialized to the database
        """
        if self.db_fields:
            if force or self.should_update():
                self.on_update()
                self._update(force)
                self.after_update()
        else:
            self.on_insert()
            self._insert(force)
            self.after_insert()

        return self

    def on_insert(self):
        """Hook called immediately before the INSERT."""
        pass

    def after_insert(self):
        """Hook called immediately after the INSERT."""
        pass

    def on_update(self):
        """Hook called immediately before the UPDATE."""
        pass

    def after_update(self):
        """Hook called immediately after the UPDATE."""
        pass

    def _insert(self, force = False):
        """
        Inserts a row into the database, and returns that row.
        """
        # Only serialize fields with a value; the database fills the rest
        # (defaults, sequences).  The unused ``values`` list from the
        # original was removed — the INSERT uses named bind parameters.
        kv = { x : y for x, y in self.to_dict().items() if y != None }
        fields = kv.keys()

        sql = "INSERT INTO {table_name} ({fields}) VALUES ({values}) RETURNING *".format(
            table_name = self.table_name,
            fields = ', '.join(fields),
            values = ', '.join([ "%({})s".format(x) for x in fields ]),
        )

        self.db_fields = fetch_results(self.conn, sql, **kv)[0]
        assert self.db_fields

        # Refresh in-memory state with database-computed values.
        for k, v in self.db_fields.items():
            setattr(self, k, copy.deepcopy(v))

    def _update(self, force = False):
        """
        Updates a row in the database, and returns that row.
        """
        new_values = self.to_dict()
        bind_params = { x : new_values[x] for x in self.fields if new_values[x] != self.db_fields[x] }

        if not bind_params:
            return self

        # Verify id field didn't change.
        # NOTE(review): DBTableImmutableFieldError exists for this case, but
        # callers may already catch ValueError, so the type is preserved.
        if self.id_field:
            if getattr(self, self.id_field) != self.db_fields[self.id_field]:
                raise ValueError("id field {} changed from {} to {}".format(
                    self.id_field,
                    self.db_fields[self.id_field],
                    getattr(self, self.id_field),
                ))

        # Verify key fields didn't change (unless force is set).
        if self.key_fields:
            for key_field in self.key_fields:
                if not force and getattr(self, key_field) != self.db_fields[key_field]:
                    raise ValueError("key field {} changed from {} to {}".format(
                        key_field,
                        self.db_fields[key_field],
                        getattr(self, key_field),
                    ))

        field_equality = ', '.join([ "{0} = %({0})s".format(x) for x in bind_params.keys() ])

        if self.id_field:
            fields = [ self.id_field ]
        elif self.key_fields:
            fields = self.key_fields
        else:
            fields = self.db_fields.keys()

        filter_clause = ' and '.join([ '{0} = %(orig_{0})s'.format(field) for field in fields ])
        bind_params.update({ "orig_{}".format(x) : y for x, y in self.db_fields.items() })

        sql = """
            UPDATE {table_name}
            SET {field_equality}
            WHERE {filter_clause}
            RETURNING *
        """.format(
            table_name = self.table_name,
            field_equality = field_equality,
            filter_clause = filter_clause,
        )

        self.db_fields = fetch_results(self.conn, sql, **bind_params)[0]
        assert self.db_fields

        for k, v in self.db_fields.items():
            setattr(self, k, copy.deepcopy(v))

    def delete(self):
        """
        Deletes row(s) in the database that share all fields with the current row, and returns those rows.
        """
        if not self.db_fields:
            return []

        if self.id_field:
            fields = [ self.id_field ]
        elif self.key_fields:
            fields = self.key_fields
        else:
            fields = self.fields

        filter_clause = ' and '.join([ '{0} = %(orig_{0})s'.format(field) for field in fields ])
        bind_params = { 'orig_{}'.format(x) : self.db_fields[x] for x in fields }

        sql = """
            DELETE FROM {table_name}
            WHERE {filter_clause}
            RETURNING *
        """.format(
            table_name = self.table_name,
            filter_clause = filter_clause,
        )

        objs = fetch_results(self.conn, sql, **bind_params)
        assert objs
        return objs

    def to_dict(self):
        """Return {field: current value} for all declared fields."""
        return { field : getattr(self, field) for field in self.fields }

    def commit(self):
        """Commit the shared connection."""
        self.conn.commit()

    def rollback(self):
        """Roll back the shared connection."""
        self.conn.rollback()
| wizzat/wizzat.py | wizzat/dbtable.py | dbtable.py | py | 13,346 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "wizzat.decorators.decorators.create_cache_obj",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "wizzat.decorators.decorators",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "wizzat.decorators",
"line_number": 54,
"usage_type": "nam... |
28538901581 | from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.http import HttpResponse
from .forms import AddOfferForm
from .models import Offer
from main.models import Dog
import pyrebase
from pyrebase.pyrebase import Database
config = {
"apiKey": "AIzaSyAgaZYQDBNyfMNI3A7ocJB1DP_vHiUdo2o",
"authDomain": "godoggo-a18ac.firebaseapp.com",
"databaseURL": "https://godoggo-a18ac-default-rtdb.europe-west1.firebasedatabase.app",
"projectId": "godoggo-a18ac",
"storageBucket": "godoggo-a18ac.appspot.com",
"messagingSenderId": "929780314718",
"appId": "1:929780314718:web:31b97491b560bc07dc7acf"
}
firebase = pyrebase.initialize_app(config)
authe = firebase.auth()
database = firebase.database()
def addOffer(request):
if request.method == "POST":
form = AddOfferForm(request.POST)
if form.is_valid():
date = form.cleaned_data["date"]
dog = form.cleaned_data["dog"]
location = form.cleaned_data["location"]
uuid = request.session["uuid"]
offer = Offer(date, dog, uuid, location)
offer.create()
return HttpResponseRedirect("/")
allOffers = Offer.getEntries()
uuid = request.session["uuid"]
#dogs=Dog.getDogs(uuid)
form = AddOfferForm()
return render(request, "addOffer/addOffer.html", {"entries":allOffers, "form":form})
| zmazk123/GoDoggo | addOffer/views.py | views.py | py | 1,416 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyrebase.initialize_app",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "forms.AddOfferForm",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.Offer",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.http.r... |
8702203975 | import streamlit as st
import altair as alt
import inspect
from vega_datasets import data
@st.experimental_memo
def get_chart_67593(use_container_width: bool):
import altair as alt
import pandas as pd
data = pd.DataFrame([dict(id=i) for i in range(1, 101)])
person = (
"M1.7 -1.7h-0.8c0.3 -0.2 0.6 -0.5 0.6 -0.9c0 -0.6 "
"-0.4 -1 -1 -1c-0.6 0 -1 0.4 -1 1c0 0.4 0.2 0.7 0.6 "
"0.9h-0.8c-0.4 0 -0.7 0.3 -0.7 0.6v1.9c0 0.3 0.3 0.6 "
"0.6 0.6h0.2c0 0 0 0.1 0 0.1v1.9c0 0.3 0.2 0.6 0.3 "
"0.6h1.3c0.2 0 0.3 -0.3 0.3 -0.6v-1.8c0 0 0 -0.1 0 "
"-0.1h0.2c0.3 0 0.6 -0.3 0.6 -0.6v-2c0.2 -0.3 -0.1 "
"-0.6 -0.4 -0.6z"
)
chart = alt.Chart(data).transform_calculate(
row="ceil(datum.id/10)"
).transform_calculate(
col="datum.id - datum.row*10"
).mark_point(
filled=True,
size=50
).encode(
x=alt.X("col:O", axis=None),
y=alt.Y("row:O", axis=None),
shape=alt.ShapeValue(person)
).properties(
width=400,
height=400
).configure_view(
strokeWidth=0
)
tab1, tab2 = st.tabs(["Streamlit theme (default)", "Altair native theme"])
with tab1:
st.altair_chart(chart, theme="streamlit", use_container_width=True)
with tab2:
st.altair_chart(chart, theme=None, use_container_width=True)
try:
st.expander("See code").code(inspect.getsource(get_chart_67593))
get_chart_67593(use_container_width=True)
except Exception as e:
st.exception(e)
| streamlit/release-demos | 1.16.0/demo_app_altair/pages/143_Isotype_Grid.py | 143_Isotype_Grid.py | py | 1,562 | python | en | code | 78 | github-code | 1 | [
{
"api_name": "vega_datasets.data",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "altair.Chart",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "vega_datasets.data",... |
13698381326 | """djangoProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# backend/urls.py
from django.contrib import admin
from django.urls import path, include # add this
from drf_yasg import openapi
# from drf_yasg.views import get_schema_view
# from drf_spectacular import openapi
from drf_yasg.views import get_schema_view
from rest_framework import routers # add this
from candidate import views as candidateviews
from client import views as clientsviews
from drf_spectacular.views import SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView
api_info = openapi.Info(
title="Back End API",
description="CareIgnition Back End API for code demo",
default_version="0.0.1",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="jasonmvallee@gmail.com"),
license=openapi.License(name="No License"),
)
schema_view = get_schema_view(
api_info,
public=True,
)
router = routers.DefaultRouter() # add this
router.register(r'candidates', candidateviews.CandidateView, 'candidate')
router.register(r'clients', clientsviews.ClientView)
router.register(r'jobs', clientsviews.JobView)
router.register(r'outreaches', clientsviews.OutreachView)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include(router.urls)), # add this
path('api/schema/', SpectacularAPIView.as_view(), name='schema'),
# Optional UI:
path('api/schema/swagger-ui/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),
path('api/schema/redoc/', SpectacularRedocView.as_view(url_name='schema'), name='redoc'),
]
| jvallee/EmailTemplate | djangoProject/djangoProject/urls.py | urls.py | py | 2,252 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "drf_yasg.openapi.Info",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Contact",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "drf_y... |
30329408627 | import sqlite3
from flask import Flask, render_template, jsonify, request, json
from flask_socketio import SocketIO, send, emit, join_room
import paho.mqtt.publish as publish
import time
import datetime
import pygal
from random import random
# import json
# import eventlet
# eventlet.monkey_patch()
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config['SECRET_KEY'] = 'Brainiac'
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
socketio = SocketIO(app, ping_timeout=600)
connected = False
clients = ["raspberrypi", "client1", "client2", "client3"]
@socketio.on("connect")
def connection():
    """Handle a new Socket.IO connection.

    Marks the server as having a connected viewer and sends the new
    browser the list of known MQTT clients so it can build its dashboard.
    (Removed a leftover ``print("hi")`` debug statement and dead
    commented-out code.)
    """
    global connected
    connected = True
    print("Client has been connected.")
    print(request.sid)
    # Send the client roster only to the newly-connected socket.
    socketio.emit('data', json.dumps(clients), room=request.sid)
@socketio.on('username')
def receive_username(username):
    """Join the viewer to *username*'s room and push the initial snapshot.

    Emits a ``load`` event containing the stored Accelerometer/Gyroscope/
    Temperature readings plus the client's current switch table.

    Security fix: *username* arrives from the browser and is interpolated
    into a table name below, which sqlite parameter binding cannot cover
    (identifiers can't be bound).  It is therefore restricted to the known
    ``clients`` list to prevent SQL injection.
    """
    if username not in clients:
        print('rejected unknown client {!r}'.format(username))
        return
    join_room(username)
    control_data = []
    print('user is viewing {}'.format(username))
    with sqlite3.connect("mydata.db") as conn:
        conn.row_factory = sqlite3.Row
        c = conn.cursor()
        sensor_data = {}
        adata = read_db(c, username, "Accelerometer")
        gdata = read_db(c, username, "Gyroscope")
        temp = read_db(c, username, "Temperature")
        sensor_data['Accelerometer'] = adata
        sensor_data['Gyroscope'] = gdata
        sensor_data['Temperature'] = temp
    with sqlite3.connect("client.db") as conn:
        conn.row_factory = sqlite3.Row
        c = conn.cursor()
        # Table-per-client schema; username already validated above.
        c.execute('SELECT switch, gpio, state FROM {}'.format(username))
        for i in c.fetchall():
            control_data.append(dict(i))
    sensor_data['control_data'] = control_data
    data = json.dumps(sensor_data)
    print(data)
    socketio.emit('load', data, room=username)
@socketio.on('onOff')
def action(json_data):
    """Relay a switch toggle from the browser.

    Order of operations: publish the command to the device over MQTT,
    append the toggle to the audit log (mydata.db/switchData), update the
    current switch state (client.db), then tell every viewer of the room
    to refresh its UI.
    """
    data = json.loads(json_data)
    username = data['room']
    device = data['device']
    gpio = data['gpio']
    state = data['state']
    # Strip the routing field before forwarding the payload to the device.
    del data['room']
    host = "piserver.local"
    topic = "Project/{}".format(username)
    payload = json.dumps(data)
    publish.single(topic, payload, hostname=host,
                   auth={"username": "alpha", "password": "flash"})
    with sqlite3.connect("mydata.db") as conn:
        c = conn.cursor()
        unix = int(time.time())
        date = datetime.datetime.fromtimestamp(unix)
        date = str(date.strftime('%Y-%m-%d %H:%M:%S'))
        db = "INSERT INTO switchData(id, datestamp, client, switch, gpio, state)\
            VALUES (?, ?, ?, ?, ?, ?)"
        # Hand-rolled per-client id sequence: next id = MAX(id) + 1.
        c.execute("SELECT MAX(id) FROM switchData WHERE client=?", (username,))
        t = c.fetchone()
        if t[0]:
            n = t[0] + 1
            c.execute(db, (n, date, username, device, gpio, state))
        else:
            c.execute(db, (1, date, username, device, gpio, state))
        conn.commit()
    with sqlite3.connect("client.db") as conn:
        c = conn.cursor()
        # NOTE(review): `username` comes from the socket payload and is
        # interpolated into the table name; validate it against the known
        # client list before executing (SQL-injection risk).
        update = 'UPDATE {0} SET state = ? WHERE gpio = ?'.format(username)
        c.execute(update, (state, gpio))
        conn.commit()
    socketio.emit('switching', json_data, room=username)
@socketio.on('addSwitch')
def add_switch(json_data):
    """Insert a new switch row for the client and echo it to other viewers.

    Security fix: the room name is interpolated into a table name, which
    sqlite parameter binding cannot protect, so it is restricted to the
    known ``clients`` list.
    """
    data = json.loads(json_data)
    username = data['room']
    if username not in clients:
        print('rejected addSwitch for unknown client {!r}'.format(username))
        return
    with sqlite3.connect("client.db") as conn:
        c = conn.cursor()
        add = "INSERT INTO {} (switch, gpio, state) VALUES (?, ?, ?)".format(
            username)
        # New switches start in the "off" state.
        c.execute(add, (data["device"], data["gpio"], False))
        conn.commit()
    # include_self=False: echo only to the *other* viewers of the room.
    socketio.emit('adding', json_data, room=username,
                  broadcast=True, include_self=False)
@socketio.on('delete')
def delete_switch(json_data):
    """Delete the named switches from the client's table and notify viewers."""
    data = json.loads(json_data)
    print(data)
    username = data['room']
    array = data['entries']
    with sqlite3.connect('client.db') as conn:
        c = conn.cursor()
        for i in array:
            print(username, i)
            # NOTE(review): table name interpolated from the socket payload;
            # validate `username` against the known client list
            # (SQL-injection risk).
            delete = "DELETE FROM {0} WHERE switch=?".format(username)
            c.execute(delete, ((i,)))
        conn.commit()
    # include_self=False: echo the deletion only to the other room viewers.
    socketio.emit('deleting', json_data, room=username,
                  broadcast=True, include_self=False)
@socketio.on('getaccel')
def plot_accel():
    """Build and broadcast a pygal chart of recent accelerometer magnitudes.

    Plots the magnitude of the last 10 (x, y, z) samples per client.
    Robustness fix: clients with no samples yet now get skipped instead of
    failing on ``None - 9`` (MAX(id) is NULL for an empty client), and the
    magnitude helper is defined once instead of per loop iteration.
    """
    graph = pygal.Line()
    graph.title = 'Change in magnitude of Accelerometer data'
    graph.x_labels = list(range(1, 11))

    def magnitude(x, y, z):
        return (x ** 2 + y ** 2 + z ** 2) ** 0.5

    try:
        with sqlite3.connect("mydata.db") as conn:
            c = conn.cursor()
            for client in clients:
                c.execute("SELECT MAX(id) FROM accelData WHERE client=?", (client,))
                n = c.fetchone()[0]
                if n is None:
                    continue  # no rows recorded for this client yet
                c.execute(
                    "SELECT x,y,z FROM accelData WHERE id BETWEEN ? AND ? AND client=?",
                    (n - 9, n, client),
                )
                values = c.fetchall()
                print(values)
                graph.add(client, [magnitude(x, y, z) for x, y, z in values])
    except Exception as e:
        # Best effort: log and skip the broadcast on any DB/plot failure.
        print(e)
        return
    socketio.emit('sendaccel', graph.render_data_uri(), broadcast=True)
@socketio.on('getgyro')
def plot_gyro():
graph = pygal.Line()
graph.title = 'Change in magnitude of Gyroscope data'
graph.x_labels = list(range(1, 11))
with sqlite3.connect("mydata.db") as conn:
c = conn.cursor()
for client in clients:
data = "SELECT MAX(id) FROM gyroData WHERE client=?"
c.execute(data, ((client,)))
n = c.fetchone()[0]
data = "SELECT x,y,z FROM gyroData WHERE id BETWEEN ? AND ? AND client=?"
c.execute(data, (n-9, n, client,))
values = c.fetchall()
val = []
def func(x, y, z): return (x**2 + y**2 + z**2)**0.5
for x, y, z in values:
val.append(func(x, y, z))
print(val)
graph.add(client, val)
graph_data = graph.render_data_uri()
socketio.emit('sendgyro', graph_data, broadcast=True)
@socketio.on('gettemp')
def plot_temp():
graph = pygal.Line()
graph.title = 'Change in magnitude of Temperature data'
graph.x_labels = list(range(1, 11))
with sqlite3.connect("mydata.db") as conn:
c = conn.cursor()
for client in clients:
data = "SELECT MAX(id) FROM tempData WHERE client=?"
c.execute(data, ((client,)))
n = c.fetchone()[0]
data = "SELECT temperature FROM tempData WHERE id BETWEEN ? AND ? AND client=?"
c.execute(data, (n-9, n, client,))
values = c.fetchall()
val = []
for i in values:
val.append(i[0])
print(val)
graph.add(client, val)
graph_data = graph.render_data_uri()
socketio.emit('sendtemp', graph_data, broadcast=True)
def send_sensor_data(data, username):
print("i am websocket: " + data)
try:
socketio.emit("sensorData", data, room=username)
except Exception as e:
print(e)
def send_graph_data(data):
# try:
if data['sensor'] == "Accelerometer":
plot_accel()
elif data['sensor'] == "Gyroscope":
plot_gyro()
else:
plot_temp()
# except Exception as e:
# print(e)
@app.route('/')
def index():
return render_template("index.html")
def read_db(c, cname, table):
if table == "Accelerometer":
c.execute('SELECT MAX(id),x,y,z FROM accelData WHERE client=?',
(cname,))
data = dict(c.fetchone())
del(data['MAX(id)'])
return data
elif table == "Gyroscope":
c.execute('SELECT MAX(id),x,y,z FROM gyroData WHERE client=?',
(cname,))
data = dict(c.fetchone())
del(data['MAX(id)'])
return data
elif table == "Temperature":
c.execute('SELECT MAX(id),temperature FROM tempData WHERE client=?',
(cname,))
data = dict(c.fetchone())
del(data['MAX(id)'])
return data
if __name__ == '__main__':
socketio.run(app, debug=True)
| Niraj-Kamdar/IoT-Dashboard | myapp.py | myapp.py | py | 8,315 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_socketio.SocketIO",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.sid",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "flask.requ... |
10989914554 | from typing import Tuple, Optional
import torch
from torch import nn
from towhee.models.max_vit.max_vit_block import MaxViTStage
from towhee.models.max_vit.configs import get_configs
from towhee.models.utils import create_model as towhee_model
class MaxViT(nn.Module):
"""
Implementation of the MaxViT proposed in:
https://arxiv.org/pdf/2204.01697.pdf
Args:
in_channels (`int`):
Number of input channels to the convolutional stem. Default 3
depths (`Tuple[int, ...]`):
Depth of each network stage. Default (2, 2, 5, 2)
channels (`Tuple[int, ...]`):
Number of channels in each network stage. Default (64, 128, 256, 512)
num_classes (`int`):
Number of classes to be predicted. Default 1000
embed_dim (`int`):
Embedding dimension of the convolutional stem. Default 64
num_heads (`int`):
Number of attention heads. Default 32
grid_window_size (`Tuple[int, int]`):
Grid/Window size to be utilized. Default (7, 7)
attn_drop (`float`):
Dropout ratio of attention weight. Default: 0.0
drop (`float`):
Dropout ratio of output. Default: 0.0
drop_path (`float`):
Dropout ratio of path. Default: 0.0
mlp_ratio (`float`):
Ratio of mlp hidden dim to embedding dim. Default: 4.0
act_layer (`nn.Module`):
Type of activation layer to be utilized. Default: nn.GELU
norm_layer (`nn.Module`):
Type of normalization layer to be utilized. Default: nn.BatchNorm2d
norm_layer_transformer (`nn.Module`):
Normalization layer in Transformer. Default: nn.LayerNorm
global_pool (`str`):
Global polling type to be utilized. Default "avg"
"""
def __init__(
self,
in_channels: int = 3,
depths: Tuple[int, ...] = (2, 2, 5, 2),
channels: Tuple[int, ...] = (64, 128, 256, 512),
num_classes: int = 1000,
embed_dim: int = 64,
num_heads: int = 32,
grid_window_size: Tuple[int, int] = (7, 7),
attn_drop: float = 0.,
drop: float = 0.,
drop_path: float = 0.,
mlp_ratio: float = 4.,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
norm_layer_transformer=nn.LayerNorm,
global_pool: str = "avg"
) -> None:
""" Constructor method """
# Call super constructor
super().__init__()
# Check parameters
assert len(depths) == len(channels), "For each stage a channel dimension must be given."
assert global_pool in ["avg", "max"], f"Only avg and max is supported but {global_pool} is given"
# Save parameters
self.num_classes: int = num_classes
# Init convolutional stem
self.stem = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=embed_dim, kernel_size=(3, 3), stride=(2, 2),
padding=(1, 1)),
act_layer(),
nn.Conv2d(in_channels=embed_dim, out_channels=embed_dim, kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1)),
act_layer(),
)
# Init blocks
drop_path = torch.linspace(0.0, drop_path, sum(depths)).tolist()
stages = []
for index, (depth, channel) in enumerate(zip(depths, channels)):
stages.append(
MaxViTStage(
depth=depth,
in_channels=embed_dim if index == 0 else channels[index - 1],
out_channels=channel,
num_heads=num_heads,
grid_window_size=grid_window_size,
attn_drop=attn_drop,
drop=drop,
drop_path=drop_path[sum(depths[:index]):sum(depths[:index + 1])],
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
norm_layer_transformer=norm_layer_transformer
)
)
self.stages = nn.Sequential(*stages)
self.global_pool: str = global_pool
self.head = nn.Linear(channels[-1], num_classes)
# @torch.jit.ignore
# def no_weight_decay(self) -> Set[str]:
# """ Gets the names of parameters to not apply weight decay to.
# Returns:
# nwd (Set[str]): Set of parameter names to not apply weight decay to.
# """
# nwd = set()
# for n, _ in self.named_parameters():
# if "relative_position_bias_table" in n:
# nwd.add(n)
# return nwd
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
"""
Method results the classification head
Args:
num_classes (`int`):
Number of classes to be predicted.
global_pool (`str`):
If not global pooling is updated.
"""
self.num_classes: int = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, data: torch.Tensor) -> torch.Tensor:
"""
Forward pass of feature extraction.
Args:
data (`torch.Tensor`):
Input images of the shape [B, C, H, W].
Returns:
output (`torch.Tensor`):
Image features of the backbone.
"""
output = data
for stage in self.stages:
output = stage(output)
return output
def forward_head(self, data: torch.Tensor, pre_logits: bool = False):
"""
Forward pass of classification head.
Args:
data (`torch.Tensor`):
Input features.
pre_logits (`bool`):
If true pre-logits are returned.
Returns:
output (`torch.Tensor`):
Classification output of the shape [B, num_classes].
"""
if self.global_pool == "avg":
data = data.mean(dim=(2, 3))
elif self.global_pool == "max":
data = torch.amax(data, dim=(2, 3))
return data if pre_logits else self.head(data)
def forward(self, data: torch.Tensor) -> torch.Tensor:
""" Forward pass
Args:
data (`torch.Tensor`):
Input images of the shape [B, C, H, W].
Returns:
output (`torch.Tensor`):
Classification output of the shape [B, num_classes].
"""
output = self.forward_features(self.stem(data))
output = self.forward_head(output)
return output
def create_model(
model_name: str = None,
pretrained: bool = False,
checkpoint_path: str = None,
device: str = None,
**kwargs
):
configs = get_configs(model_name)
configs.update(**kwargs)
model = towhee_model(MaxViT, configs=configs, pretrained=pretrained, checkpoint_path=checkpoint_path, device=device)
return model
| towhee-io/towhee | towhee/models/max_vit/max_vit.py | max_vit.py | py | 7,262 | python | en | code | 2,843 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_n... |
40068698885 | from django.shortcuts import render, redirect
from .forms import ContatoForm, ProdutoModelForm, ClienteModelForm
from django.contrib import messages
from .models import Cliente, Produto
# Create your views here.
def index(request):
prod = Produto.objects.all()
context ={
'prod': prod
}
return render(request, 'index.html', context)
def contato(request):
form = ContatoForm(request.POST or None)
if(str(request.method) == 'POST'):
if form.is_valid():
form.send_email()
messages.success(request, 'Email enviado com sucesso')
form = ContatoForm
else:
messages.error(request, 'Falha ao enviar email')
context = {
'form': form
}
return render(request, 'contato.html', context)
def produto(request):
print(dir(request))
print(dir(request.user))
print(request.user.is_authenticated)
if (request.user.is_authenticated == True):
if str(request.method) == 'POST':
form = ProdutoModelForm(request.POST, request.FILES)
if form.is_valid():
form.save()
messages.success(request,'Produto salvo com sucesso')
else:
messages.error(request, 'Erro ao salvar produto')
form = ProdutoModelForm()
else:
form = ProdutoModelForm()
context = {
'form': form
}
return render(request, 'produto.html', context)
else:
return redirect(index)
def cliente(request):
if str(request.method) == 'POST':
form = ClienteModelForm(request.POST)
if form.is_valid():
form.save()
messages.success(request,'Cliente cadastrado com sucesso')
else:
messages.error(request,'Não foi possível cadastrar o cliente')
form = ClienteModelForm()
else:
form = ClienteModelForm()
context = {
'form': form
}
return render(request, 'clientes.html', context)
def mostra_cliente(request):
clientes = Cliente.objects.all()
context ={
'clientes': clientes
}
return render(request,'mostra_cliente.html', context) | P4J3/django2 | core/views.py | views.py | py | 2,205 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "models.Produto.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Produto.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.Produto",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": ... |
10924482819 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
import time
def no_delay_output():
"""
非延迟输出,正常会等页面加载完毕后才进行输出,
此种方式在加载界面的时候, 会输出
:return:
"""
# 修改页面加载策略
desired_capabilities = DesiredCapabilities.CHROME
# 注释这两行会导致最后输出结果的延迟, 即等待页面加载完成再输出
desired_capabilities["pageLoadStrategy"] = "none"
driver = webdriver.Chrome()
# 后面可以使用wait对特定元素进行等待
wait = WebDriverWait(driver, 3)
driver.get('http://www.dianping.com/shop/98394949')
time.sleep(2)
print(driver.get_cookies())
print("end")
driver.close()
def no_delay_cookies(url):
"""
非延迟获取cookies
:return:
"""
# 修改页面加载策略
desired_capabilities = DesiredCapabilities.CHROME
# 注释这两行会导致最后输出结果的延迟, 即等待页面加载完成再输出
desired_capabilities["pageLoadStrategy"] = "none"
# 静默模式
option = webdriver.ChromeOptions()
option.add_argument('headless')
option.add_experimental_option('excludeSwitches', ['enable-automation'])
# 启动浏览器
driver = webdriver.Chrome(options=option)
# driver = webdriver.Chrome()
driver.delete_all_cookies()
# 后面可以使用wait对特定元素进行等待
wait = WebDriverWait(driver, 1)
# driver.get('http://www.dianping.com/shop/98394949')
driver.get(url)
time.sleep(1)
cookies_list = driver.get_cookies()
cookie_dict = dict()
for res in cookies_list:
key = res.get("name")
value = res.get("value")
cookie_dict[key] = value
print("9991111", cookie_dict)
if cookie_dict is not None:
lxsdk_s = cookie_dict.get("_lxsdk_s")
if lxsdk_s is not None:
increase_int = lxsdk_s[(int(lxsdk_s.rindex('C')) + 1):]
with open('incre_cookid_detail', 'w') as f:
f.write(str(increase_int))
else:
with open('incre_cookid_detail', 'w') as f:
f.write(5)
# driver.close()
return cookie_dict
def test(url):
driver1 = webdriver.Chrome()
# driver.delete_all_cookies()
driver1.get(url)
# cookies_list = driver1.get_cookies()
#
# print(cookies_list)
# driver1.close()
if __name__ == '__main__':
ss = no_delay_cookies("http://www.dianping.com/shop/98394949")
print(ss)
# auto_search()
# test("http://www.dianping.com/shop/2767525")
# test("http://www.baidu.com")
| logonmy/spider-mz | utils/selenium_utils.py | selenium_utils.py | py | 2,840 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities.CHROME",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": ... |
5130368081 | # !/usr/bin/env python.
# -*- coding: utf-8 -*-
import os
import glob
import fiona
import matplotlib
import pandas as pd
import numpy as np
import wradlib as wrl
import matplotlib.pyplot as plt
from matplotlib import path
from osgeo import osr
from pathlib import Path
from matplotlib.colors import LinearSegmentedColormap
#base_path = r'C:\Users\hachem\Downloads\SF201907\*'
base_path = r'C:\Users\hachem\Downloads\SF201407.tar\SF201407\*'
state_name = 'Rheinland-Pfalz'
files = glob.glob(base_path)
# hdf5_path = os.path.join(database, 'rain_germany_daily.h5')
# hf = tables.open_file(hdf5_path, 'r')
# ishape = (r"P:\2017_SYNOPSE_II\02_Import\07_Shapefiles\DEU_adm_shp"
# r"\DEU_adm1.shp")
ishape = r"X:\staff\elhachem\Shapefiles\Neckar_seperate\Danube.shp"
#first = ishape.next()
# shp_objects_all = [shp for shp in list(fiona.open(ishape))
# if shp['properties']['NAME_1'] == state_name]
shp_objects_all = [shp for shp in list(fiona.open(ishape))]
#==============================================================================
#
# #============================================================================
# hourly_events = [ # '2016-06-25 00:00:00',
# '2018-06-11 14:50:00',
# '2018-06-11 15:50:00',
# '2018-06-11 16:50:00',
# '2018-06-11 17:50:00',
# '2018-06-11 18:50:00']
hourly_events = ['2018-09-06 16:50:00',
'2018-09-06 17:50:00',
'2018-09-06 18:50:00']
#'2018-09-23 17:00:00',
#'2018-09-23 18:00:00',
#'2018-09-23 19:00:00']
daily_events = [ # '2018-12-23 00:00:00',
#'2019-05-22 00:00:00',
'2018-05-14 00:00:00',
'2019-07-28 00:00:00']
bound = [0., 1,
2, 5, 8,
10, 15, 20,
25, 30] # , 35, 40, 45]
interval_ppt = np.linspace(0.05, 0.95)
colors_ppt = plt.get_cmap('jet_r')(interval_ppt)
cmap_ppt = LinearSegmentedColormap.from_list('name', colors_ppt)
#cmap_ppt = plt.get_cmap('jet_r')
cmap_ppt.set_over('navy')
norm = matplotlib.colors.BoundaryNorm(bound, cmap_ppt.N)
for i, file in enumerate(files):
# file = file + '.gz'
# event_date = ('20' + file[50:52] + '-' + file[52:54] + '-' + file[54:56]
# + ' ' + file[56:58] + ':' + file[58:60] + ':00')
# print(i, '/', len(files), event_date)
# # if event_date in hourly_events:
# event_date_minus_one_hr = pd.DatetimeIndex(
# [event_date]) - pd.Timedelta(minutes=60)
# shifted_event = event_date_minus_one_hr + pd.Timedelta(minutes=10)
rwdata, rwattrs = wrl.io.read_radolan_composite(file)
# mask data
sec = rwattrs['secondary']
rwdata.flat[sec] = -9999
rwdata = np.ma.masked_equal(rwdata, -9999)
# create radolan projection object
proj_stereo = wrl.georef.create_osr("dwd-radolan")
# create wgs84 projection object
proj_wgs = osr.SpatialReference()
proj_wgs.ImportFromEPSG(4326)
# get radolan grid
radolan_grid_xy = wrl.georef.get_radolan_grid(900, 900)
x1 = radolan_grid_xy[:, :, 0]
y1 = radolan_grid_xy[:, :, 1]
# convert to lonlat
radolan_grid_ll = wrl.georef.reproject(radolan_grid_xy,
projection_source=proj_stereo,
projection_target=proj_wgs)
lon1 = radolan_grid_ll[:, :, 0]
lat1 = radolan_grid_ll[:, :, 1]
mask = np.ones_like(lon1, dtype=np.bool)
# first['geometry']['coordinates']
for n, i_poly_all in enumerate(shp_objects_all):
i_poly = i_poly_all['geometry']['coordinates']
if 0 < len(i_poly) <= 1:
p = path.Path(np.array(i_poly)[0])
grid_mask = p.contains_points(
np.vstack((lon1.flatten(),
lat1.flatten())).T).reshape(900, 900)
mask[grid_mask] = 0
else:
for ix in range(len(i_poly)):
p = path.Path(np.array(i_poly[ix]))
grid_mask = p.contains_points(
np.vstack((lon1.flatten(),
lat1.flatten())).T).reshape(900, 900)
mask[grid_mask] = 0
# mask.dump(r'X:\staff\elhachem\2020_10_03_Rheinland_Pfalz\mask.npy')
# rwdata[mask] = -1
rw_maskes = np.ma.masked_array(rwdata, rwdata < 0.)
# plt.figure()
# # min_x = xss[np.argmin([xs - x0 for xs in xss])]
# # min_y = yss[np.argmin([ys - y0 for ys in yss])]
#
# plt.pcolormesh(lon1, lat1, rw_maskes, cmap=cmap_ppt,
# vmin=0, norm=norm, vmax=30)
#
# plt.ylabel('Latitude [°]')
# plt.xlabel('Longitude [°]')
#
# plt.xlim([7.1, 10.7])
# plt.ylim([47.3, 50.0])
#
# # plt.axis('equal')
# # radar.set_aspect('equal', 'box')
# # plt.xlim([netatmo.get_xlim()[0], netatmo.get_xlim()[1]])
# # plt.ylim([netatmo.get_ylim()[0], netatmo.get_ylim()[1]])
# cbar = plt.colorbar(extend='max', label='[mm/h]')
#
# plt.tight_layout()
# plt.savefig(
# Path(os.path.join(
# r'C:\Users\hachem\Desktop\radar',
# 'hourly_event_{}.png'.format(file[50:59]))), dpi=600)
#
# # plt.show()
# # break
# plt.close()
| AbbasElHachem/extremes | _57_plot_radar_events_BW_.py | _57_plot_radar_events_BW_.py | py | 5,414 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "fiona.open",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
... |
72558407395 | import uuid
from abc import ABC, abstractmethod
from decimal import Decimal
from typing import List, Optional, Dict
from pydantic import BaseModel, Field, computed_field
from cache.store import get_provider_html
from models.models import ProviderVehiclesRequest, ProviderStoreItem
class Calculator(ABC, BaseModel):
vehicle_request: ProviderVehiclesRequest
price_per_minute: Optional[Decimal]
price_per_km: Optional[Decimal]
def total_price(self) -> Decimal:
return (self.vehicle_request.distance_kilometer * self.price_per_km) + (
self.vehicle_request.time_minutes * self.price_per_minute
)
class VehicleOption(BaseModel):
vehicle_image: str
electric: bool = Field(default=False)
provider: str
type: str
calculator: Calculator = Field(exclude=True)
@computed_field
@property
def total_price(self) -> Decimal:
return self.calculator.total_price().quantize(Decimal("0.00"))
@computed_field
@property
def id(self) -> str:
return str(uuid.uuid4())
class ProviderInterface(ABC, BaseModel):
vehicle_request: ProviderVehiclesRequest
pricing_urls: Optional[List[str]] = Field(required=False, default=None)
free_parking: bool
def get_pricing_html(self) -> List[ProviderStoreItem]:
assert self.pricing_urls
return [get_provider_html(url) for url in self.pricing_urls]
@abstractmethod
async def get_vehicles_options(self) -> List[VehicleOption]:
raise NotImplementedError("Forgot to implement total method")
| martijnboers/WelkeDeelauto | backend/models/interface.py | interface.py | py | 1,563 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "abc.ABC",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.models.ProviderVehiclesRequest",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typi... |
70673994913 | import os
import shutil
import argparse
import subprocess
import random
import pandas as pd
import numpy as np
import pickle as pkl
import scipy as sp
import networkx as nx
import scipy.stats as stats
import scipy.sparse as sparse
from torch import nn
from torch import optim
from torch.nn import functional as F
from scripts.ulity import *
from scripts.preprocessing import *
from scripts.cnnscript import *
from shutil import which
from collections import Counter
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbiblastnCommandline
from models.phamer import Transformer
from models.CAPCNN import WCNN
from models.PhaGCN import GCN
from models import Cherry
from draw import draw_network, drop_network
from collections import Counter
from scipy.special import softmax
from scripts.data import load_data, preprocess_features, preprocess_adj, sample_mask
parser = argparse.ArgumentParser(description="""Main script of PhaSUIT.""")
parser.add_argument('--contigs', help='FASTA file of contigs', default = 'inputs.fa')
parser.add_argument('--threads', help='number of threads to use', type=int, default=8)
parser.add_argument('--len', help='minimum length of contigs', type=int, default=3000)
parser.add_argument('--reject', help='threshold to reject prophage', type=float, default = 0.3)
parser.add_argument('--rootpth', help='rootpth of the user', default='user_0/')
parser.add_argument('--out', help='output path of the user', default='out/')
parser.add_argument('--midfolder', help='mid folder for intermidiate files', default='midfolder/')
parser.add_argument('--dbdir', help='database directory', default = 'database/')
parser.add_argument('--parampth', help='path of parameters', default = 'parameters/')
parser.add_argument('--scriptpth', help='path of parameters', default = 'scripts/')
parser.add_argument('--proteins', help='FASTA file of predicted proteins (optional)')
parser.add_argument('--topk', help='Top k prediction', type=int, default=1)
inputs = parser.parse_args()
contigs = inputs.contigs
midfolder = inputs.midfolder
rootpth = inputs.rootpth
db_dir = inputs.dbdir
out_dir = inputs.out
parampth = inputs.parampth
threads = inputs.threads
length = inputs.len
scriptpth = inputs.scriptpth
if not os.path.exists(db_dir):
print(f'Database directory {db_dir} missing or unreadable')
exit(1)
check_path(os.path.join(rootpth, out_dir))
check_path(os.path.join(rootpth, midfolder))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device == 'cpu':
print("running with cpu")
torch.set_num_threads(inputs.threads)
###############################################################
####################### Filter length ########################
###############################################################
rec = []
ID2length = {}
for record in SeqIO.parse(contigs, 'fasta'):
ID2length[record.id] = len(record.seq)
if len(record.seq) > inputs.len:
rec.append(record)
if not rec:
with open(f'{rootpth}/{out_dir}/phamer_prediction.csv', 'w') as file_out:
file_out.write("Accession,Pred,Score\n")
for record in SeqIO.parse(contigs, 'fasta'):
file_out.write(f'{record.id},filtered,0\n')
exit()
SeqIO.write(rec, f'{rootpth}/filtered_contigs.fa', 'fasta')
###############################################################
########################## PhaMer ############################
###############################################################
# add convertxml (Nov. 8th)
translation(rootpth, os.path.join(rootpth, midfolder), 'filtered_contigs.fa', 'test_protein.fa', threads, inputs.proteins)
run_diamond(f'{db_dir}/phamer_database.dmnd', os.path.join(rootpth, midfolder), 'test_protein.fa', 'phamer', threads)
convert_xml(os.path.join(rootpth, midfolder), 'phamer', scriptpth)
if os.path.getsize(f'{rootpth}/{midfolder}/phamer_results.abc') == 0:
with open(f'{rootpth}/{out_dir}/phamer_prediction.csv', 'w') as file_out:
file_out.write("Accession,Pred,Score\n")
for record in SeqIO.parse(contigs, 'fasta'):
file_out.write(f'{record.id},non-phage,0\n')
exit()
contig2sentence(db_dir, os.path.join(rootpth, midfolder), 'test_protein.fa', 'phamer')
pcs2idx = pkl.load(open(f'{rootpth}/{midfolder}/phamer_pc2wordsid.dict', 'rb'))
num_pcs = len(set(pcs2idx.keys()))
src_pad_idx = 0
src_vocab_size = num_pcs+1
model, optimizer, loss_func = reset_model(Transformer, src_vocab_size, device)
try:
pretrained_dict=torch.load(f'{parampth}/transformer.pth', map_location=device)
model.load_state_dict(pretrained_dict)
except:
print('cannot find pre-trained model')
exit(1)
sentence = pkl.load(open(f'{rootpth}/{midfolder}/phamer_sentence.feat', 'rb'))
id2contig = pkl.load(open(f'{rootpth}/{midfolder}/phamer_sentence_id2contig.dict', 'rb'))
proportion = pkl.load(open(f'{rootpth}/{midfolder}/phamer_sentence_proportion.feat', 'rb'))
contig2id = {item: key for key, item in id2contig.items()}
all_pred = []
all_score = []
with torch.no_grad():
_ = model.eval()
for idx in range(0, len(sentence), 500):
try:
batch_x = sentence[idx: idx+500]
weight = proportion[idx: idx+500]
except:
batch_x = sentence[idx:]
weight = proportion[idx:]
batch_x = return_tensor(batch_x, device).long()
logit = model(batch_x)
logit = torch.sigmoid(logit.squeeze(1))
logit = reject_prophage(logit, weight, inputs.reject)
pred = ['phage' if item > 0.5 else 'non-phage' for item in logit]
all_pred += pred
all_score += [float('{:.3f}'.format(i)) for i in logit]
#FLAGS
if len(set(all_pred)) == 1 and all_pred[0] == 'non-phage':
with open(f'{rootpth}/{out_dir}/phamer_prediction.csv', 'w') as file_out:
file_out.write("Accession,Pred,Score\n")
for record in SeqIO.parse(contigs, 'fasta'):
file_out.write(f'{record.id},non-phage,0\n')
exit()
### Add filtered label (Nov. 8th)
contigs_list = list(id2contig.values())
contigs_add = []
for record in SeqIO.parse(f'{contigs}', 'fasta'):
if record.id not in contigs_list:
if len(record.seq) < inputs.len:
contigs_add.append(record.id)
all_pred.append('filtered')
all_score.append(0)
continue
contigs_add.append(record.id)
all_pred.append('non-phage')
all_score.append(0)
contigs_list += contigs_add
length_list = [ID2length[item] for item in contigs_list]
pred_csv = pd.DataFrame({"Accession":contigs_list, "Length":length_list, "Pred":all_pred, "Score":all_score})
pred_csv.to_csv(f'{rootpth}/{out_dir}/phamer_prediction.csv', index = False)
pred_phage_dict = {}
for contig, pred in zip(pred_csv['Accession'].values, pred_csv['Pred'].values):
if pred == 'phage':
pred_phage_dict[contig] = contig2id[contig]
# FLAGS
if pred_phage_dict:
pass
else:
exit()
###############################################################
########################## PhaTYP ############################
###############################################################
id2contig = {key: item for key, item in enumerate(pred_phage_dict.keys())}
recruit_sentence = sentence[list(pred_phage_dict.values())]
pkl.dump(recruit_sentence, open(f'{rootpth}/{midfolder}/phatyp_sentence.feat', 'wb'))
pkl.dump(pcs2idx, open(f'{rootpth}/{midfolder}/phatyp_pc2wordsid.dict', 'wb'))
generate_bert_input(db_dir, os.path.join(rootpth, midfolder), os.path.join(rootpth, midfolder), 'phatyp')
def preprocess_function(examples):
return tokenizer(examples["text"], truncation=True)
bert_feat = pd.read_csv(f'{rootpth}/{midfolder}/phatyp_bert_feat.csv')
test = pa.Table.from_pandas(bert_feat)
test = datasets.Dataset(test)
data = datasets.DatasetDict({"test": test})
tokenizer = BertTokenizer.from_pretrained(f'{parampth}/bert_config', do_basic_tokenize=False)
tokenized_data= data.map(preprocess_function, batched=True)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
trainer = init_bert(f'{rootpth}/{midfolder}', bert_feat, os.path.join(parampth, "bert"), tokenizer, tokenized_data, data_collator)
with torch.no_grad():
pred, label, metric = trainer.predict(tokenized_data["test"])
prediction_value = []
for item in pred:
prediction_value.append(softmax(item))
prediction_value = np.array(prediction_value)
all_pred = []
all_score = []
for score in prediction_value:
pred = np.argmax(score)
if pred == 1:
all_pred.append('temperate')
all_score.append(score[1])
else:
all_pred.append('virulent')
all_score.append(score[0])
pred_csv = pd.DataFrame({"Accession":id2contig.values(), "Pred":all_pred, "Score":all_score})
pred_csv.to_csv(f'{rootpth}/{out_dir}/phatyp_prediction.csv', index = False)
###############################################################
########################## PhaGCN ############################
###############################################################
nucl, protein = recruit_phage_file(rootpth, midfolder, pred_phage_dict)
SeqIO.write(nucl, f'{rootpth}/checked_phage_contigs.fa',"fasta")
SeqIO.write(protein, f'{rootpth}/checked_phage_protein.fa',"fasta")
# Filter unknown family (Nov. 16th)
query_file = f"{rootpth}/checked_phage_contigs.fa"
db_virus_prefix = f"{db_dir}/unknown_db/db"
output_file = f"{rootpth}/{midfolder}/unknown_out.tab"
virus_call = NcbiblastnCommandline(query=query_file,db=db_virus_prefix,out=output_file,outfmt="6 qseqid sseqid evalue pident length qlen", evalue=1e-10,
task='megablast',perc_identity=95,num_threads=threads)
virus_call()
check_unknown = {}
check_unknown_all = {}
check_unknown_all_score = {}
with open(output_file) as file_out:
for line in file_out.readlines():
parse = line.replace("\n", "").split("\t")
virus = parse[0]
target = parse[1]
target = target.split('|')[1]
ident = float(parse[-3])
length = float(parse[-2])
qlen = float(parse[-1])
if length/qlen > 0.95 and ident > 0.95:
check_unknown[virus] = target
if virus not in check_unknown_all:
ident = float(parse[-3])/100
ident = float(f"{ident:.3f}")
check_unknown_all[virus] = target
check_unknown_all_score[virus] = ident
rec = []
for record in SeqIO.parse(f'{rootpth}/checked_phage_contigs.fa', 'fasta'):
try:
if check_unknown[record.id]:
continue
except:
rec.append(record)
if not rec:
phagcn_exception_no_visual(rootpth, midfolder, out_dir, ID2length, inputs, 'checked_phage_contigs.fa')
else:
SeqIO.write(rec, f'{rootpth}/checked_phage_phagcn_contigs.fa', 'fasta')
rec = []
for record in SeqIO.parse(f'{rootpth}/checked_phage_protein.fa', 'fasta'):
name = record.id
name = name.rsplit('_', 1)[0]
try:
if check_unknown[name]:
continue
except:
rec.append(record)
SeqIO.write(rec, f'{rootpth}/checked_phage_phagcn_protein.fa', 'fasta')
single_pth = os.path.join(rootpth, "CNN_temp/single")
cnninput_pth = os.path.join(rootpth, "CNN_temp/input")
phagcninput_pth = os.path.join(rootpth, midfolder, "phgcn/")
check_path(single_pth)
check_path(cnninput_pth)
check_path(phagcninput_pth)
contig2name = {}
with open(f"{rootpth}/{midfolder}/phagcn_name_list.csv",'w') as list_out:
list_out.write("Contig,idx\n")
for contig_id, record in enumerate(SeqIO.parse(f'{rootpth}/checked_phage_phagcn_contigs.fa', "fasta")):
name = f"PhaGCN_{str(contig_id)}"
list_out.write(record.id + "," + name+ "\n")
contig2name[record.id] = name
record.id = name
_ = SeqIO.write(record, f"{single_pth}/{name}.fa", "fasta")
rename_rec = []
for record in SeqIO.parse(f'{rootpth}/checked_phage_phagcn_protein.fa',"fasta"):
old_name = record.id
idx = old_name.rsplit('_', 1)[1]
record.id = contig2name[old_name.rsplit('_', 1)[0]] +"_"+ idx
rename_rec.append(record)
SeqIO.write(rename_rec, f'{rootpth}/{midfolder}/phagcn_renamed_protein.fa', 'fasta')
# sequence encoding using CNN
seq_dict = {}
for file in os.listdir(single_pth):
rec = create_fragments(single_pth, file)
seq_dict[file.split('.fa')[0]] = rec
int_to_vocab, vocab_to_int = return_kmer_vocab()
for seq in seq_dict:
int_feature = encode(seq_dict[seq], vocab_to_int)
inputs_feat = create_cnndataset(int_feature)
np.savetxt(f"{cnninput_pth}/{seq}.csv", inputs_feat, delimiter=",", fmt='%d')
cnn, embed = load_cnnmodel(parampth)
cnn = cnn.to(device)
compress_feature = []
file_list = os.listdir(cnninput_pth)
file_list = sorted(file_list)
for name in file_list:
val = np.genfromtxt(f'{cnninput_pth}/{name}', delimiter=',')
val_label = val[:, -1]
val_feature = val[:, :-1]
# comvert format
val_feature = torch.from_numpy(val_feature).long()
val_feature = embed(val_feature)
val_feature = val_feature.reshape(len(val_feature), 1, 1998, 100).to(device)
# prediction
out = cnn(val_feature)
out = out.cpu().detach().numpy()
out = np.sum(out, axis=0)
compress_feature.append(out)
compress_feature = np.array(compress_feature)
pkl.dump(compress_feature, open(f"{phagcninput_pth}/phagcn_contig.F", 'wb'))
# Generate knowledge graph
# add convertxml (Nov. 8th)
run_diamond(f'{db_dir}/phagcn_database.dmnd', os.path.join(rootpth, midfolder), 'phagcn_renamed_protein.fa', 'phagcn', threads)
convert_xml(os.path.join(rootpth, midfolder), 'phagcn', scriptpth)
#FLAGS
# PhaGCN stage: if DIAMOND produced no alignments at all there is nothing to
# build a knowledge graph from, so fall back to the no-visualisation path;
# otherwise run MCL protein clustering, build the contig graph, and classify
# the query contigs with the pre-defined GCN.
if os.path.getsize(f'{rootpth}/{midfolder}/phagcn_results.abc') == 0:
    phagcn_exception_no_visual(rootpth, midfolder, out_dir, ID2length, inputs, 'checked_phage_contigs.fa')
else:
    abc_fp = f"{rootpth}/{midfolder}/merged.abc"
    _ = subprocess.check_call(f"cat {db_dir}/phagcn_database.self-diamond.tab.abc {rootpth}/{midfolder}/phagcn_results.abc > {abc_fp}", shell=True)
    # generate gene2genome
    generate_gene2genome(os.path.join(rootpth, midfolder), os.path.join(rootpth, midfolder), 'phagcn', rootpth)
    # Combining the gene-to-genomes files
    _ = subprocess.check_call(f"cat {db_dir}/Caudovirales_gene_to_genomes.csv {rootpth}/{midfolder}/phagcn_contig_gene_to_genome.csv > {rootpth}/{midfolder}/phagcn_gene_to_genome.csv", shell=True)
    # Running MCL
    print("\n\n" + "{:-^80}".format("Protein clustering"))
    print("Loading proteins...")
    gene2genome_fp = f"{rootpth}/{midfolder}/phagcn_gene_to_genome.csv"
    gene2genome_df = pd.read_csv(gene2genome_fp, sep=',', header=0)
    pc_overlap, pc_penalty, pc_haircut, pc_inflation = 0.8, 2.0, 0.1, 2.0
    pcs_fp = make_protein_clusters_mcl(abc_fp, os.path.join(rootpth, midfolder), pc_inflation)
    print("Building the cluster and profiles (this may take some time...)")
    protein_df, clusters_df, profiles_df, contigs_df = build_clusters(pcs_fp, gene2genome_df)
    print("Saving files")
    dfs = [gene2genome_df, contigs_df, clusters_df]
    names = ['proteins', 'contigs', 'pcs']
    for name, df in zip(names, dfs):
        fn = "Cyber_phagcn_{}.csv".format(name)
        fp = os.path.join(f'{rootpth}/{midfolder}', fn)
        index_id = name.strip('s') + '_id'
        df.set_index(index_id).to_csv(fp)
    contigs_csv_df = contigs_df.copy()
    contigs_csv_df['contig_id'] = contigs_csv_df['contig_id'].str.replace(' ', '~')
    contigs_csv_df.index.name = "pos"
    contigs_csv_df.reset_index(inplace=True)
    pcs_csv_df = clusters_df.copy()
    profiles = profiles_df.copy()
    profiles['contig_id'] = profiles['contig_id'].str.replace(' ', '~') # ClusterONE can't handle spaces
    # Filtering the PC profiles that appears only once
    before_filter = len(profiles)
    cont_by_pc = profiles.groupby("pc_id").count().contig_id.reset_index()
    # get the number of contigs for each pcs and add it to the dataframe
    cont_by_pc.columns = ["pc_id", "nb_proteins"]
    pcs_csv_df = pd.merge(pcs_csv_df, cont_by_pc, left_on="pc_id", right_on="pc_id", how="left")
    pcs_csv_df.fillna({"nb_proteins": 0}, inplace=True)
    # Drop the pcs that <= 1 contig from the profiles.
    pcs_csv_df = pcs_csv_df[pcs_csv_df['nb_proteins'] > 1] # .query("nb_contigs>1")
    at_least_a_cont = cont_by_pc[cont_by_pc['nb_proteins'] > 1] # cont_by_pc.query("nb_contigs>1")
    profiles = profiles[profiles['pc_id'].isin(at_least_a_cont.pc_id)]
    pcs_csv_df = pcs_csv_df.reset_index(drop=True)
    pcs_csv_df.index.name = "pos"
    pcs_csv_df = pcs_csv_df.reset_index()
    matrix, singletons = build_pc_matrices(profiles, contigs_csv_df, pcs_csv_df)
    profiles_csv = {"matrix": matrix, "singletons": singletons}
    merged_df = contigs_csv_df
    ntw = create_network(matrix, singletons, thres=1, max_sig=300)
    fi = to_clusterer(ntw, f"{rootpth}/{midfolder}/phagcn_network.ntw", merged_df.copy())
    print("\n\n" + "{:-^80}".format("Calculating E-edges"))
    # loading database
    gene2genome = pd.read_csv(f'{db_dir}/Caudovirales_gene_to_genomes.csv')
    contig_id = gene2genome["contig_id"].values
    contig_id = [item.replace(" ", "~") for item in contig_id]
    gene2genome["contig_id"] = contig_id
    protein_to_ref = {protein:ref for protein, ref in zip(gene2genome["protein_id"].values, gene2genome["contig_id"].values)}
    contig_set = list(set(gene2genome["contig_id"].values))
    ID_to_ref = {i:ref for i, ref in enumerate(contig_set)}
    ref_to_ID = {ref:i for i, ref in enumerate(contig_set)}
    # row index per query contig: same sorted directory listing that ordered
    # the compressed CNN features above
    contig_to_id = {}
    file_list = os.listdir(single_pth)
    file_list = sorted(file_list)
    for file_n in file_list:
        name = file_n.split(".")[0]
        contig_to_id[name] = file_list.index(file_n)
    # record the row id for each contigs
    id_to_contig = {value: key for key, value in contig_to_id.items()}
    blastp = pd.read_csv(f'{rootpth}/{midfolder}/phagcn_results.abc', sep=" ", names = ["contigs", "ref", "e-value"])
    gene_to_genome = pd.read_csv(f"{rootpth}/{midfolder}/phagcn_contig_gene_to_genome.csv", sep=",")
    # accumulate DIAMOND e-values into a (query contig x reference genome) matrix
    e_matrix = np.ones((len(contig_to_id), len(ref_to_ID.keys())))
    blast_contigs = blastp["contigs"].values
    blast_ref = blastp["ref"].values
    blast_value = blastp["e-value"].values
    for i in range(len(blast_contigs)):
        contig_name = gene_to_genome[gene_to_genome["protein_id"] == blast_contigs[i]]["contig_id"].values
        contig_name = contig_name[0]
        row_id = contig_to_id[contig_name]
        reference = protein_to_ref[blast_ref[i]]
        col_id = ref_to_ID[reference]
        e_value = float(blast_value[i])
        if e_value == 0:
            # clamp exact zeros so log10 below stays finite
            e_value = 1e-250
        if e_matrix[row_id][col_id] == 1:
            e_matrix[row_id][col_id] = e_value
        else:
            e_matrix[row_id][col_id] += e_value
    e_weight = -np.log10(e_matrix)-50
    e_weight[e_weight < 1] = 0
    print("\n\n" + "{:-^80}".format("Calculating P-edges"))
    name_to_id = {}
    reference_df = pd.read_csv(f"{db_dir}/phagcn_reference_name_id.csv")
    tmp_ref = reference_df["name"].values
    tmp_id = reference_df["idx"].values
    for ref, idx in zip(tmp_ref,tmp_id):
        name_to_id[ref.replace(" ", "~")] = idx
    edges = pd.read_csv(f"{rootpth}/{midfolder}/phagcn_network.ntw", sep=' ', names=["node1", "node2", "weight"])
    merged_df = pd.read_csv(f"{db_dir}/Caudovirales_genome_profile.csv", header=0, index_col=0)
    Taxonomic_df = pd.read_csv(f"{db_dir}/phagcn_taxonomic_label.csv")
    merged_df = pd.merge(merged_df, Taxonomic_df, left_on="contig_id", right_on="contig_id", how="inner")
    contig_id = merged_df["contig_id"].values
    family = merged_df["class"].values
    # skip NaN labels (type check against np.nan's type, i.e. float)
    contig_to_family = {name: family for name, family in zip(contig_id, family) if type(family) != type(np.nan) }
    G = nx.Graph()
    # Add p-edges to the graph
    with open(f"{rootpth}/{midfolder}/phagcn_network.ntw") as file_in:
        for line in file_in.readlines():
            tmp = line[:-1].split(" ")
            node1 = tmp[0]
            node2 = tmp[1]
            weight = float(tmp[2])
            # "~" marks a reference genome node; every reference must be known
            if "~" in node1 and node1 not in name_to_id.keys():
                print(node1)
                print("ERROR")
                exit(1)
            if "~" in node2 and node2 not in name_to_id.keys():
                print(node2)
                print("ERROR")
                exit(1)
            G.add_edge(node1, node2, weight = 1)
    # E-edges: connect isolated query contigs to their 5 best-scoring references
    cnt = 0
    for i in range(e_weight.shape[0]):
        contig_name = id_to_contig[i]
        if contig_name not in G.nodes():
            sorted_idx = np.argsort(e_weight[i])
            for j in range(5):
                # BUGFIX: sorted_idx[-j] equals sorted_idx[0] (the *smallest*
                # weight) when j == 0; -(j+1) walks the five largest weights.
                idx = sorted_idx[-(j+1)]
                if e_weight[i][idx] != 0:
                    ref_name = ID_to_ref[idx]
                    if ref_name in G.nodes():
                        G.add_edge(contig_name, ref_name, weight = 1)
                        cnt += 1
    # drop reference nodes without a taxonomic label
    node_list = list(G.nodes())
    for node in node_list:
        if "~" in node and node not in contig_to_family.keys():
            G.remove_node(node)
    test_to_id = {}
    with open(f'{rootpth}/{midfolder}/phagcn_graph.csv', 'w') as file:
        file.write('Source,Target\n')
        for node in G.nodes():
            for _, neighbor in G.edges(node):
                file.write(f'{node},{neighbor}\n')
    # Generating the Knowledge Graph
    print("\n\n" + "{:-^80}".format("Generating Knowledge graph"))
    mode = "testing"
    if mode == "testing":
        # label: class index per node; -1 flags an unlabeled query contig
        test_mask = []
        label = []
        cnt = 0
        for node in G.nodes():
            try:
                label.append(contig_to_family[node])
                cnt+=1
            except:
                if "PhaGCN_" in node:
                    try:
                        label.append(-1)
                        test_mask.append(cnt)
                        test_to_id[node] = cnt
                        cnt+=1
                    except:
                        print(node)
                else:
                    print(node)
    pkl.dump(test_mask, open(f"{phagcninput_pth}/contig.mask", "wb" ) )
    adj = nx.adjacency_matrix(G)
    pkl.dump(adj, open(f"{phagcninput_pth}/contig.graph", "wb" ) )
    pkl.dump(test_to_id, open(f"{phagcninput_pth}/contig.dict", "wb" ) )
    # constructing feature map
    fn = "database"
    contig_feature = pkl.load(open(f"{phagcninput_pth}/phagcn_contig.F",'rb'))
    database_feature = pkl.load(open(f"{db_dir}/phagcn_dataset_compressF",'rb'))
    feature = []
    for node in G.nodes():
        if "~" not in node:
            idx = contig_to_id[node]
            feature.append(contig_feature[idx])
        else:
            try:
                idx = int(name_to_id[node])
                feature.append(database_feature[idx])
            except:
                print(node)
    feature = np.array(feature)
    pkl.dump(feature, open(f"{phagcninput_pth}/contig.feature", "wb" ) )
    # Graph check for each testing samples: if every labeled neighbor agrees,
    # adopt that label directly instead of leaving it to the GCN.
    cnt = 0
    for node in G.nodes:
        if "~" not in node:
            neighbor_label = []
            for edge in G.edges(node):
                neighbor = edge[1]
                if "~" in neighbor:
                    neighbor_label.append(contig_to_family[neighbor])
                else:
                    continue
            if len(set(neighbor_label)) == 1:
                label[test_to_id[node]] = neighbor_label[0]
                cnt += 1
    pkl.dump(label, open(f"{phagcninput_pth}/contig.label", "wb" ) )
    # reload everything from disk for the GCN stage
    phagcninput_pth = os.path.join(rootpth, midfolder, "phgcn/")
    seed = 123
    np.random.seed(seed)
    torch.random.manual_seed(seed)
    adj = pkl.load(open(f"{phagcninput_pth}/contig.graph",'rb'))
    labels = pkl.load(open(f"{phagcninput_pth}/contig.label",'rb'))
    features = pkl.load(open(f"{phagcninput_pth}/contig.feature",'rb'))
    test_to_id = pkl.load(open(f"{phagcninput_pth}/contig.dict",'rb'))
    idx_test = pkl.load(open(f"{phagcninput_pth}/contig.mask",'rb'))
    if not idx_test:
        # nothing left to classify
        phagcn_exception_no_visual(rootpth, midfolder, out_dir, ID2length, inputs)
    else:
        idx_test = np.array(idx_test)
        labels = np.array(labels)
        y_train = np.zeros(labels.shape)
        y_test = np.zeros(labels.shape)
        idx_train = np.array([i for i in range(len(labels)) if i not in idx_test])
        train_mask = sample_mask(idx_train, labels.shape[0])
        test_mask = sample_mask(idx_test, labels.shape[0])
        y_train[train_mask] = labels[train_mask]
        y_test[test_mask] = labels[test_mask]
        features = sp.sparse.csc_matrix(features)
        print('adj:', adj.shape)
        print('features:', features.shape)
        print('y:', y_train.shape, y_test.shape) # y_val.shape,
        print('mask:', train_mask.shape, test_mask.shape) # val_mask.shape
        features = preprocess_features(features) # [49216, 2], [49216], [2708, 1433]
        supports = preprocess_adj(adj)
        train_label = torch.from_numpy(y_train).long().to(device)
        num_classes = max(labels)+1
        # BUGFIX: the np.bool alias was removed in NumPy 1.24; the builtin
        # bool is the documented replacement and is behaviorally identical.
        train_mask = torch.from_numpy(train_mask.astype(bool)).to(device)
        test_label = torch.from_numpy(y_test).long().to(device)
        test_mask = torch.from_numpy(test_mask.astype(bool)).to(device)
        i = torch.from_numpy(features[0]).long().to(device)
        v = torch.from_numpy(features[1]).to(device)
        feature = torch.sparse.FloatTensor(i.t(), v, features[2]).float().to(device)
        i = torch.from_numpy(supports[0]).long().to(device)
        v = torch.from_numpy(supports[1]).to(device)
        support = torch.sparse.FloatTensor(i.t(), v, supports[2]).float().to(device)
        print('x :', feature)
        print('sp:', support)
        num_features_nonzero = feature._nnz()
        feat_dim = feature.shape[1]
        net = GCN(feat_dim, num_classes, num_features_nonzero)
        net.to(device)
        optimizer = optim.Adam(net.parameters(), lr=0.01)#args.learning_rate
        _ = net.train()
        # semi-supervised training; stop early once training accuracy > 0.98
        for epoch in range(400):
            # forward pass
            out = net((feature, support))
            loss = masked_loss(out, train_label, train_mask)
            loss += 5e-4 * net.l2_loss()
            # backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # output
            if epoch % 10 == 0:
                # calculating the acc
                _ = net.eval()
                out = net((feature, support))
                acc_train = phagcn_accuracy(out.detach().cpu().numpy(), train_mask.detach().cpu().numpy(), labels)
                print(epoch, loss.item(), acc_train)
                if acc_train > 0.98:
                    break
                _ = net.train()
        net.eval()
        out = net((feature, support))
        out = F.softmax(out,dim =1)
        out = out.cpu().detach().numpy()
        pred = np.argmax(out, axis = 1)
        score = np.max(out, axis = 1)
        # class index -> family name mapping for the 29 Caudovirales families
        pred_to_label = {0: 'Autographiviridae', 1: 'Straboviridae', 2: 'Herelleviridae', 3: 'Drexlerviridae', 4: 'Demerecviridae', 5: 'Peduoviridae', 6: 'Casjensviridae', 7: 'Schitoviridae', 8: 'Kyanoviridae', 9: 'Ackermannviridae', 10: 'Rountreeviridae', 11: 'Salasmaviridae', 12: 'Vilmaviridae', 13: 'Zierdtviridae', 14: 'Mesyanzhinovviridae', 15: 'Chaseviridae', 16: 'Zobellviridae', 17: 'Orlajensenviridae', 18: 'Guelinviridae', 19: 'Steigviridae', 20: 'Duneviridae', 21: 'Pachyviridae', 22: 'Winoviridae', 23: 'Assiduviridae', 24: 'Suoliviridae', 25: 'Naomviridae', 26: 'Intestiviridae', 27: 'Crevaviridae', 28: 'Pervagoviridae'}
        with open(f'{rootpth}/{midfolder}/phagcn_mid_prediction.csv', 'w') as f_out:
            _ = f_out.write("Contig,Pred,Score\n")
            for key in test_to_id.keys():
                if labels[test_to_id[key]] == -1:
                    # GCN prediction
                    _ = f_out.write(str(key) + "," + str(pred_to_label[pred[test_to_id[key]]]) + "," + str(score[test_to_id[key]]) + "\n")
                else:
                    # label was resolved by the neighbor-agreement check above
                    _ = f_out.write(str(key) + "," + str(pred_to_label[labels[test_to_id[key]]]) + "," + str(1) + "\n")
        name_list = pd.read_csv(f"{rootpth}/{midfolder}/phagcn_name_list.csv")
        prediction = pd.read_csv(f'{rootpth}/{midfolder}/phagcn_mid_prediction.csv')
        prediction = prediction.rename(columns={'Contig':'idx'})
        contig_to_pred = pd.merge(name_list, prediction, on='idx')
        contig_to_pred = contig_to_pred.rename(columns={'Contig':'Accession'})
        #contig_to_pred = contig_to_pred.drop(columns=['idx'])
        contig_to_pred.to_csv(f"{rootpth}/{midfolder}/phagcn_prediction.csv", index = None)
        # add no prediction (Nov. 13th)
        all_acc_phagcn = contig_to_pred['Accession'].values
        phamer_df = pd.read_csv(f'{rootpth}/{out_dir}/phamer_prediction.csv')
        phage_contig = phamer_df[phamer_df['Pred']=='phage']['Accession'].values
        unpredict_contig = []
        unnamed_family = []
        for contig in phage_contig:
            if contig not in all_acc_phagcn:
                if contig in check_unknown_all:
                    unnamed_family.append(contig)
                else:
                    unpredict_contig.append(contig)
        unnamed_pred = np.array([check_unknown_all[item] for item in unnamed_family])
        # NOTE: 'avaliable' spelling kept for downstream compatibility
        unnamed_pred = np.array([f'no_family_avaliable({item})' for item in unnamed_pred])
        unnamed_score = np.array([check_unknown_all_score[item] for item in unnamed_family])
        unpredict_df = pd.DataFrame({'Accession':unpredict_contig, 'Pred': ['unknown']*len(unpredict_contig), 'Score':[0]*len(unpredict_contig)})
        unnamed_df = pd.DataFrame({'Accession':unnamed_family, 'Pred': unnamed_pred, 'Score':unnamed_score})
        contig_to_pred = pd.concat((contig_to_pred, unpredict_df, unnamed_df))
        # BUGFIX: DataFrame.drop returns a new frame; the original call
        # discarded the result, so the internal 'idx' column leaked into
        # the final output CSV.
        contig_to_pred = contig_to_pred.drop(columns=['idx'])
        contig_to_pred.to_csv(f"{rootpth}/{out_dir}/phagcn_prediction.csv", index = None)
###############################################################
########################## Cherry ############################
###############################################################
# CHERRY (host prediction) stage. Re-splits the *full* checked phage contig
# set into per-contig FASTAs, reusing the same single_pth directory and the
# same "PhaGCN_<n>" alias scheme as the PhaGCN stage above.
# NOTE(review): the enumeration here runs over checked_phage_contigs.fa, not
# checked_phage_phagcn_contigs.fa, so an alias may refer to a different contig
# than in the PhaGCN stage — confirm the per-contig files are meant to be
# overwritten.
single_pth = os.path.join(rootpth, "CNN_temp/single")
check_path(single_pth)
contig2name = {}
with open(f"{rootpth}/{midfolder}/cherry_name_list.csv",'w') as list_out:
    list_out.write("Contig,idx\n")
    for contig_id, record in enumerate(SeqIO.parse(f'{rootpth}/checked_phage_contigs.fa', "fasta")):
        name = f"PhaGCN_{str(contig_id)}"
        list_out.write(record.id + "," + name+ "\n")
        contig2name[record.id] = name
        record.id = name
        _ = SeqIO.write(record, f"{single_pth}/{name}.fa", "fasta")
# Rename proteins to <contig alias>_<protein index> (same scheme as PhaGCN).
rename_rec = []
for record in SeqIO.parse(f'{rootpth}/checked_phage_protein.fa',"fasta"):
    old_name = record.id
    idx = old_name.rsplit('_', 1)[1]
    record.id = contig2name[old_name.rsplit('_', 1)[0]] +"_"+ idx
    rename_rec.append(record)
SeqIO.write(rename_rec, f'{rootpth}/{midfolder}/cherry_renamed_protein.fa', 'fasta')
# generate 4mer feature
# return_4mer yields per-contig 4-mer frequency features plus a name->row dict.
cherrypth = f'{rootpth}/{midfolder}/cherry/'
check_path(cherrypth)
test_virus, test_virus2id = return_4mer(f'{rootpth}/CNN_temp/single/')
pkl.dump(test_virus2id, open(f'{cherrypth}/test_virus.dict', 'wb'))
pkl.dump(test_virus, open(f'{cherrypth}/test_virus.F', 'wb'))
# Build a DIAMOND database from the query proteins so query-vs-query
# alignments can be merged with the reference alignments below.
try:
    make_diamond_cmd = f'diamond makedb --threads {threads} --in {rootpth}/{midfolder}/cherry_renamed_protein.fa -d {cherrypth}/test_database.dmnd'
    print("Creating Diamond database...")
    _ = subprocess.check_call(make_diamond_cmd, shell=True)
except:
    print("create database failed")
    exit(1)
# add convertxml (Nov. 8th)
# Align query proteins against the CHERRY reference db and against themselves.
run_diamond(f'{db_dir}/cherry_database.dmnd', os.path.join(rootpth, midfolder), 'cherry_renamed_protein.fa', 'cherry', threads)
convert_xml(os.path.join(rootpth, midfolder), 'cherry', scriptpth)
run_diamond(f'{cherrypth}/test_database.dmnd', os.path.join(rootpth, midfolder), f'cherry_renamed_protein.fa', 'cherry_test', threads)
convert_xml(os.path.join(rootpth, midfolder), 'cherry_test', scriptpth)
# Merge reference self-alignments, query-vs-reference, and query-vs-query hits.
database_abc_fp = f"{rootpth}/{midfolder}/cherry_merged.abc"
_ = subprocess.check_call(f"cat {db_dir}/cherry_database.self-diamond.tab.abc {rootpth}/{midfolder}/cherry_results.abc {rootpth}/{midfolder}/cherry_test_results.abc > {database_abc_fp}", shell=True)
blastp = pd.read_csv(database_abc_fp, sep=' ', names=["contig", "ref", "e-value"])
protein_id = sorted(list(set(blastp["contig"].values)|set(blastp["ref"].values)))
# query proteins are recognisable by the "PhaGCN" alias prefix
contig_protein = [item for item in protein_id if "PhaGCN" == item.split("_")[0]]
contig_id = [item.rsplit("_", 1)[0] for item in contig_protein]
description = ["hypothetical protein" for item in contig_protein]
gene2genome = pd.DataFrame({"protein_id": contig_protein, "contig_id": contig_id ,"keywords": description})
gene2genome.to_csv(f"{rootpth}/{midfolder}/cherry_contig_gene_to_genome.csv", index=None)
_ = subprocess.check_call(f"cat {db_dir}/cherry/database_gene_to_genome.csv {rootpth}/{midfolder}/cherry_contig_gene_to_genome.csv > {rootpth}/{midfolder}/cherry_gene_to_genome.csv", shell=True)
# MCL protein clustering for CHERRY (mirrors the PhaGCN clustering above).
gene2genome_fp = f"{rootpth}/{midfolder}/cherry_gene_to_genome.csv"
gene2genome_df = pd.read_csv(gene2genome_fp, sep=',', header=0)
# Parameters for MCL
pc_overlap, pc_penalty, pc_haircut, pc_inflation = 0.8, 2.0, 0.1, 2.0
pcs_fp = make_protein_clusters_mcl(database_abc_fp, os.path.join(rootpth, midfolder), pc_inflation)
print("Building the cluster and profiles (this may take some time...)")
# Dump MCL results
protein_df, clusters_df, profiles_df, contigs_df = build_clusters(pcs_fp, gene2genome_df)
print("Saving files")
dfs = [gene2genome_df, contigs_df, clusters_df]
names = ['proteins', 'contigs', 'pcs']
for name, df in zip(names, dfs):
    fn = "Cyber_cherry_{}.csv".format(name)
    fp = os.path.join(f'{rootpth}/{midfolder}', fn)
    index_id = name.strip('s') + '_id'
    df.set_index(index_id).to_csv(fp)
# Replace names
contigs_csv_df = contigs_df.copy()
contigs_csv_df.index.name = "pos"
contigs_csv_df.reset_index(inplace=True)
pcs_csv_df = clusters_df.copy()
profiles = profiles_df.copy()
# Filtering the PC profiles that appears only once
before_filter = len(profiles)
cont_by_pc = profiles.groupby("pc_id").count().contig_id.reset_index()
# get the number of contigs for each pcs and add it to the dataframe
cont_by_pc.columns = ["pc_id", "nb_proteins"]
pcs_csv_df = pd.merge(pcs_csv_df, cont_by_pc, left_on="pc_id", right_on="pc_id", how="left")
pcs_csv_df.fillna({"nb_proteins": 0}, inplace=True)
# Drop the pcs that <= 1 contig from the profiles.
pcs_csv_df = pcs_csv_df[pcs_csv_df['nb_proteins'] > 1] # .query("nb_contigs>1")
at_least_a_cont = cont_by_pc[cont_by_pc['nb_proteins'] > 1] # cont_by_pc.query("nb_contigs>1")
profiles = profiles[profiles['pc_id'].isin(at_least_a_cont.pc_id)]
pcs_csv_df = pcs_csv_df.reset_index(drop=True)
pcs_csv_df.index.name = "pos"
pcs_csv_df = pcs_csv_df.reset_index()
matrix, singletons = build_pc_matrices(profiles, contigs_csv_df, pcs_csv_df)
profiles_csv = {"matrix": matrix, "singletons": singletons}
merged_df = contigs_csv_df
merged_fp = os.path.join(cherrypth, 'merged_df.csv')
merged_df.to_csv(merged_fp)
ntw = create_network(matrix, singletons, thres=1, max_sig=300)
fi = to_clusterer(ntw, f"{cherrypth}/intermediate.ntw", merged_df.copy())
# BLASTN
# Concatenate the per-contig FASTAs into one query file; fall back to a
# shell cat if Biopython parsing fails for any file.
try:
    rec = []
    for file in os.listdir(f'{rootpth}/CNN_temp/single/'):
        for record in SeqIO.parse(f'{rootpth}/CNN_temp/single/{file}', 'fasta'):
            rec.append(record)
    SeqIO.write(rec, f"{cherrypth}/test.fa", 'fasta')
except:
    _ = subprocess.check_call(f"cat {rootpth}/CNN_temp/single/* > {cherrypth}/test.fa", shell=True)
# Megablast query contigs against the reference virus database to find
# near-identical known viruses (used later as confident labels).
query_file = f"{cherrypth}/test.fa"
db_virus_prefix = f"{db_dir}/virus_db/allVIRUS"
output_file = f"{cherrypth}/virus_out.tab"
virus_call = NcbiblastnCommandline(query=query_file,db=db_virus_prefix,out=output_file,outfmt="6 qseqid sseqid evalue pident length qlen", evalue=1e-10,
                   task='megablast', max_target_seqs=1, perc_identity=90,num_threads=threads)
virus_call()
virus_pred = {}
with open(output_file) as file_out:
    for line in file_out.readlines():
        parse = line.replace("\n", "").split("\t")
        virus = parse[0]
        ref_virus = parse[1].split('|')[1]
        ref_virus = ref_virus.split('.')[0]
        ident = float(parse[-3])
        length = float(parse[-2])
        qlen = float(parse[-1])
        # NOTE(review): BLAST pident is on a 0-100 scale, so `ident > 0.95`
        # is effectively always true once perc_identity=90 has filtered the
        # hits; confirm whether `ident > 95` was intended.
        if virus not in virus_pred and length/qlen > 0.95 and ident > 0.95:
            virus_pred[virus] = ref_virus
pkl.dump(virus_pred, open(f'{cherrypth}/virus_pred.dict', 'wb'))
# Load the clusterer network and re-emit it as a phage-phage edge list
# in "<src>,<dst>" CSV form.
G = nx.Graph()
with open(f"{cherrypth}/intermediate.ntw") as file_in:
    for raw_line in file_in.readlines():
        fields = raw_line[:-1].split(" ")
        G.add_edge(fields[0], fields[1], weight = 1)
graph = f"{cherrypth}/phage_phage.ntw"
with open(graph, 'w') as file_out:
    for src in G.nodes():
        for _, dst in G.edges(src):
            _ = file_out.write(src + "," + dst + "\n")
# Short-read blastn of query contigs against known prokaryote CRISPR spacers;
# a strong spacer hit directly links a phage to its host.
query_file = f"{cherrypth}/test.fa"
db_host_crispr_prefix = f"{db_dir}/crispr_db/allCRISPRs"
output_file = f"{cherrypth}/crispr_out.tab"
crispr_call = NcbiblastnCommandline(query=query_file,db=db_host_crispr_prefix,out=output_file,outfmt="6 qseqid sseqid evalue pident length slen", evalue=1,gapopen=10,penalty=-1,
                  gapextend=2,word_size=7,dust='no',
                 task='blastn-short',perc_identity=90,num_threads=threads)
crispr_call()
crispr_pred = {}
with open(output_file) as file_out:
    for line in file_out.readlines():
        parse = line.replace("\n", "").split("\t")
        virus = parse[0]
        prokaryote = parse[1].split('|')[1]
        prokaryote = prokaryote.split('.')[0]
        ident = float(parse[-3])
        length = float(parse[-2])
        slen = float(parse[-1])
        # NOTE(review): pident is 0-100, so `ident > 0.95` is effectively
        # always true after perc_identity=90; confirm `ident > 95` intent.
        if virus not in crispr_pred and length/slen > 0.95 and ident > 0.95:
            crispr_pred[virus] = prokaryote
pkl.dump(crispr_pred, open(f'{cherrypth}/crispr_pred.dict', 'wb'))
# Blast query contigs against every reference prokaryote genome, then merge
# each result with the precomputed reference-vs-reference tab files.
# NOTE(review): blast_database_out points inside db_dir — assumes the blast
# databases were built there beforehand; check_path only creates directories.
blast_database_out = f'{db_dir}/blast_db/'
blast_tab_out = f'{cherrypth}/blast_tab'
all_blast_tab = f'{cherrypth}/all_blast_tab'
check_path(blast_database_out)
check_path(blast_tab_out)
check_path(all_blast_tab)
# database only
genome_list = os.listdir(f'{db_dir}/prokaryote')
for genome in genome_list:
    accession = genome.split(".")[0]
    blast_cmd = f'blastn -query {cherrypth}/test.fa -db {blast_database_out}/{accession} -outfmt 6 -out {blast_tab_out}/{accession}.tab -num_threads {threads}'
    print("Running blastn...")
    _ = subprocess.check_call(blast_cmd, shell=True)
for file in os.listdir(blast_tab_out):
    os.system(f"cat {blast_tab_out}/{file} {db_dir}/blast_tab/{file} > {all_blast_tab}/{file}")
# add connections between prokaryotes and viruses
# Each tab file is named <prokaryote accession>.tab; column 0 of every hit
# line is the query (virus) id.
tab_file_list = os.listdir(all_blast_tab)
prokaryote2virus = {}
for file in tab_file_list:
    prokaryote_id = file.split('.')[0]
    with open(f'{all_blast_tab}/{file}') as file_in:
        for line in file_in.readlines():
            virus_id = line.split('\t')[0]
            # setdefault replaces the previous bare try/except insertion and
            # the unused virus_id_list accumulator.
            prokaryote2virus.setdefault(prokaryote_id, []).append(virus_id)
# De-duplication (resulting order is not relied upon downstream)
for key in prokaryote2virus:
    prokaryote2virus[key] = list(set(prokaryote2virus[key]))
# Save the virus-host graph as "<prokaryote>,<virus>" lines
with open(f"{cherrypth}/phage_host.ntw", 'w') as file_out:
    for prokaryote in prokaryote2virus:
        for virus in prokaryote2virus[prokaryote]:
            _ = file_out.write(prokaryote + "," + virus + "\n")
# Build the combined phage-phage / phage-host graph. Node ids are accessions
# with any trailing ".<version>" stripped.
phage_phage_ntw = f"{cherrypth}/phage_phage.ntw"
phage_host_ntw = f"{cherrypth}/phage_host.ntw"
# Add virus-virus edges
G = nx.Graph()
with open(phage_phage_ntw) as file_in:
    for line in file_in.readlines():
        tmp = line[:-1].split(",")
        node1 = tmp[0].split('.')[0]
        node2 = tmp[1].split('.')[0]
        G.add_edge(node1, node2, weight = 1)
# Add blastn edges
with open(phage_host_ntw) as file_in:
    for line in file_in.readlines():
        tmp = line[:-1].split(",")
        node1 = tmp[0].split('.')[0]
        node2 = tmp[1].split('.')[0]
        G.add_edge(node1, node2, weight = 1)
bacteria_df = pd.read_csv(f'{db_dir}/cherry/prokaryote.csv')
virus_df = pd.read_csv(f'{db_dir}/cherry/virus.csv')
bacteria_list = os.listdir(f'{db_dir}/prokaryote/')
bacteria_list = [name.split('.')[0] for name in bacteria_list]
# add crispr edges
species2bacteria = {bacteria_df[bacteria_df['Accession'] == item]['Species'].values[0]: item for item in bacteria_list}
crispr_pred = pkl.load(open(f'{cherrypth}/crispr_pred.dict', 'rb'))
# NOTE(review): crispr_pred values look like prokaryote *accessions* (parsed
# from BLAST subject ids) while species2bacteria is keyed by *species* names,
# so this membership test may rarely/never match — confirm intent.
for virus, host in crispr_pred.items():
    if host in species2bacteria:
        G.add_edge(virus, species2bacteria[host])
# add dataset edges: connect each reference prokaryote to the known phages
# recorded for its species.
for bacteria in bacteria_list:
    species = bacteria_df[bacteria_df['Accession'] == bacteria]['Species'].values[0]
    phage_list = virus_df[virus_df['Species'] == species]['Accession'].values
    for phage in phage_list:
        if phage in G.nodes():
            G.add_edge(bacteria, phage, weight = 1)
# dump the graph G
with open(f'{rootpth}/{midfolder}/cherry_graph.csv', 'w') as file:
    file.write('Source,Target\n')
    for node in G.nodes():
        for _, neighbor in G.edges(node):
            file.write(f'{node},{neighbor}\n')
# Assemble the node feature matrix in graph-node order from the reference
# virus/prokaryote features and the query-contig 4-mer features.
virus2id = pkl.load(open(f"{db_dir}/cherry/virus.dict",'rb'))
virusF = pkl.load(open(f"{db_dir}/cherry/virus.F",'rb'))
prokaryote2id = pkl.load(open(f"{db_dir}/cherry/prokaryote.dict",'rb'))
prokaryoteF = pkl.load(open(f"{db_dir}/cherry/prokaryote.F",'rb'))
test_virus2id = pkl.load(open(f"{cherrypth}/test_virus.dict",'rb'))
test_virusF = pkl.load(open(f"{cherrypth}/test_virus.F",'rb'))
test_prokaryote2id = {}
node_feature = []
for node in G.nodes():
    # if prokaryote node
    if node in prokaryote2id.keys():
        node_feature.append(prokaryoteF[prokaryote2id[node]])
    # if virus node
    elif node in virus2id.keys():
        node_feature.append(virusF[virus2id[node]])
    # if test virus node
    elif node in test_virus2id.keys():
        node_feature.append(test_virusF[test_virus2id[node]])
    # if test prokaryote node
    # NOTE(review): test_prokaryoteF is never defined — this branch would
    # raise NameError, but it is unreachable while test_prokaryote2id is {}.
    elif node in test_prokaryote2id.keys():
        node_feature.append(test_prokaryoteF[test_prokaryote2id[node]])
    else:
        print(f"node error {node}")
        exit()
node_feature = np.array(node_feature)
crispr_pred = pkl.load(open(f'{cherrypth}/crispr_pred.dict', 'rb'))
virus_pred = pkl.load(open(f'{cherrypth}/virus_pred.dict', 'rb'))
# self-assignment kept for readability parity with prokaryote_df alias below
virus_df = virus_df
prokaryote_df = bacteria_df
idx = 0
test_id = {}
node2label = {}
cnt = 0
# Label every node. test_id codes: 0 = reference node (known label),
# 1 = query with a confident label (neighbor agreement / CRISPR / BLASTN),
# 2 = query still unlabeled.
for node in G.nodes():
    # if test virus node
    if "PhaGCN" in node:
        neighbor_label = []
        for _, neighbor in G.edges(node):
            if neighbor in virus2id.keys():
                virus_label = virus_df[virus_df['Accession'] == neighbor]['Species'].values[0]
                neighbor_label.append(virus_label)
            elif neighbor in prokaryote2id.keys():
                prokaryote_label = prokaryote_df[prokaryote_df['Accession'] == neighbor]['Species'].values[0]
                neighbor_label.append(prokaryote_label)
        # subgraph
        if len(set(neighbor_label)) == 1:
            node2label[node] = neighbor_label[0]
            test_id[node] = 1
        # CRISPR
        elif node in crispr_pred:
            node2label[node] = prokaryote_df[prokaryote_df['Accession'] == crispr_pred[node]]['Species'].values[0]
            test_id[node] = 1
        elif node in virus_pred:
            node2label[node] = virus_df[virus_df['Accession'] == virus_pred[node]]['Species'].values[0]
            test_id[node] = 1
        # unlabelled
        else:
            node2label[node] = 'unknown'
            test_id[node] = 2
    # if phage or host node
    elif node in prokaryote2id.keys():
        prokaryote_label = prokaryote_df[prokaryote_df['Accession'] == node]['Species'].values[0]
        node2label[node] = prokaryote_label
        test_id[node] = 0
    elif node in test_prokaryote2id.keys():
        prokaryote_label = prokaryote_df[prokaryote_df['Accession'] == node]['Species'].values[0]
        node2label[node] = prokaryote_label
        test_id[node] = 0
    elif node in virus2id.keys():
        virus_label = virus_df[virus_df['Accession'] == node]['Species'].values[0]
        node2label[node] = virus_label
        test_id[node] = 0
    else:
        print("Error: " + node)
    idx += 1
# check subgraph situation 1
# For connected components made up purely of query contigs, propagate a
# CRISPR hit to the whole component when exactly one member has one.
for sub in nx.connected_components(G):
    flag = 0
    for node in sub:
        if "PhaGCN" not in node:
            flag = 1
    # use CRISPR
    if not flag:
        CRISPR_label = ""
        CRISPR_cnt = 0
        for node in sub:
            if node in crispr_pred:
                CRISPR_cnt+=1
                CRISPR_label = crispr_pred[node]
        if CRISPR_cnt == 1:
            # NOTE(review): CRISPR_label is a prokaryote accession while other
            # node2label values are species names — confirm this is intended.
            for node in sub:
                node2label[node] = CRISPR_label
# check subgraph situation 2
# If all labeled reference nodes in a component agree, assign that label to
# every node; components with no reference labels at all become 'unknown'.
for sub in nx.connected_components(G):
    sub_label = []
    for node in sub:
        if node in virus2id.keys():
            virus_label = virus_df[virus_df['Accession'] == node]['Species'].values[0]
            sub_label.append(virus_label)
        elif node in prokaryote2id.keys():
            prokaryote_label = prokaryote_df[prokaryote_df['Accession'] == node]['Species'].values[0]
            sub_label.append(prokaryote_label)
    if len(set(sub_label)) == 1:
        for node in sub:
            node2label[node] = sub_label[0]
            test_id[node] = 1
    elif len(set(sub_label)) == 0:
        for node in sub:
            node2label[node] = 'unknown'
            test_id[node] = 3
# check graph situation 3
# Majority vote over a query node's labeled neighbors: adopt the most common
# neighbor label when it clearly dominates.
for node in G.nodes():
    # if test virus node
    if "PhaGCN" in node:
        neighbor_label = []
        for _, neighbor in G.edges(node):
            if neighbor in virus2id.keys():
                virus_label = virus_df[virus_df['Accession'] == neighbor]['Species'].values[0]
                neighbor_label.append(virus_label)
            elif neighbor in prokaryote2id.keys():
                prokaryote_label = prokaryote_df[prokaryote_df['Accession'] == neighbor]['Species'].values[0]
                neighbor_label.append(prokaryote_label)
        try:
            if not neighbor_label:
                continue
            cnt = Counter(neighbor_label)
            most_cnt = cnt.most_common()[0]
            # NOTE(review): sub_label is left over from the *previous* loop
            # (last connected component processed) — the dominance test here
            # presumably meant to use neighbor_label; verify before changing.
            if len(set(sub_label)) == 0:
                continue
            if most_cnt[1]- 1/len(set(sub_label)) > 0.3:
                node2label[node] = most_cnt[0]
                test_id[node] = 1
        except:
            continue
# Persist graph artefacts and index the trainable (reference) host nodes.
id2node = {idx: node for idx, node in enumerate(G.nodes())}
node2id = {node: idx for idx, node in enumerate(G.nodes())}
adj = nx.adjacency_matrix(G)
pkl.dump(adj, open(f"{cherrypth}/graph.list", "wb" ))
pkl.dump(node_feature, open(f"{cherrypth}/feature.list", "wb" ))
pkl.dump(node2label, open(f"{cherrypth}/node2label.dict", "wb" ))
pkl.dump(id2node, open(f"{cherrypth}/id2node.dict", "wb" ))
pkl.dump(node2id, open(f"{cherrypth}/node2id.dict", "wb" ))
pkl.dump(test_id, open(f"{cherrypth}/test_id.dict", "wb" ))
# model
trainable_host = []
for file in os.listdir(f'{db_dir}/prokaryote/'):
    trainable_host.append(file.rsplit('.', 1)[0])
idx_test= test_id
host2id = {}
label2hostid = {}
trainable_host_idx = []
trainable_label = []
for idx, node in id2node.items():
    # if prokaryote
    if node in trainable_host:
        host2id[node] = idx
        trainable_host_idx.append(idx)
        trainable_label.append(node2label[node])
        # last prokaryote seen for a label wins the representative slot
        label2hostid[node2label[node]] = idx
# pre-processing
features = sp.sparse.csc_matrix(node_feature)
print('adj:', adj.shape)
print('features:', features.shape)
# convert to torch tensor
features = preprocess_features(features)
supports = preprocess_adj(adj)
num_classes = len(set(list(node2label.values())))+1
# graph
# rebuild dense feature/support tensors from the sparse (indices, values,
# shape) triples returned by the preprocessing helpers
i = torch.from_numpy(features[0]).long().to(device)
v = torch.from_numpy(features[1]).to(device)
feature = torch.sparse.FloatTensor(i.t(), v, features[2]).float().to(device)
feature = feature.to_dense()
i = torch.from_numpy(supports[0]).long().to(device)
v = torch.from_numpy(supports[1]).to(device)
support = torch.sparse.FloatTensor(i.t(), v, supports[2]).float().to(device)
support = support.to_dense()
print('x :', feature)
print('sp:', support)
feat_dim = adj.shape[0]
node_dim = feature.shape[1]
# Definition of the model
net = Cherry.encoder(feat_dim, node_dim, node_dim, 0)
decoder = Cherry.decoder(node_dim, 128, 32)
# Load pre-trained model
encoder_dict = torch.load(f"{parampth}/cherry/Encoder_Species.pkl", map_location='cpu')
decoder_dict = torch.load(f"{parampth}/cherry/Decoder_Species.pkl", map_location='cpu')
net.load_state_dict(encoder_dict)
decoder.load_state_dict(decoder_dict)
net.to(device)
decoder.to(device)
# end-to-end training
# optimizer/loss are set up but only inference is run below
params = list(net.parameters()) + list(decoder.parameters())
optimizer = optim.Adam(params, lr=0.001)#args.learning_rate
loss_func = nn.BCEWithLogitsLoss()
# predicting host
# For each query virus, score every candidate host species with the decoder
# on the (virus - host) embedding difference; confident labels score 1.
node2pred = {}
with torch.no_grad():
    encode = net((feature, support))
    for i in range(len(encode)):
        confident_label = 'unknown'
        # skip reference nodes (0) and fully unlabeled components (3)
        if idx_test[id2node[i]] == 0 or idx_test[id2node[i]] == 3:
            continue
        if idx_test[id2node[i]] == 1:
            confident_label = node2label[id2node[i]]
        virus_feature = encode[i]
        pred_label_score = []
        for label in set(trainable_label):
            if label == confident_label:
                pred_label_score.append((label, 1))
                continue
            prokaryote_feature = encode[label2hostid[label]]
            pred = decoder(virus_feature - prokaryote_feature)
            pred_label_score.append((label, torch.sigmoid(pred).detach().cpu().numpy()[0]))
        node2pred[id2node[i]] = sorted(pred_label_score, key=lambda tup: tup[1], reverse=True)
    # CRISPR-only viruses that never made it into the graph prediction
    for virus in crispr_pred:
        if virus not in node2pred:
            pred = prokaryote_df[prokaryote_df['Accession'] == crispr_pred[virus]]['Species'].values[0]
            node2pred[virus] = [(pred, 1)]
    # dump the prediction
    with open(f"{rootpth}/{midfolder}/cherry_mid_predict.csv", 'w') as file_out:
        file_out.write('Contig,')
        for i in range(inputs.topk):
            file_out.write(f'Top_{i+1}_label,Score_{i+1},')
        file_out.write('Type\n')
        for contig in node2pred:
            file_out.write(f'{contig},')
            cnt = 1
            for label, score in node2pred[contig]:
                if cnt > inputs.topk:
                    break
                cnt+=1
                file_out.write(f'{label},{score:.3f},')
            if contig in crispr_pred:
                file_out.write(f'CRISPR')
            else:
                file_out.write(f'Predict')
            file_out.write('\n')
# Join predictions back to original contig accessions, then append an
# 'unknown' row for every phage contig that got no host prediction.
tmp_pred = pd.read_csv(f"{rootpth}/{midfolder}/cherry_mid_predict.csv")
name_list = pd.read_csv(f"{rootpth}/{midfolder}/cherry_name_list.csv")
prediction = tmp_pred.rename(columns={'Contig':'idx'})
contig_to_pred = pd.merge(name_list, prediction, on='idx')
contig_to_pred = contig_to_pred.rename(columns={'Contig': 'Accession'})
#contig_to_pred = contig_to_pred.drop(columns=['idx'])
contig_to_pred.to_csv(f"{rootpth}/{midfolder}/cherry_prediction.csv", index = None)
all_Contigs = contig_to_pred['Accession'].values
all_Pred = contig_to_pred['Top_1_label'].values
all_Score = contig_to_pred['Score_1'].values
all_Type = contig_to_pred['Type'].values
# add no prediction (Nov. 13th)
all_acc_cherry = contig_to_pred['Accession'].values
phamer_df = pd.read_csv(f'{rootpth}/{out_dir}/phamer_prediction.csv')
phage_contig = phamer_df[phamer_df['Pred']=='phage']['Accession'].values
unpredict_contig = []
for contig in phage_contig:
    if contig not in all_acc_cherry:
        unpredict_contig.append(contig)
all_Contigs = np.concatenate((all_Contigs, np.array(unpredict_contig)))
all_Pred = np.concatenate((all_Pred, np.array(['unknown']*len(unpredict_contig))))
all_Score = np.concatenate((all_Score, np.array([0]*len(unpredict_contig))))
all_Type = np.concatenate((all_Type, np.array(['-']*len(unpredict_contig))))
contig_to_pred = pd.DataFrame({'Accession': all_Contigs, 'Pred': all_Pred, 'Score': all_Score, 'Type': all_Type})
contig_to_pred.to_csv(f"{rootpth}/{out_dir}/cherry_prediction.csv", index = None)
#### draw network
if os.path.isfile(os.path.join(rootpth, midfolder, 'phagcn_graph.csv')):
drop_network('phagcn', rootpth, midfolder, db_dir, out_dir)
if os.path.isfile(os.path.join(rootpth, midfolder, 'cherry_graph.csv')):
drop_network('cherry', rootpth, midfolder, db_dir, out_dir)
#### download files
# protein files
blast_df = pd.read_csv(f"{rootpth}/{midfolder}/phamer_results.abc", sep=' ', names=['query', 'ref', 'evalue'])
protein2evalue = parse_evalue(blast_df, f'{rootpth}/{midfolder}', 'phamer')
rec = []
for record in SeqIO.parse(f'{rootpth}/{midfolder}/test_protein.fa', 'fasta'):
try:
protein2evalue[record.id]
rec.append(record)
except:
pass
SeqIO.write(rec, f'{rootpth}/{out_dir}/significant_proteins.fa', 'fasta')
os.system(f"cp {rootpth}/{midfolder}/phamer_results.tab {rootpth}/{out_dir}/blast_results.tab")
os.system(f"sed -i '1i\qseqid\tsseqid\tpident\tlength\tmismatch\tgapopen\tqstart\tqend\tsstart\tsend\tevalue' {rootpth}/{out_dir}/blast_results.tab")
| KennthShang/PhaBOX | main.py | main.py | py | 55,258 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
17087609247 | """
Author: Bradley Fernando
Purpose: Uses Netmiko to connect to devices directly instead of using the
plugin. Cisco devices also establish connections via SSH keys.
Usage:
python exercise3.py
Output:
cisco4#
cisco3#
nxos2#
arista2#
arista3#
pyclass@srx1>
arista1#
arista4#
nxos1#
"""
from nornir import InitNornir
def netmiko_direct(task):
    """Open a direct Netmiko connection for the task's host and print its prompt.

    Cisco IOS hosts are switched to the 'student1' user (SSH-key auth) before
    connecting.
    """
    host = task.host
    if "ios" in host.platform:
        host.username = "student1"
    conn = host.get_connection("netmiko", task.nornir.config)
    print(conn.find_prompt())
def main():
    """Initialise Nornir from config.yaml and run the direct-connection task."""
    InitNornir(config_file="config.yaml").run(task=netmiko_direct)


if __name__ == "__main__":
    main()
| bfernando1/nornir-automation | week7/exercise3/exercise3.py | exercise3.py | py | 716 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nornir.InitNornir",
"line_number": 33,
"usage_type": "call"
}
] |
17422403783 | import json
from glob import glob
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
import yaml
from PIL import Image
from sklearn.model_selection import train_test_split
from termcolor import colored
from torch.utils.data import Dataset
from torchvision.transforms import ToTensor
from tqdm import tqdm
from ..metrics import ComputeMetrics
from .utils import Augmentator
def red_bold(x):
    """Return *x* rendered as a bold red terminal string."""
    return colored(str(x), "red", attrs=["bold"])
def blue_bold(x):
    """Return *x* rendered as a bold blue terminal string."""
    return colored(str(x), "blue", attrs=["bold"])
class KvasirDatasetBase(Dataset):
    """
    Base Dataset for the Kvasir-SEG segmentation data.

    Loads image/mask pairs from ``<root_dir>/images`` and ``<root_dir>/masks``
    (pairs are matched by filename stem), optionally attaches bounding boxes
    read from a JSON file, and applies Augmentator-provided transforms on
    access.  Each ``__getitem__`` returns a dict with ``input``, ``target``,
    ``relative_area``, ``name`` and (when available) normalised ``bboxes``.
    """
    def __init__(
        self,
        root_dir: Union[str, Path],
        bboxes_file: Optional[str] = "kavsir_bboxes.json",
        image_format: str = "jpg",
        mask_format: str = "jpg",
        # NOTE(review): mutable default argument; appears safe only as long as
        # Augmentator never mutates it — confirm.
        augment_conf: Union[Dict[str, Any], Union[str, Path]] = {},
    ) -> None:
        self.image_format = image_format
        self.mask_format = mask_format
        self.root_dir = Path(root_dir)
        self.images_dir = self.root_dir / "images"
        self.masks_dir = self.root_dir / "masks"
        self.bboxes_file = self.root_dir / bboxes_file
        # prepare data: index raw images by filename stem
        self.data_images = {}
        for name in tqdm(glob(f"{self.images_dir}/*.{self.image_format}")):
            try:
                name_key = Path(name).name.split(".")[0]
                image_raw = Image.open(name)
                self.data_images[name_key] = {"pil_image_raw": image_raw}
            except Exception:
                print(f"{red_bold('error during reading image:')}: {name}")
        # attach the matching mask to each image entry
        for name in tqdm(glob(f"{self.masks_dir}/*.{self.mask_format}")):
            try:
                name_key = Path(name).name.split(".")[0]
                if name_key not in self.data_images:
                    # deliberate KeyError below routes this mask into the
                    # except branch (there is no corresponding image)
                    print("key error")
                    self.data_images[name_key]
                image_mask = Image.open(name)
                self.data_images[name_key]["pil_image_mask"] = image_mask
                size_image = self.data_images[name_key]["pil_image_raw"].size
                size_mask = self.data_images[name_key]["pil_image_mask"].size
                assert (
                    size_image == size_mask
                ), f"the mask and image size should be the same, name: {name_key}"
            except Exception:
                print(f"{red_bold('error during reading image:')}: {name}")
        self.data_names = list(self.data_images.keys())
        self.data_names.sort()
        # prepare bboxes: optional, dataset still works without the JSON file
        try:
            with open(self.bboxes_file) as f:
                bboxes = json.load(f)
            for name_key in bboxes:
                if name_key in self.data_images:
                    self.data_images[name_key]["bboxes"] = bboxes.get(name_key)
        except FileNotFoundError:
            print(
                f"Warning! {self.bboxes_file} file not found, dataset will be without bboxes"
            )
        except KeyError:
            print(f"Warning! {self.bboxes_file} doesn't consist of bbox for {name_key}")
        # prepare augmentations
        self.augmentator = Augmentator(augment_conf)
        self.to_tensor = ToTensor()
    def set_up_augmentator(
        self, augment_conf: Union[Dict[str, Any], Union[str, Path]] = {}
    ) -> None:
        """Replace the augmentation pipeline with one built from *augment_conf*."""
        self.augmentator = Augmentator(augment_conf)
    def __getitem__(
        self, idx: int, return_bboxes: bool = True
    ) -> Dict["str", Union[torch.Tensor, List[torch.Tensor]]]:
        """Return the augmented sample at *idx* as a dict of tensors.

        ``target`` is a binary (0/1) long mask; ``relative_area`` is the
        fraction of mask pixels over the image area; ``bboxes`` (if present
        and *return_bboxes*) are xyxy boxes normalised to [0, 1].
        """
        input_tensor = self.data_images[self.data_names[idx]].get("pil_image_raw")
        target_mask_tensor = self.data_images[self.data_names[idx]].get(
            "pil_image_mask"
        )
        input_tensor = np.array(input_tensor)
        target_mask_tensor = np.array(target_mask_tensor)
        # augmentations are re-built per call (they may be stochastic) and are
        # applied jointly to image and mask
        augmentations = self.augmentator()
        transformed = augmentations(image=input_tensor, mask=target_mask_tensor)
        input_tensor = transformed["image"]
        target_mask_tensor = transformed["mask"]
        input_tensor = self.to_tensor(input_tensor)
        # keep a single mask channel, then binarise: exactly-1.0 pixels are foreground
        target_mask_tensor = self.to_tensor(target_mask_tensor)[0, :, :]
        target_mask_tensor = target_mask_tensor == 1.0
        target_mask_tensor = target_mask_tensor.long()
        result = {"input": input_tensor, "target": target_mask_tensor}
        if "bboxes" in self.data_images[self.data_names[idx]] and return_bboxes:
            curr_bboxes = self.data_images[self.data_names[idx]].get("bboxes")
            height_orig = curr_bboxes["height"]
            weight_orig = curr_bboxes["width"]
            normalized_bboxes = []
            for bbox in curr_bboxes["bbox"]:
                normalized_bboxes.append(
                    torch.tensor(
                        [
                            bbox["xmin"] / weight_orig,
                            bbox["ymin"] / height_orig,
                            bbox["xmax"] / weight_orig,
                            bbox["ymax"] / height_orig,
                        ]
                    )
                )
            result["bboxes"] = normalized_bboxes
        object_area = target_mask_tensor.sum()
        image_area = np.product(target_mask_tensor.shape[-2:])
        relative_area = object_area / image_area
        result["relative_area"] = relative_area
        result["name"] = self.data_names[idx]
        return result
    def __len__(self) -> int:
        """Return the number of image/mask pairs."""
        return len(self.data_images)
class KvasirDataset(KvasirDatasetBase):
    """Kvasir-SEG dataset optionally restricted to a subset of sample names."""

    def __init__(self, names: Optional[List[str]] = None, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        if names is not None:
            # Adopt (and in-place sort) the requested name list, then drop
            # every loaded sample that is not part of it.
            self.data_names = names
            self.data_names.sort()
            keep = set(self.data_names)
            for key in list(self.data_images):
                if key not in keep:
                    del self.data_images[key]

    def __getitem__(self, idx) -> Dict["str", Union[torch.Tensor, List[torch.Tensor]]]:
        """Return the sample at *idx*, always without bounding boxes."""
        return super().__getitem__(idx, return_bboxes=False)
def load_dataset(resource: Union[Path, str, Dict[str, Any]]) -> KvasirDataset:
    """Build a KvasirDataset from a config mapping or a path to a YAML config.

    The configuration must contain a ``data_config`` section with
    ``path_to_dataset`` and ``augment_config`` entries.
    """
    config = resource
    if isinstance(resource, (Path, str)):
        resource = Path(resource)
        with open(resource, "r") as file:
            config = yaml.safe_load(file)
    assert "data_config" in config, f"wrong resource {resource} construction"
    data_cfg = config["data_config"]
    return KvasirDataset(
        root_dir=data_cfg["path_to_dataset"],
        augment_conf=data_cfg["augment_config"],
    )
def load_datasets(
    resource: Union[Path, str, Dict[str, Any]]
) -> Sequence[KvasirDataset]:
    """
    Build (train, test) KvasirDatasets from a configuration.

    Expects the resource (mapping, or path to a YAML file) to contain a
    ``data_config`` section.  The split is stratified by each sample's
    relative-area group and sized/seeded from ``train_test_split`` settings.
    """
    config = resource
    if isinstance(resource, (Path, str)):
        resource = Path(resource)
        with open(resource, "r") as file:
            config = yaml.safe_load(file)
    assert "data_config" in config, f"wrong resource {resource} construction"
    data_cfg = config["data_config"]
    path_to_dataset = data_cfg["path_to_dataset"]
    train_augment = data_cfg["augment_config"]["train"]
    test_augment = data_cfg["augment_config"]["test"]
    seed = data_cfg["train_test_split"]["seed"]
    lens = data_cfg["train_test_split"]["lens"]
    dataset = KvasirDatasetBase(
        root_dir=path_to_dataset, augment_conf=train_augment
    )
    compute_metrics = ComputeMetrics(config)
    # Collect sample names and their area-based stratification groups.
    names, groups = [], []
    for item in dataset:
        names.append(item["name"])
        groups.append(
            compute_metrics.compute_group_area_by_relarea(item["relative_area"].item())
        )
    test_size = lens[1] / sum(lens)
    train_names, test_names, _, _ = train_test_split(
        names, groups, test_size=test_size, random_state=seed, stratify=groups
    )
    dataset_train = KvasirDataset(
        names=train_names,
        root_dir=path_to_dataset,
        augment_conf=train_augment,
    )
    dataset_test = KvasirDataset(
        names=test_names,
        root_dir=path_to_dataset,
        augment_conf=test_augment,
    )
    return dataset_train, dataset_test
| GerasimovIV/kvasir-seg | src/data_utils/dataset.py | dataset.py | py | 8,726 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "termcolor.colored",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.U... |
27166856149 | import logging
from typing import List
import requests
from config import settings
from models.media import Media
class YouTubeService:
    """Thin wrapper around the YouTube Data API v3 search endpoint."""

    API_URL = 'https://www.googleapis.com/youtube/v3/'

    def search_video(self, query: str) -> List[Media]:
        """Search YouTube for *query* and return matching videos as Media objects.

        :param query: Free-text search string.
        :raises Exception: when the request fails, the response body is not
            JSON, or the payload lacks the expected structure; the original
            error is chained as ``__cause__``.
        """
        search_url = self.API_URL + 'search'
        search_params = {
            'key': settings.YOUTUBE_API_KEY,
            'part': 'id,snippet',
            'order': 'relevance',
            'q': query,
            'type': 'video',
        }
        try:
            response = requests.get(search_url, params=search_params)
        # Narrowed from bare Exception: only network/HTTP-level failures are
        # expected from requests.get.
        except requests.exceptions.RequestException as e:
            logging.error(e, exc_info=True)
            raise Exception('YouTube search request failed.') from e
        try:
            data = response.json()
        except Exception as e:
            logging.error(e, exc_info=True)
            raise Exception("YouTube search didn't return a valid JSON.") from e
        try:
            return [Media(media_id=item['id']['videoId'], title=item['snippet']['title'])
                    for item in data['items']]
        except (KeyError, TypeError, ValueError) as e:
            logging.error(e, exc_info=True)
            raise Exception("YouTube search didn't return the expected response.") from e
| pythrick/playlist-bot | src/services/youtube.py | youtube.py | py | 1,252 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "config.settings.YOUTUBE_API_KEY",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "config.settings",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "log... |
12446665048 | import math
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
df = pd.read_csv('S&P500.csv')
# Select the adjusted-close column as the modelling target.
data = df.filter(['Adj Close'])
data = data.values
# Get the number of rows to train the model on (80/20 split).
training_data_len = math.ceil(len(data) * .8)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
data_train = scaled_data[0:training_data_len, :]
# Build sliding windows: 5 past scaled values predict the next value.
x_train = []
y_train = []
for i in range(5, len(data_train)):
    x_train.append(data_train[i-5:i, 0])
    y_train.append(data_train[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
linear_reg = LinearRegression()
# Train the model
linear_reg.fit(x_train, y_train)
# Create the testing data set; include the last 5 training rows so the first
# test window has full context.
data_test = scaled_data[training_data_len - 5:, :]
# Create the data sets x_test and y_test.
x_test = []
# BUGFIX: ravel() flattens (n, 1) -> (n,) so y_test matches the shape of
# `predictions`; previously `predictions - y_test` broadcast to an (n, n)
# matrix, making the reported RMSE and std meaningless.
y_test = scaled_data[training_data_len:, :].ravel()
for i in range(5, len(data_test)):
    x_test.append(data_test[i-5:i, 0])
# Convert the data to a numpy array
x_test = np.array(x_test)
predictions = linear_reg.predict(x_test)
rmse = np.sqrt(np.mean(((predictions - y_test)**2)))
print('rmse = ', rmse)
std = np.std(predictions - y_test)
print('std = ', std)
print('r2 = ', r2_score(y_test, predictions))
| Susannnn/Stock-Price-Prediction | LR.py | LR.py | py | 1,369 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.ar... |
42426994810 | """
░░░░░░░░░░░██╗░░░░░░░██╗██╗░░░██╗██████╗░██████╗░███████╗░░░░░░██████╗░░█████╗░██████╗░██████╗░░░░░░░░░░░░
░░░░░░░░░░░██║░░██╗░░██║██║░░░██║██╔══██╗██╔══██╗╚════██║░░░░░░██╔══██╗██╔══██╗██╔══██╗██╔══██╗░░░░░░░░░░░
░░░░░░░░░░░╚██╗████╗██╔╝██║░░░██║██║░░██║██║░░██║░░███╔═╝█████╗██║░░██║███████║██████╔╝██████╔╝░░░░░░░░░░░
░░░░░░░░░░░░████╔═████║░██║░░░██║██║░░██║██║░░██║██╔══╝░░╚════╝██║░░██║██╔══██║██╔═══╝░██╔═══╝░░░░░░░░░░░░
░░░░░░░░░░░░╚██╔╝░╚██╔╝░╚██████╔╝██████╔╝██████╔╝███████╗░░░░░░██████╔╝██║░░██║██║░░░░░██║░░░░░░░░░░░░░░░░
░░░░░░░░░░░░░╚═╝░░░╚═╝░░░╚═════╝░╚═════╝░╚═════╝░╚══════╝░░░░░░╚═════╝░╚═╝░░╚═╝╚═╝░░░░░╚═╝░░░░░░░░░░░░░░░░
[*]Descr: ERC20 DAPP, CREATE ACCOUNTS, GET ALL TOKEN BALANCES & USD VALUE FOR AN ACCOUNT/WALLET,
MAKE TRANSACTIONS, INTERACT SMART CONTRACTS, SWAP ERC20 TOKENS, GET CURRENT CRYPTO PRICES,
AUTHENTICATE TO EXCHANGE ACCOUNT VIA API, CONVERT CRYPTO TO CRYPTO VALUE AND BASE64 DECODER
[*]Coder: Wuddz_Devs
[*]Email: wuddz_devs@protonmail.com
[*]Github: https://github.com/wuddz-devs
[*]Reddit: https://reddit.com/users/wuddz-devs
[*]Twitter: https://twitter.com/wuddz_devs
[*]Telegram: https://t.me/wuddz_devs
[*]Videos: https://mega.nz/folder/IWVAXTqS#FoZAje2NukIcIrEXXKTo0w
[*]Youtube: https://youtube.com/@wuddz-devs
[*]Donation:
BTC -> bc1qa7ssx0e4l6lytqawrnceu6hf5990x4r2uwuead
ERC20 -> 0xbF4d5309Bc633d95B6a8fe60E6AF490F11ed2Dd1
LTC -> LdbcFiQVUMTfc9eJdc5Gw2nZgyo6WjKCj7
TRON -> TY6e3dWGpqyn2wUgnA5q63c88PJzfDmQAD
DOGE -> DFwLwtcam7n2JreSpq1r2rtkA48Vos5Hgm
[*]Menu:
1 => Create A New Ethereum Account
2 => Check Account Balance(s)
3 => Send/Deposit To An Account
4 => Get Account Address & Balance(s) From Private Key Or Mnemonic Seed
5 => Get Transaction Hash Attributes
6 => Compile & Deploy Smart Contract To Blockchain
7 => Interact, Read & Execute Smart Contract Functions
8 => Verify Deployed Smart Contract On Etherscan/Polygonscan
9 => Swap/Purchase ERC20 Tokens Using 0x Api
x => Interact With Exchange Account (Authentication ApiKey, ApiSecret, ApiPassword etc...)
d => Decode Base64 String
p => Crypto Price & Conversion
n => Choose Blockchain Network
e => Exit Program
"""
import docs, re, sys, json, base64, requests, warnings
from secrets import choice
from platform import system as _ps
from importlib.machinery import SourceFileLoader
from shutil import copy as Cp
from pycoingecko import CoinGeckoAPI
from time import sleep
from pathlib import Path
from web3 import Web3, EthereumTesterProvider
from subprocess import call
from os import system, _exit
# Silence noisy FutureWarnings from dependencies.
warnings.simplefilter(action='ignore', category=FutureWarning)
# Empty system call: on Windows consoles this enables ANSI escape-code
# rendering so the colour codes used throughout the menus display correctly.
system('')
class Dapp:
def __init__(self):
"""Connect & Interact With Various Ethereum Blockchains Using Specified Testnet/Mainnet Networks."""
self.da=''
self.pk=''
self.nw=None
self.node_url=''
self.name=_ps()
self.cg=CoinGeckoAPI()
self.bt={
'celo': 'CELO', 'starknet': 'STRK',
'aurora': 'AURORA', 'near': 'NEAR',
'avalanche': 'AVAX', 'palm': 'PALM',
'arbitrum': 'ARB', 'optimism': 'OP',
'polygon': 'MATIC', 'bsc': 'BNB'
}
self.ss='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
self.sw='https://{}api.0x.org/swap/v1/quote?buyToken={}&sellToken={}&{}Amount={}&slippagePercentage={}&takerAddress={}'
self.hs='\n\033[1;34;40m{}\nTx_Hash: {}\033[0m'
self.ns='\n\033[1;34;40m current network: {} | connected: {}\n'
self.nh='\n\033[1;34;40mTransaction Completed Successfully\nTx_Hash: {}\033[0m'
self.at='\n\033[1;32;40mDo You Approve Transaction?\nInput y or n=> '
pkg=str(Path.home().expanduser().joinpath('Desktop','DAPP'))
if not Path(pkg).exists:Path(pkg).mkdir(parents=True, exist_ok=True)
self.dp=Path(pkg).joinpath('config.py')
self.cf=Path(pkg).joinpath('contract_info.txt')
self.kf=Path(pkg).joinpath('key_file.txt')
self.kd=Path(pkg).joinpath('key_data.txt')
self.tr=Path(pkg).joinpath('tx_receipts.txt')
self.lf=Path(pkg).joinpath('trade-log.txt')
self.to=Path(pkg).joinpath('trade_output.txt')
self.dc=(Path(__file__).absolute().parent).joinpath('dapp_config.py')
if not Path(self.dp).exists():Cp(self.dc, self.dp)
self.dcfg=self.config_import()
self.web3=Web3(EthereumTesterProvider)
self.web3.eth.account.enable_unaudited_hdwallet_features()
def _fwrite(self, fn: str, data):
"""
Writes Data To Specified File.
:param fn: File To Write Data To
:param data: Data To Write To File
"""
with open(fn, 'a', encoding='utf-8') as fw:
fw.write(data)
def _fread(self, fn: str):
"""
Returns Data Read From Specified File.
:param fn: File To Read Data From
"""
with open(fn, 'r', encoding='utf-8') as fr:
return fr.read()
def nw_status(self) -> bool:
"""Returns Web3 Connection Status True Or False."""
try:
return self.web3.is_connected()
except:return False
def config_import(self):
"""Returns `config.py` Module Imported From Wuddz-Dapp Output Folder."""
mn='config'
sp=SourceFileLoader(mn, str(self.dp))
cm=sp.load_module()
return cm
    def pexit(self):
        """Clear CLI Screen & Exit Program."""
        self.clear_screen()
        # os._exit terminates immediately, skipping atexit handlers and
        # buffered-IO flushing — used for a hard exit from the menu loops.
        _exit(0)
    def get_menu(self, m: str='', c: bool=True):
        """
        Print Output To Screen & Wait For User Input.
        :param m: Optional String To Print To Screen, If String Is `e` Prints `*Error Occurred*`
        :param c: Optional Bool To Clear CLI Screen Or Not Defaults To True
        """
        if c:self.clear_screen()
        if m:
            a='\033[1;32;40m[*]OUTPUT:\n\n'
            # 'e' is the shared error sentinel used by callers' except blocks.
            if m=='e':m='\033[1;31;40m*Error Occurred*'
            m=a+m
        # Blocks until the user presses Enter, so output stays on screen.
        input(m+'\n\n\033[1;32;40m...Hit Enter|Return Key To Continue....\033[0m\n')
def slow_print(self, doc: str, sp: float=0.0005):
"""
Print String By Speed In Seconds Less Is Faster.
:param doc: String To Be Printed
:param sp: Speed To Print String `e.g 0.0001 or 0.0005 used`
"""
for d in doc:
sys.stdout.write(f"\033[1;32;40m{d}")
sleep(sp)
def clear_screen(self):
"""Clear Command Line Screen."""
if self.name=='Linux':system('clear')
elif self.name=='Windows':system('cls')
elif self.name=='Darwin':system("printf '\\33c\\e[3J'")
def decode_bsf(self, s: str):
"""
Prints Decoded Base64 String To Screen.
:param s: Encoded Base64 String e.g `3R1ZGVudCBub2l=`
"""
self.get_menu('\033[1;34;40m'+str(base64.b64decode(s).decode('utf-8')))
    def func_menu(self, c: str, i: str, n: str=None, f: str=None):
        """
        Loads Menu To Return Input.
        Returns the raw user input, or None when the user backs out with 'b'.
        :param c: Menu Banner Type Docstring
        :param i: String Specifying Menu Input
        :param n: String To Specify Network Choice In Menu Defaults To None
        :param f: String Specifying Method To Execute Defaults To None
        """
        while True:
            try:
                self.clear_screen()
                d=f'\033[1;32;40m{c}\n\033[0mInput {i}=> '
                # With a network option, the banner also shows network + status.
                if n:d=f'\033[1;32;40m{c}{self.ns.format(self.nw,self.nw_status())}\n\033[0mInput {i}=> '
                a=input(d) or None
                if a=='b':break
                elif a=='e':self.pexit()
                elif n and a=='n':self.block_network()
                # NOTE(review): eval of a method name built from `f` with the
                # raw user input `a` — f is caller-controlled, but this is
                # still fragile (quotes in `a` break/escape the expression);
                # getattr(self, f)(a) would be safer.
                elif a and f:eval(f'self.{f}(a)')
                elif a:return a
            except Exception as e:self.get_menu(f'\033[1;31;40m{e}')
def get_price(self, i: str='ethereum'):
"""
Return Price Of Token In Usd Using CoinGecko API Token ID.
:param i: Coingecko Token Id To Get Usd Value Defaults To BTC e.g `ethereum`
"""
return self.cg.get_price(i,'usd')[i]['usd']
def js_resp(self, u: str, h: dict={}):
"""
Returns Url Request Response
:param u: Url To Call
:param h: Headers As Type Dictionary
"""
return requests.get(u, headers=h, verify=None)
    def account_auth(self):
        """Loads Menu To Return Eth Account Object & Private Key From Input, Mnemonic String Or File.

        Returns ('b', None) when the user backs out.  Input is classified by
        length: 64/66 chars or containing spaces -> raw key / mnemonic;
        44 chars -> base64 keystore password looked up via pkey_file.
        """
        try:
            pp=self.func_menu(docs.aa, 'Authentication', 'n')
            if not pp:return 'b',None
            if len(pp) in [64,66] or ' ' in pp:pk=pp
            elif len(pp)==44:pk=self.pkey_file(pp)
            # NOTE(review): input of any other length leaves `pk` unbound and
            # the NameError below is silently swallowed, returning None.
            da=self.account_key(pk, x='d')
            return da, pk
        except:pass
def account_key(self, s: str, x: str=None):
"""
Returns Address Or Prints Address Balance To Screen.
:param s: Mnemonic String Or Private Key
:param x: String To Specify Print Balance To Screen If Specified
"""
if ' ' in s:acct=self.mnemonic_str(s)
else:acct=self.pkey_str(s)
if x:return acct
bal=self.bal_main(acct.address)
def mnemonic_str(self, m: str):
"""
Returns Web3 Eth Account Object From Mnemonic String.
:param m: Mnemonic String Of Account
"""
return self.web3.eth.account.from_mnemonic(m)
def pkey_str(self, k: str):
"""
Returns Web3 Eth Account Object From Private Key.
:param k: Private Key Of Account
"""
return self.web3.eth.account.from_key(k)
    def account_create(self):
        """Creates A Web3 Eth Account, Writes Encrypted Data & Base64 Encoded Info To Output Files & Prints Address To Screen."""
        # Random 32-char keystore password, stored base64-encoded.
        psd=''.join(choice(self.ss) for i in range(32))
        pd=base64.urlsafe_b64encode(bytes(psd, 'utf-8'))
        acct, mnemonic=self.eth_wallet()
        encrypted=self.web3.eth.account.encrypt(acct.key, psd)
        pk=base64.urlsafe_b64encode(bytes(self.web3.to_hex(acct.key), 'utf-8'))
        mn=base64.urlsafe_b64encode(bytes(mnemonic, 'utf-8'))
        # NOTE(review): f'{pd[:14]}' embeds the *bytes repr* (b'....') in the
        # key file — pkey_file() deliberately matches against that same
        # "b'<prefix>'" form, so the two must stay in sync.
        self._fwrite(self.kf, f'{pd[:14]} {json.dumps(encrypted)}\n')
        self._fwrite(self.kd, f'Account: {acct.address}\nPrivate_Key: {pk}\nPrivate_Key_Password: {pd}\nMnemonic: {mn}\n\n')
        self.get_menu(f'\033[1;34;40mAccount_Created=> {acct.address}')
    def pkey_file(self, p: str):
        """
        Returns Private Key From File Containing Encrypted Account Data Using Password.
        :param p: Password To Decrypt Account Data (base64-encoded, as written
                  by account_create)
        """
        psd=str(base64.b64decode(p).decode('utf-8'))
        # Looks up the keystore line by the "b'<first 14 password chars>'"
        # prefix written by account_create.
        # NOTE(review): the password prefix is interpolated into the regex
        # unescaped; urlsafe-base64 characters are regex-safe, but re.escape
        # would be more robust — confirm.
        line=re.search("b'"+p[:14]+"' (.*)",self._fread(self.kf)).group(1)
        return self.web3.to_hex(self.web3.eth.account.decrypt(line, psd))
    def crypto_price(self):
        """Loads Menu To Get Crypto Price & Conversion Using CoinGecko API.

        Input format: "<amount> <coin_id>" for a USD quote, or
        "<amount> <coin_id_a> <coin_id_b>" for a coin-to-coin conversion.
        """
        while True:
            try:
                self.clear_screen()
                cc=self.func_menu(docs.cp, 'Choice')
                if not cc:break
                ccl=cc.split()
                cd=self.cg.get_coin_by_id(ccl[1])
                cpa=self.cg.get_price(cd['id'],'usd')[str(cd['id'])]['usd']
                crp="${:,.2f}".format(float(ccl[0])*float(cpa))
                syma=cd['symbol'].upper()
                if len(ccl)==2:self.get_menu(f"\033[1;34;40m{cc.replace(ccl[1],syma)} => {crp} USD")
                elif len(cc.split())==3:
                    cb=self.cg.get_coin_by_id(ccl[2])
                    symb=cb['symbol'].upper()
                    cpb=self.cg.get_price(ccl[2],'usd')[str(ccl[2])]['usd']
                    # Conversion via the USD cross rate of both coins.
                    cvv=float(ccl[0])*(float(cpa)/float(cpb))
                    self.get_menu(f"\033[1;34;40m{ccl[0]} {syma} => {cvv} {symb}\nTotal Value => {crp} USD")
            except requests.exceptions.ConnectionError:
                self.get_menu(f'\033[1;31;40mNo Connection Error!!')
            # Any other failure (bad id, bad amount) is reported as invalid input.
            except:self.get_menu(f'\033[1;31;40m{cc} Not Valid!!')
def eth_value(self, a: str):
"""
Returns Balance Of Address In Ether Denomination.
:param a: Address To Get Balance For
"""
b=self.web3.eth.get_balance(self.web3.to_checksum_address(a))
return self.web3.from_wei(b, 'ether')
def ens_addr(self, a: str) -> str:
"""
Return Ens Name Or Address For Specified Ens Name.
:param a: Ens Name Or Address e.g `vitalik.eth | 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045`
"""
try:
f='name'
if '.eth' in a:f='address'
adr=eval(f'self.web3.ens.{f}("{a}")')
if f=='name':adr=re.search('\S+.eth',adr).group()
return adr
except:return a
def tkn_address(self, s: str, n: str='mainnet') -> str:
"""
Returns Token Contract Address From Dapp_Config Token Info If Available.
:param s: Symbol Of Token To Retrieve Contract Address e.g `BNB`
:param e: First Letter Of Token To Retrieve Contract Address e.g `BNB`
"""
if n not in ['mainnet', 'polygon', 'goerli', 'sepolia', 'mumbai']:return
return self.dcfg.tcd[n][[i for i in list(self.dcfg.tcd[n].keys()) if i==s.upper()][0]]
    def bal_main(self, a: str):
        """
        Prints Address Balance(s) To Screen From API Or Contract Call.
        On main networks balances come from the Ethplorer API; on local/test
        networks they come from direct contract calls.
        :param a: Address To Get Balance(s) For; optionally "<addr> <token>"
                  where token is a contract address or known symbol
        """
        enn=''
        tca=''
        # Base-token symbol for the current network (ETH fallback).
        ttt=self.bt.get(self.nw)
        if not ttt:ttt='ETH'
        try:
            l=a.split()
            if len(l)==1:addr=l[0]
            elif len(l)==2:
                addr=l[0]
                # Second word: either a full contract address or a symbol to
                # resolve via the config token tables.
                if len(l[1])==42:tca=l[1]
                else:
                    tca=self.tkn_address(l[1], n=self.nw)
                    assert self.web3.to_checksum_address(tca)
            # Resolve ENS both ways; when a reverse name comes back, keep it
            # for display and fall back to the raw input as the address.
            addr=self.ens_addr(l[0])
            if '.eth' in addr:
                enn=addr
                addr=l[0]
            elif '.eth' in a:enn=a
            base=self.eth_value(addr)
            if self.nw not in ['goerli','mumbai','sepolia','ganache','bsctestnet','development']:
                tot=0
                bs=''
                nw=self.nw[0].upper()+self.nw[1:]
                url=f'https://api.ethplorer.io/getAddressInfo/{addr}?showTxsCount=true&apiKey=freekey'
                if tca:url=f'https://api.ethplorer.io/getAddressInfo/{addr}?token={tca}&showETHTotals=false&showTxsCount=true&apiKey=freekey'
                bjs=self.js_resp(url).json()
                prc=bjs['ETH']['price']['rate']
                ethv=bjs['ETH']['balance']
                txc=bjs['countTxs']
                # Per-token totals/display lines come from bal_sub.
                if bjs.get('tokens'):tot,bs=self.bal_sub(bjs)
                eus=float(prc)*float(ethv)
                usdt="${:,.2f}".format(tot+eus)
                eusd="${:,.2f}".format(eus)
                j=f'\033[1;34;40mAccount: {addr}\nEthereum: {ethv} ETH \033[1;32;40m({eusd} USD)\n'
                if ttt!='ETH':j=f'\033[1;34;40mAccount: {addr}\n{nw}: {base} {ttt}\nEthereum: {ethv} ETH \033[1;32;40m({eusd} USD)\n'
                if enn:j=j+f'\033[1;34;40mEnsName: {enn}\n'
                self.get_menu(bs+j+f'\033[1;34;40mTotal Value: \033[1;32;40m{usdt} USD\n\033[1;34;40mTotal Transactions: {txc}')
            else:
                # Test/local network: query the token contract directly.
                if tca:c,ethv,ttt,tkn,d=self.contract_info(str(tca), chk=addr)
                self.get_menu(f'\033[1;34;40mAccount: {addr}\nBalance: {base} {ttt}')
        except:self.get_menu('e')
def bal_sub(self, d: dict) -> int:
"""
Returns Total USD Value Of All Tokens API Dictionary Response & Prints Each Iterated Token & Balance To Screen.
:param d: Ethplorer API Response As Json Dictionary
"""
tot=[]
bl=''
for i in range(len(d['tokens'])+1):
usdc='0'
try:
nam=d['tokens'][i]['tokenInfo']['name']
sym=d['tokens'][i]['tokenInfo']['symbol']
dec=d['tokens'][i]['tokenInfo']['decimals']
rbal=d['tokens'][i]['rawBalance']
bal=rbal
if str(dec).isdigit():bal=int(rbal)/10**int(dec)
if d['tokens'][i]['tokenInfo']['price']:
pri=d['tokens'][i]['tokenInfo']['price']['rate']
usd=float(pri)*int(bal)
tot.append(usd)
usdc="{:,.2f}".format(usd)
bl+=f'\033[1;34;40m{nam}: {bal} \033[1;32;40m(${usdc} USD) \033[1;34;40m{sym}\033[0m\n\n'
except:pass
return sum(tot),bl
    def bal_fkey(self):
        """Loads Menu To Print Balance & Address Of Token From Authentication."""
        while True:
            try:
                a,k=self.account_auth()
                # 'b' sentinel: user backed out of the authentication menu.
                if a=='b':break
                self.bal_main(a.address)
            except:self.get_menu('e')
    def trx_hash(self, h: str, o=None):
        """
        Prints OR Writes Transaction Hash Info To Screen/Output File.
        :param h: Transaction Hash
        :param o: String To Specify Write Transaction Info To File Defaults To None
        """
        txr=dict(self.web3.eth.get_transaction(h))
        # Convert wei fields to ether and binary fields to hex so the dict is
        # human-readable and JSON-serialisable.
        txr['gas']=format(self.web3.from_wei(txr['gas'], 'ether'),'f')
        txr['gasPrice']=format(self.web3.from_wei(txr['gasPrice'], 'ether'),'f')
        txr['value']=self.web3.from_wei(txr['value'], 'ether')
        txr['blockHash']=self.web3.to_hex(txr['blockHash'])
        txr['hash']=self.web3.to_hex(txr['hash'])
        txr['r']=self.web3.to_hex(txr['r'])
        txr['s']=self.web3.to_hex(txr['s'])
        if o:self._fwrite(self.tr, f'{self.node_url}\n{json.dumps(txr, indent=4, default=str)}\n\n')
        else:self.get_menu('\033[1;34;40m'+json.dumps(txr, indent=4, default=str))
def block_network(self, nc: str=None):
"""
Sets Or Loads Menu To Select & Set Specified Blockchain Network.
:param nc: String To Specify Blockchain Network e.g `mainnet | goerli`
"""
d={'1':'mainnet','2':'sepolia','3':'goerli',
'4':'linea','5':'celo','6':'starknet',
'7':'aurora','8':'near','9':'avalanche',
'10':'palm','11':'arbitrum', '12':'optimism',
'13':'polygon', '14':'mumbai', '15':'bsc',
'16':'bsctestnet', 'g':'ganache', 'd':'development'}
try:
if not nc:nc=self.func_menu(docs.nwl, 'Network Choice')
if d.get(nc):self.nw=str(d[nc])
else:self.nw='mainnet'
self.node_url=eval(f'self.dcfg.{self.nw}')
self.web3=Web3(Web3.HTTPProvider(self.node_url))
except:pass
def account_deposit(self):
"""Loads Menu To Create, Approve & Sign Deposit Transaction."""
while True:
try:
ttt='ETH'
acc, pk=self.account_auth()
if acc=='b':break
nonce=self.web3.eth.get_transaction_count(acc.address)
dtx=self.func_menu(docs.der, 'Transfer Format', n='n')
if not dtx:break
dpa,amnt,tca=dtx.split()
if tca.lower()!='eth':
if len(tca)!=42:
tca=self.tkn_address(tca, n=self.nw)
assert self.web3.to_checksum_address(tca)
c,b,ttt,n,dec=self.contract_info(tca)
amt=float(amnt)*(10**int(dec))
else:amt=self.web3.to_wei(amnt, 'ether')
assert self.web3.to_checksum_address(dpa)
if ttt=='ETH':tx=self.transfer_eth(dpa, amt, nonce)
else:tx=self.transfer_tkn(contract, dpa, amt, nonce)
tc='\n\033[1;32;40mTransaction:\033[1;34;40m\n{}\n{}\n'
td='Deposit Amount: {} {}\nFrom: {}\nTo: {}'.format(str(amnt), ttt, acc.address, dpa)
at=tc.format(json.dumps(tx, indent=2, default=str),td)
ap=input(f'{at}{self.at}') or 'n'
if ap=='y':self.sign_tx(tx,pk)
except:self.get_menu('e')
def sign_tx(self, tx: dict, k: str, hs: str=None, cd: str=None):
"""
Signs Transaction With Provided Private Key, Returns Or Prints To Screen & Writes Tx Hash & Receipt To Output File.
:param tx: Unsigned Transaction As Type Dictionary
:param k: Private Key To Sign Transaction
:param hs: Optional String If Specified Prints Transaction Hash To Screen Defaults To None
:param cd: Optional String If Specified Returns Transaction Receipt & Hash Defaults To None
"""
signed_tx=self.web3.eth.account.sign_tx(tx, k)
tx_hash=self.web3.eth.send_raw_transaction(signed_tx.rawTransaction)
tx_receipt=self.web3.eth.wait_for_transaction_receipt(tx_hash)
th=self.web3.to_hex(tx_receipt.transactionHash)
if cd:return tx_receipt,th
elif hs:print(self.hs.format(hs,th))
else:self.get_menu(self.nh.format(th))
self.trx_hash(th,'t')
def get_contract(self, abi: list, ca: str=None, bc: str=None):
"""
Returns Web3 Contract Instance For Specified Contract Address.
:param abi: ABI List For Specified Contract `ca`
:param ca: Contract Address Defaults To None
:param bc: Contract ByteCode Defaults To None
"""
if ca:return self.web3.eth.contract(abi=abi, address=ca)
return self.web3.eth.contract(abi=abi, bytecode=bc)
def contract_info(self, tca: str, chk: str=None):
"""
Returns Web3 Contract Instance, Balance Of Contract Name For Address If Specified, Symbol, Name & Decimal Number For Contract Address.
:param tca: Contract Address
:param chk: Address To Get Balance Of Contract Address For Defaults To None
"""
ethv=''
contract=self.get_contract(self.dcfg.abi, ca=tca)
if chk:ethv=contract.functions.balanceOf(str(chk)).call()
dec=contract.functions.decimals().call()
sym=contract.functions.symbol().call()
nam=contract.functions.name().call()
if str(dec)!='0' and chk:ethv=ethv/10**dec
return contract,ethv,sym,nam,dec
    def contract_rw(self, addr: str):
        """
        Interactive menu to list, call and execute a verified contract's functions.

        :param addr: Verified smart contract address
        """
        abi=''
        # Local ganache node uses a locally compiled ABI; public networks
        # fetch the ABI from the matching block-explorer API.
        if self.nw=='ganache':abi=self.local_abi()
        else:abi=self.remote_abi(addr, n=self.nw)
        if abi:
            contract=self.get_contract(abi, ca=addr)
            # Extract plain signatures (e.g. "balanceOf(address)") from each
            # ContractFunction's repr string.
            cfs=[(str(c).split('>')[0]).split(' ')[1] for c in contract.all_functions()]
            if cfs:
                dec=''
                # Token contracts expose decimals(); used to scale displayed values.
                if 'decimals()' in str(cfs):dec=contract.functions.decimals().call()
                while True:
                    self.clear_screen()
                    print(f'\n\033[1;32;40m [*]ALL CONTRACT FUNCTIONS FOR => {addr}:\033[0m\n')
                    for c in cfs:
                        # Zero-argument functions are called eagerly so their
                        # current value appears directly in the listing.
                        if '()' in c:c=self.contract_call(contract,c,dec=dec)
                        print(f'\033[1;34;40m{c}\033[0m')
                    fnc=input(f'\n\033[1;32;40m{docs.crw}\033[0m\nInput Choice=> ')
                    if fnc=='b':break
                    elif fnc=='c':self.convert_value()
                    elif f'{fnc}(' in str(cfs):
                        # First signature whose name matches the user's choice.
                        v=[c for c in cfs if f'{fnc}(' in c][0]
                        self.contract_sub(v,contract,dec)
                    else:self.get_menu('\033[1;31;40m*Contract Not Valid*')
        else:self.get_menu('\033[1;31;40m*Contract Abi Not Found*')
def contract_sub(self, fnc: str, contract, dec: str):
"""
Execute Private Contract Function.
:param fnc: Function To Be Executed e.g `balanceOf`
:param contract: Contract Instance To Execute Functions
:param dec: Decimal Number For Contract
"""
if '()' in fnc:self.get_menu(self.contract_call(contract,fnc,dec=dec))
else:
et=''
fnc=fnc.split('(')[0]
fd,et=self.contract_data(fnc,contract.abi)
fo=f'{fnc}('
for k,v in fd.items():
if v:
if 'uint' in str(k):fo+=f'int("{v}"),'
else:fo+=f'"{v}",'
if fo[-1]!='(':
fp=f'{fo[:-1]})'
if str(et) not in ['pure', 'view']:
acc, pk=self.account_auth()
if acc=='b':return
nonce=self.web3.eth.get_transaction_count(acc.address)
gas=eval(fp).estimate_gas()
tx=eval(fp).build_transaction({'nonce': nonce, 'gas': gas, 'gasPrice': self.web3.eth.gas_price})
self.sign_tx(tx,pk)
else:self.get_menu(self.contract_call(contract,fp,dec=dec))
else:self.get_menu('e')
    def contract_deploy(self, cn: str):
        """
        Compile (if needed), deploy and verify a smart contract, printing the
        transaction result and the new contract address.

        :param cn: Name of the contract to deploy (matches <cn>.json truffle artifact)
        """
        fd=list(Path(self.dcfg.scdir).rglob(f'{cn}.json'))
        if fd:fd=fd[0]
        # No artifact found: compile the project so one is produced.
        # NOTE(review): after compiling, `fd` is still the empty list and the
        # _fread below receives it unchanged — confirm a re-run is expected.
        else:call(['truffle', 'compile'], cwd=self.dcfg.scdir)
        ci=json.loads(self._fread(fd))
        abi=ci['abi']
        bc=ci['bytecode']
        acc,pk=self.account_auth()
        contract=self.get_contract(abi=abi, bc=bc)
        tx=contract.constructor().build_transaction(
            {
                'nonce': self.web3.eth.get_transaction_count(acc.address),
                'gasPrice': self.web3.eth.gas_price
            }
        )
        # cd='cd' makes sign_tx return (receipt, hash) instead of printing.
        txr,txh=self.sign_tx(tx,pk,cd='cd')
        ca=txr.contractAddress
        cna=f'{cn}@{ca}'
        # Record name/abi/address/network in the contracts output file.
        self._fwrite(self.cf, f'name: {cn}.json\nabi: {abi}\naddress: {ca}\nnetwork: {self.node_url}\n\n')
        # Public networks get source verification via truffle's verify plugin.
        if self.nw!='ganache':call(['truffle', 'run', 'verify', cna, '--network', self.nw], cwd=self.dcfg.scdir)
        self.trx_hash(txh,'t')
        self.get_menu(f'\n\033[1;34;40mSmart_Contract_Address: {ca}')
    def contract_call(self, contract, cf: str, dec: str=None):
        """
        Call a read-only contract function and format the result for display.

        Returns the coloured "signature = value" string, or 'e' on any failure.

        :param contract: Web3 contract instance the call is made against
        :param cf: Call expression, e.g. `balanceOf("0x...")` or `symbol()`
        :param dec: Token decimals used to scale large integer results
        """
        try:
            # SECURITY: eval() runs the user-assembled expression against the
            # contract's functions namespace — trusted-operator tool only.
            fnc=eval(f'contract.functions.{cf}.call()')
            # Heuristic: results longer than the decimal count are raw token
            # amounts and get scaled into whole-token units.
            if dec and len(str(fnc))>int(dec):fnc=fnc/(10**int(dec))
            return f'\033[1;34;40m{cf} = {fnc}\033[0m'
        except:return 'e'
def convert_value(self):
"""Loads Menu To Convert To/From Wei."""
while True:
try:
self.clear_screen()
cv=input(f'\n\033[1;32;40m{docs.cwd}\n\033[0mInput Parameters=> ')
if cv=='b':break
cv=cv.split()
if cv[2]=='w':
cs="\033[1;34;40m{} => {}"
if cv[1]=='t':self.get_menu(cs.format('Wei', self.web3.to_wei(float(cv[0]), 'ether')))
elif cv[1]=='f':self.get_menu(cs.format('Eth', self.web3.to_wei(float(cv[0]), 'ether')))
elif str(cv[2]).isdigit():
if cv[1]=='t':val=float(cv[0])*10**int(cv[2])
else:val=float(cv[0])/10**int(cv[2])
self.get_menu(f"\033[1;34;40mValue => {val}")
except:pass
    def remote_abi(self, a: str, n: str='mainnet'):
        """
        Fetch the ABI of a verified contract from the matching explorer API.

        Returns the ABI string, or '' when the network is ganache/unreachable.

        :param a: Verified contract address
        :param n: Network the contract is deployed on, e.g. `goerli`
        """
        abi=''
        try:
            cn=''
            # Per-network request headers (explorer hosts differ).
            ed={'User-Agent': 'Mozilla/5.0', 'Host':'api.etherscan.io'}
            hd={'User-Agent': 'Mozilla/5.0', 'Host':f'api-{n}.etherscan.io'}
            md={'User-Agent': 'Mozilla/5.0', 'Host':'api-testnet.polygonscan.com'}
            pd={'User-Agent': 'Mozilla/5.0', 'Host':'api.polygonscan.com'}
            if n=='mainnet':
                cn=requests.get(f'https://api.etherscan.io/api?module=contract&action=getabi&address={a}', headers=ed).text
            elif n=='mumbai':
                cn=requests.get(f'https://api-testnet.polygonscan.com/api?module=contract&action=getabi&address={a}', headers=md).text
            elif n=='polygon':
                cn=requests.get(f'https://api.polygonscan.com/api?module=contract&action=getabi&address={a}', headers=pd).text
            # Any other named network maps onto an etherscan testnet subdomain.
            elif n!='ganache':
                cn=requests.get(f'https://api-{n}.etherscan.io/api?module=contract&action=getabi&address={a}', headers=hd).text
            abi=json.loads(cn)['result']
        except requests.exceptions.ConnectionError:
            print(f'\033[1;31;40mNo Connection Error!!\033[0m')
        return abi
    def local_abi(self):
        """
        Prompt for a contract name and return the ABI from the local truffle
        build artifacts; returns '' when the prompt is skipped.
        """
        abi=''
        cnt=input('\nInput Contract Name Or Pass=> ')
        if cnt:
            # Artifacts live under <scdir>/build/contracts/<name>.json.
            ci=json.loads(self._fread(Path(self.dcfg.scdir).joinpath('build','contracts',f'{cnt}.json')))
            abi=ci['abi']
        return abi
    def token_id(self):
        """
        Interactive loop that searches CoinGecko for a token and prints the
        id, name and symbol of each match.
        """
        while True:
            try:
                st=self.func_menu(docs.st, 'String')
                # Empty input exits the search loop.
                if not st:break
                sd=self.cg.search(st)['coins']
                print('')
                for i in range(len(sd)):
                    print("Id: {}, Name: {}, Symbol: {}".format(sd[i]['id'], sd[i]['name'], sd[i]['api_symbol']))
                input('\n\n\033[1;32;40m...Hit Enter|Return Key To Continue....\033[0m\n') or None
            # Best-effort: API/lookup failures simply re-show the prompt.
            except:pass
    def eth_wallet(self):
        """
        Create a new account; returns an (account object, mnemonic) tuple.

        NOTE(review): eth-account requires
        `Account.enable_unaudited_hdwallet_features()` before
        create_with_mnemonic() works — confirm it is enabled at startup.
        """
        return self.web3.eth.account.create_with_mnemonic()
    def token_swap(self):
        """
        Interactive menu to swap tokens via the 0x swap API on a chosen chain.
        """
        while True:
            try:
                acc, pk=self.account_auth()
                if acc=='b':break
                # Menu number -> 0x API subdomain prefix ('' = mainnet).
                td={'1': '','2': 'goerli.',
                    '3': 'polygon.','4': 'mumbai.',
                    '5': 'bsc.','6': 'optimism.',
                    '7': 'fantom.','8': 'celo.',
                    '9': 'avalanche.','10': 'arbitrum.',
                    '11': 'base.'
                }
                self.clear_screen()
                chain=input(f'\n\033[1;32;40m{docs.tsn}\033[0m\nInput Number=> ') or '1'
                self.clear_screen()
                # Swap parameters, pre-filled with sensible defaults; keys
                # double as the on-screen menu labels.
                ld={
                    '1 -> BuyToken [e.g DAI Or Token Smart Contract Address]':'',
                    '2 -> Slippage [e.g 0.01 = 1% slippage]':'0.01',
                    '3 -> SellToken [e.g ETH Or Token Smart Contract Address]':'ETH',
                    '4 -> SellAmount [e.g Sell Specified Amount Of Sell Token]':'',
                    '5 -> BuyAmount [e.g Buy Specified Amount Of Buy Token]':'',
                    '6 -> TakerAddress [e.g Address]':acc.address
                }
                # Shortcut keys ("1 ", "2 ", ...) back to the full labels.
                kd={f'{k.split()[0]} ':k for k in ld.keys()}
                hd=self.function_dict(ld,kd,docs.tsa,st='s',zx=td[chain][:-1])
                bt=[x for x in (v for k,v in hd.items())]
                dt=[d for d in bt if d]
                # A filled SellAmount switches the quote into sell mode.
                bs='Buy'
                if bt[3]:bs='Sell'
                ff=self.web3.to_checksum_address(dt[4])
                api=self.js_resp(self.sw.format(td[str(chain)],dt[0],dt[2],bs.lower(),dt[3],dt[1],ff), h=self.dcfg.zerox).json()
                # Strip the bulky calldata/sources before showing the quote.
                ps=dict(api)
                del(ps['data'])
                del(ps['sources'])
                apr=input(f'\n\033[1;34;40m{json.dumps(ps, indent=4)}{self.at}') or 'n'
                if apr.lower()=='y':
                    # Grant the 0x exchange proxy an allowance, then send the swap.
                    self.token_approve(api['sellTokenAddress'],acc.address,pk,api['sellAmount'],n=str(td[chain])[:-1])
                    nonce=self.web3.eth.get_transaction_count(acc.address)
                    tx={
                        'nonce': nonce,
                        'from':acc.address,
                        'to': self.web3.to_checksum_address(api['to']),
                        'data': api['data'],
                        'value': int(api['value']),
                        'gas': int(api['gas']),
                        'gasPrice': int(api['gasPrice']),
                        'chainId': api['chainId']
                    }
                    self.sign_tx(tx,pk)
            except requests.exceptions.ConnectionError:
                self.get_menu(f'\033[1;31;40mConnection Error!!')
            except Exception as e:self.get_menu(f'\033[1;31;40m{e}')
    def token_approve(self, t: str, o: str, k: str, a: str, n: str='mainnet'):
        """
        Build and sign an ERC-20 `approve` transaction granting the network's
        exchange proxy the right to move `a` tokens from the owner.

        :param t: Token contract address
        :param o: Owner address holding the tokens to swap
        :param k: Private key of the owner address
        :param a: Amount of tokens to approve (raw integer units)
        :param n: Network name used to look up the spender, e.g. `polygon | mainnet`
        """
        # Spender (exchange proxy) address for this network, from dapp config.
        da=self.dcfg.exp[n]
        contract=self.web3.eth.contract(address=self.web3.to_checksum_address(t), abi=self.dcfg.abi)
        spender=self.web3.to_checksum_address(da)
        tx=contract.functions.approve(spender, int(a)).build_transaction({
            'from': o,
            'nonce': self.web3.eth.get_transaction_count(o),
        })
        self.sign_tx(tx,k,hs='Transaction Approved')
    def contract_data(self, fnc: str, abi: list):
        """
        Prompt for a contract function's parameters and return them.

        Returns (params dict keyed by "<n> -> <type> (<name>)", stateMutability).

        :param fnc: Name of the contract function, e.g. `balanceOf`
        :param abi: Contract ABI as a list of entries
        """
        doc=f" [*]{fnc}() FUNCTION PARAMETERS:"
        ld={}
        hd={}
        ex=''
        # Find the ABI entry for this function and build one menu line per input.
        for i in range(len(abi)):
            if abi[i].get('type')=='function' and abi[i].get('name')==fnc:
                ex=abi[i]['stateMutability']
                lst=abi[i]['inputs']
                for n in range(len(lst)):
                    la=f'{n+1} -> {lst[n]["type"]} ({lst[n]["name"]})'
                    ld[la]=''
                    hd[f'{n+1} ']=la
        # Let the user fill the parameter values interactively.
        ld=self.function_dict(ld,hd,doc)
        return ld,ex
    def function_dict(self, ld: dict, hd: dict, sd: str, st: str=None, zx: str=None) -> dict:
        """
        Interactive editor that fills the argument dictionary and returns it.

        :param ld: Arguments dictionary (label -> value), edited in place
        :param hd: Shortcut-key dictionary ("<n> " -> full label)
        :param sd: Banner string shown above the menu
        :param st: When set, the menu runs in token-swap mode (extra banner and
                   't <symbol>' token lookups); defaults to None
        :param zx: 0x swap network name, e.g. `celo`; defaults to None
        """
        # Snapshot of the defaults so 'd' can restore them later.
        od=dict(ld)
        while True:
            try:
                fs='Function'
                self.clear_screen()
                if st:
                    fs='Swap'
                    doc=docs.fdi+docs.fdt+self.ns.format(self.nw,self.nw_status())+f'\nSwap Network: {zx}'
                else:doc=docs.fdi+self.ns.format(self.nw,self.nw_status())
                bs=f'\n\033[1;32;40m{sd}\n{doc}\n\033[1;34;40m{fs} Parameters: {json.dumps(ld, indent=2)}'
                lda=input(f'{bs}\n\n\033[0mInput Choice=> ')
                if lda=='b':break
                elif lda=='e':self.pexit()
                elif lda=='n':self.block_network()
                elif lda=='c':self.convert_value()
                # 'd' resets every argument back to its default value.
                elif lda=='d':ld.update(od)
                elif len(lda)>1:
                    # Swap mode only: 't <symbol>' looks up a token's details.
                    if lda[0]=='t' and st:
                        s=lda[2:]
                        tca=self.tkn_address(s, n=zx)
                        if not tca:
                            self.get_menu(f'\033[1;31;40m*{s} Not Found In Dapp_Config*')
                            continue
                        c,v,t,n,d=self.contract_info(tca)
                        self.get_menu(f'\033[1;34;40mAddress: {tca}\nName: {n}\nSymbol: {t}\nDecimals: {d}\nNetwork: {self.nw}')
                    # '<n> <value>' assigns a value to argument number n.
                    elif hd.get(lda[:2]):
                        k,v=lda[:2],lda[2:]
                        ld[hd[k]]=v
            except:self.get_menu('e')
        return ld
    def contract_verify(self, s: str):
        """
        Verify a deployed contract's source on the current network's explorer.

        :param s: "<ContractName> <address>" string, e.g.
                  `FeeCollector 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045`
        """
        cn,adr=s.split()
        if adr and cn:
            # truffle-plugin-verify expects the "Name@address" form.
            cna=f'{cn}@{adr}'
            call(['truffle', 'run', 'verify', cna, '--network', self.nw], cwd=self.dcfg.scdir)
        self.get_menu(c=False)
def transfer_eth(self, d: str, a: str, n: str) -> dict:
"""
Returns Unsigned Transaction Dictionary For Transferral Of Ethereum.
:param d: Deposit Address
:param a: Exact Amount To Transfer (Converted To Wei/Decimal)
:param n: Nonce (Amount Of Transactions Made By Address)
"""
gas=self.web3.eth.estimate_gas({'to': d, 'value': a})
tx={
'nonce': n,
'to': d,
'value': a,
'gas': gas,
'gasPrice': self.web3.eth.gas_price
}
return tx
def transfer_tkn(self, c, d: str, a: str, n: str) -> dict:
"""
Returns Unsigned Transaction Dictionary For Transferral Of Contract Address Token.
:param c: Eth Contract Object To Execute Token Smart Contract Functions
:param d: Deposit Address
:param a: Exact Amount To Transfer (Converted To Wei/Decimal)
:param n: Nonce (Amount Of Transactions Made By Address)
"""
gas=c.functions.transfer(d, a).estimate_gas()
tx=c.functions.transfer(d, a).build_transaction({
'nonce': n,
'gas': gas,
'gasPrice': self.web3.eth.gas_price
})
return tx
    def main(self):
        """
        Run the Wuddz_Dapp main menu loop until 'e' or Ctrl-C is entered.
        """
        while True:
            try:
                self.clear_screen()
                # The module docstring doubles as the menu banner.
                self.slow_print(__doc__)
                etht=input("\033[0m\nInput Choice=> ")
                if etht=='e':break
                elif etht=='d':self.func_menu(docs.ds, 'String', f='decode_bsf')
                elif etht=='p':self.crypto_price()
                elif etht=='n':self.block_network()
                elif etht=='x':
                    # Imported lazily so the exchange module only loads on demand.
                    import exchange_api as _eapi
                    _eapi.Exchange().main()
                elif etht=='1':self.account_create()
                elif etht=='2':self.func_menu(docs.gb, 'Balance Format', n='n', f='bal_main')
                elif etht=='3':self.account_deposit()
                elif etht=='4':self.bal_fkey()
                elif etht=='5':self.func_menu(docs.ts, 'Hash', n='n', f='trx_hash')
                elif etht=='6':self.func_menu(docs.cnf, 'Contract Name', n='n', f='contract_deploy')
                elif etht=='7':self.func_menu(docs.sca, 'Contract Address', n='n', f='contract_rw')
                elif etht=='8':self.func_menu(docs.cnv, 'Verify Format', n='n', f='contract_verify')
                elif etht=='9':self.token_swap()
            except KeyboardInterrupt:break
        self.clear_screen()
def cli_main():
    """Wuddz_Dapp console entry point: build a Dapp and launch its main menu."""
    app = Dapp()
    app.main()
| wuddz-devs/wuddz-dapp | wuddz_dapp/dapp.py | dapp.py | py | 42,733 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.simplefilter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pycoingecko.CoinGecko... |
38722585148 | import os
import pandas as pd
from tqdm import tqdm
from geopy.geocoders import Nominatim
loc_app = Nominatim(user_agent="tutorial")  # shared geopy geocoder client (Nominatim requires a user agent)
data_load_path = os.path.join('..', '..', 'data', 'train.csv')  # raw tweets with a free-text `location` column
data_dump_path = os.path.join('..', '..', 'data', 'df_for_plot_on_map.csv')  # geocoded output consumed by the map plot
def recognize_location(location: str):
    """Geocode a free-text location string with Nominatim.

    Returns the raw response dict (contains 'lat'/'lon' keys), or None when
    the location is unknown or the lookup fails.
    """
    try:
        result = loc_app.geocode(location)
        # geocode() returns None for unknown places; .raw on None would raise.
        return result.raw if result is not None else None
    except Exception:
        # Network/service errors count as "not found" so one bad row does not
        # abort the whole batch. (The previous bare `except:` also swallowed
        # KeyboardInterrupt, making long geocoding runs impossible to stop.)
        return None
def prepare_df_for_plot_on_map(dataset: pd.DataFrame) -> pd.DataFrame:
    """Geocode the `location` column and return only rows plottable on a map.

    Adds 'lat'/'lon' columns (and, on raw data, a 'target' label plus a unit
    'size' column), dropping rows whose location could not be resolved.
    """
    df = dataset.copy()
    df = df[df['location'].notna()]
    if 'size' not in df.columns:
        # First pass over raw data: turn the 0/1 target into readable labels
        # and give every point the same marker size.
        df['target'] = df['target'].map({0: 'fake', 1: 'real'})
        df['size'] = 1
    right_locations = []
    # One Nominatim request per row — slow, hence the progress bar.
    for _, row in tqdm(df.iterrows(), total=len(df)):
        right_locations.append(recognize_location(row['location']))
    # Failed lookups returned None and become NaN after assignment.
    df['lat'] = [el['lat'] if isinstance(el, dict) else None for el in right_locations]
    df['lon'] = [el['lon'] if isinstance(el, dict) else None for el in right_locations]
    df = df.dropna(subset=['location', 'lat', 'lon'])
    return df
if __name__ == '__main__':
    # Load the raw training tweets, geocode their locations and persist the
    # map-ready dataframe for the plotting notebook/script.
    df = pd.read_csv(data_load_path)
    print(f"DF SHAPE: {df.shape}")
    df = prepare_df_for_plot_on_map(df)
    print(f"PREPARED DF SHAPE: {df.shape}")
    df.to_csv(data_dump_path, index=False)
{
"api_name": "geopy.geocoders.Nominatim",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"... |
33179678645 | # Importing Module
from pygame import mixer
import customtkinter, tkinter
from PIL import Image
import eyed3
import os
mixer.init()  # initialise pygame's audio mixer before any playback call
# Variables
sList = []  # playlist: file names (absolute paths after addsongs())
musicFolder = "/home/bir/Music/"  # prefix joined with sList entries; cleared by addsongs()
current = ''  # file name of the currently selected song
start = False  # True once a track is loaded; play() then only unpauses
fname = "assets/notfound.jpg"  # placeholder thumbnail until a song provides album art
# Functions
def changeThumb():
    """Show the current track's embedded album art in the thumbnail label.

    The ID3 image is written to a temporary PNG, loaded into the CTkImage,
    then the temp file is removed (the image object keeps its own copy).
    """
    global fname, playImg, mImg
    audio_file = eyed3.load(musicFolder+current)
    image = audio_file.tag.images[0]
    fname = "assets/song.png"
    # `with` guarantees the handle is closed even if the write fails
    # (the original open/write/close left it open on error).
    with open(fname, "wb") as image_file:
        image_file.write(image.image_data)
    playImg.configure(light_image=Image.open(fname))
    os.remove(fname)
def play():
    """Start the current track, or resume it if it is merely paused."""
    global start, current
    if current == '':
        # Nothing selected yet: default to the first playlist entry and
        # show its thumbnail. (Removed the unused `i = sList.index(current)`.)
        current = sList[0]
        changeThumb()
    if start:
        # Track already loaded once: just unpause.
        mixer.music.unpause()
    else:
        mixer.music.load(musicFolder+current)
        mixer.music.play()
        # NOTE(review): this queues the same track to play a second time —
        # confirm the repeat is intentional.
        mixer.music.queue(musicFolder+current)
        start = True
def pause():
    """Pause playback; play()/resume() continue from the same position."""
    mixer.music.pause()
def resume():
    """Resume playback previously suspended with pause()."""
    mixer.music.unpause()
def next():
    """Advance to the next track, wrapping from the last back to the first."""
    # NOTE: shadows the builtin next(); kept because the GUI button's
    # `command=next` references this name.
    global current, start
    length = len(sList)-1
    i = sList.index(current)
    if i < length:
        current = sList[i+1]
    else:
        current = sList[0]
    changeThumb()
    # Force play() to load the new track instead of merely unpausing.
    start = False
    play()
def prev():
    """Step back to the previous track, wrapping from the first to the last.

    Bug fix: the old `if i != length` branch sent "previous" from the LAST
    track to the first one. Plain `sList[i-1]` is correct for every index,
    because Python's negative indexing makes i == 0 wrap to the end.
    """
    global current, start
    i = sList.index(current)
    current = sList[i-1]
    changeThumb()
    # Force play() to load the new track instead of merely unpausing.
    start = False
    play()
def lsSongs():
    """Fill sList with the audio files found in musicFolder.

    Uses endswith() so only real .mp3/.wav extensions match — the previous
    substring test ('.mp3' in f) also matched names like 'demo.mp3.bak'.
    """
    for f in os.listdir(musicFolder):
        if f.endswith(('.mp3', '.wav')):
            sList.append(f)
def addsongs():
    """Open a file dialog and replace the playlist with the chosen mp3 files."""
    global current, musicFolder,sList
    songs = list(tkinter.filedialog.askopenfilenames(initialdir=musicFolder,title="Choose a song", filetypes=(("mp3 Files","*.mp3"),)))
    if len(songs) != 0:
        sList = songs
        current = sList[0]
        print(current)
        # The dialog returns absolute paths, so the folder prefix must be emptied.
        musicFolder = ''
        next()
# Main GUI Code
# Main window: fixed 840x545 (min == max pins the size).
root = customtkinter.CTk()
root.geometry("840x545")
root.minsize(840, 545)
root.maxsize(840, 545)
root.title("Music Player")
# Fonts
bg = customtkinter.CTkFont(family="Helevetica", size=35, weight = 'bold')
md = customtkinter.CTkFont(family="Helevetica", size=18, weight='bold')
sm = customtkinter.CTkFont(family="Helevetica", size=15)
# Populate the playlist from musicFolder before building the UI.
lsSongs()
# Menu Bar
mainmenu = customtkinter.CTkFrame(root, height=30, width=830)
mainmenu.grid(row=0, column=0,padx=5, pady=5, sticky="nw")
addSongs = customtkinter.CTkButton(mainmenu, text="Add Songs", width=50, height=20, font=md, corner_radius=0, command=addsongs, fg_color="transparent")
addSongs.grid(padx=5,ipadx=5,ipady=5,sticky="w")
# Music Thumbnail
mThumb = customtkinter.CTkFrame(master=root, width=835, height=370, fg_color="transparent")
mThumb.grid(row=1, column=0, padx=5, pady=5)
playImg = customtkinter.CTkImage(light_image=Image.open(fname), size=(835,370))
mImg = customtkinter.CTkLabel(mThumb, image=playImg, text="")
mImg.grid(padx=0)
# Music Player Options
mPlay = customtkinter.CTkFrame(master=root, width=835, height=110, fg_color='transparent')
mPlay.grid(row=2, columnspan=2, column=0, padx=5, pady=15)
# Bug fix: the "Prev" button used to be stored in `nextBtn`, silently
# shadowing it until the real Next button overwrote the name again.
prevBtn = customtkinter.CTkButton(mPlay, text="Prev", width=75, height=75, corner_radius=75, font=md, border_spacing=0, command=prev)
prevBtn.pack(side='left', padx=7)
playBtn = customtkinter.CTkButton(mPlay, text="Play", width=75, height=75, corner_radius=75, font=md, border_spacing=0, command=play)
playBtn.pack(side='left', padx=7)
pauseBtn = customtkinter.CTkButton(mPlay, text="Pause", width=75, height=75, corner_radius=75, font=md, border_spacing=0, command=pause)
pauseBtn.pack(side='left', padx=7)
nextBtn = customtkinter.CTkButton(mPlay, text="Next", width=75, height=75, corner_radius=75, font=md, border_spacing=0, command=next)
nextBtn.pack(side='left', padx=7)
root.mainloop()
{
"api_name": "pygame.mixer.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "eyed3.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_nu... |
234780590 | import json
import pickle
import time
from hashlib import sha256
# Block class
# Block is a class that contains the data and the hash of the previous block
# The hash of the previous block is used to link the blocks together
# The hash of the block is used to verify the integrity of the chain
# We plan to use the blockchain to store the data
# Need to solve below issues:
# 1. In CRUD operations, we need to be able to add, read data
# 2. How to solve the conflicts
# 3. Performance of reading data, especially when the data is large and have many nodes
from os.path import exists
from decouple import config
class Block:
    """A single link in the chain: payload plus the hash of its predecessor."""

    def __init__(self, index, data, timestamp, previous_hash):
        """Create an unmined block; `nonce` starts at 0 and is bumped by PoW."""
        self.index = index
        self.data = data
        self.timestamp = timestamp
        self.previous_hash = previous_hash
        self.nonce = 0

    def compute_hash(self):
        """Return the SHA-256 hex digest of the block's sorted JSON encoding."""
        payload = json.dumps(self.__dict__, sort_keys=True)
        return sha256(payload.encode()).hexdigest()
class Blockchain:
    """A minimal proof-of-work blockchain with pickle-file persistence."""

    # difficulty of our PoW algorithm: required count of leading '0' hex digits
    difficulty = 2
    # chain backup path (overridable via the BLOCK_CHAIN_BACKUP env var)
    file_name = config("BLOCK_CHAIN_BACKUP", default='./blockchain.bk')

    def __init__(self):
        """Load the chain from the backup file, or start a fresh one."""
        # Bug fix: always initialise the pending-data buffer. The original
        # only set it on the fresh-chain path, so set_data()/mine() raised
        # AttributeError after a chain was restored from disk.
        self.unconfirmed_data = []
        if exists(self.file_name):
            # SECURITY: pickle.load executes arbitrary code from the file;
            # only load backups this process wrote itself.
            with open(self.file_name, 'rb') as f:
                self.chain = pickle.load(f)
        else:
            self.chain = []
            self.create_genesis_block()

    def dump_data(self):
        """Persist the current chain to the backup file."""
        # `with` closes the handle deterministically (the original leaked it).
        with open(self.file_name, 'wb') as f:
            pickle.dump(self.chain, f)

    def create_genesis_block(self):
        """Append the index-0 genesis block (previous_hash "0") to the chain."""
        genesis_block = Block(0, [], time.time(), "0")
        genesis_block.hash = genesis_block.compute_hash()
        self.chain.append(genesis_block)

    @property
    def last_block(self):
        """The most recently appended block."""
        return self.chain[-1]

    def add_block(self, block, proof):
        """Append `block` after verification; return True on success.

        Verification: `proof` must be a valid PoW hash of the block, and the
        block's previous_hash must match the current chain tip's hash.
        """
        previous_hash = self.last_block.hash
        if previous_hash != block.previous_hash:
            return False
        if not self.is_valid_proof(block, proof):
            return False
        block.hash = proof
        self.chain.append(block)
        return True

    @classmethod
    def is_valid_proof(cls, block, block_hash):
        """True when block_hash meets the difficulty target AND equals the
        block's recomputed hash.

        Bug fix: declared as a classmethod so check_chain_validity's
        `cls.is_valid_proof(...)` call works; the original instance method
        raised TypeError there because the block got bound as `self`.
        Instance calls (`self.is_valid_proof(...)`) still work unchanged.
        """
        return (block_hash.startswith('0' * cls.difficulty) and
                block_hash == block.compute_hash())

    def proof_of_work(self, block):
        """Increment block.nonce until its hash meets the difficulty target;
        return the winning hash."""
        block.nonce = 0
        computed_hash = block.compute_hash()
        while not computed_hash.startswith('0' * Blockchain.difficulty):
            block.nonce += 1
            computed_hash = block.compute_hash()
        return computed_hash

    def set_data(self, data):
        """Queue `data` for inclusion in the next mined block."""
        self.unconfirmed_data.append(data)

    @classmethod
    def check_chain_validity(cls, chain):
        """Validate every block's PoW hash and previous-hash linkage.

        NOTE(review): the genesis block is created without proof-of-work, so
        its hash normally fails the difficulty test and this check returns
        False for any chain that still contains it — confirm intended.
        """
        result = True
        previous_hash = "0"
        for block in chain:
            block_hash = block.hash
            # Strip the stored hash so compute_hash() sees exactly the fields
            # that were hashed at mining time.
            delattr(block, "hash")
            if not cls.is_valid_proof(block, block_hash) or \
                    previous_hash != block.previous_hash:
                # Restore the attribute before bailing out so a failed check
                # does not permanently mutate the inspected block.
                block.hash = block_hash
                result = False
                break
            block.hash, previous_hash = block_hash, block_hash
        return result

    def mine(self):
        """Package pending data into a new block, run PoW and append it.

        Returns the new block's index, or False when nothing is pending.
        """
        if not self.unconfirmed_data:
            return False
        last_block = self.last_block
        new_block = Block(index=last_block.index + 1,
                          data=self.unconfirmed_data,
                          timestamp=time.time(),
                          previous_hash=last_block.hash)
        proof = self.proof_of_work(new_block)
        self.add_block(new_block, proof)
        self.unconfirmed_data = []
        return new_block.index
| huangjien/datachain | blockchain.py | blockchain.py | py | 4,690 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "hashlib.sha256",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "decouple.config",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_... |
43758227481 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
from os.path import join as pjoin
import numpy as np
# from distutils import spawn
# run: pip install easydict
from easydict import EasyDict as edict
__C = edict()
# get config by:
# from lib.config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Whether to use focal loss to train
__C.TRAIN.USE_FOCAL_LOSS = False
# learning rate manually decayed (instead of scheduled exponential decay)
__C.TRAIN.LR_DIY_DECAY = False
# training optimizer: either 'adam' 'sgd_m'
__C.TRAIN.OPTIMIZER = 'adam'
# Momentum used when the sgd_m optimizer is selected
__C.TRAIN.MOMENTUM = 0.9
# Initial learning rate
__C.TRAIN.LEARNING_RATE = 1e-3
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.001
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Weight initializer
__C.TRAIN.WEIGHT_INITIALIZER = 'normal'
# Image size (pixels) fed to the network
__C.TRAIN.IMG_WIDTH = 224
__C.TRAIN.IMG_HEIGHT = 224
# Exponential learning-rate decay schedule (steps between decays, decay factor)
__C.TRAIN.EXP_DECAY_STEPS = 5000
__C.TRAIN.EXP_DECAY_RATE = 0.8
__C.TRAIN.BATCH_SIZE = 32
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 300
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# Iterations between console log lines
__C.TRAIN.DISPLAY = 10
# TEST OPTIONS
__C.TEST = edict()
__C.TEST.FLIP = False
# Backbone network name
__C.NET = 'res101'
# Global settings
__C.DATA_PATH = '/home/joe/git/furniture/data'
__C.EXP_DIR = 'default'
__C.DEBUG = False
# __C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# VGG preprocessing tf.slim
# NOTE(review): these look like RGB-order ImageNet means (tf.slim
# convention) — confirm the channel order matches the data loader.
__C.PIXEL_MEANS = np.array([[[123.681, 116.78, 103.94]]])
#
# ResNet options
#
__C.RESNET = edict()
# Number of ResNet blocks whose weights stay frozen during fine-tuning
__C.RESNET.FIXED_BLOCKS = 3
# Root directory of project
__C.ROOT_DIR = osp.abspath(pjoin(osp.dirname(__file__), '..'))
__C.RESULT_PATH = pjoin(__C.ROOT_DIR, 'result')
def get_output_dir():
    """Return the checkpoint output directory, creating it if missing.

    The canonical path is <ROOT_DIR>/output/<EXP_DIR>/ckpt.
    """
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, 'ckpt'))
    # exist_ok avoids the check-then-create race when several workers start.
    os.makedirs(outdir, exist_ok=True)
    return outdir
def get_output_tb_dir():
    """Return the tensorboard summary directory, creating it if missing.

    The canonical path is <ROOT_DIR>/output/<EXP_DIR>/tb.
    """
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, 'tb'))
    # exist_ok avoids the check-then-create race when several workers start.
    os.makedirs(outdir, exist_ok=True)
    return outdir
def cfg_from_list(cfg_list):
    """Set config keys via a flat key/value list (e.g. from the command line):
    ['TRAIN.LEARNING_RATE', '0.01', 'DEBUG', 'True'].

    Values are parsed with ast.literal_eval and must keep the type of the
    entry they replace.
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        for subkey in key_list[:-1]:
            # Bug fix: dict.has_key() was removed in Python 3; `in` works on both.
            assert subkey in d
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # handle the case when v is a plain string literal (e.g. a name)
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(
                type(value), type(d[subkey]))
        d[subkey] = value
| InnerPeace-Wu/imaterialist_challenge-furniture | lib/config.py | config.py | py | 3,576 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "easydict.EasyDict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "easydict.EasyDict",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "easydict.EasyDict",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
18884483513 | import argparse
import glob
import os
import numpy as np
from scipy import ndimage
from PIL import Image
from tqdm import tqdm
def main(args):
    """Scan train/val annotation masks and record images with no usable boxes.

    A mask is "invalid" when no class label yields at least one connected
    component with a non-degenerate bounding box; the invalid paths are
    written to args.output_file, one per line.
    """
    mask_paths = []
    for ann_dir in (args.annotation_train_dir, args.annotation_valid_dir):
        mask_paths.extend(glob.glob(os.path.join(ann_dir, "*.png")))

    invalid_images = []
    for mask_path in tqdm(mask_paths):
        # Grayscale decode: each distinct pixel value is one class label.
        mask = np.array(Image.open(mask_path).convert("L"))
        boxes = []
        for cls_label in np.unique(mask):
            if cls_label == 255:
                # 255 marks background / unlabeled pixels.
                continue
            # Split this class into connected-component instances.
            blobs, blob_count = ndimage.label(mask == cls_label)
            for blob_id in range(1, blob_count + 1):
                ys, xs = np.where(blobs == blob_id)
                xmin, xmax = np.min(xs), np.max(xs)
                ymin, ymax = np.min(ys), np.max(ys)
                # Degenerate (zero-area) boxes do not count as instances.
                if (xmax - xmin) * (ymax - ymin) == 0:
                    continue
                boxes.append([xmin, ymin, xmax, ymax])
        if not boxes:
            invalid_images.append(mask_path)

    with open(args.output_file, "w") as f:
        f.write("\n".join(invalid_images))
if __name__ == "__main__":
    # CLI entry point: annotation directories in, invalid-image list out.
    parser = argparse.ArgumentParser(
        description="Test and generate invalid image list"
    )
    parser.add_argument("--annotation-train-dir",
                        default="../cocostuff/dataset/annotations/train2017",
                        help="Annotation directory")
    parser.add_argument("--annotation-valid-dir",
                        default="../cocostuff/dataset/annotations/val2017",
                        help="Annotation directory")
    parser.add_argument("--output-file",
                        default="invalid_images.txt",
                        help="Output file of invalid images")
    args = parser.parse_args()
    main(args)
| hmchuong/Detectron | test_invalid_image.py | test_invalid_image.py | py | 2,358 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 12... |
43472856046 | from django import forms
from datetime import *
from .models import Transaccion
from ..cliente.models import Cliente
from ..user.models import User
class TransaccionForm(forms.ModelForm):
    """ModelForm for creating/editing a Transaccion (sale/payment header).

    All widgets get Bootstrap's `form-control` class; the date and the money
    totals are read-only because they are computed elsewhere.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Give every declared field the Bootstrap class first; specific
        # fields below then replace their attrs dict entirely.
        for field in self.Meta.fields:
            self.fields[field].widget.attrs.update({
                'class': 'form-control'
            })
        # Bug fix: the displayed date is now computed per form instance.
        # It used to live in Meta.widgets, where datetime.now() ran once at
        # import time (so it froze at server start) — and was then discarded
        # anyway because this attrs assignment replaced the widget's dict.
        self.fields['fecha_trans'].widget.attrs = {
            'readonly': True,
            'class': 'form-control',
            'value': datetime.now().strftime('%Y-%m-%d')
        }
        self.fields['user'].widget.attrs = {
            'class': 'custom-select select2'
        }
        # Start empty: the client list is expected to be populated
        # client-side by the select2 widget (e.g. AJAX search).
        self.fields["user"].queryset = User.objects.none()
        self.fields['subtotal'].widget.attrs = {
            'value': '0.00',
            'class': 'form-control',
            'readonly': True
        }
        self.fields['iva'].widget.attrs = {
            'value': '0.00',
            'class': 'form-control',
            'readonly': True
        }
        self.fields['total'].widget.attrs = {
            'value': '0.00',
            'class': 'form-control',
            'readonly': True
        }

    class Meta:
        model = Transaccion
        fields = [
            'fecha_trans',
            'user',
            'subtotal',
            'iva',
            'total'
        ]
        labels = {
            'fecha_trans': 'Fecha',
            'user': 'Cliente',
            'subtotal': 'Subtotal',
            'iva': 'I.V.A.',
            'total': 'TOTAL'
        }
        widgets = {
            # Only the display format is declared here; the `value` attr is
            # set per-instance in __init__ (see above).
            'fecha_trans': forms.DateInput(
                format='%Y-%m-%d',
            ),
            'subtotal': forms.TextInput(),
            'iva': forms.TextInput(),
            'total': forms.TextInput(),
        }
| chrisstianandres/pagos | apps/transaccion/forms.py | forms.py | py | 2,002 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "user.models.User.objects.none",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "... |
37774857793 | import argparse
import sys
from os.path import dirname, realpath
sys.path.append(dirname(dirname(realpath(__file__))))
import project.data.dataset_utils as data_utils
import project.models.model_utils as model_utils
import project.training.train_utils as train_utils
import torch.nn as N
import os
import torch
import datetime
import pickle
import pdb
from pytorch_pretrained_bert import BertForSequenceClassification
parser = argparse.ArgumentParser(description='Lung Cancer Disease Progression Classifier')
# learning
# Bug fix: several help strings advertised defaults that did not match the
# actual `default=` values (lr, dropout, batch_size, cuda); they now agree.
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate [default: 0.0001]')
parser.add_argument('--dropout', type=float, default=0.05, help='sets dropout layer [default: 0.05]')
parser.add_argument('--valid_split', type=float, default=0.2, help='sets validation split from data [default: 0.2]')
parser.add_argument('--epochs', type=int, default=30, help='number of epochs for train [default: 30]')
parser.add_argument('--batch_size', type=int, default=10, help='batch size for training [default: 10]')
parser.add_argument('--mid_dim', type=int, default=100, help='middle dimension of feature extraction architecture [default: 100]')
parser.add_argument('--max_base', type=int, default=400, help='maximum text features for baseline data [default: 400]')
parser.add_argument('--max_prog', type=int, default=800, help='maximum text features for progression data [default: 800]')
parser.add_argument('--max_before', type=int, default=600, help='maximum text features for context before volume [default: 600]')
parser.add_argument('--max_after', type=int, default=300, help='maximum text features for context after volume [default: 300]')
# NOTE(review): type=tuple applied to a command-line string splits it into
# characters; the default tuple works, but passing this flag on the CLI will
# not behave as the help text suggests — confirm before relying on it.
parser.add_argument('--desired_features', type=tuple, default=("lens", "organs", "date_dist"), help='enter context features in format - (\"feat_1\", ..., \"feat_n\")')
# device
parser.add_argument('--cuda', action='store_true', default=False, help='enable the gpu [default: False]')
parser.add_argument('--train', action='store_true', default=False, help='enable train [default: False]')
# task
parser.add_argument('--snapshot', type=str, default=None, help='filename of model snapshot to load[default: None]')
parser.add_argument('--save_path', type=str, default="model2.pt", help='Path where to dump model')
args = parser.parse_args()
def test_hyperparamaters(args, model_save_directory="test_models/", dictionary_save_file="data.pkl"):
    """Grid-search learning rate, dropout, middle dimension and the tensor
    reduction function over the three model families (combined, features,
    text), training each configuration and recording its best accuracy.

    args                 -- parsed argparse namespace; lr/dropout/mid_dim and
                            save_path are mutated in place for each run
    model_save_directory -- directory (with trailing slash) for checkpoints
    dictionary_save_file -- pickle file mapping best accuracy -> list of
                            (concat_func, lr, dropout, mid_dim, model_type)
    """
    print("\nParameters:")
    for attr, value in sorted(args.__dict__.items()):
        print("\t{}={}".format(attr.upper(), value))
    out_dict = {}
    train_data, valid_data = data_utils.make_datasets(args)
    # Best accuracy seen so far for each model family (replaces the former
    # MAX_ACC1/MAX_ACC2/MAX_ACC3 locals).
    best_acc = {"combined": 0, "features": 0, "text": 0}
    for concat_func in (torch.sum, torch.max, torch.mean, torch.min):
        for lr in (0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01):
            args.lr = lr
            for dropout in (0.05, 0.1, 0.2, 0.3, 0.4):
                args.dropout = dropout
                for mid_dim in (20, 50, 100, 200, 300):
                    args.mid_dim = mid_dim
                    # One shared train/record step per model family instead of
                    # three copy-pasted blocks.
                    for model_type, file_name in (("combined", "model_concat.pt"),
                                                  ("features", "model_features.pt"),
                                                  ("text", "model_text.pt")):
                        args.save_path = model_save_directory + file_name
                        if model_type == "combined":
                            model = model_utils.CombinedNet(args, concat_func)
                        elif model_type == "features":
                            model = model_utils.FeatureNet(args, concat_func)
                        else:
                            model = model_utils.TextNet(args)
                        print(model)
                        best_acc[model_type], temp_max_acc = train_utils.train_model(
                            train_data, valid_data, model, args,
                            best_acc[model_type], model_type)
                        out_dict.setdefault(temp_max_acc, []).append(
                            (concat_func, lr, dropout, mid_dim, model_type))
                        print()
    # Close the results file deterministically (the original leaked the handle).
    with open(dictionary_save_file, "wb") as dict_file:
        pickle.dump(out_dict, dict_file)
def finetune_bert(args, model_save_directory, bert_file_path):
    """Fine-tune a pretrained BERT sequence classifier (4 labels) on the
    report datasets and train it via train_utils.train_model.

    args                 -- parsed argparse namespace; save_path is mutated
    model_save_directory -- directory for the checkpoint, with or without a
                            trailing slash
    bert_file_path       -- currently unused (NOTE(review): dead parameter,
                            kept for interface compatibility)
    """
    train_data, valid_data = data_utils.make_datasets(args)
    model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=4)
    # Join directory and file name robustly: plain concatenation produced
    # "modelsbert_model.pt" when the directory lacked a trailing slash.
    directory = model_save_directory.rstrip('/')
    args.save_path = directory + '/bert_model.pt' if directory else 'bert_model.pt'
    ACC, _ = train_utils.train_model(train_data, valid_data, model, args, 0, "bert")
if __name__ == '__main__':
    # Entry point: fine-tune BERT on the report data; re-enable the
    # hyper-parameter grid search by uncommenting the line below.
    #test_hyperparamaters(args)
    finetune_bert(args, "models", "BertModels/cased_bert.bin")
| Sanger2000/Predicting-Lung-Cancer-Disease-Progression-from-CT-reports | scripts/main.py | main.py | py | 5,433 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"li... |
41035595174 | """
Debug ORMAdapter calls within ORM runs.
Demos::
python tools/trace_orm_adapter.py -m pytest \
test/orm/inheritance/test_polymorphic_rel.py::PolymorphicAliasedJoinsTest::test_primary_eager_aliasing_joinedload
python tools/trace_orm_adapter.py -m pytest \
test/orm/test_eager_relations.py::LazyLoadOptSpecificityTest::test_pathed_joinedload_aliased_abs_bcs
python tools/trace_orm_adapter.py my_test_script.py
The above two tests should spit out a ton of debug output. If a test or program
has no debug output at all, that's a good thing! it means ORMAdapter isn't
used for that case.
You can then set a breakpoint at the end of any adapt step:
python tools/trace_orm_adapter.py -d 10 -m pytest -s \
test/orm/test_eager_relations.py::LazyLoadOptSpecificityTest::test_pathed_joinedload_aliased_abs_bcs
""" # noqa: E501
# mypy: ignore-errors
from __future__ import annotations
import argparse
import contextlib
import contextvars
import sys
from typing import TYPE_CHECKING
from sqlalchemy.orm import util
if TYPE_CHECKING:
from typing import Any
from typing import List
from typing import Optional
from sqlalchemy.sql.elements import ColumnElement
class _ORMAdapterTrace:
    """Mixin spliced ahead of ORMAdapter / ORMStatementAdapter in their MRO
    (see the ``__bases__`` patching below) that prints every adaptation
    step, indented by nesting depth, to REAL_STDOUT."""

    def _locate_col(
        self, col: ColumnElement[Any]
    ) -> Optional[ColumnElement[Any]]:
        # Delegate to the real implementation, wrapped in trace output.
        with self._tracer("_locate_col") as tracer:
            return tracer(super()._locate_col, col)
    def replace(self, col, _include_singleton_constants: bool = False):
        with self._tracer("replace") as tracer:
            return tracer(super().replace, col)
    # Shared trace state (call stack of (adapter, method) pairs plus a running
    # step counter) kept in a ContextVar so nested runs don't interleave.
    _orm_adapter_trace_context = contextvars.ContextVar("_tracer")
    @contextlib.contextmanager
    def _tracer(self, meth):
        """Yield a callable that executes one adapt step with logging."""
        adapter = self
        ctx = self._orm_adapter_trace_context.get(
            {"stack": [], "last_depth": 0, "line_no": 0}
        )
        self._orm_adapter_trace_context.set(ctx)
        stack: List[Any] = ctx["stack"]  # type: ignore
        last_depth = len(stack)
        line_no: int = ctx["line_no"]  # type: ignore
        ctx["last_depth"] = last_depth
        stack.append((adapter, meth))
        # Indent by current nesting depth so nested adaptations read as a tree.
        indent = " " * last_depth
        if hasattr(adapter, "mapper"):
            adapter_desc = (
                f"{adapter.__class__.__name__}"
                f"({adapter.role.name}, mapper={adapter.mapper})"
            )
        else:
            adapter_desc = f"{adapter.__class__.__name__}({adapter.role.name})"
        def tracer_fn(fn, arg):
            nonlocal line_no
            line_no += 1
            print(f"{indent} {line_no} {adapter_desc}", file=REAL_STDOUT)
            sub_indent = " " * len(f"{line_no} ")
            print(
                f"{indent}{sub_indent} -> "
                f"{meth} {_orm_adapter_trace_print(arg)}",
                file=REAL_STDOUT,
            )
            ctx["line_no"] = line_no
            ret = fn(arg)
            # -d/--debug N drops into the debugger right after step N runs.
            if DEBUG_ADAPT_STEP == line_no:
                breakpoint()
            if ret is arg:
                print(f"{indent} {line_no} <- same object", file=REAL_STDOUT)
            else:
                print(
                    f"{indent} {line_no} <- {_orm_adapter_trace_print(ret)}",
                    file=REAL_STDOUT,
                )
            # Blank separator line after each top-level adaptation.
            if last_depth == 0:
                print("", file=REAL_STDOUT)
            return ret
        try:
            yield tracer_fn
        finally:
            stack.pop(-1)
# Splice the tracing mixin in front of the existing MRO so every
# ORMAdapter / ORMStatementAdapter call to _locate_col()/replace() is
# routed through _ORMAdapterTrace first.
util.ORMAdapter.__bases__ = (_ORMAdapterTrace,) + util.ORMAdapter.__bases__
util.ORMStatementAdapter.__bases__ = (
    _ORMAdapterTrace,
) + util.ORMStatementAdapter.__bases__
def _orm_adapter_trace_print(obj):
if obj is None:
return "None"
t_print = _orm_adapter_trace_printers.get(obj.__visit_name__, None)
if t_print:
return t_print(obj)
else:
return f"{obj!r}"
_orm_adapter_trace_printers = {
"table": lambda t: (
f'Table("{t.name}", '
f"entity={t._annotations.get('parentmapper', None)})"
),
"column": lambda c: (
f'Column("{c.name}", {_orm_adapter_trace_print(c.table)} '
f"entity={c._annotations.get('parentmapper', None)})"
),
"join": lambda j: (
f"{j.__class__.__name__}({_orm_adapter_trace_print(j.left)}, "
f"{_orm_adapter_trace_print(j.right)})"
),
"label": lambda l: f"Label({_orm_adapter_trace_print(l.element)})",
}
DEBUG_ADAPT_STEP = None
REAL_STDOUT = sys.__stdout__
def main():
    """Parse this tool's own options, then hand the remaining argv to the
    wrapped module (-m) or script and run it under the tracer."""
    global DEBUG_ADAPT_STEP
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d", "--debug", type=int, help="breakpoint at this adaptation step"
    )
    parser.add_argument(
        "-m",
        "--module",
        type=str,
        help="import module name instead of running a script",
    )
    parser.add_argument(
        "args", metavar="N", type=str, nargs="*", help="additional arguments"
    )
    argparse_args = []
    sys_argv = list(sys.argv)
    progname = sys_argv.pop(0)
    # this is a little crazy, works at the moment for:
    # module w args:
    #     python tools/trace_orm_adapter.py -m pytest test/orm/test_query.py -s
    # script:
    #     python tools/trace_orm_adapter.py test3.py
    # Split argv by hand: everything up to and including -m/--module (and any
    # -d/--debug) belongs to this tool; the first argument AFTER the module
    # name and everything following it belongs to the wrapped program.
    has_module = False
    while sys_argv:
        arg = sys_argv.pop(0)
        if arg in ("-m", "--module", "-d", "--debug"):
            argparse_args.append(arg)
            argparse_args.append(sys_argv.pop(0))
            has_module = arg in ("-m", "--module")
        else:
            if not has_module:
                argparse_args.append(arg)
            else:
                # First post-module arg: stop, leave it for the wrapped program.
                sys_argv.insert(0, arg)
                break
    options = parser.parse_args(argparse_args)
    # Re-shape argv as the wrapped program will see it.
    sys.argv = ["program.py"] + sys_argv
    if options.module == "pytest":
        # Trace output goes to the real stdout; keep pytest's capture on sys.
        sys.argv.extend(["--capture", "sys"])
    import runpy
    if options.debug:
        DEBUG_ADAPT_STEP = options.debug
    if options.module:
        runpy.run_module(options.module, run_name="__main__")
    else:
        progname = options.args[0]
        runpy.run_path(progname)
if __name__ == "__main__":
    main()
| sqlalchemy/sqlalchemy | tools/trace_orm_adapter.py | trace_orm_adapter.py | py | 6,124 | python | en | code | 8,024 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.sql.elements.ColumnElement",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": ... |
7531610125 | import torch
from torch.nn.utils.rnn import pad_sequence
from transformers4ime.data.logits_processor import ConstrainedLogitsProcessor
class PinyinGPTConcatLogitsProcessor(ConstrainedLogitsProcessor):
    """Logits processor that restricts generation to the token ids allowed
    for the current pinyin constraint (looked up in ``self.pc_df``)."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, constraint_id) -> torch.FloatTensor:
        # input_ids assumed (batch, seq_len); scores assumed (batch, vocab)
        # from the dim-1 gather/scatter below -- TODO confirm against caller.
        bs, il = input_ids.size()
        # gather index
        # NOTE(review): the same constraint_id is reused for every batch row;
        # if constraints can differ per row this should index into it.
        gather_index_list = []
        for _ in range(bs):
            pid = constraint_id
            c_df = self.pc_df.get(pid.item())
            if c_df is not None:
                # Allowed token ids are the dataframe's index values.
                gather_index_list.append(torch.tensor(c_df.index.tolist()))
            else:
                # No entry for this pinyin: fall back to token id 0 only.
                gather_index_list.append(torch.zeros(1))
        gather_index = pad_sequence(gather_index_list, batch_first=True, padding_value=0).long().to(scores.device)
        # Save the allowed tokens' scores, mask everything to -inf, then
        # scatter the saved scores back (mutates `scores` in place).
        score = torch.gather(scores, 1, gather_index)
        scores.fill_(-float("inf"))
        scores.scatter_(1, gather_index, score)
        return scores
| VisualJoyce/Transformers4IME | src/transformers4ime/data/logits_processor/pinyingpt_concat.py | pinyingpt_concat.py | py | 976 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "transformers4ime.data.logits_processor.ConstrainedLogitsProcessor",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.LongTensor",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 9,
"usage_type":... |
1117221641 | import os
import re
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import convertapi
import cv2
import pytesseract
from pdfminer.high_level import extract_text
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from django.shortcuts import render
from .forms import PersonalInformation
from .models import Information
# Pre-compiled patterns for scraping contact details out of resume text.
# Lower-case-only e-mail matcher (input is expected lower-cased or OCR text).
EMAIL_REG = re.compile(r'[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+')
# Loose phone matcher: optional +/( prefix, then 9+ digits and separators.
PHONE_REG = re.compile(r'[\+\(]?[1-9][0-9 .\-\(\)]{8,}[0-9]')
# NOTE(review): [linkedin] is a character class (matches the single letters
# l,i,n,k,e,d), not the word "linkedin", so this pattern is much looser than
# intended and can match unrelated URLs -- verify its matches.
LINKEDIN_REG = re.compile(r'((http(s?)://)*([www])*\.|[linkedin])[linkedin/~\-]+\.[a-zA-Z0-9/~\-_,&=\?\.;]+[^\.,\s<]')
def extract_emails(resume_text):
    """Return every e-mail-like substring found in *resume_text*."""
    return EMAIL_REG.findall(resume_text)
def extract_linkedIn(resume_text):
    """Return every LinkedIn-URL-like substring found in *resume_text*."""
    return LINKEDIN_REG.findall(resume_text)
def extract_phone_number(resume_text):
    """Return the first phone-like match from *resume_text*, or None.

    The candidate is accepted only if it occurs verbatim in the text and is
    shorter than 16 characters (guards against run-together digit noise).
    """
    matches = PHONE_REG.findall(resume_text)
    if not matches:
        return None
    candidate = ''.join(matches[0])
    if candidate in resume_text and len(candidate) < 16:
        return candidate
    return None
def write_to_gsheet(folder_id, filename, phone, email):
    """Append one candidate's details as a row to sheet1 of the
    "Candidates information" Google Sheet.

    NOTE(review): the service-account key path is machine-specific, and the
    LinkedIn URL is hard-coded rather than taken from extract_linkedIn --
    both look like leftovers that should be parameterized.
    """
    Scopes = [
        'https://www.googleapis.com/auth/spreadsheets',
        'https://www.googleapis.com/auth/drive'
    ]
    creds = ServiceAccountCredentials.from_json_keyfile_name("/home/samarth/pdfparser/pdfparser/secret_key.json", scopes=Scopes)
    file = gspread.authorize(creds)
    workbook = file.open("Candidates information")
    sheet = workbook.sheet1
    candidate_info = [folder_id, filename, phone, email, 'https://www.linkedin.com/in/sam-tyagi-6b6487245/']
    sheet.append_row(candidate_info)
def convert_to_pdf(file_path, typ):
    """Convert *file_path* (format *typ*, e.g. 'docx') to PDF via the
    ConvertAPI web service, saving the result into the project directory.

    NOTE(review): the API secret is committed in source -- move it to an
    environment variable or settings file.
    """
    convertapi.api_secret = 'DBuhCGfisLXtWsTg'
    convertapi.convert('pdf', {
        'File': file_path
    }, from_format = typ).save_files('/home/samarth/pdfparser/pdfparser')
def get_form(request):
    """Django view: on POST, walk a shared Google Drive folder link, extract
    contact details from each resume (pdf / doc / docx / image) in its
    sub-folders, and append them to the Google Sheet; on GET, render the
    link-submission form.
    """
    gauth = GoogleAuth()
    drive = GoogleDrive(gauth)
    if request.method == 'POST':
        drive_link = request.POST.get('drive_link')
        data = Information(drive_link=drive_link)
        data.save()
        print('------------'+drive_link+'--------------------------')
        # The folder id is the path segment after the last '/'.
        x = len(drive_link)
        for i in range(x-1,-1,-1):
            if drive_link[i] == '/':
                break
        folder = drive_link[i+1:]
        # Sub-folders of the shared folder (one per candidate, presumably --
        # TODO confirm the expected Drive layout).
        file_list = drive.ListFile({'q' : f"'{folder}' in parents and trashed=false and mimeType='application/vnd.google-apps.folder'"}).GetList()
        for index, file in enumerate(file_list):
            # print(index+1, 'file downloaded : ', file['title'])
            folder_id = file['id']
            print('folder id: ', file['id'])
            fl = drive.ListFile({'q' : f"'{folder_id}' in parents and trashed=false"}).GetList()
            for f in fl:
                # Extension = text after the last '.' in the title.
                # NOTE(review): reuses loop variable `i` from the link scan.
                length = len(f['title'])
                typ = ''
                for i in range(length-1,-1,-1):
                    if f['title'][i] == '.':
                        break
                typ = f['title'][i+1:]
                if typ == 'pdf':
                    f.GetContentFile(f['title'])
                    text = extract_text(f['title'])
                    # print(text)
                    emails = extract_emails(text)
                    phone_number = extract_phone_number(text)
                    linkedIn = extract_linkedIn(text)
                    if emails:
                        print(emails[0])
                    print(phone_number)
                    # NOTE(review): unlike the docx branch below, this call is
                    # not guarded -- emails[0] raises IndexError when no e-mail
                    # was found.
                    write_to_gsheet(folder_id,f['title'], phone_number, emails[0])
                    os.remove(f['title'])
                elif typ == 'docx' or typ == 'doc':
                    # Convert to PDF first, then extract text from the PDF.
                    f.GetContentFile(f['title'])
                    convert_to_pdf(f['title'], typ)
                    converted_file_path = f['title'][:i+1] + 'pdf'
                    text = extract_text(converted_file_path)
                    emails = extract_emails(text)
                    phone_number = extract_phone_number(text)
                    linkedIn = extract_linkedIn(text)
                    if emails:
                        print(emails[0])
                        print(phone_number)
                    if emails and phone_number:
                        write_to_gsheet(folder_id, f['title'], phone_number, emails[0])
                    os.remove(f['title'])
                    os.remove(converted_file_path)
                elif typ in ['png', 'jpeg', 'jpg']:
                    # OCR path: upscale 1.5x for better recognition, then
                    # run tesseract on the upscaled image.
                    f.GetContentFile(f['title'])
                    tessdata_dir_config= r'/--tessdata-dir "/home/samarth/pdfparser/my_env/lib/python3.8/site-packages"'
                    img = cv2.imread(f['title'],1)
                    img_scale_up = cv2.resize(img, (0, 0), fx=1.5, fy=1.5)
                    # cv2.imshow('Upscaled Image', img_scale_up)
                    if not cv2.imwrite(r'/home/samarth/pdfparser/pdfparser/upscaled.{0}'.format(typ), img_scale_up):
                        raise Exception('Could not write image')
                    cv2.waitKey(0)
                    upscaled_image = 'upscaled.{0}'.format(typ)
                    text = pytesseract.image_to_string(upscaled_image, config=tessdata_dir_config)
                    emails = extract_emails(text)
                    phone_number = extract_phone_number(text)
                    linkedIn = extract_linkedIn(text)
                    if emails:
                        print(emails[0])
                        print(phone_number)
                    if emails and phone_number:
                        write_to_gsheet(folder_id, f['title'], phone_number, emails[0])
                    os.remove(f['title'])
                    os.remove(upscaled_image)
        return render(request, 'app/handleForm.html')
    else:
        form = PersonalInformation()
        return render(request, 'app/userdata.html', {'form': form})
def upload(request):
    """Render the upload page; on POST, log the submitted file's name."""
    posted = request.method == 'POST'
    if posted:
        print(request.FILES['document'].name)
    return render(request, 'app/upload.html')
| samarth-ty/pdf_parser | app/views.py | views.py | py | 5,994 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 20,
... |
5230043340 | import tempfile
import pdfkit
from django.contrib import messages
from django.http import FileResponse
from django.shortcuts import render
from django.template.loader import get_template
from rest_framework import generics
from rest_framework.permissions import (
SAFE_METHODS,
BasePermission,
IsAuthenticated)
from .models import Event
from .models import Institution
from .performance_scheduling import get_event, schedule_performances_for_each_theater, generate_time_table
from .serializers import InstitutionSerializer
# Absolute path to the wkhtmltopdf binary used by pdfkit below.
WKHTMLTOPDF_PATH: str = '/usr/local/bin/wkhtmltopdf'
class IsOwnerOrReadOnly(BasePermission):
    """Object-level permission: reads are open to everyone, writes only to
    the head of the institution that owns the object."""
    message = ('Updating of institution details and deregistration'
               ' restricted to Head of Institution Only')

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) HTTP methods always pass; mutating requests must
        # come from the institution's head.
        if request.method in SAFE_METHODS:
            return True
        return obj.head_of_institution == request.user
class RegisterInstitutions(generics.RetrieveUpdateAPIView):
    """
    Update institution details once heads of institutions are registered into the system
    """
    # Reads are open to authenticated users; updates require the requester
    # to be the institution's head (IsOwnerOrReadOnly). The record is looked
    # up by its `id` field via the `institution_pk` URL keyword argument.
    queryset = Institution.objects.all()
    serializer_class = InstitutionSerializer
    permission_classes = [IsAuthenticated, IsOwnerOrReadOnly]
    lookup_field = 'id'
    lookup_url_kwarg = 'institution_pk'
def schedule_performances(request, festival_event):
    """Django view: build the performance time-table for *festival_event*
    and return it as a generated PDF (FileResponse).

    The scheduler raises ZeroDivisionError when no theaters are registered
    to the event; in that case an error message is queued and the bare
    time-table template is rendered instead.
    """
    list(messages.get_messages(request))  # Clear messages.
    event = Event.objects.get(pk=festival_event)
    try:
        get_event(event=event)
        performances_for_each_theater: dict = schedule_performances_for_each_theater()
        time_table = generate_time_table(performances_for_each_theater)
        event_level: str = event.event_level.lower()
        ctx = {
            'time_table': time_table,
            'event': event,
            'event_level': event_level
        }
        # Getting template, and rendering data
        template = get_template('time_table.html')
        html = template.render(ctx)
        # Function for creating file name
        # Inner function
        def create_file_name():
            # NOTE(review): str.strip() only trims leading/trailing
            # whitespace; spaces inside the event name stay in the file name.
            file_name = 'time_table_%s.pdf' % event
            return file_name.strip()
        filename = create_file_name()
        config = pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_PATH)
        # wkhtmltopdf rendering options (A4 with narrow margins).
        options = {
            'page-size': 'A4',
            'margin-top': '0.25in',
            'margin-right': '0.3in',
            'margin-bottom': '0.25in',
            'margin-left': '0.3in',
            'encoding': 'UTF-8',
            'custom-header': [
                ('Accept-Encoding', 'gzip')
            ],
            'no-outline': None,
            'enable-local-file-access': None,
            'enable-javascript': None,
        }
        with tempfile.NamedTemporaryFile(prefix=filename, suffix=".pdf") as f:
            pdfkit.from_string(html, f.name, configuration=config, options=options)
            # NOTE(review): the temp file is deleted when the with-block
            # exits while the response still holds an open handle -- fine on
            # POSIX, verify behaviour on Windows.
            return FileResponse(open(f.name, 'rb'), content_type='application/pdf')
    except ZeroDivisionError:
        messages.add_message(request, messages.ERROR, 'No theaters registered to the event!')
        return render(request, 'time_table.html')
| mumoj/Music-Festival-Scheduling | performances/views.py | views.py | py | 3,203 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.permissions.BasePermission",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.SAFE_METHODS",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.RetrieveUpdateAPIView",
"line_numbe... |
12565261862 | # -*- coding: utf-8 -*-
import itertools
from Text_terms import *
from Symmetrical_summ import *
from Templates import *
# Recompute term weights using positional / structural cues of the text.
def countFinalWeights(tf_weights, stemmed_text, stemmed_pnn):
    """Adjust raw tf weights by structural cues and keep above-average terms.

    tf_weights   -- dict: stem -> tf weight
    stemmed_text -- StructuredText of paragraphs; each sentence is a list of
                    (stem, word) pairs (assumed from usage -- TODO confirm)
    stemmed_pnn  -- stems treated as proper nouns

    Returns a dict of the terms whose adjusted weight exceeds the mean.
    """
    weighted_terms = dict(tf_weights.items())
    total_sents_in_text = stemmed_text.len
    # First and last sentence of every non-empty paragraph.
    collection_of_first_last_sents = list(itertools.chain.from_iterable(
        (paragraph[0], paragraph[-1]) if len(paragraph) > 1 else (paragraph[0],)
        for paragraph in stemmed_text.text if paragraph))
    # (stem, word) pairs drawn from those boundary sentences.
    pairs_collection_of_first_last_sents = list(
        itertools.chain.from_iterable(collection_of_first_last_sents))
    stems_collection_of_first_last_sents = {
        pair[0] for pair in pairs_collection_of_first_last_sents }
    # Number of distinct stems in first/last sentences.
    total_stems_in_first_last = len(stems_collection_of_first_last_sents)
    # How many of those stems are dictionary (tf-weighted) terms.
    total_dictwords_in_first_last = 0
    for s1 in stems_collection_of_first_last_sents:
        if s1 in tf_weights:
            total_dictwords_in_first_last += 1
    # Total number of stems in the whole text.
    total_stems_in_text = len(list(itertools.chain.from_iterable(stemmed_text.sentences())))
    # Share of dictionary terms among first/last-sentence stems.
    avg_dictwords_in_first_last = total_dictwords_in_first_last / total_stems_in_first_last
    # Share of first/last-sentence stems in the whole text.
    avg_stems_in_first_last = total_stems_in_first_last / total_stems_in_text
    # Interrogative / exclamatory sentences (last token is '?' or '!').
    collection_of_q_excl_sents = [
        sentence
        for sentence in stemmed_text.sentences()
        if sentence and sentence[-1][0] in {'?', '!'}]
    # Stems occurring in those sentences, punctuation excluded.
    stems_collection_of_q_excl_sents = {tpl[0] for tpl in itertools.chain.from_iterable(collection_of_q_excl_sents) if tpl[0] not in '?!'}
    # Count of interrogative / exclamatory sentences.
    num_of_q_excl_sents = len(collection_of_q_excl_sents)
    """
    если термины есть в первых и последн. предложениях абзацев, то вес термина
    умножаем на частное среднего кол-ва терминов из словаря в первых и посл. предл.
    и среднего кол-ва терминов в первых и последн. предл-х.
    """
    # Boost terms occurring in paragraph-boundary sentences by the ratio of
    # the two shares computed above.
    for term in stems_collection_of_first_last_sents:
        if term in weighted_terms:
            weighted_terms[term] *= avg_dictwords_in_first_last / avg_stems_in_first_last
    """
    если термины есть в вопросительных и восклицательных предложениях,
    то умножаем вес термина на частное от кол-ва таких предложений
    и общего кол-ва предложений текста
    """
    # Scale terms from ?/! sentences by the share of such sentences.
    for term in stems_collection_of_q_excl_sents:
        if term in weighted_terms:
            weighted_terms[term] *= num_of_q_excl_sents / total_sents_in_text
    """
    если термины из словаря - это "имена собственные", то умножаем вес
    термина на частное среднего кол-ва терминов из словаря в первых и посл. предл.
    и среднего кол-ва терминов в первых и последн. предл-х.
    """
    # Apply the same paragraph-boundary boost to proper-noun terms.
    for term in stemmed_pnn:
        if term in weighted_terms:
            weighted_terms[term] *= avg_dictwords_in_first_last / avg_stems_in_first_last
    mean_weight = sum(weighted_terms.values()) / len(weighted_terms)
    # Keep only terms strictly above the mean adjusted weight.
    sorted_tf = {term : weight
                 for term, weight in weighted_terms.items()
                 if weight > mean_weight}
    return sorted_tf
# Convert symmetry-space weights back to sentences, optionally rescoring
# each sentence by indicator-phrase matches.
def convertFinalWeights(symmetry, symm_weights, ordinary_sents, indicators = True, adj = False):
    if indicators:
        """
        Здесь стоит надстройка, что пересчитывает веса в зависимости от индикаторов
        """
        # Rescore each sentence: the more indicator-pattern words it
        # contains, the larger the multiplier applied to its weight.
        processor = TextProcessor(flag = adj)
        result = []
        for (counter, weight),\
            (index, original),\
            ( _ , sentence)\
            in \
            zip(symm_weights,
                enumerate(ordinary_sents),
                processor.parse(ordinary_sents)
                ):
            search_result = processor.apply(sentence)
            weight_indicator = 1
            if search_result:
                for lst in search_result:
                    if lst:
                        # pattern_len: length of the matched indicator pattern;
                        # word_num: how many of its words occur in the sentence
                        # (assumed from usage -- TODO confirm TextProcessor API).
                        pattern_len = list(lst[0].values())[1][2]
                        word_num = list(lst[0].values())[1][1]
                        # Longer patterns need more matched words for a boost.
                        if pattern_len > 3:
                            if word_num >= 3:
                                weight_indicator += 1
                            elif word_num == 2:
                                weight_indicator += 0.5
                        elif pattern_len == 3:
                            if word_num >= 2:
                                weight_indicator += 1
                            elif word_num == 1:
                                weight_indicator += 0.5
                        else:
                            if word_num >= pattern_len:
                                weight_indicator += 1
            # Presumably `counter` holds the sentence's term counts, so this
            # drops very short sentences (<= 6 entries) -- TODO confirm.
            if len(counter) > 6:
                result.append((original, weight * weight_indicator, index))
        return sorted(result, key=lambda x: x[1], reverse=True)
    else:
        # No indicator rescoring: plain symmetry-to-sentence conversion.
        return symmetry.convertSymmetryToOrdinary(symm_weights, ordinary_sents)
# Percentage-based selection of the highest-weighted sentences.
def selectFinalSents(converted_sents, percentage=10):
    """Keep the top *percentage* percent of sentences, in document order.

    ``converted_sents`` must already be sorted by weight (descending); each
    entry is ``(sentence, weight, position)``.  The first *n* entries are
    taken, where *n* is *percentage* percent of the list rounded to the
    nearest integer, and returned sorted by original position so the
    summary keeps some of the source text's coherence.
    """
    keep_count = int(len(converted_sents) * percentage / 100 + 0.5)
    top_entries = converted_sents[:keep_count]
    return sorted(top_entries, key=lambda entry: entry[2])
# Extractive summarizer for Russian text.
class SUMMARIZER():
    """Pipeline: segment -> tokenize/stem -> weight terms -> weight
    sentences -> keep the top *percentage* of sentences in document order."""
    def __init__(self):
        self.language = 'ru'
        self.stopwords = list(stopwords.words('russian'))
        # Token filter: hyphenated words, plain words, or '!'/'?' marks.
        self.re_term = re.compile("[\wа-яА-Я]+\-[\wа-яА-Я]+|[\wа-яА-Я]+|[!?]")
        self.symmetry = SymmetricalSummarizationWeightCount()
    def check(self, term):
        # Keep a term if it is not a stopword and matches the token pattern.
        if term in self.stopwords:
            return False
        elif re.match(self.re_term, term):
            return True
        else:
            return False
    def summarize(self, file_name, output_name, indicators = True, adj = False, percentage = 10):
        """Summarize *file_name* into *output_name* (one sentence per line).

        indicators -- rescore sentences by indicator phrases
        adj        -- passed to TextProcessor (presumably toggles adjective
                      handling -- TODO confirm)
        percentage -- share of sentences to keep
        """
        # NOTE(review): the input file handle is never closed explicitly.
        file = open(file_name, 'r')
        text = StructuredText(text_segmentor(file.read()))
        res = ''
        if text.len >= 3:
            # Sentence stemming.
            # Text without stopwords: (stem, word) pairs, grouped by paragraph.
            STEMMED_SENTENCES = StructuredText(text.map_sent(
            lambda sentence:
                [(normalize(term), term)
                for term
                in word_tokenize(sentence)
                if self.check(term)] ))
            # Flat list of all (stem, word) pairs / all stems.
            BIG_LIST_OF_PAIRS = list(itertools.chain.from_iterable(STEMMED_SENTENCES.sentences()))
            LIST_OF_STEMS = [pair1[0] for pair1 in BIG_LIST_OF_PAIRS]
            if LIST_OF_STEMS:
                # (term, relative frequency) pairs, truncated at the mean frequency.
                TOTAL_STEM_COUNT = dict(simpleTermFreqCount(LIST_OF_STEMS))
                # Proper-noun-like stems.
                STEMMED_PNN = lookForProper(STEMMED_SENTENCES.text)
                # Terms with structurally adjusted weight coefficients.
                SORTED_TFIDF = countFinalWeights(TOTAL_STEM_COUNT, STEMMED_SENTENCES, STEMMED_PNN)
                SORTED_TFIDF = sorted(SORTED_TFIDF.items(), key=lambda w: w[1], reverse=True)
                # Per-sentence term-frequency counters.
                S_with_termfreqs = [Counter([word[0] for word in sentence]) for sentence in STEMMED_SENTENCES.sentences()]
                # Total number of stems in the text.
                TOTAL_STEMS_IN_TEXT = len(LIST_OF_STEMS)
                # Total number of sentences in the text.
                TOTAL_SENTS_IN_TEXT = len(text.sentences())
                # Recompute weights in symmetry space.
                SYMMETRICAL_WEIGHTS = self.symmetry.countFinalSymmetryWeight(
                    SORTED_TFIDF, S_with_termfreqs,
                    TOTAL_STEMS_IN_TEXT, TOTAL_SENTS_IN_TEXT,
                    STEMMED_PNN)
                # Sentence selection.
                ORIGINAL_SENTENCES = convertFinalWeights(
                    self.symmetry,
                    SYMMETRICAL_WEIGHTS,
                    text.sentences(),
                    indicators, adj)
                #print(ORIGINAL_SENTENCES)
                res = selectFinalSents(ORIGINAL_SENTENCES, percentage)
            else:
                print("There are no words to process!")
        else:
            print("Text should be at least 3 sentences long.")
        # The result is written to a separate file, one sentence per line.
        with open(output_name, 'w') as output_file:
            for sent3 in range(len(res)):
                #output_file.write(str(res[sent3][2]) + ',')  # emit indices of selected sentences
                output_file.write(res[sent3][0] + '\n')
                print(res[sent3][0])
        '''
        output_file.write(str(len(text.sentences()))) # для выдачи № выбранных предложений
        print(len(res)) # для подсчета длины реферата
        for i, x in enumerate(text.sentences()): # для получения разбиения на предложения
            output_file.write(str(i+1) + '. ' + x + '\n')'''
if __name__ == '__main__':
Sum = SUMMARIZER()
Sum.summarize('text.txt', 'output.txt', indicators = True, adj = True, percentage = 60) | Svetych/aspect_ats_system | Auto_text_summ.py | Auto_text_summ.py | py | 12,128 | python | ru | code | 1 | github-code | 1 | [
{
"api_name": "itertools.chain.from_iterable",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 21,
"usage_type": "call"
},
{
"... |
42152249060 | """
Define commonly used dialogs.
"""
import ceGUI
import cx_Exceptions
import cx_Logging
import os
import wx
__all__ = [ "AboutDialog", "PreferencesDialog", "SelectionListDialog",
"SelectionCheckListDialog", "SelectionTreeDialog" ]
class AboutDialog(ceGUI.Dialog):
    """Modal "About" dialog showing the application's name, version and
    copyright, with a single centered OK button."""
    baseSettingsName = "w_About"
    createCancelButton = False
    saveSize = False
    def __init__(self, parent):
        super(AboutDialog, self).__init__(parent, wx.CAPTION)
    def OnCreate(self):
        # Compose the about text from the application metadata; version and
        # copyright lines are only added when configured on the app object.
        app = wx.GetApp()
        self.SetTitle("About %s" % app.description)
        self.panel = ceGUI.Panel(self, wx.SUNKEN_BORDER)
        aboutText = app.description
        if app.version is not None:
            aboutText = "%s\n\nVersion %s" % (aboutText, app.version)
        if app.copyrightOwner is not None:
            # Collapse "2005-2005" style ranges to a single year.
            if app.copyrightStartYear == app.copyrightEndYear:
                copyrightYears = app.copyrightEndYear
            else:
                copyrightYears = "%s-%s" % \
                    (app.copyrightStartYear, app.copyrightEndYear)
            aboutText = "%s\n\nCopyright %s\n%s" % \
                    (aboutText, copyrightYears, app.copyrightOwner)
        self.aboutLabel = wx.StaticText(self.panel, -1, aboutText,
                style = wx.ALIGN_CENTRE)
    def OnLayout(self):
        # Label centered in the panel, OK button centered underneath.
        panelSizer = wx.BoxSizer(wx.VERTICAL)
        panelSizer.Add(self.aboutLabel, flag = wx.ALL | wx.EXPAND, border = 5)
        self.panel.SetSizer(panelSizer)
        topSizer = wx.BoxSizer(wx.VERTICAL)
        topSizer.Add(self.panel, flag = wx.ALL | wx.EXPAND, border = 5)
        topSizer.Add(self.okButton,
                flag = wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | \
                        wx.ALIGN_CENTER_VERTICAL, border = 5)
        return topSizer
class LoggingPreferencesPane(ceGUI.Panel):
    """Preferences page controlling cx_Logging: target file name and level."""
    loggingLevels = "DEBUG INFO WARNING ERROR CRITICAL".split()
    def OnCreate(self):
        # File name entry with a "..." browse button, plus a level chooser.
        self.fileNameLabel = wx.StaticText(self, -1, "File Name:")
        self.fileNameField = wx.TextCtrl(self, -1)
        self.selectFileNameButton = wx.Button(self, -1, "...",
                size = (25, -1))
        self.BindEvent(self.selectFileNameButton, wx.EVT_BUTTON,
                self.OnSelectFileName)
        self.levelLabel = wx.StaticText(self, -1, "Level:")
        self.levelField = wx.Choice(self, -1, choices = self.loggingLevels)
    def OnLayout(self):
        fileNameSizer = wx.BoxSizer(wx.HORIZONTAL)
        fileNameSizer.Add(self.fileNameField, proportion = 1, flag = wx.RIGHT,
                border = 5)
        fileNameSizer.Add(self.selectFileNameButton)
        fieldLayout = self.CreateFieldLayout(self.fileNameLabel, fileNameSizer,
                self.levelLabel, self.levelField)
        topSizer = wx.BoxSizer(wx.VERTICAL)
        topSizer.Add(fieldLayout, flag = wx.ALL | wx.EXPAND, border = 5,
                proportion = 1)
        return topSizer
    def OnSelectFileName(self, event):
        # Prompt for a *.log file, starting from the current value, and
        # force a .log extension on the chosen name.
        currentFileName = self.fileNameField.GetValue()
        dir, fileName = os.path.split(currentFileName)  # NOTE(review): 'dir' shadows the builtin
        dialog = wx.FileDialog(self, "Select log file", wildcard = "*.log",
                defaultDir = dir, defaultFile = fileName,
                style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if dialog.ShowModal() == wx.ID_OK:
            fileName = dialog.GetPath()
            if not fileName.lower().endswith(".log"):
                fileName += ".log"
            self.fileNameField.SetValue(fileName)
    def RestoreSettings(self):
        # Populate the fields from saved settings, falling back to the
        # application's default log file name and the ERROR level.
        defaultFileName = wx.GetApp().GetDefaultLoggingFileName()
        fileName = self.settings.Read("LogFileName", defaultFileName)
        self.fileNameField.SetValue(fileName)
        levelName = self.settings.Read("LogLevel", "ERROR")
        self.levelField.SetStringSelection(levelName)
    def SaveSettings(self):
        # Validate, apply to cx_Logging (restart logging only if the file
        # name changed, otherwise just adjust the level), then persist.
        fileName = self.fileNameField.GetValue()
        if not fileName:
            self.fileNameField.SetFocus()
            raise LoggingFileNameNotSpecified()
        levelName = self.levelField.GetStringSelection()
        level = getattr(cx_Logging, levelName)
        if fileName != cx_Logging.GetLoggingFileName():
            cx_Logging.StartLogging(fileName, level)
        elif level != cx_Logging.GetLoggingLevel():
            cx_Logging.SetLoggingLevel(level)
        self.settings.Write("LogFileName", fileName)
        self.settings.Write("LogLevel", levelName)
class PreferencesDialog(ceGUI.StandardDialog):
    """Preferences dialog hosting a notebook of preference panes; a Logging
    pane is always appended after any application-defined pages."""
    baseSettingsName = "w_Preferences"
    defaultSize = (450, 157)
    def __init__(self, parent):
        # NOTE(review): deliberately calls wx.Dialog.__init__ directly
        # (bypassing the ceGUI.StandardDialog chain) before _Initialize().
        wx.Dialog.__init__(self, parent, -1, "Edit Preferences",
                style = wx.CAPTION | wx.RESIZE_BORDER)
        self._Initialize()
    def OnCreate(self):
        self.notebook = ceGUI.Notebook(self, -1)
        self.OnCreateNotebook()  # subclass hook: application pages first
        pane = LoggingPreferencesPane(self.notebook)
        self.notebook.AddPage(pane, "Logging")
        self.notebook.RestoreSettings()
    def OnCreateNotebook(self):
        # Hook for subclasses to add their own preference pages.
        pass
    def OnOk(self):
        # Each pane saves (and applies) its own settings, then flush to disk.
        self.notebook.SaveSettings()
        self.settings.Flush()
    def OnLayout(self):
        topSizer = wx.BoxSizer(wx.VERTICAL)
        topSizer.Add(self.notebook, proportion = 1,
                flag = wx.EXPAND | wx.RIGHT | wx.LEFT | wx.TOP, border = 5)
        return topSizer
    def RestoreSettings(self):
        # Panes restore themselves in OnCreate; nothing to do here.
        pass
    def SaveSettings(self):
        # Saving happens in OnOk via the notebook; nothing to do here.
        pass
class SelectionListDialog(ceGUI.StandardDialog):
    """Dialog presenting a list control from which the user picks one or
    more items; OK is enabled only while a selection exists, and activating
    (double-clicking) an item accepts the dialog immediately."""
    listClassName = "List"
    selectFirstItem = True
    def _GetList(self):
        # Resolve the list class by name so subclasses can substitute it.
        cls = self._GetClass(self.listClassName)
        return cls(self, wx.SUNKEN_BORDER)
    def GetSelectedItem(self):
        return self.selectionList.GetSelectedItem()
    def GetSelectedItems(self):
        return self.selectionList.GetSelectedItems()
    def OnCreate(self):
        # OK starts disabled until something is selected.
        self.okButton.Enable(False)
        self.selectionList = self._GetList()
        self.selectionList.SetFocus()
        self.BindEvent(self.selectionList, wx.EVT_LIST_ITEM_SELECTED,
                self.OnItemSelected)
        self.BindEvent(self.selectionList, wx.EVT_LIST_ITEM_DESELECTED,
                self.OnItemDeselected)
        self.BindEvent(self.selectionList, wx.EVT_LIST_ITEM_ACTIVATED,
                self.OnItemActivated)
        self.BindEvent(self.selectionList, wx.EVT_CHAR,
                self.OnCharPressed)
    def OnCharPressed(self, event):
        # Ctrl-A selects everything when multiple selection is allowed.
        key = event.GetKeyCode()
        if key == 1 and not self.selectionList.singleSelection: # Ctrl-A
            self.selectionList.SelectAll()
        event.Skip()
    def OnItemActivated(self, event):
        # Double-click / Enter: accept the dialog if something is selected.
        if self.GetSelectedItems():
            self._OnOk(event)
            self.EndModal(wx.ID_OK)
    def OnItemDeselected(self, event):
        if self.selectionList.GetSelectedItemCount() == 0:
            self.okButton.Enable(False)
    def OnItemSelected(self, event):
        self.okButton.Enable()
    def OnLayout(self):
        topSizer = wx.BoxSizer(wx.VERTICAL)
        topSizer.Add(self.selectionList, proportion = 1, flag = wx.EXPAND)
        return topSizer
    def RestoreSettings(self):
        self.selectionList.RestoreColumnWidths()
    def Retrieve(self, *args):
        # Populate the list and pre-select the first row when configured.
        self.selectionList.Retrieve(*args)
        if self.selectFirstItem and len(self.selectionList.rowHandles) > 0:
            handle = self.selectionList.rowHandles[0]
            item = self.selectionList.dataSet.rows[handle]
            self.selectionList.SelectItems([item])
    def SaveSettings(self):
        self.selectionList.SaveColumnWidths()
    def SelectItems(self, items):
        self.selectionList.SelectItems(items)
class SelectionCheckListDialog(ceGUI.StandardDialog):
    """Dialog presenting a check-list; a thin wrapper that delegates all
    operations to the list control resolved from *listClassName*."""
    listClassName = "List"
    checkedAttrName = "checked"
    def _GetList(self):
        # Resolve the list class by name so subclasses can substitute it.
        cls = self._GetClass(self.listClassName)
        return cls(self, wx.SUNKEN_BORDER)
    def CheckAllItems(self):
        self.list.CheckAllItems()
    def GetCheckedItems(self):
        return self.list.GetCheckedItems()
    def OnCreate(self):
        self.list = self._GetList()
    def OnLayout(self):
        topSizer = wx.BoxSizer(wx.VERTICAL)
        topSizer.Add(self.list, proportion = 1, flag = wx.EXPAND)
        return topSizer
    def RestoreSettings(self):
        self.list.RestoreColumnWidths()
    def Retrieve(self, *args):
        self.list.Retrieve(*args)
    def SaveSettings(self):
        self.list.SaveColumnWidths()
    def UncheckAllItems(self):
        self.list.UncheckAllItems()
class SelectionTreeDialog(ceGUI.StandardDialog):
    """Standard dialog that lets the user pick one node from a tree.

    Subclasses name the tree control class via ``treeClassName`` and may
    override CanSelectItem() to veto selection of particular nodes.
    """
    # Name of the tree class resolved through self._GetClass().
    treeClassName = "Tree"
    def _GetTree(self):
        """Instantiate the tree control class named by treeClassName."""
        cls = self._GetClass(self.treeClassName)
        return cls(self, -1, style = wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | \
                wx.TR_LINES_AT_ROOT)
    def _OnItemActivated(self, event):
        """Double-click/Enter accepts the dialog if the node is selectable."""
        itemId = event.GetItem()
        item = self.selectionTree.GetItemData(itemId)
        if self.CanSelectItem(item.data):
            self.EndModal(wx.ID_OK)
    def _OnSelectionChanged(self, event):
        """Enable the OK button only while a selectable node is current."""
        # NOTE(review): the truthiness test presumably guards against events
        # delivered before OnCreate finished or after widget destruction --
        # confirm against wx window truthiness semantics.
        if self.okButton and self.selectionTree:
            itemId = event.GetItem()
            item = self.selectionTree.GetItemData(itemId)
            self.okButton.Enable(self.CanSelectItem(item.data))
    def CanSelectItem(self, item):
        """Hook for subclasses; return False to veto selecting ``item``."""
        return True
    def GetSelectedItem(self):
        """Return the currently selected tree item."""
        return self.selectionTree.GetSelectedItem()
    def GetSelectedItemParents(self):
        """Return the parents of the currently selected tree item."""
        item = self.selectionTree.GetSelectedItem()
        return self.selectionTree.GetItemParents(item)
    def OnCreate(self):
        """Create the tree control and wire up its events; OK starts
        disabled until a selectable node is chosen."""
        self.okButton.Enable(False)
        self.selectionTree = self._GetTree()
        self.BindEvent(self.selectionTree, wx.EVT_TREE_SEL_CHANGED,
                self._OnSelectionChanged)
        self.BindEvent(self.selectionTree, wx.EVT_TREE_ITEM_ACTIVATED,
                self._OnItemActivated)
    def OnLayout(self):
        """Lay out the tree so it fills the whole dialog."""
        topSizer = wx.BoxSizer(wx.VERTICAL)
        topSizer.Add(self.selectionTree, proportion = 1, flag = wx.EXPAND)
        return topSizer
    def RestoreSettings(self):
        # No persisted settings for the tree dialog.
        pass
    def SaveSettings(self):
        # No persisted settings for the tree dialog.
        pass
class LoggingFileNameNotSpecified(cx_Exceptions.BaseException):
    """Raised when logging is configured without a target file name."""
    message = "Logging file name must be specified."
| anthony-tuininga/cx_PyGenLib | ceGUI/CommonDialogs.py | CommonDialogs.py | py | 10,197 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "ceGUI.Dialog",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "wx.CAPTION",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "wx.GetApp",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "ceGUI.Panel",
"line_n... |
3237033272 | # -*- coding: utf-8 -*-
# Traffic Data Process Automatically
# Xin Meng
# 2015.11.02
# This script is used for process the traffic txt file.
# 1. Set the configure
# 2. Run this script and get the result.
import csv
import os
import logging
import re
import utils.network
__author__ = 'xin'
# configuration variable
# OutputCSVDir = ""
# ReportDir = ""
# NormalDBDir = ""
# InputNormalDir = ""
# InputInfectDir = ""
# The five important directory settings below are filled in by load_conf().
# Directory that receives the generated csv files.
output_csv_dir = ""
# Directory that receives the reports.
report_dir = ""
# Directory holding the normal-packet database.
normal_db_dir = ""
# Directory containing the normal input captures.
input_normal_dir = ""
# Directory containing the infected input captures.
input_infect_dir = ""
# File names used inside normal_db_dir and the csv suffix for output files.
normal_db_file = "normal_db.txt"
public_ip_range_file = "public_ip_range.txt"
output_extension = ".csv"
# remember if you want to change the global variance in function, you need use global xxx
# (This whole group was previously declared twice back-to-back -- a copy/paste
# duplicate; the redundant second copy has been removed.)
# Number of entries currently loaded into the normal packet database.
normal_db_size = 0
# Current Path (the Script)
path = os.path.split(os.path.realpath(__file__))[0]
# The Scan Information: counts and names of the input capture files,
# filled in by scan_input_data().
num_normal_input_file = 0
num_infect_input_file = 0
normal_input_file_list = []
infect_input_file_list = []
# Global statistic information: packet/labelling counters.
num_normal_packet = 0
num_packet_label_infect = 0
num_packet_label_normal = 0
def match_normal_db(b_infect_packet, public_ip_range, str_normal_db):
    """Label one packet from an infected capture.

    Returns 1 when the packet looks normal -- either one of its addresses
    lies inside a known public IP range, or its "src dst" integer pair
    occurs in the normal database -- otherwise 0.

    b_infect_packet: parsed packet fields; [0]/[1] are integer src/dst IPs.
    public_ip_range: list of CIDR strings ("a.b.c.d/n" with newline).
    str_normal_db:   normal-db contents as one string.  NOTE: the function
                     appends the db file only to its *local* binding below,
                     so the caller's string is never modified.

    Python 2 only: uses the long() builtin.
    """
    # Re-read the db file and append it to the local search string; 'a+'
    # creates the file when it does not exist instead of raising.
    flag_public_range = 0
    with open(normal_db_dir + normal_db_file, 'a+') as normal_db:
        for normal_db_line in normal_db:
            str_normal_db += normal_db_line
    for public_ip_range_line in public_ip_range:
        # compare the b_infect_packet ip source or ip destination with the IP range whether in the range
        # Because there is one of the two IP is the mobile IP address, so we just find one of [0] [1]
        # in the range, we decide the packet is normal
        if 1 == in_ip_range(public_ip_range_line, b_infect_packet[0]) or \
                        1 == in_ip_range(public_ip_range_line, b_infect_packet[1]):
            flag_public_range = 1
            break
        else:
            flag_public_range = 0
    if 1 == flag_public_range:
        # the infect packet ip in the public ip range
        return 1
    else:
        # Build "src dst" (decimal integer form) and look for it anywhere in
        # the normal db via a regular expression (faster than the old
        # per-entry comparison loop).
        infect_ip_source_str = str(long(b_infect_packet[0]))
        infect_ip_des_str = str(long(b_infect_packet[1]))
        # infect_ip_source_str_array = infect_ip_source_str.split('.')
        # infect_ip_des_str_array = infect_ip_des_str.split('.')
        str_pattern = ".*" + infect_ip_source_str + " " + infect_ip_des_str
        pattern = re.compile(str_pattern)
        match = pattern.match(str_normal_db)
        if match:
            return 1
        else:
            return 0
####################################################
# FUNCTION: Load Configuration
# RETURN: InputNormalDir
# InputInfectDir
# OutputCSVDir
# ReportDir
# NormalDBDir
####################################################
def load_conf():
    """Read TrafficDataProcessAuto_pc.cfg (next to this script) and fill in
    the five module-level directory settings.

    Python 2 only: imports the ConfigParser module (renamed configparser in
    Python 3) and uses the deprecated readfp().
    """
    # Mainly fetches the directory settings from the [DIR] section.
    global output_csv_dir
    global report_dir
    global normal_db_dir
    global input_normal_dir
    global input_infect_dir
    # from __future__ import with_statement
    # DataProcess.cfg
    import ConfigParser
    config = ConfigParser.ConfigParser()
    # G:\2015-11-01-Research-experiment\0_MobileBotnetAnalysisLab\TrafficDataProcessAuto_pc.cfg
    # NOTE(review): "\T" is a literal backslash + T (not an escape), so this
    # path only works on Windows; os.path.join would be portable.
    with open(path + "\TrafficDataProcessAuto_pc.cfg", "r") as cfgfile:
        # with open(path+"\DataProcess.cfg", "r") as cfgfile:
        config.readfp(cfgfile)
        input_dir = config.get("DIR", "InputDir")
        normal_dir = config.get("DIR", "NormalDir")
        infect_dir = config.get("DIR", "InfectDir")
        output_csv_dir = os.path.normcase(config.get("DIR", "OutputDir"))
        report_dir = os.path.normcase(config.get("DIR", "ReportDir"))
        normal_db_dir = os.path.normcase(config.get("DIR", "NormalDB"))
        input_normal_dir = os.path.normcase(input_dir + normal_dir)
        input_infect_dir = os.path.normcase(input_dir + infect_dir)
    # return (InputNormalDir,InputInfectDir,OutputCSVDir,ReportDir,NormalDBDir)
####################################################
# FUNCTION: Scan the Input Data directory
####################################################
def scan_input_data():
    """Load the configuration, list the normal and infect input directories
    into the module-level file lists/counters, and print a summary."""
    global num_normal_input_file
    global num_infect_input_file
    global normal_input_file_list
    global infect_input_file_list
    # Load Configuration File to get parameters
    load_conf()
    # List the normal and infect files
    logger.info(input_normal_dir)
    logger.info(input_infect_dir)
    normal_input_file_list = os.listdir(input_normal_dir)
    infect_input_file_list = os.listdir(input_infect_dir)
    num_normal_input_file = len(normal_input_file_list)
    num_infect_input_file = len(infect_input_file_list)
    logger.info(num_infect_input_file)
    logger.info(num_normal_input_file)
    # Show the result of scanning (level 1: normal files only)
    show_scan_result(1)
####################################################
# FUNCTION1: Show Scanning Result
# Input: The level of show 0-Infect 1-Normal 2-Normal&Infect
####################################################
def show_scan_result(level=2):
    """Print the result of the input-directory scan.

    level selects what is shown: 0 -> infect files only, 1 -> normal files
    only, anything else (default 2) -> both.  Reads the module globals
    filled in by scan_input_data().
    """
    print("--------------------Scanning Result--------------------")
    if level > 0:
        # Normal captures (levels 1 and 2).
        print("The number of Normal file:" + str(num_normal_input_file))
        print("Files:")
        for idx in range(num_normal_input_file):
            print("Input " + str(idx + 1) + " to process: " + normal_input_file_list[idx])
    if level != 1:
        # Infect captures (levels 0 and 2).
        print("The number of Infect file:" + str(num_infect_input_file))
        print("Files:")
        for idx in range(num_infect_input_file):
            print("Input " + str(idx + 1) + " to process: " + infect_input_file_list[idx])
    print("--------------------Scanning End--------------------")
####################################################
# FUNCTION: ip2int
# INPUT: IP
# RETURN: int of the IP
####################################################
def ip2int(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    import socket
    import struct
    packed = socket.inet_aton(ip)          # 4 network-order bytes
    (value,) = struct.unpack("!I", packed)  # big-endian unsigned int
    return value
####################################################
# FUNCTION: int2ip
# INPUT: int of IP
# RETURN: IP
####################################################
def int2ip(i):
    """Convert a 32-bit integer to its dotted-quad IPv4 string form."""
    from socket import inet_ntoa
    from struct import pack
    raw = pack("!I", i)  # big-endian unsigned int -> 4 bytes
    return inet_ntoa(raw)
####################################################
########## FUNCTION: change the IP/subset to IP range
########## Input: "216.239.32.0/19"
########## Return: IP range IP cmin cmax 216.239.32.0 216.239.63.255
####################################################
def subnet_mask_to_ip_range(iplist):
    """Expand CIDR notation ("216.239.32.0/19") into the lowest and highest
    dotted-quad addresses of the block.

    Returns a (cmin, cmax) tuple of strings, e.g.
    ("216.239.32.0", "216.239.63.255").  A trailing newline after the
    prefix length (as read from public_ip_range.txt) is tolerated by int().

    Fixes over the previous version:
    * floor division (256 // 2**(ti % 8)) keeps the bucket size an int on
      Python 3 -- true division produced floats and garbage address strings;
    * bucket membership is now ``mymin <= data <= mymax``: the old test
      ``data < mymax`` excluded the top address of every bucket, so e.g.
      "10.0.0.127/25" raised UnboundLocalError because no bucket matched.
    """
    ip, prefix = iplist.split('/')
    ti = int(prefix)                  # prefix length
    d = ti // 8                       # octets fully fixed by the prefix
    c = 256 // (2 ** (ti % 8))        # bucket size inside the partial octet
    ip_items = ip.split('.')
    if len(ip_items[d:]) == 1:
        # Only the last octet is (partially) variable.
        if ti % 8 == 0:
            cmin = '%s.%s' % ('.'.join(ip_items[:d]), '0')
            cmax = '%s.%s' % ('.'.join(ip_items[:d]), '255')
        else:
            data = int(ip_items[d])
            for i in range(2 ** (ti % 8)):
                mymin = i * c
                mymax = (i + 1) * c - 1
                if mymin <= data <= mymax:
                    cmin = '%s.%s' % ('.'.join(ip_items[:d]), mymin)
                    cmax = '%s.%s' % ('.'.join(ip_items[:d]), mymax)
                    break
    else:
        # Several trailing octets are fully variable.
        tail = len(ip_items) - d - 1
        if ti % 8 == 0:
            cmin = '%s.%s.%s' % ('.'.join(ip_items[:d]), '0', ('0.' * tail)[:-1])
            cmax = '%s.%s.%s' % ('.'.join(ip_items[:d]), '255', ('255.' * tail)[:-1])
        else:
            data = int(ip_items[d])
            for i in range(2 ** (ti % 8)):
                mymin = i * c
                mymax = (i + 1) * c - 1
                if mymin <= data <= mymax:
                    cmin = '%s.%s.%s' % ('.'.join(ip_items[:d]), mymin, ('0.' * tail)[:-1])
                    cmax = '%s.%s.%s' % ('.'.join(ip_items[:d]), mymax, ('255.' * tail)[:-1])
                    break
    # print cmin, cmax
    return (cmin, cmax)
####################################################
########## FUNCTION: decision for whether the IP in a IP range
########## Input: IP range string "216.239.32.0/19" (string) , IP (int) to decide
########## Return: 1 in the range
########## 0 not in the range
####################################################
def in_ip_range(subnet_mask_ip, ip):
    """Return 1 when the integer IPv4 address ``ip`` lies inside the CIDR
    block ``subnet_mask_ip`` (e.g. "216.239.32.0/19"), otherwise 0."""
    range_low, range_high = subnet_mask_to_ip_range(subnet_mask_ip)
    low = ip2int(range_low)
    high = ip2int(range_high)
    return 1 if low <= ip <= high else 0
####################################################
# FUNCTION: compare between normal packet with the normal database
# Input: a: packet from traffic b: packet from normal database
####################################################
def compare(a, b):
    """Return 1 when packets ``a`` and ``b`` agree on their first three
    fields (integer source IP, destination IP, protocol number), else 0.

    Fields may be ints or numeric strings and are normalised with int().
    (The original used the Python-2-only long() builtin, which is a
    NameError on Python 3; int() compares identically on both versions.)
    """
    for field in range(3):
        if int(a[field]) != int(b[field]):
            return 0
    return 1
####################################################
# FUNCTION: compare packet in infect traffic file with the packet in normal database,
# find whether the packet is normal or infect
# Input: a: packet from infect traffic file b: packet from normal database
# Return: 1 normal packet match with the packet in database
# 0 infect packet not match the database
####################################################
def decision_normal_infect(a_normal_packet, b_infect_packet):
    """Decide whether an infect-capture packet matches a normal-db packet.

    Returns 1 (normal: public-range hit or fuzzy IP match against
    ``a_normal_packet``) or 0 (no match).  ``b_infect_packet`` carries
    integer IPs in fields [0]/[1]; ``a_normal_packet`` carries numeric
    strings from the db file.
    """
    # 0 source IP
    # 1 destination IP
    # we use the fuzzy matching to decide whether the packet in the database or not
    # example: 192.168.123.***
    # Step1: compare with google public IP address
    # Step2: wildcard of the last 3 number of the IP address
    # Step 1
    # Load the public IP range address from database dir : public_ip_range.txt
    flag_public_range = 0
    with open(normal_db_dir + public_ip_range_file, 'a+') as public_ip_range:
        for public_ip_range_line in public_ip_range:
            # compare the b_infect_packet ip source or ip destination with the IP range whether in the range
            # Because there is one of the two IP is the mobile IP address, so we just find one of [0] [1]
            # in the range, we decide the packet is normal
            if 1 == in_ip_range(public_ip_range_line, b_infect_packet[0]) or \
                            1 == in_ip_range(public_ip_range_line, b_infect_packet[1]):
                flag_public_range = 1
                break
            else:
                flag_public_range = 0
    if 1 == flag_public_range:
        # the infect packet ip in the public ip range
        return 1
    else:
        # Step 2
        # compare the ip and protocol with the normal database packet, we use fuzzy matching,
        infect_ip_source_str = int2ip(b_infect_packet[0])
        infect_ip_des_str = int2ip(b_infect_packet[1])
        normal_ip_source_str = int2ip(int(a_normal_packet[0]))
        normal_ip_des_str = int2ip(int(a_normal_packet[1]))
        infect_ip_source_str_array = infect_ip_source_str.split('.')
        infect_ip_des_str_array = infect_ip_des_str.split('.')
        normal_ip_source_str_array = normal_ip_source_str.split('.')
        normal_ip_des_str_array = normal_ip_des_str.split('.')
        # logger.debug(infect_ip_source_str)
        # logger.debug(infect_ip_des_str)
        # logger.debug(normal_ip_source_str)
        # logger.debug(normal_ip_des_str)
        #
        #
        # logger.debug(infect_ip_source_str_array)
        # logger.debug(infect_ip_des_str_array)
        # logger.debug(normal_ip_source_str_array)
        # logger.debug(normal_ip_des_str_array)
        # set the level of fuzzy
        # level = 3 xxx.xxx.xxx only match the first 3 ip number
        level = 3
        index = 0
        match_flag = 1
        while index < level:
            # Both src and dst octets must agree at every compared position.
            if infect_ip_source_str_array[index] == normal_ip_source_str_array[index] and \
                            infect_ip_des_str_array[index] == normal_ip_des_str_array[index]:
                index += 1
            else:
                match_flag = 0
                break
        if match_flag == 1:
            return 1
        else:
            return 0
####################################################
# FUNCTION: Process Normal File
# Input: The number of the files in the normal list
####################################################
def normal_process(normal_file_no):
    """Process one capture from the normal directory.

    normal_file_no is a 1-based index into normal_input_file_list.  Each
    packet line is normalised to a fixed 10-column record, previously
    unseen (src, dst, proto) triples are appended to the on-disk normal
    database, then packets are grouped per TCP/UDP stream index and one
    labelled row per stream is written to <input name>.csv.
    """
    global normal_db_size
    # Stream: initial 2 dimensions array to store the packet base on the stream index
    tcp_stream = [[[0 for col in range(0)] for row in range(0)] for high in range(50000)]
    udp_stream = [[[0 for col in range(0)] for row in range(0)] for high in range(20000)]
    num_udp_stream = 0
    num_tcp_stream = 0
    # Resolve the input file selected by normal_file_no (1-based).
    process_normal_file = input_normal_dir + normal_input_file_list[normal_file_no - 1]
    print (process_normal_file)
    # Load Normal Packet database, if the file not exist, create it. And append the file every process
    # normal_db = open(normal_db_dir + normal_db_file, 'a+')
    normal_db_temp = [[0 for col in range(10)] for row in range(0)]
    with open(normal_db_dir + normal_db_file, 'a+') as normal_db:
        for normal_db_line in normal_db:
            # normal_db_array = normal_db_line.split()
            normal_db_line_array = normal_db_line.split()
            normal_db_temp += [normal_db_line_array]
            normal_db_size += 1
    # Open normal csv file and initial the head of the table
    # print (output_csv_dir + normal_input_file_list[normal_file_no - 1] + output_extension)
    # Header includes the three extra time features T1..T3.
    # NOTE(review): mode 'wSb+' presumably relies on the Windows CRT, where
    # 'S' is a sequential-access fopen hint; it is invalid elsewhere -- confirm.
    with open(output_csv_dir + normal_input_file_list[normal_file_no - 1] + output_extension, 'wSb+') as csv_file:
        csv_writer = csv.writer(csv_file, dialect='excel')
        csv_writer.writerow(
            ['IP source', 'IP des', 'Protocol', 'Frame duration', 'UDP size', 'TCP size', 'Argument Count', 'TimeStamp',
             'T1', 'T2', 'T3', 'Lable'])
    # Stream: The output file will store the stream information
    # process every line in the normal file
    # -e ip.src -e ip.dst -e ip.proto
    # -e frame.time_delta
    # -e udp.length -e udp.stream
    # -e tcp.len -e tcp.stream
    # -e http.request.uri
    # The structure of every line: 9 if
    # IP source 0 , IP destination 1, IP protocol 2,
    # Frame duration 3,
    # UDP size 4, UDP stream index 5,
    # TCP size 6, TCP stream index 7,
    # URL 8
    # tshark -r %%f -o tcp.calculate_timestamps:true -n -T fields -e ip.src -e ip.dst -e ip.proto -e frame.time_delta
    # -e udp.length -e udp.stream -e tcp.len -e tcp.stream -e http.request.uri -e frame.time_epoch >%%f%outputFormat%
    # UPD 7
    # |--ip.src--|--ip.dst--|--ip.proto--|--frame.time_delta--|--udp.length--|--udp.stream--|--frame.time_epoch--|
    # TCP 8
    # |--ip.src--|--ip.dst--|--ip.proto--|--frame.time_delta--|--tcp.len--|--tcp.stream--|--http.request.uri--|--frame.time_epoch--|
    for txt_line in open(process_normal_file):
        txt_array = txt_line.split()
        # Rebuild txt_array into the fixed 10-column layout.
        # Drop lines with fewer than 7 fields.
        if 7 > len(txt_array):
            continue
        # Fields 0/1: convert dotted-quad IPs to ints.
        if len(txt_array[0]) <= 15:
            # Anything longer than "255.255.255.255" would be IPv6; skipped.
            ip_source = ip2int(txt_array[0])
            txt_array[0] = ip_source
            ip_des = ip2int(txt_array[1])
            txt_array[1] = ip_des
        # Pad the protocol-specific columns with '0' so every record has the
        # same shape (proto 6 = TCP, 17 = UDP).
        if txt_array[2] != '17':
            # TCP 6 UDP 17
            # if not UDP, SET udp size AS 0
            txt_array.insert(4, '0')
            txt_array.insert(5, '0')
        if txt_array[2] != '6':
            txt_array.insert(6, '0')
            txt_array.insert(7, '0')
            txt_array.insert(8, '0')
        else:
            # TCP with an HTTP request URI gives 10 fields; replace the URI
            # with its '=' (argument) count.
            if len(txt_array) == 10:
                # include the HTTP request uri
                arg_num = txt_array.pop(8).count('=')
                txt_array.insert(8, str(arg_num))
            else:
                arg_num = 0
                txt_array.insert(8, str(arg_num))
        # 9 --> timestamp
        # txt_array.insert(9, 'normal')
        # The epoch timestamp stays in field 9.
        # The label is appended last (not inserted at 10) because rows with a
        # '*' marker may carry an extra element.
        # txt_array.insert(10, 'normal')
        txt_array.append('normal')
        # Decided whether the packet has already existed in the normal_db
        status = 0
        # Linear scan of the in-memory db for a matching (src, dst, proto).
        for i in normal_db_temp:
            if 1 == compare(i, txt_array):
                status = 1
                break
        # Unknown packet: add it to the in-memory db and append to the file.
        if 0 == status:
            normal_db_temp += [txt_array]
            normal_db_size += 1
            # append the data into to database file
            with open(normal_db_dir + normal_db_file, 'a+') as normal_db:
                # normal_db_array = normal_db_line.split()
                n = 0
                s = ""
                while n < len(txt_array):
                    s += str(txt_array[n])
                    s += " "
                    n += 1
                s += "\n"
                # txt_array_convert = ' '.join(str(txt_array))
                normal_db.writelines(s)
        # Bucket the packet by its protocol's stream index; streams are
        # flushed to the CSV after the whole file has been read.
        if '17' == txt_array[2]:
            # UDP process
            udp_stream[int(txt_array[5])].append(txt_array)
        elif '6' == txt_array[2]:
            # TCP process
            # logger.info("index of the stream:")
            # logger.info(txt_array[7])
            tcp_stream[int(txt_array[7])].append(txt_array)
    # All packets are now grouped per stream; aggregate each stream into a
    # single CSV row.
    # Combine the streams below.
    # Previous stream's start/end timestamps feed the T2/T3 features.
    pre_start_timestamp = 0
    pre_end_timestamp = 0
    cur_start_timestamp = 0
    cur_end_timestamp = 0
    for packet_list in udp_stream:
        # Aggregate each non-empty UDP stream and append it to the CSV.
        if len(packet_list) != 0:
            # 8['IP source', 'IP des', 'Protocol', 'Frame duration', 'UDP size', 'TCP size', 'Argument Count', 'Lable'])
            # Concatenate the packets of this stream into one record.
            stream_temp = [0 for col in range(12)]
            stream_temp[0] = packet_list[0][0]
            stream_temp[1] = packet_list[0][1]
            stream_temp[2] = packet_list[0][2]
            for packet in packet_list:
                stream_temp[3] += float(packet[3])
                stream_temp[4] += int(packet[4])
                # If field 9 is '*', drop it so the next field is the timestamp.
                if packet[9] == "*":
                    logger.info("Encounter some line with *")
                    packet.pop(9)
                # Track the earliest and latest timestamp of the stream.
                if cur_start_timestamp == 0 and cur_end_timestamp == 0:
                    cur_start_timestamp = float(packet[9])
                    cur_end_timestamp = float(packet[9])
                if float(packet[9]) < cur_start_timestamp:
                    cur_start_timestamp = float(packet[9])
                if float(packet[9]) > cur_end_timestamp:
                    cur_end_timestamp = float(packet[9])
            stream_temp[5] = 0
            stream_temp[6] = 0
            # The first packet's timestamp becomes the stream's timestamp.
            stream_temp[7] = packet_list[0][9]
            # Three extra time features for the ML experiments:
            # a) |Ts - Te| time between start and end of every stream
            # b) |Ts2 - Ts1| time between start time of adjacent stream
            # c) |Te1 - Ts2| time between end and start time of adjacent stream
            stream_temp[8] = abs(cur_end_timestamp - cur_start_timestamp)
            stream_temp[9] = abs(cur_start_timestamp - pre_start_timestamp)
            stream_temp[10] = abs(cur_start_timestamp - pre_end_timestamp)
            # The first packet's label becomes the stream's label.
            stream_temp[11] = packet_list[0][10]
            pre_start_timestamp = cur_start_timestamp
            pre_end_timestamp = cur_end_timestamp
            cur_start_timestamp = 0
            cur_end_timestamp = 0
            # Write the CSV file for the normal files.
            with open(output_csv_dir + normal_input_file_list[normal_file_no - 1] + output_extension, 'ab+') \
                    as csv_file:
                csv_writer = csv.writer(csv_file, dialect='excel')
                csv_writer.writerow(stream_temp)
            num_udp_stream += 1
    pre_start_timestamp = 0
    pre_end_timestamp = 0
    cur_start_timestamp = 0
    cur_end_timestamp = 0
    for packet_list in tcp_stream:
        # Aggregate each non-empty TCP stream and append it to the CSV.
        if len(packet_list) != 0:
            # 8['IP source', 'IP des', 'Protocol', 'Frame duration', 'UDP size', 'TCP size', 'Argument Count', 'Lable'])
            stream_temp = [0 for col in range(12)]
            stream_temp[0] = packet_list[0][0]
            stream_temp[1] = packet_list[0][1]
            stream_temp[2] = packet_list[0][2]
            for packet in packet_list:
                stream_temp[3] += float(packet[3])
                stream_temp[5] += int(packet[6])
                stream_temp[6] += int(packet[8])
                # Track the earliest and latest timestamp of the stream.
                if cur_start_timestamp == 0 and cur_end_timestamp == 0:
                    cur_start_timestamp = float(packet[9])
                    cur_end_timestamp = float(packet[9])
                if float(packet[9]) < cur_start_timestamp:
                    cur_start_timestamp = float(packet[9])
                if float(packet[9]) > cur_end_timestamp:
                    cur_end_timestamp = float(packet[9])
            stream_temp[4] = 0
            stream_temp[7] = packet_list[0][9]
            # Three extra time features for the ML experiments:
            # a) |Ts - Te| time between start and end of every stream
            # b) |Ts2 - Ts1| time between start time of adjacent stream
            # c) |Te1 - Ts2| time between end and start time of adjacent stream
            stream_temp[8] = abs(cur_end_timestamp - cur_start_timestamp)
            stream_temp[9] = abs(cur_start_timestamp - pre_start_timestamp)
            stream_temp[10] = abs(cur_start_timestamp - pre_end_timestamp)
            # The first packet's label becomes the stream's label.
            stream_temp[11] = packet_list[0][10]
            pre_start_timestamp = cur_start_timestamp
            pre_end_timestamp = cur_end_timestamp
            cur_start_timestamp = 0
            cur_end_timestamp = 0
            # Write the CSV file for the normal files.
            with open(output_csv_dir + normal_input_file_list[normal_file_no - 1] + output_extension, 'ab+') \
                    as csv_file:
                csv_writer = csv.writer(csv_file, dialect='excel')
                csv_writer.writerow(stream_temp)
            num_tcp_stream += 1
####################################################
# FUNCTION: Process Normal File
# Input: The number of the files in the normal list
####################################################
def infect_process(infect_file_no):
    """Process one capture from the infect directory.

    infect_file_no is a 1-based index into infect_input_file_list.  Each
    packet line is normalised to the same 10-column record as in
    normal_process(), labelled 'normal' or 'infect' via match_normal_db(),
    then packets are grouped per TCP/UDP stream and one labelled row per
    stream is written to <input name>.csv.
    """
    global num_packet_label_infect
    global num_packet_label_normal
    logger.info("Start to process the infect file")
    tcp_stream = [[[0 for col in range(0)] for row in range(0)] for high in range(50000)]
    udp_stream = [[[0 for col in range(0)] for row in range(0)] for high in range(20000)]
    num_udp_stream = 0
    num_tcp_stream = 0
    process_infect_file = input_infect_dir + infect_input_file_list[infect_file_no - 1]
    print (process_infect_file)
    # Load Normal Packet database, if the file not exist, create it. And append the file every process
    # normal_db = open(normal_db_dir + normal_db_file, 'a+')
    public_ip_range_temp = []
    with open(normal_db_dir + public_ip_range_file, 'a+') as public_ip_range:
        for public_ip_range_line in public_ip_range:
            public_ip_range_temp += [public_ip_range_line]
    # Read the whole normal db once into a single space-joined string for
    # the regex matching done by match_normal_db().
    str_normal_db = ""
    normal_db_temp = [[0 for col in range(10)] for row in range(0)]
    with open(normal_db_dir + normal_db_file, 'a+') as normal_db:
        for normal_db_line in normal_db:
            # normal_db_array = normal_db_line.split()
            normal_db_line_array = normal_db_line.split()
            normal_db_temp += [normal_db_line_array]
            str_normal_db += " "
            normal_db_line = normal_db_line.strip('\n')
            str_normal_db += normal_db_line
    # Open normal csv file and initial the head of the table
    # print (output_csv_dir + normal_input_file_list[normal_file_no - 1] + output_extension)
    # NOTE(review): mode 'wSb+' presumably relies on the Windows CRT, where
    # 'S' is a sequential-access fopen hint; it is invalid elsewhere -- confirm.
    with open(output_csv_dir + infect_input_file_list[infect_file_no - 1] + output_extension, 'wSb+') as csv_file:
        csv_writer = csv.writer(csv_file, dialect='excel')
        csv_writer.writerow(
            ['IP source', 'IP des', 'Protocol', 'Frame duration', 'UDP size', 'TCP size', 'Argument Count', 'TimeStamp',
             'T1', 'T2', 'T3', 'Lable'])
    # process every line in the normal
    num_txt_line = 0
    for txt_line in open(process_infect_file):
        logger.info("Process the txt line: " + str(num_txt_line))
        num_txt_line += 1
        txt_array = txt_line.split()
        # Drop lines with fewer than 7 fields.
        if 7 > len(txt_array):
            continue
        if len(txt_array[0]) <= 15:
            # Anything longer than "255.255.255.255" would be IPv6; skipped.
            IP_source = ip2int(txt_array[0])
            txt_array[0] = IP_source
            IP_des = ip2int(txt_array[1])
            txt_array[1] = IP_des
        # Pad protocol-specific columns with '0' (proto 6 = TCP, 17 = UDP).
        if txt_array[2] != '17':
            # TCP 6 UDP 17
            # if not UDP, SET udp size AS 0
            txt_array.insert(4, '0')
            txt_array.insert(5, '0')
        if txt_array[2] != '6':
            txt_array.insert(6, '0')
            txt_array.insert(7, '0')
            txt_array.insert(8, '0')
        else:
            # TCP with an HTTP request URI gives 10 fields; replace the URI
            # with its '=' (argument) count.
            if len(txt_array) == 10:
                # include the HTTP request uri
                arg_num = txt_array.pop(8).count('=')
                txt_array.insert(8, str(arg_num))
            else:
                arg_num = 0
                txt_array.insert(8, str(arg_num))
        # Decide whether the packet is Normal or Infect by comparing against
        # the normal database:
        # status = 1 : normal packet match the database
        # status = 0 : infect packet not match the database
        # The matching was reworked to use the regex-based match_normal_db()
        # for speed (the old per-entry loop is kept below for reference).
        status = 0
        # Label the current txt_array.
        status = match_normal_db(txt_array, public_ip_range_temp, str_normal_db)
        if status:
            txt_array.append('normal')
            num_packet_label_normal += 1
        else:
            txt_array.append('infect')
            num_packet_label_infect += 1
        # for i in normal_db_temp:
        #     if 1 == decision_normal_infect(i, txt_array):
        #         status = 1
        #         txt_array.append('normal')
        #         # txt_array.insert(10, 'normal')
        #         num_packet_label_normal += 1
        #         break
        # if 0 == status:
        #     txt_array.append('infect')
        #     # txt_array.insert(10, 'infect')
        #     num_packet_label_infect += 1
        # Bucket the packet by its protocol's stream index; streams are
        # flushed to the CSV after the whole file has been read.
        if '17' == txt_array[2]:
            # UDP process
            udp_stream[int(txt_array[5])].append(txt_array)
            # print(int(txt_array[5]))
        elif '6' == txt_array[2]:
            # TCP process
            tcp_stream[int(txt_array[7])].append(txt_array)
            # print(int(txt_array[7]))
    logger.info("Start to write the csv file")
    pre_start_timestamp = 0
    pre_end_timestamp = 0
    cur_start_timestamp = 0
    cur_end_timestamp = 0
    for packet_list in udp_stream:
        # Aggregate each non-empty UDP stream and append it to the CSV.
        if len(packet_list) != 0:
            # 8 ['IP source', 'IP des', 'Protocol', 'Frame duration', 'UDP size', 'TCP size', 'Argument Count', 'Lable'])
            stream_temp = [0 for col in range(12)]
            stream_temp[0] = packet_list[0][0]
            stream_temp[1] = packet_list[0][1]
            stream_temp[2] = packet_list[0][2]
            for packet in packet_list:
                stream_temp[3] += float(packet[3])
                stream_temp[4] += int(packet[4])
                # If field 9 is '*', drop it so the next field is the timestamp.
                if packet[9] == "*":
                    logger.info("Encounter some line with *")
                    packet.pop(9)
                # Track the earliest and latest timestamp of the stream.
                # NOTE(review): this '*' check is duplicated from just above;
                # the second copy only matters if two '*' fields occur in a row.
                if packet[9] == "*":
                    logger.info("Encounter some line with *")
                    packet.pop(9)
                if cur_start_timestamp == 0 and cur_end_timestamp == 0:
                    cur_start_timestamp = float(packet[9])
                    cur_end_timestamp = float(packet[9])
                if float(packet[9]) < cur_start_timestamp:
                    cur_start_timestamp = float(packet[9])
                if float(packet[9]) > cur_end_timestamp:
                    cur_end_timestamp = float(packet[9])
            stream_temp[5] = 0
            stream_temp[6] = 0
            stream_temp[7] = packet_list[0][9]
            # Three extra time features for the ML experiments:
            # a) |Ts - Te| time between start and end of every stream
            # b) |Ts2 - Ts1| time between start time of adjacent stream
            # c) |Te1 - Ts2| time between end and start time of adjacent stream
            stream_temp[8] = abs(cur_end_timestamp - cur_start_timestamp)
            stream_temp[9] = abs(cur_start_timestamp - pre_start_timestamp)
            stream_temp[10] = abs(cur_start_timestamp - pre_end_timestamp)
            # The first packet's label becomes the stream's label.
            stream_temp[11] = packet_list[0][10]
            pre_start_timestamp = cur_start_timestamp
            pre_end_timestamp = cur_end_timestamp
            cur_start_timestamp = 0
            cur_end_timestamp = 0
            # Write the CSV file for the normal files.
            with open(output_csv_dir + infect_input_file_list[infect_file_no - 1] + output_extension, 'ab+') \
                    as csv_file:
                csv_writer = csv.writer(csv_file, dialect='excel')
                csv_writer.writerow(stream_temp)
            num_udp_stream += 1
    logger.info("Finish UDP CSV write, the sum of stream: " + str(num_udp_stream))
    pre_start_timestamp = 0
    pre_end_timestamp = 0
    cur_start_timestamp = 0
    cur_end_timestamp = 0
    for packet_list in tcp_stream:
        # Aggregate each non-empty TCP stream and append it to the CSV.
        if len(packet_list) != 0:
            # 8 ['IP source', 'IP des', 'Protocol', 'Frame duration', 'UDP size', 'TCP size', 'Argument Count', 'Lable'])
            stream_temp = [0 for col in range(12)]
            stream_temp[0] = packet_list[0][0]
            stream_temp[1] = packet_list[0][1]
            stream_temp[2] = packet_list[0][2]
            for packet in packet_list:
                stream_temp[3] += float(packet[3])
                stream_temp[5] += int(packet[6])
                stream_temp[6] += int(packet[8])
                # Track the earliest and latest timestamp of the stream.
                if cur_start_timestamp == 0 and cur_end_timestamp == 0:
                    cur_start_timestamp = float(packet[9])
                    cur_end_timestamp = float(packet[9])
                if float(packet[9]) < cur_start_timestamp:
                    cur_start_timestamp = float(packet[9])
                if float(packet[9]) > cur_end_timestamp:
                    cur_end_timestamp = float(packet[9])
            stream_temp[4] = 0
            stream_temp[7] = packet_list[0][9]
            # Three extra time features for the ML experiments:
            # a) |Ts - Te| time between start and end of every stream
            # b) |Ts2 - Ts1| time between start time of adjacent stream
            # c) |Te1 - Ts2| time between end and start time of adjacent stream
            stream_temp[8] = abs(cur_end_timestamp - cur_start_timestamp)
            stream_temp[9] = abs(cur_start_timestamp - pre_start_timestamp)
            stream_temp[10] = abs(cur_start_timestamp - pre_end_timestamp)
            # The first packet's label becomes the stream's label.
            stream_temp[11] = packet_list[0][10]
            pre_start_timestamp = cur_start_timestamp
            pre_end_timestamp = cur_end_timestamp
            cur_start_timestamp = 0
            cur_end_timestamp = 0
            # Write the CSV file for the normal files.
            with open(output_csv_dir + infect_input_file_list[infect_file_no - 1] + output_extension, 'ab+') \
                    as csv_file:
                csv_writer = csv.writer(csv_file, dialect='excel')
                csv_writer.writerow(stream_temp)
            num_tcp_stream += 1
    logger.info("Finish TCP CSV write, the sum of stream: " + str(num_tcp_stream))
########
#
# Main Process
#
#######
####################################################
# Logger Configuration
####################################################
# 创建一个logger
# Create the application logger used throughout the script.
logger = logging.getLogger('mainlogger')
logger.setLevel(logging.DEBUG)
# File handler: writes mainlog.log next to this script.
fh = logging.FileHandler(path + '/mainlog.log')
fh.setLevel(logging.DEBUG)
# Console handler.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# Shared message format for both handlers.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# Attach both handlers to the logger.
logger.addHandler(fh)
logger.addHandler(ch)
# Smoke-test log line.
logger.info('Test mainlog')
# Set Level of the logger,
# NOTSET < DEBUG < INFO < WARNING < ERROR < CRITICAL
# logger.setLevel(logging.WARNING) #Show Debug Information
# logger.setLevel(logging.INFO) #Show Debug Information
logger.setLevel(logging.DEBUG)
# logger.setLevel(logging.NOTSET) #Not show Any Information
# Load the directory configuration.  NOTE(review): scan_input_data() calls
# load_conf() again, so this first call is redundant but harmless.
load_conf()
# Process the normal directory: build csv files and grow the normal db.
scan_input_data()
i = 1
for i in range(1, num_normal_input_file + 1):
    logger.info("The current process for normal:")
    logger.info(i)
    normal_process(i)
i = 1
for i in range(1, num_infect_input_file + 1):
    logger.info("The current process for infect:")
    logger.info(i)
    infect_process(i)
# Process the infect (malware) directory and emit labelled csv files.
# 处理 malware 目录下文件,生成csv文件
| xinmeng1/MobileBotnetAnalysisLab | old/TrafficDataProcessAuto.py | TrafficDataProcessAuto.py | py | 37,434 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "os.path.split",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line... |
2845928618 |
import cv2
import os
import time
from time import sleep
import numpy as np
import pytesseract
import threading
from actions import doAction
import nxbt
import copy
import torch
from predictor import divide, predict
#pytesseract.pytesseract.tesseract_cmd = r'C:/Program Files/Tesseract-OCR/tesseract.exe'
resetMacro = """
L R A 1s
"""
def grayscale(img):
original = np.array(img)
original = cv2.cvtColor(original, cv2.COLOR_RGB2BGR)
gray_scale = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
return gray_scale
def checkText(img):
text = pytesseract.image_to_string(img)
#print(text)
if 'VAMPIRE' in text:
return True
return False
state = 0
ministate = 0
image =np.random.random((5, 5))
distance = 1
def getState():
global state
global ministate
global image
cap = cv2.VideoCapture(0)
# The device number might be 0 or 1 depending on the device and the webcam
while(True):
ret, frame = cap.read()
cv2.imshow('Switch', frame)
frame2 =copy.deepcopy(frame)
image = copy.deepcopy(frame)
state = np.moveaxis(frame, -1, 0).astype(np.double) # put channels in first dimension
state = torch.tensor(state.copy(), dtype=torch.double)
frame2 = cv2.resize(frame2, (100, 100), interpolation=cv2.INTER_CUBIC)
frame2 = np.moveaxis(frame2, -1, 0).astype(np.double) # put channels in first dimension
ministate = torch.tensor(frame2.copy(), dtype=torch.double)
#ministate = torch.unsqueeze(ministate, 0)
cv2.waitKey(1)
def predictDistance():
global distance
global image
while True:
imgs, w, h = divide(image, 5)
predictions = []
enemyx = 0
enemyy = 0
playerx = 0
playery = 0
for i in range(len(imgs)):
img, x, y = imgs[i]
pred = predict(img)
if pred == 1:
enemyx= x
enemyy= y
elif pred== 2:
playerx= x
playery = y
distance = abs(enemyx - playerx) + abs(enemyy - playery)
t = threading.Thread(target=getState)
t.start()
sleep(1)
i = threading.Thread(target=predictDistance)
i.start()
print(image.shape)
class env():
def __init__(self, inverted = False, controller_index = 0):
self.draw_window = True
self.inverted = inverted
self.curHp = 0
self.curEnemyHp = 0
self.last_hp = 0
self.last_enemy_hp = 0
self.gotHit = 0
self.frame = 0
self.hitEnemy = 0
input("Press Enter when ready to connect controller")
self.nx = nxbt.Nxbt()
self.controller_index = self.nx.create_controller(nxbt.PRO_CONTROLLER)
print("Controller index: " + str(self.controller_index))
self.nx.wait_for_connection(self.controller_index)
print("Connecting Controller Please Wait")
print(f"Shape of Input {state.shape}, {ministate.shape}")
print("Connected Controller")
input("Press Enter to start when ready")
print("Started")
def setHps(self):
self.curHp = state[1, 155, 435]
self.curEnemyHp = state[1, 295, 435]
if self.inverted:
self.curHp = state[1, 295, 435]
self.curEnemyHp = state[1, 155, 435]
def getReward(self, action):
reward = -1
self.setHps()
if(self.curHp != self.last_hp): # if hp changed
self.gotHit = 1
#reward -=20
hit = 0
if(self.curEnemyHp != self.last_enemy_hp): # if enemy hp changed
self.hitEnemy = 1
return reward
def step(self, action):
last_distance = distance
done = False
if self.frame >= 1000:
self.frame = 0
done = True
self.hitEnemy = 0
self.gothit = 0
reward = 0
self.last_hp = state[1, 155, 435]
self.last_enemy_hp = state[1, 295, 435]
lag, actionReward = doAction(self.nx, self.controller_index, action)
while(lag > 0 or self.gotHit!=1):
lag -= 1
reward += self.getReward(action)
self.frame += 1
time.sleep(0.01)
if(distance < last_distance & action <= 4):
reward +=5
elif(distance > last_distance & action > 4):
reward -=10
if self.hitEnemy == 1:
reward += actionReward
else:
reward -= actionReward
if self.gotHit:
reward -=20
next_state = ministate
info = []
return next_state, reward, done, info, distance
def reset(self):
self.gothit = 0
self.hitEnemy =0
time.sleep(.5)
self.nx.macro(self.controller_index, resetMacro)
next_state = ministate
return next_state
| yannik603/Smash-Ultimate-Bot | env.py | env.py | py | 5,142 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"l... |
20731047468 | from bs4 import BeautifulSoup as BSHTML
import os
import re
html_directory = "data/patent_htmls/"
fp_citations_dir = 'data/fp_citations_from_html'
if not os.path.exists(fp_citations_dir):
os.makedirs(fp_citations_dir)
citations_per_patent = dict() # key is patent_number; value is array of citation texts
ref_locations_per_patent = dict() # key is patent_number; value is array of reference locations (1 or 2; front page or in-text)
filecount = 0
for filename in os.listdir(html_directory):
patent_id = re.sub(".html","",filename)
html_patent_file = open(html_directory + filename, 'r',encoding='utf-8')
citations_in_unlabeled_outfile = open(fp_citations_dir + "/" + patent_id + ".fpcitations.txt", 'w',encoding='utf-8')
citations_in_unlabeled_outfile.write("\n\n"+patent_id+"\n\n")
filecount += 1
htmlcontent = html_patent_file.read().replace('\n', '')
htmlstruct = BSHTML(htmlcontent,"lxml")
patent_citations_in_html = []
nonpatent_citations_in_html = []
"""
Find citations in the HTML metadata (front-page citations)
"""
for meta in htmlstruct.findAll('meta',attrs={"name":"DC.relation"}):
#print("patent_citation in meta\t",meta['content'])
patent_citations_in_html.append(meta['content'])
for meta in htmlstruct.findAll('meta',attrs={"name":"citation_reference"}):
#print("nonpatent_citation in meta\t",meta['content'])
nonpatent_citations_in_html.append(meta['content'])
print (patent_id,
len(patent_citations_in_html),
len(nonpatent_citations_in_html),
sep="\t"
)
for citation_text in sorted(nonpatent_citations_in_html):
citations_in_unlabeled_outfile.write(citation_text+"\n")
html_patent_file.close()
citations_in_unlabeled_outfile.close()
print(filecount,"files read") | tmleiden/citation-extraction-with-flair | get_fp_citations_from_html.py | get_fp_citations_from_html.py | py | 1,861 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": ... |
10002081821 | import ipv6_utils
import log
from static import OF_TABLE_NUM
from static import WLAN_IFACE
from static import GW_IFACE
from switch import OFRule
from switch import Switch
from switch import Link
from switch import AccessPointConf
from switch import GatewayConf
from event import *
# End import from iJOIN solution files
# Start import from Ryu files
from ryu.base import app_manager
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto import ofproto_v1_3 as ofproto
from ryu.ofproto import ofproto_v1_3_parser as ofp_parser
from ryu.topology.api import get_switch
from ryu.topology.event import EventSwitchEnter as RyuEventSwitchEnter
from ryu.topology.event import EventSwitchLeave as RyuEventSwitchLeave
from ryu.topology.event import EventLinkAdd as RyuEventLinkAdd
from ryu.topology.event import EventLinkDelete as RyuEventLinkDelete
from ryu.topology.event import EventPortAdd as RyuEventPortAdd
from ryu.topology.event import EventPortModify as RyuEventPortModify
from ryu.topology.event import EventPortDelete as RyuEventPortDelete
# End import from Ryu files
class Nmm(app_manager.RyuApp):
"""
================ =========================================================
Attribute Description
================ =========================================================
OFP_VERSIONS Declaration of supported OFP version
_EVENTS The list of events provided by the RyuApp
================ =========================================================
"""
OFP_VERSIONS = [ofproto.OFP_VERSION]
_EVENTS = [EventTopologyUpdate, EventSwitchEnter, EventSwitchUpdate,
EventSwitchLeave, EventLinkAdd, EventLinkDelete]
def __init__(self, *args, **kwargs):
"""
================ =========================================================
Attribute Description
================ =========================================================
switches The dictionary storing the switches as
custom Switch isntance
================ =========================================================
"""
super(Nmm, self).__init__(*args, **kwargs)
self.logger = log.get_logger(self.name)
self.switches = {}
self.accesspoints = {}
self.gateways = {}
self.of_rules = {}
def _initialise_switch_of_tables(self, switch):
"""
Flush the flow table when the switch connects to the controller.
"""
# Flush OpenFlow entries
match = ofp_parser.OFPMatch()
instructions = []
actions = []
datapath = switch.switch.dp
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
for i in xrange(1, OF_TABLE_NUM):
mod = parser.OFPFlowMod(datapath = datapath, cookie = 0,
cookie_mask = 0, command = ofproto.OFPFC_DELETE,
idle_timeout = 0, hard_timeout = 0, table_id = i,
buffer_id = ofproto.OFPCML_NO_BUFFER, out_port = ofproto.OFPP_ANY,
out_group = ofproto.OFPG_ANY, match = match, instructions = instructions)
datapath.send_msg(mod)
for i in xrange(0, OF_TABLE_NUM-1):
instructions = [ofp_parser.OFPInstructionGotoTable(i+1)]
req = EventWriteOFRule(OFRule(switch, match, actions, instructions, table_id = i, priority = 0))
self.send_event(req.dst, req)
def _check_if_ap_port(self, port):
"""
Check if a switch port is an access port.
"""
return port.name.startswith(WLAN_IFACE)
def _check_if_gw_port(self, port):
"""
Check if a switch port is a gateway port.
"""
return port.name.startswith(GW_IFACE)
def _get_ap_conf(self, switch, port):
"""
Get AP configuration.
"""
ap_conf = AccessPointConf()
ap_conf.port = port
return ap_conf
def _get_gw_conf(self, switch, port):
"""
Get GW configuration.
"""
gw_conf = GatewayConf()
gw_conf.port = port
# FIXME HARDCODED
gw_conf.nw_prefix = ('2020', str(hex(switch.switch.dp.id))[-4:],'0','0','0','0','0','0')
gw_conf.nw_prefix_len = 32
return gw_conf
@set_ev_cls(EventPushPacket)
def _handler_push_packet(self, ev):
"""
Send the Packet pkt through the OFPort port.
"""
datapath = ev.switch.switch.dp
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
actions = [parser.OFPActionOutput(port = ev.port.port_no)]
out = parser.OFPPacketOut(datapath = datapath,
buffer_id = ofproto.OFP_NO_BUFFER,
in_port = ofproto.OFPP_CONTROLLER,
actions = actions, data = ev.pkt.data)
datapath.send_msg(out)
@set_ev_cls(EventProcessPacket)
def _handler_process_packet(self, ev):
"""
Send the Packet pkt through the OFPP_TABLE.
"""
datapath = ev.switch.switch.dp
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
actions = [parser.OFPActionOutput(port = ofproto.OFPP_TABLE)]
out = parser.OFPPacketOut(datapath = datapath,
buffer_id = ofproto.OFP_NO_BUFFER,
in_port = ofproto.OFPP_CONTROLLER,
actions = actions, data = ev.pkt.data)
datapath.send_msg(out)
@set_ev_cls(EventWriteOFRule)
def _handler_write_of_rule(self, ev):
"""
Write OpenFlow rule on switch.
If the rule is already present in the switch,
don't write it again
"""
try:
if ev.of_rule.key not in self.of_rules \
or self.of_rules[ev.of_rule.key].actions != ev.of_rule.actions:
datapath = ev.of_rule.switch.switch.dp
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
mod = parser.OFPFlowMod(datapath = datapath, table_id = ev.of_rule.table_id, priority = ev.of_rule.priority, match = ev.of_rule.match, instructions = ev.of_rule.instructions, cookie = ev.of_rule.cookie)
datapath.send_msg(mod)
self.of_rules[ev.of_rule.key] = ev.of_rule
except KeyError:
pass
@set_ev_cls(EventDelOFRule)
def _handler_del_of_rule(self, ev):
"""
Delete OpenFlow rule on switch.
If the rule has been already deleted from the switch,
don't delete it again
"""
try:
if ev.of_rule.key in self.of_rules:
datapath = ev.of_rule.switch.switch.dp
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = []
mod = parser.OFPFlowMod(datapath = datapath, cookie = ev.of_rule.cookie,
cookie_mask = ev.of_rule.cookie_mask, table_id = ev.of_rule.table_id,
command = ofproto.OFPFC_DELETE, idle_timeout = 0, hard_timeout = 0,
priority = ev.of_rule.priority, buffer_id = ofproto.OFPCML_NO_BUFFER,
out_port = ofproto.OFPP_ANY, out_group = ofproto.OFPG_ANY,
match = ev.of_rule.match, instructions = inst)
datapath.send_msg(mod)
del self.of_rules[ev.of_rule.key]
except KeyError:
pass
@set_ev_cls(EventSwitchRequest)
def _handler_switch_request(self, req):
"""
Get a switch object given the dpid.
"""
switches = []
try:
if req.dpid is None:
# reply all list
switches = self.switches.values()
elif req.dpid in self.switches:
switches = [self.switches[req.dpid]]
except KeyError:
pass
rep = EventSwitchReply(req.src, switches)
self.reply_to_request(req, rep)
@set_ev_cls(EventLinkRequest)
def _handler_link_request(self, req):
"""
Get switch's links.
"""
links = []
try:
if req.dpid is None:
# reply all list
links = [in_li for sw in self.switches.itervalues() for li_li in sw.links.itervalues() for in_li in li_li]
elif req.dpid in self.switches:
links = [in_li for li_li in self.switches[req.dpid].links.itervalues() for in_li in li_li]
except KeyError:
pass
rep = EventLinkReply(req.src, links)
self.reply_to_request(req, rep)
@set_ev_cls(RyuEventSwitchEnter, MAIN_DISPATCHER)
def _handler_switch_enter(self, ev):
"""
Handler for event.EventSwitchEnter
Add a node to the topology.
"""
switch = Switch(ev.switch)
# Check switch capabilities
self.switches[ev.switch.dp.id] = switch
# HARDCODED
for p in switch.switch.ports:
if self._check_if_ap_port(p):
ap_conf = self._get_ap_conf(switch, p)
switch.is_ap = True
switch.ap_conf = ap_conf
self.accesspoints[ev.switch.dp.id] = switch
elif self._check_if_gw_port(p):
gw_conf = self._get_gw_conf(switch, p)
switch.is_gw = True
switch.gw_conf = gw_conf
self.gateways[ev.switch.dp.id] = switch
self._initialise_switch_of_tables(switch)
ev_tu = EventSwitchEnter(switch)
self.send_event_to_observers(ev_tu)
ev_tu = EventTopologyUpdate(self.switches)
self.send_event_to_observers(ev_tu)
self.logger.info("Switch <" + str(hex(switch.switch.dp.id)) + "> connected")
@set_ev_cls(RyuEventSwitchLeave, MAIN_DISPATCHER)
def _handler_switch_leave(self, ev):
"""
Handler for event.EventSwitchLeave
Delete a node from the topology.
"""
try:
ev_tu = EventSwitchLeave(self.switches[ev.switch.dp.id])
self.send_event_to_observers(ev_tu)
del self.switches[ev.switch.dp.id]
ev_tu = EventTopologyUpdate(self.switches)
self.send_event_to_observers(ev_tu)
self.logger.info("Switch <" + str(hex(ev.switch.dp.id)) + "> disconnected")
except KeyError:
pass
@set_ev_cls(RyuEventLinkAdd, MAIN_DISPATCHER)
def _handler_link_add(self, ev):
"""
Handler for event.EventLinkAdd
Add a link to the topology.
"""
if ev.link.dst.dpid not in self.switches[ev.link.src.dpid].links:
self.switches[ev.link.src.dpid].links[ev.link.dst.dpid] = []
link = Link(ev.link)
self.switches[ev.link.src.dpid].links[ev.link.dst.dpid].append(link)
ev_tu = EventLinkAdd(link)
self.send_event_to_observers(ev_tu)
ev_tu = EventTopologyUpdate(self.switches)
self.send_event_to_observers(ev_tu)
self.logger.info("Link <" + str(hex(ev.link.src.dpid)) + ":" + ev.link.src.name + "> => <" +
str(hex(ev.link.dst.dpid)) + ":" + ev.link.dst.name +"> appeared")
@set_ev_cls(RyuEventLinkDelete, MAIN_DISPATCHER)
def _handler_link_delete(self, ev):
"""
Handler for event.EventLinkDelete
Delete a link from the topology.
"""
try:
tmp_links = []
link_to_del = None
for l in self.switches[ev.link.src.dpid].links[ev.link.dst.dpid]:
if ((ev.link.src.dpid == l.link.src.dpid and ev.link.src.port_no == l.link.src.port_no)
and (ev.link.dst.dpid == l.link.dst.dpid and ev.link.dst.port_no == l.link.dst.port_no)):
link_to_del = l
else:
tmp_links.append(l)
if link_to_del:
self.switches[ev.link.src.dpid].links[ev.link.dst.dpid] = tmp_links
ev_tu = EventLinkDelete(link_to_del)
self.send_event_to_observers(ev_tu)
ev_tu = EventTopologyUpdate(self.switches)
self.send_event_to_observers(ev_tu)
self.logger.info("Link <" + str(hex(ev.link.src.dpid)) + ":" + ev.link.src.name + "> => <" +
str(hex(ev.link.dst.dpid)) + ":" + ev.link.dst.name +"> disappeared")
except KeyError:
pass
@set_ev_cls(RyuEventPortAdd, MAIN_DISPATCHER)
def _handler_port_add(self, ev):
"""
Overwrite the legacy Ryu Event.
"""
try:
switch = self.switches[ev.port.dpid]
if self._check_if_ap_port(ev.port):
ap_conf = self._get_ap_conf(switch, ev.port)
switch.is_ap = True
switch.ap_conf = ap_conf
self.accesspoints[ev.port.dpid] = switch
elif self._check_if_gw_port(ev.port):
gw_conf = self._get_gw_conf(switch, ev.port)
switch.is_gw = True
switch.gw_conf = gw_conf
self.gateways[ev.port.dpid] = switch
switch.switch = get_switch(self, ev.port.dpid)[0]
ev_tu = EventSwitchUpdate(switch)
self.send_event_to_observers(ev_tu)
self.logger.info("Port add: " + str(ev.port))
except KeyError:
pass
@set_ev_cls(RyuEventPortModify, MAIN_DISPATCHER)
def _handler_port_modify(self, ev):
"""
Overwrite the legacy Ryu Event.
"""
try:
switch = self.switches[ev.port.dpid]
switch.switch = get_switch(self, ev.port.dpid)[0]
ev_tu = EventSwitchUpdate(switch)
self.send_event_to_observers(ev_tu)
self.logger.info("Port modify: " + str(ev.port))
except KeyError:
pass
@set_ev_cls(RyuEventPortDelete, MAIN_DISPATCHER)
def _handler_port_delete(self, ev):
"""
Overwrite the legacy Ryu Event.
"""
try:
switch = self.switches[ev.port.dpid]
if self._check_if_ap_port(ev.port):
switch.is_ap = False
switch.ap_conf = AccessPointConf()
del self.accesspoints[ev.port.dpid]
elif self._check_if_gw_port(ev.port):
switch.is_gw = False
switch.gw_conf = GatewayConf()
del self.gateways[ev.port.dpid]
switch.switch = get_switch(self, ev.port.dpid)[0]
ev_tu = EventSwitchUpdate(switch)
self.send_event_to_observers(ev_tu)
self.logger.info("Port delete: " + str(ev.port))
except KeyError:
pass
| ODMM/openflow-dmm | nmm/nmm.py | nmm.py | py | 12,501 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "ryu.base.app_manager.RyuApp",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "ryu.base.app_manager",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "ryu.ofproto.ofproto_v1_3.OFP_VERSION",
"line_number": 45,
"usage_type": "attribute"... |
10453439788 | import torch.utils.data as data
import PIL.Image as Image
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import autograd, optim
from torchvision.transforms import transforms
from networks.cenet import CE_Net_
import cv2
import nibabel as nib
import time
x_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
y_transforms = transforms.ToTensor()
# 是否使用cuda
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#device = torch.device("cpu")
#liver 'ckp_challenge_only/CE_LITS_and_challenge_model_epoch138.pth'
#tumor 'ckp_challenge_only/CE_LITS_and_challenge_model_epoch142.pth'
def end_to_end(raw_path,liver_model_path,tumor_model_path):
volume = nib.load(raw_path).get_fdata()
volume = np.transpose(volume, [2,0,1])
liver_model = CE_Net_(3, 1).to(device)
liver_model.load_state_dict(torch.load(liver_model_path))
liver_model.to(device)
liver_model.eval()
liver_target = []
tumor_model = CE_Net_(3, 1).to(device)
tumor_model.load_state_dict(torch.load(tumor_model_path))
tumor_model.to(device)
tumor_model.eval()
tumor_target = []
for i in range(volume.shape[0]):
a = volume[i]
img_x = Image.fromarray(np.int16(np.array(volume[i])))
h,w = img_x.size
img_x = img_x.resize((512,512))
img_x = img_x.convert('RGB')
img_x = x_transforms(img_x)
img_x = img_x.to(device)
img_x = torch.unsqueeze(img_x, 0)
img_liver = liver_model(img_x)
img_tumor = tumor_model(img_x)
trann = transforms.ToPILImage()
img_liver = torch.squeeze(img_liver)
img_liver = img_liver.detach().cpu().numpy()
img_liver = np.transpose(img_liver,[1,2,0])
img_liver[img_liver>=0.5] = 255
img_liver = img_liver.astype(np.uint8)
img_liver = trann(img_liver)
img_liver = img_liver.convert('L')
img_liver = img_liver.resize((h,w))
img_liver = np.asarray(img_liver)
img_tumor = torch.squeeze(img_tumor)
img_tumor = img_tumor.detach().cpu().numpy()
img_tumor = np.transpose(img_tumor,[1,2,0])
img_tumor[img_tumor>=0.5] = 255
img_tumor = img_tumor.astype(np.uint8)
img_tumor = trann(img_tumor)
img_tumor = img_tumor.convert('L')
img_tumor = img_tumor.resize((h,w))
img_tumor = np.asarray(img_tumor)
#肝脏后处理
img_liver = img_liver.copy()
img_liver[img_tumor==255] = 255
kernel = np.ones((7,7),np.uint8)
img_liver[img_liver>=1] = 1
img_liver = cv2.erode(img_liver,kernel)#腐蚀
img_liver = cv2.dilate(img_liver,kernel,iterations = 1)#膨胀
img_liver[img_liver>=1] = 255
#肝脏后处理结束
#肿瘤后处理
img_tumor = img_tumor.copy()
img_tumor[img_liver!=255] = 0
#肿瘤后处理结束
img_liver[img_liver>=1] = 1
liver_target.append(img_liver)
img_tumor[img_tumor>=1] = 1
tumor_target.append(img_tumor)
liver_target = np.asarray(liver_target)
liver_target = np.transpose(liver_target, [1,2,0])
liver_nib = nib.Nifti1Image(liver_target, affine=np.eye(4))
tumor_target = np.asarray(tumor_target)
tumor_target = np.transpose(tumor_target, [1,2,0])
tumor_nib = nib.Nifti1Image(tumor_target, affine=np.eye(4))
return liver_nib,tumor_nib
def run():
liver_model_path = 'ckp_challenge_finally/liver_138.pth'
tumor_model_path = 'ckp_challenge_finally/tumor_142.pth'
liver_path = "D:/deep_learning/u_net_liver-test/liver"
ls = os.listdir(liver_path)
for i in range(len(ls)):
nii_path = os.path.join(liver_path,ls[i],"liver.nii")
liver_nib,tumor_nib = end_to_end(nii_path,liver_model_path,tumor_model_path)
save_liver = os.path.join(liver_path,ls[i],"liver_seg.nii")
save_tumor = os.path.join(liver_path,ls[i],"liver_nid.nii")
nib.save(liver_nib,save_liver)
nib.save(tumor_nib,save_tumor)
##处理一个病人10-15秒(GPU)
##处理一个病人169秒(CPU)
if __name__ == '__main__':
time_start = time.time()
liver_model_path = 'ckp_challenge_finally/liver_138.pth'
tumor_model_path = 'ckp_challenge_finally/tumor_142.pth'
nii_path = "liver/liver_1/liver.nii"
liver_nib,tumor_nib = end_to_end(nii_path,liver_model_path,tumor_model_path)
nib.save(liver_nib,"liver/liver_1/liver_seg.nii")
nib.save(tumor_nib,"liver/liver_1/liver_nid.nii")
time_end = time.time()
print(time_end-time_start) | 18792676595/U-Net-for-liver-tumor-segmentation | nii_to_endnii.py | nii_to_endnii.py | py | 4,742 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "torchvision.transforms.transforms.Compose",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.transforms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.transforms.ToTensor",
"line_number": 15,
... |
13649089846 | import json
from rest_framework import status
from api.constans import AutoNotificationConstants, TaskStageConstants, \
CopyFieldConstants
from api.models import *
from api.tests import GigaTurnipTestHelper, to_json
class CategoryTest(GigaTurnipTestHelper):
def test_list_categories(self):
products_category = Category.objects.create(
name="Producs"
)
e_commerce_category = Category.objects.create(
name="E-Commerce"
)
e_commerce_category.parents.add(self.category)
electronics_category = Category.objects.create(
name="Electronics"
)
pcs_category = Category.objects.create(
name="Personal computers"
)
pcs_devices_category = Category.objects.create(
name="Personal computers attributes."
)
pcs_mouses_category = Category.objects.create(
name="Mouses"
)
electronics_category.out_categories.add(pcs_category)
electronics_category.out_categories.add(pcs_devices_category)
pcs_devices_category.out_categories.add(pcs_mouses_category)
answer = [
{
'id': self.category.id,
'name': self.category.name,
'out_categories': [
e_commerce_category.id
]},
{
'id': e_commerce_category.id,
'name': e_commerce_category.name,
'out_categories': []},
{
'id': electronics_category.id,
'name': electronics_category.name,
'out_categories': [
pcs_category.id,
pcs_devices_category.id
]
},
{
'id': pcs_mouses_category.id,
'name': pcs_mouses_category.name,
'out_categories': []},
{
'id': pcs_category.id,
'name': pcs_category.name,
'out_categories': []
},
{
'id': pcs_devices_category.id,
'name': pcs_devices_category.name,
'out_categories': [pcs_mouses_category.id]
},
{
'id': products_category.id,
'name': products_category.name,
'out_categories': []
}
]
response = self.get_objects("category-list")
self.assertEqual(response.status_code, status.HTTP_200_OK)
content = to_json(response.content)
self.assertEqual(content["count"], Category.objects.count())
self.assertEqual(content["results"], answer)
| KloopMedia/GigaTurnip | api/tests/test_category.py | test_category.py | py | 2,712 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "api.tests.GigaTurnipTestHelper",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 80,
"usage_type": "name"
}... |
16284337583 | from os import environ as env
from setuptools import find_packages, setup
from setuptools.command.install import install
import sys
VERSION = "0.1.0"
with open("README.md", "r", encoding="utf-8") as rdm:
long_description = rdm.read()
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = 'verify that the git tag matches our version'
def run(self):
tag = env .get('CLIPENV_TAG')
if tag != VERSION:
info = f"Git tag: {tag} doesn't match with this version: {VERSION}"
sys.exit(info)
setup(
name="clipenv",
version=VERSION,
description="Attach your venv variables with a clip to your project",
long_description=long_description,
author="Ana Valeria Calderón Briz, Daniel Omar Vergara Pérez",
author_email="valerybriz@gmail.com, daniel.omar.vergara@gmail.com",
url="https://github.com/dany2691/clipenv",
license="MIT",
install_requires=[
"click",
"colored"
],
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
packages=find_packages(exclude=('tests',)),
entry_points={
"console_scripts": [
"clipenv=clipenv.__main__:clipenv"
]
},
cmdclass={'verify': VerifyVersionCommand}
)
| BentoBox-Project/clipenv | setup.py | setup.py | py | 1,715 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "setuptools.command.install.install",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sys.exit... |
12632788840 | # -*- coding: utf-8 -*-
#http://labs.eecs.tottori-u.ac.jp/sd/Member/oyamada/OpenCV/html/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
#ROSとOpenCVの競合を避ける
import sys
try:
py_path = sys.path
ros_CVpath = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if py_path[3] == ros_CVpath:
print("INFO : ROS and OpenCV are competing")
sys.path.remove(py_path[3])
except: pass
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
try:
import scipy.ndimage as ndimage
from scipy.optimize import curve_fit
except: pass
#+-----[トラックバー]------+#
t_init = False
#+-----[MedianBlur]-------+#
MB = True
#+-----[fill_holes]-------+#
fill_holes = False
#+-------[opening]--------+#
opening = True
#+-------[closing]--------+#
closing = True
#+----[特定色の消去]-------+#
ColorErase = False
#動画ファイルのパス
#Windowsは、コマンドライン引数を使用するときの
#絶対パスの指定が面倒くさいのでコメントアウトしておいてください
#videofile_path = 'test.mp4'
videofile_path = sys.argv[1]
#5x5のカーネル
kernel = np.ones((8,8),np.uint8)
'''
kernel = np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
'''
#トラックバーの初期設定
def callback(x):
pass
def Trackbars_init():
cv2.namedWindow('value')
cv2.createTrackbar('H_Hue','value',179,179,callback)
cv2.createTrackbar('L_Hue','value',0,179,callback)
cv2.createTrackbar('H_Saturation','value',255,255,callback)
cv2.createTrackbar('L_Saturation','value',0,255,callback)
cv2.createTrackbar('H_Value','value',255,255,callback)
cv2.createTrackbar('L_Value','value',0,255,callback)
def color_detect(hsv, img):
hsv_min = np.array([15,127,0])
hsv_max = np.array([240,255,255])
mask = cv2.inRange(hsv, hsv_min, hsv_max)
return mask
#ブロブ解析
def analysis_blob(binary_img):
label = cv2.connectedComponentsWithStats(binary_img)
n = label[0] - 1
data = np.delete(label[2], 0, 0)
center = np.delete(label[3], 0, 0)
maxblob = {}
if len(data[:, 4]) is 0:
max_index = None
maxblob["center"] = [0, 0]
else:
max_index = np.argmax(data[:, 4])
maxblob["upper_left"] = (data[:, 0][max_index], data[:, 1][max_index])
maxblob["width"] = data[:, 2][max_index]
maxblob["height"] = data[:, 3][max_index]
maxblob["area"] = data[:, 4][max_index]
maxblob["center"] = center[max_index]
return data, center, maxblob
#データ出力
def data_plot(data):
data_np = np.array(data)
if len(data_np) <= 0:
print("too many indices for array")
f = 0
x = 0
y = 0
else:
f = data_np[:,0]
x = data_np[:,1]
y = data_np[:,2]
print(f, x, y)
plt.rcParams["font.family"] = "Times New Roman"
plt.plot(f, x, "r-", label="x")
plt.plot(f, y, "b-", label="y")
plt.xlabel("Frame [num]", fontsize=16)
plt.ylabel("Position[px]", fontsize=16)
plt.grid()
plt.legend(loc=1, fontsize=16)
plt.show()
#トラックバーからコールバックを受け取り、値を返す
def trackbars():
lowH = cv2.getTrackbarPos('L_Hue', 'value')
highH = cv2.getTrackbarPos('H_Hue', 'value')
lowS = cv2.getTrackbarPos('L_Saturation', 'value')
highS = cv2.getTrackbarPos('H_Saturation', 'value')
lowV = cv2.getTrackbarPos('L_Value', 'value')
highV = cv2.getTrackbarPos('H_Value', 'value')
return (lowH, lowS, lowV, highH, highS, highV)
#======================================================================
def median_blar(image, size):
_image = cv2.medianBlur(image, size)
return _image
def Fill_Holes(image):
_image = ndimage.binary_fill_holes(image).astype(int) * 255
return _image
def Opening(image):
opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
return opening
def Closing(image):
closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
return closing
def color_eraser(image, RGBarray):
pass
#======================================================================
def resize_image(img, dsize, X, Y):
re_image = cv2.resize(img, dsize, fx=X, fy=Y)
return re_image
#メイン
def main():
    """Track the largest blob of a target HSV color range through a video.

    Reads frames from the module-level ``videofile_path``, thresholds them
    in HSV space (interactively via trackbars when ``t_init`` is set,
    otherwise with a hard-coded blue range), optionally cleans the mask,
    marks detected blobs, records the largest blob's center per frame, and
    finally hands the trajectory to ``data_plot``. Press 'q' to stop.
    """
    n = 0
    data = []
    cap = cv2.VideoCapture(videofile_path)
    if t_init is True:
        Trackbars_init()
    if cap.isOpened():
        print("INFO : The Video loaded successfully.")
    else:
        print("INFO : LOAD ERROR ***Chack video path or name.***")
        print("CAP : ", cap)
        exit()
    while(cap.isOpened()):
        ret, frame = cap.read()
        if frame is None:
            # End of stream (or decode failure).
            print("frame is None")
            break
        # Cropping step to narrow the recognition area of the image
        # (kept disabled).
        """
        h, w = frame.shape[:2]
        frame = frame[80:h, 0:w]
        """
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Edit here when hard-coding the HSV range.
        if t_init is True:
            Lh, Ls, Lv = trackbars()[:3]
            Hh, Hs, Hv = trackbars()[3:]
        else:
            # Blue
            Lh, Ls, Lv = (40, 40, 109)
            Hh, Hs, Hv = (121, 255, 255)
        hsv_min = np.array([Lh,Ls,Lv])
        hsv_max = np.array([Hh,Hs,Hv])
        """
        print( "H:{0} - {1}\nS:{2} - {3}\nV:{4} - {5}\n-------------"
        .format(Lh, Hh, Ls, Hs, Lv, Hv))
        """
        mask = cv2.inRange(hsv, hsv_min, hsv_max)
        # Optional mask clean-up stages, gated by module-level flags.
        if MB is True: mask = median_blar(mask, 3)
        #if fill_holes is True: mask = Fill_Holes(mask)
        if opening is True: mask = Opening(mask)
        if closing is True: mask = Closing(mask)
        if ColorErase is True: mask = color_eraser(mask, None)
        # analysis_blob: presumably returns (labels, centers, largest blob
        # info) — defined earlier in this file; TODO confirm.
        _, center, maxblob = analysis_blob(mask)
        #print("target num:",len(center))
        for i in center:
            cv2.circle(frame, (int(i[0]), int(i[1])), 10, (255, 0, 0),
                       thickness=-3, lineType=cv2.LINE_AA)
        center_x = int(maxblob["center"][0])
        center_y = int(maxblob["center"][1])
        print(center_x, center_y)
        cv2.circle(frame, (center_x, center_y), 30, (0, 200, 0),
                   thickness=3, lineType=cv2.LINE_AA)
        # Record (frame index, x, y) of the largest blob for plotting.
        data.append([n, center_x, center_y])
        re_frame=resize_image(frame, None, .4, .4)
        cv2.imshow("Frame", re_frame)
        mask = resize_image(mask, None, .4, .4)
        cv2.imshow("image_mask", mask)
        n += 1
        #print(n)
        print("----------")
        if cv2.waitKey(1000) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    data_plot(data)
# Run the tracker when executed as a script.
if __name__ == '__main__':
    main()
| TANUKIpro/color_tracker | hsv_supporter.py | hsv_supporter.py | py | 6,909 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.path.remove",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_numbe... |
5737721660 | import click
from screeps_loan import app
import screepsapi.screepsapi as screepsapi
from screeps_loan.models.db import get_conn
from screeps_loan.screeps_client import get_client
from screeps_loan.models import db
from screeps_loan.services.cache import cache
import screeps_loan.models.alliances as alliances
import screeps_loan.models.users as users
from random import shuffle, random
from time import sleep
import math
# Screeps power-level curve: reaching level i costs
# i**POWER_POW * POWER_MULTIPLY points; the tables below precompute the
# cumulative totals up to POWER_MAX levels.
POWER_POW = 1.15
POWER_MULTIPLY = 1000
POWER_MAX = 750
powertotals = [{'level': 0, 'total': 0}]
powerlevels = {}
total = 0
for i in range(0, POWER_MAX):
    needed = math.pow(i, POWER_POW) * POWER_MULTIPLY
    total += needed
    powertotals.append({
        'level': i,
        'total': total
    })
    powerlevels[i] = total
# Reversed (highest threshold first) so convertPowerToLevel can return the
# first level whose cumulative cost is below a given power total.
powertotals = list(reversed(powertotals))
@cache.cache('getUserControlPoints')
def getUserControlPoints(username):
    """Return the lifetime control points for *username*.

    Defaults to 1 when the profile is missing or has no 'gcl' field
    (1 control point maps to GCL level 1). Result is memoised.
    """
    profile = getUserInfo(username)
    if not profile:
        return 1
    if 'user' in profile:
        user_block = profile['user']
        if 'gcl' in user_block:
            return user_block['gcl']
    return 1
@cache.cache('getUserPowerPoints')
def getUserPowerPoints(username):
    """Return the lifetime power points for *username*, or 0 if unknown.

    Result is memoised by the cache decorator.
    """
    user_info = getUserInfo(username)
    if not user_info:
        # Bug fix: this branch returned 1 (copy/paste from
        # getUserControlPoints, where GCL legitimately floors at 1); a
        # player with no profile has earned no power, consistent with the
        # 0 returned below when the profile has no 'power' field.
        return 0
    if 'user' in user_info:
        if 'power' in user_info['user']:
            return user_info['user']['power']
    return 0
@cache.cache('getUserInfo')
def getUserInfo(username):
    # Look the user up through the Screeps API client. The 1s sleep is a
    # crude rate limit; the cache decorator memoises the result so each
    # username only costs one API round-trip.
    screeps = get_client()
    sleep(1)
    return screeps.user_find(username)
class Rankings(object):
    """Compute per-alliance ranking statistics (GCL, RCL, spawns, power)
    from the latest completed room import and store them in the
    ``rankings`` table under a new ``rankings_imports`` row.
    """

    # NOTE(review): apparently intended as a GCL memo, but never read or
    # written anywhere in this class — candidate for removal.
    gclcache = {}

    def run(self):
        """Generate one full rankings import covering every alliance."""
        alliance_query = alliances.AllianceQuery()
        all_alliances = alliance_query.getAll()
        alliances_names = [item["shortname"] for item in all_alliances]
        users_with_alliance = users.UserQuery().find_name_by_alliances(alliances_names)
        # Rank against the most recent completed room snapshot.
        query = "SELECT id FROM room_imports WHERE status LIKE 'complete' ORDER BY started_at DESC"
        result = db.find_one(query)
        self.room_import_id = result[0]
        self.conn = get_conn()
        self.start()
        print(self.id)
        for alliance in all_alliances:
            # NOTE(review): re-queries (and shadows) the full user list on
            # every iteration — could be hoisted out of the loop.
            users_with_alliance = self.find_name_by_alliances(alliances_names)
            members = [user['name'] for user in users_with_alliance if user['alliance'] == alliance['shortname']]
            # Only members that currently own at least one room count.
            filtered_members = [user for user in members if self.get_player_room_count(user) > 0]
            # Not enough members.
            if len(filtered_members) < 2:
                continue
            # Not enough rooms
            if self.get_room_count(alliance['shortname']) < 2:
                continue
            rcl = self.getAllianceRCL(alliance['shortname'])
            # combined_gcl: sum of each member's individual GCL level;
            # alliance_gcl: the level the alliance would have if all its
            # members' control points were pooled.
            combined_gcl = sum(self.getUserGCL(user) for user in filtered_members)
            control = sum(getUserControlPoints(user) for user in filtered_members)
            alliance_gcl = self.convertGcl(control)
            # Same split for power levels vs pooled power points.
            combined_power = sum(self.getUserPowerLevel(user) for user in filtered_members)
            power = sum(getUserPowerPoints(user) for user in filtered_members)
            alliance_power = self.convertPowerToLevel(power)
            spawns = self.getAllianceSpawns(alliance['shortname'])
            print('%s- %s, %s, %s, %s, %s, %s, %s' % (alliance['shortname'], combined_gcl, alliance_gcl, rcl, spawns, len(filtered_members), alliance_power, combined_power))
            self.update(alliance['shortname'], alliance_gcl, combined_gcl, rcl, spawns, len(filtered_members), alliance_power, combined_power)
        self.finish()
        self.conn.commit()

    def start(self):
        # Open a new import row and remember its id for later inserts.
        query = "INSERT INTO rankings_imports(status) VALUES ('in progress') RETURNING id"
        cursor = self.conn.cursor()
        cursor.execute(query)
        self.id = cursor.fetchone()[0]

    def finish(self):
        # Mark the import row opened by start() as complete.
        query = "UPDATE rankings_imports SET status='complete' WHERE id=(%s)"
        cursor = self.conn.cursor()
        cursor.execute(query, (self.id, ))

    def update(self, alliance, alliance_gcl, combined_gcl, rcl, spawns, members, alliance_power, combined_power):
        # Store info in db
        cursor = self.conn.cursor()
        query = "INSERT INTO rankings(import, alliance, alliance_gcl, combined_gcl, rcl, spawns, members, alliance_power, combined_power) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)"
        cursor.execute(query, (self.id, alliance, alliance_gcl, combined_gcl, rcl, spawns, members, alliance_power, combined_power))

    def getAllianceRCL(self, alliance):
        """Sum of room-controller levels across the alliance's rooms."""
        query = "SELECT SUM(level) FROM rooms, users WHERE rooms.owner = users.id AND users.alliance=%s AND rooms.import=%s"
        cursor = self.conn.cursor()
        cursor.execute(query, (alliance, self.room_import_id))
        result = cursor.fetchone()[0]
        if result is not None:
            return result
        return 0

    def getAllianceSpawns(self, alliance):
        """Estimate spawn count from room levels:
        RCL 8 rooms -> 3 spawns, RCL 7 -> 2, RCL 1-6 -> 1.
        """
        count = 0
        query = "SELECT COUNT(*) FROM rooms, users WHERE rooms.owner = users.id AND users.alliance=%s AND level>=8 AND rooms.import=%s"
        cursor = self.conn.cursor()
        cursor.execute(query, (alliance, self.room_import_id))
        result = cursor.fetchone()[0]
        if result is not None:
            if result:
                count += result*3
        query = "SELECT COUNT(*) FROM rooms, users WHERE rooms.owner = users.id AND users.alliance=%s AND level=7 AND rooms.import=%s"
        cursor = self.conn.cursor()
        cursor.execute(query, (alliance, self.room_import_id))
        result = cursor.fetchone()[0]
        if result is not None:
            if result:
                count += result*2
        query = "SELECT COUNT(*) FROM rooms, users WHERE rooms.owner = users.id AND users.alliance=%s AND level>=1 AND level<7 AND rooms.import=%s"
        cursor = self.conn.cursor()
        cursor.execute(query, (alliance, self.room_import_id))
        result = cursor.fetchone()[0]
        if result is not None:
            if result:
                count += result
        return count

    def convertGcl(self, control):
        # GCL level grows with the 1/2.4 power of control points
        # (scaled by 1M points); minimum level is 1.
        return int((control/1000000) ** (1/2.4))+1

    def getUserGCL(self, username):
        return self.convertGcl(getUserControlPoints(username))

    def convertPowerToLevel(self, power):
        # Walk the precomputed cumulative table (highest threshold first)
        # and return the first level whose total cost is below `power`.
        if power <= 0:
            return 0
        for powerdata in powertotals:
            if powerdata['total'] < power:
                return powerdata['level']
        return 0

    def getUserPowerLevel(self, username):
        return self.convertPowerToLevel(getUserPowerPoints(username))

    def find_name_by_alliances(self, alliance):
        # All users belonging to any of the given alliance shortnames.
        query = "SELECT ign, alliance FROM users where alliance = ANY(%s)"
        cursor = self.conn.cursor()
        cursor.execute(query, (alliance,))
        result = cursor.fetchall()
        return [{"name": row[0], "alliance": row[1]} for row in result]

    def get_room_count(self, alliance):
        # Distinct rooms owned by the alliance in the latest room import.
        query = '''
        SELECT COUNT(DISTINCT rooms.name)
        FROM rooms,users
        WHERE rooms.owner=users.id
        AND users.alliance=%s
        AND rooms.import = (SELECT id
                            FROM room_imports
                            ORDER BY id desc
                            LIMIT 1
                            );
        '''
        cursor = self.conn.cursor()
        cursor.execute(query, (alliance,))
        result = cursor.fetchone()
        return int(result[0])

    def get_player_room_count(self, player):
        # Distinct rooms owned by a single player in the latest import.
        query = '''
        SELECT COUNT(DISTINCT rooms.name)
        FROM rooms,users
        WHERE rooms.owner=users.id
        AND users.ign=%s
        AND rooms.import = (SELECT id
                            FROM room_imports
                            ORDER BY id desc
                            LIMIT 1
                            );
        '''
        cursor = self.conn.cursor()
        cursor.execute(query, (player,))
        result = cursor.fetchone()
        return int(result[0])
@app.cli.command()
def import_rankings():
    # Flask CLI entry point: run a full alliance rankings import.
    click.echo("Generating Rankings")
    r = Rankings()
    r.run()
@app.cli.command()
def import_user_rankings():
    """CLI command: refresh stored GCL/power points for users that need it.

    A user is refreshed when no GCL is stored yet, when they own at least
    one active room, or with a 5% random chance (so inactive players'
    records still age out slowly).
    """
    click.echo("Generating User Rankings")
    dbusers = users.get_all_users_for_importing()
    for dbuser in dbusers:
        # Only retrieve information if we don't have any or the player has some active rooms.
        if not dbuser['gcl'] or users.get_player_room_count(dbuser['ign']) > 0 or random() < 0.05:
            gcl = getUserControlPoints(dbuser['ign'])
            power = getUserPowerPoints(dbuser['ign'])
            print('%s has %s gcl and %s power' % (dbuser['ign'], gcl, power))
            # Reuse the values fetched above instead of calling the API
            # helpers a second time (previously only the memoising cache
            # made the duplicated calls harmless).
            users.update_gcl_by_user_id(dbuser['id'], gcl)
            users.update_power_by_user_id(dbuser['id'], power)
            sleep(1.5)
        else:
            print('Skipping user %s' % (dbuser['ign']))
| LeagueOfAutomatedNations/Screeps-LoAN | screeps_loan/cli/import_rankings.py | import_rankings.py | py | 9,032 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "math.pow",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "screeps_loan.services.cache.cache.cache",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "screeps_loan.services.cache.cache",
"line_number": 37,
"usage_type": "name"
},
{
... |
41096561337 | # imports
import datetime as dt
import os
import os.path as osp
import time
import numpy as np
from .global_imports.smmpl_opcodes import *
from .quickscanpat_calc import quickscanpat_calc
from .sop import sigmampl_boot
# params
# Quick-scan types that skip the "double initialisation" cold start.
_nodoubleinit_l = [
    'suncone'
]
# main func
def main(quickscantype=None, **quickscanparams):
    '''
    Run QUICKSCANTIMES quick-scan measurement cycles.

    quick scan pattern parameters are adjusted in their respective scripts,
    i.e. quickscanpat_calc.<quickscantype>
    But quick scanpattern type, bin resolution and shot averaging time are
    controlled in .params

    NOTE(review): the upper-case names (QUICKSCANTYPE, DIRCONFN,
    MPLDATADIR, DATEFMT, QUICKSCANLOG, SETLOGFN, QUICKSCANTIMES,
    QUICKSCANFILE, QUICKSCANWAITTIME) come from the star import of
    global_imports.smmpl_opcodes — confirm there.
    '''
    if not quickstype_check := False:  # placeholder removed; see below
        pass
    if not quickscantype:
        quickscantype = QUICKSCANTYPE
    # setting log file
    now = dt.datetime.now()
    mainlog = DIRCONFN(
        MPLDATADIR, DATEFMT.format(now), QUICKSCANLOG.format(now)
    )
    SETLOGFN(mainlog)
    today = dt.datetime.combine(dt.date.today(), dt.time())
    mainlognext_dt = today + dt.timedelta(1)  # start a new log the next day
    # determining whether to perform double initialisation
    if quickscantype in _nodoubleinit_l:
        coldstart_boo = False
    else:
        coldstart_boo = True
    # begin operation
    for i in range(QUICKSCANTIMES):
        print(f'starting {quickscantype} quickscan {i}...')
        # updating logfile (roll over to a fresh daily log at midnight)
        now = dt.datetime.now()
        if now >= mainlognext_dt:
            mainlog = DIRCONFN(
                MPLDATADIR, DATEFMT.format(now), QUICKSCANLOG.format(now)
            )
            SETLOGFN(mainlog)
            mainlognext_dt += dt.timedelta(1)
        # calculating scan pattern
        scanpat_a = quickscanpat_calc(quickscantype, **quickscanparams)
        # writing scan pattern to file
        now = dt.datetime.now()
        scanpatpar_dir = DIRCONFN(
            MPLDATADIR, DATEFMT.format(now)
        )
        if not osp.isdir(scanpatpar_dir):
            os.mkdir(scanpatpar_dir)
        scanpat_dir = DIRCONFN(
            scanpatpar_dir,
            QUICKSCANFILE.format(now, quickscantype)
        )
        print(f'writing quick scanpattern to: {scanpat_dir}')
        np.savetxt(scanpat_dir, scanpat_a,
                   fmt='%.2f', delimiter=', ', newline='\n\n')
        # beginning init and measurement; only the first cycle (for scan
        # types not in _nodoubleinit_l) performs a cold start
        sigmampl_boot(
            coldstart_boo=coldstart_boo, scanpat_dir=scanpat_dir,
            stdoutlog=mainlog
        )
        coldstart_boo = False
        # waiting between next measurement
        print(f'waittime: {QUICKSCANWAITTIME}min, next measurement time:'
              f'{dt.datetime.now() + dt.timedelta(minutes=QUICKSCANWAITTIME)}\n')
        time.sleep(QUICKSCANWAITTIME*60)
# testing
# Run the default quick-scan loop when executed directly.
if __name__ == '__main__':
    main()
| citypilgrim/smmpl_opcodes | quickscan_main.py | quickscan_main.py | py | 2,652 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.combine",
"line_number": 38,
"usage_type": "call"
},
{
"api_name":... |
36905506190 | import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from torchvision import transforms
from PIL import Image
from typing import List
from tqdm import tqdm
from two_tower_model.tower import TwoTowerModel
from two_tower_model.selectivesearch import selective_search, show_bbox
def adaptive_resize(batchOfImg: List[torch.Tensor]):
    """Zero-pad a list of CHW tensors to a common square size and stack them.

    The output has shape (N, C, S, S) where S is the largest height or
    width in the batch; each image is copied into the top-left corner of
    its padded slot. The channel count is taken from the first image.
    """
    side = max(max(img.shape[-2], img.shape[-1]) for img in batchOfImg)
    first = batchOfImg[0]
    padded = first.new_full((len(batchOfImg), first.shape[0], side, side), 0.0)
    for slot, img in zip(padded, batchOfImg):
        rows, cols = img.shape[1:]
        slot[..., :rows, :cols].copy_(img)
    return padded
def resized_img(in_dirs: List[str], out_dir, size):
    # Resize every image found in the input directories to `size`
    # (passed straight to torchvision Resize) and save it, keeping the
    # original filename, into out_dir.
    transform = transforms.Compose([transforms.Resize(size=size)])
    for in_dir in in_dirs:
        files = os.listdir(in_dir)
        bar = tqdm(files)
        bar.set_description(in_dir)
        for file in bar:
            img = Image.open(os.path.join(in_dir, file)).convert('RGB')
            img = transform(img)
            img.save(os.path.join(out_dir, file))
def random_crop(in_dirs: List[str], out_dir, repeat=1):
    # Augmentation helper: resize each image to 1280x1280, then save
    # `repeat` independent random 256x256 crops of it, suffixing the
    # filename with "-1", "-2", ... before the extension.
    # NOTE(review): file.replace('.', ...) rewrites EVERY dot, so names
    # containing extra dots produce odd output names — confirm inputs.
    transform = transforms.Compose([transforms.Resize(size=(1280, 1280)),
                                    transforms.RandomCrop(size=(256, 256))])
    for in_dir in in_dirs:
        files = os.listdir(in_dir)
        bar = tqdm(files)
        bar.set_description(in_dir)
        for file in bar:
            img = Image.open(os.path.join(in_dir, file)).convert('RGB')
            for i in range(repeat):
                img2 = transform(img)
                img2.save(os.path.join(out_dir, file.replace('.', '-{}.'.format(i + 1))))
def tSNE_visualize(gpu_accelerate=True):
    """Embed positive/negative image sets with TwoTowerModel and plot a
    2-D t-SNE scatter (positives colored 0, negatives colored 1).

    Side effect: saves the mean positive embedding to positive_mean.pth.
    NOTE(review): paths are hard-coded to a local machine, and
    model.eval() is never called — if TwoTowerModel contains BN/dropout
    the embeddings are computed in train mode; confirm intended.
    """
    device = torch.device('cuda:0' if gpu_accelerate and torch.cuda.is_available() else 'cpu')
    model = TwoTowerModel(model_dir=None).to(device)
    transform = transforms.Compose([transforms.Resize(size=(256, 256)),
                                    transforms.ToTensor()])
    model.load_state_dict(torch.load(r'D:\Project\PyCharmProjects\WFGN\weight\tower.pth'))
    pos_dir = r'D:\Training Dataset\FurGenTMP\positive'
    neg_dir = r'D:\Training Dataset\FurGenTMP\negative'
    embeddings = []
    pos_count, neg_count = 0, 0
    with torch.no_grad():
        for img in tqdm(os.listdir(pos_dir)):
            image_tensor = transform(Image.open(os.path.join(pos_dir, img)).convert('RGB')).unsqueeze(dim=0).to(device)
            embedding = model(image_tensor).squeeze(dim=0)
            embeddings.append(embedding.cpu().numpy())
            pos_count += 1
        # Mean positive embedding, persisted for later nearest-centroid use.
        positive_mean = torch.tensor(np.array(embeddings))
        positive_mean = torch.mean(positive_mean, dim=0)
        print(positive_mean.shape)
        torch.save(positive_mean, r'D:\Project\PyCharmProjects\WFGN\weight\positive_mean.pth')
        for img in tqdm(os.listdir(neg_dir)):
            image_tensor = transform(Image.open(os.path.join(neg_dir, img)).convert('RGB')).unsqueeze(dim=0).to(device)
            embedding = model(image_tensor).squeeze(dim=0)
            embeddings.append(embedding.cpu().numpy())
            neg_count += 1
    tsne = TSNE(n_components=2)
    xy = tsne.fit_transform(embeddings)
    x = xy[:, 0]
    y = xy[:, 1]
    # 0 = positive, 1 = negative (relies on positives preceding negatives).
    c = [0 for _ in range(pos_count)] + [1 for _ in range(neg_count)]
    plt.scatter(x, y, s=1, c=c)
    plt.show()
if __name__ == '__main__':
    # Usage examples kept for reference — uncomment the call you need.
    '''
    resized_img(in_dirs=[r'D:\Training Dataset\FurGenTMP\新建文件夹'],
                out_dir=r'D:\Training Dataset\FurGenTMP', size=1200)
    '''
    '''
    random_crop(in_dirs=['D:\\Training Dataset\\FurGen\\2'],
                out_dir='D:\\Training Dataset\\FurGenTMP\\2333', repeat=2)
    '''
    # show_bbox(Image.open(r'D:\Training Dataset\FurGenTMP\新建文件夹\yao.jpg').convert('RGB'))
| RickyDoge/WFGN | dataset/data_prepare.py | data_prepare.py | py | 4,068 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Comp... |
927501499 | from __future__ import print_function
import argparse
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
import os
import json
from tqdm import tqdm
import models.dcgan as dcgan
import models.mlp as mlp
def generate_samples(args):
    """Load a trained WGAN generator and write num_samples images to disk.

    NOTE(review): requires args to carry weights_path (a pathlib.Path,
    given the .parent.name access), output_dir, and the generator
    hyper-parameters (imageSize, nz, nc, ngf, noBN, ngpu, mlp_G,
    n_extra_layers, invert_p) read via vars(args) — confirm the caller
    supplies them, since the CLI parser below defines only num_samples
    and cuda.
    NOTE(review): generation runs without netG.eval() or torch.no_grad();
    BatchNorm generators will use batch statistics and gradients are
    tracked — confirm intended.
    """
    set_seed = 42
    # Fix all RNG seeds for reproducible sampling.
    random.seed(set_seed)
    torch.manual_seed(set_seed)
    torch.cuda.manual_seed(set_seed)
    torch.cuda.manual_seed_all(set_seed)
    #with open(args.params_path, 'r') as gencfg:
    #    generator_config = json.loads(gencfg.read())
    generator_config = vars(args)
    imageSize = generator_config["imageSize"]
    nz = generator_config["nz"]
    nc = generator_config["nc"]
    ngf = generator_config["ngf"]
    noBN = generator_config["noBN"]
    ngpu = generator_config["ngpu"]
    mlp_G = generator_config["mlp_G"]
    n_extra_layers = generator_config["n_extra_layers"]
    invert_p = generator_config["invert_p"]
    # Pick the generator architecture matching the training configuration.
    if noBN:
        netG = dcgan.DCGAN_G_nobn(imageSize, nz, nc, ngf, ngpu, n_extra_layers)
    elif mlp_G:
        netG = mlp.MLP_G(imageSize, nz, nc, ngf, ngpu)
    else:
        netG = dcgan.DCGAN_G(imageSize, nz, nc, ngf, ngpu, n_extra_layers)
    # initialize noise
    fixed_noise = torch.FloatTensor(args.num_samples, nz, 1, 1).normal_(0, 1)
    if args.cuda:
        netG.load_state_dict(torch.load(args.weights_path)['netG_state_dict'])
        netG.cuda()
        fixed_noise = fixed_noise.cuda()
    else:
        netG.load_state_dict(torch.load(args.weights_path, map_location=torch.device('cpu'))['netG_state_dict'])
    fake = netG(fixed_noise)
    # Map tanh output range [-1, 1] to image range [0, 1].
    fake.data = fake.data.mul(0.5).add(0.5)
    folder_name = args.weights_path.parent.name
    save_dir = str(args.output_dir / f"wgan_{folder_name}")
    print(f"Saving on {save_dir}")
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for i in tqdm(range(args.num_samples)):
        vutils.save_image(fake.data[i, ...].reshape((1, nc, imageSize, imageSize)), os.path.join(save_dir, "generated_%02d.png"%i))
if __name__=="__main__":
parser = argparse.ArgumentParser()
#parser.add_argument('-c', '--params_path', required=True, type=str, help='path to generator config .json file')
#parser.add_argument('-w', '--weights_path', required=True, type=str, help='path to generator weights .pth file')
#parser.add_argument('-o', '--output_dir', required=True, type=str, help="path to to output directory")
parser.add_argument('-n', '--num_samples', type=int, help="number of images to generate", default=1)
parser.add_argument('--cuda', action='store_true', help='enables cuda')
args = parser.parse_args()
generate_samples(args)
| coimbra574/Projeto_IA376 | src/data/generate_samples_WGAN.py | generate_samples_WGAN.py | py | 2,902 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "random.seed",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
1918061715 | #!/usr/bin/env python
# coding=utf-8
"""
TBW
"""
from __future__ import absolute_import
from __future__ import print_function
from collections import MutableSequence
import io
import os
import random
from ipapy.compatibility import to_str
from ipapy.compatibility import to_unicode_string
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2016, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__email__ = "alberto@albertopettarin.it"
class LexiconEntry(object):
    """A single lexicon row: a key (word) and its value.

    The entry is marked *valid* only when the raw input provided both
    the key and the value field.
    """

    def __init__(self, raw_values, lowercase=False):
        self.key = None
        self.value = None
        self.valid = False
        self._set_key(raw_values=raw_values, lowercase=lowercase)
        self._set_value(raw_values=raw_values, lowercase=lowercase)

    def _set_key(self, raw_values, lowercase):
        # The first raw field is the key, optionally case-folded.
        if raw_values:
            key = raw_values[0]
            self.key = key.lower() if lowercase else key

    def _set_value(self, raw_values, lowercase):
        # The second raw field, when present, is the value and makes
        # the entry valid.
        if len(raw_values) > 1:
            self.valid = True
            self.value = raw_values[1]
class Lexicon(MutableSequence):
    """A mutable sequence of LexiconEntry objects with a key -> positions
    index (``__ordered_dict``) enabling entries_for_key() lookups without
    a linear scan.
    """

    ENTRY_TYPE = LexiconEntry

    def __init__(self, entries=None, lowercase=False):
        self.__ordered_dict = None
        self.entries = [] if entries is None else entries
        self.lowercase = lowercase
        self.train_lexicon = None
        self.test_lexicon = None
        self._update_ordered_dict()

    def _update_ordered_dict(self):
        # Rebuild the key -> [positions] index from scratch.
        self.__ordered_dict = dict()
        for i, e in enumerate(self.entries):
            self._add_entry_to_ordered_dict(i, e)

    def _add_entry_to_ordered_dict(self, i, e):
        k = e.key
        try:
            self.__ordered_dict[k].append(i)
        except KeyError:
            self.__ordered_dict[k] = [i]

    def _remove_entry_from_ordered_dict(self, i):
        # Drop position i from the index; delete the key once no entry
        # carries it.
        # Bug fix: the original searched the positions list for the *key*
        # (``del l[l.index(k)]``), which always raised ValueError because
        # the list holds integer positions, not keys.
        k = self[i].key
        l = self.__ordered_dict[k]
        l.remove(i)
        if len(l) == 0:
            del self.__ordered_dict[k]

    def __str__(self):
        return to_str(u"".join([e.__str__() for e in self.entries]))

    def __unicode__(self):
        return u"".join([e.__unicode__() for e in self.entries])

    def __repr__(self):
        return u"\n".join([e.__repr__() for e in self.entries])

    def __iter__(self):
        for e in self.entries:
            yield e

    def __len__(self):
        return len(self.entries)

    def __getitem__(self, i):
        return self.entries[i]

    def __delitem__(self, i):
        del self.entries[i]
        # Bug fix: removing an entry shifts the positions of every entry
        # after it, so the whole index must be rebuilt — the previous
        # single-position removal left the remaining indices stale.
        self._update_ordered_dict()

    def _check(self, value):
        # Reject anything that is not an ENTRY_TYPE instance.
        if not isinstance(value, self.ENTRY_TYPE):
            raise TypeError(u"Objects stored in this lexicon must have type '%s'. (Got: '%s')" % (self.ENTRY_TYPE, type(value)))

    def __setitem__(self, i, value):
        self._check(value)
        self._remove_entry_from_ordered_dict(i)
        self.entries[i] = value
        self._add_entry_to_ordered_dict(i, value)

    def insert(self, i, value):
        self._check(value)
        self.entries.insert(i, value)
        self._update_ordered_dict()

    @property
    def keys(self):
        # Keys of all entries, duplicates included.
        return [e.key for e in self]

    @property
    def unique_keys(self):
        return list(self.__ordered_dict.keys())

    @property
    def has_unique_entries(self):
        return len(self) == len(self.unique_keys)

    def entries_for_key(self, key):
        # All entries stored under ``key`` (empty list if absent).
        try:
            return [self[i] for i in self.__ordered_dict[key]]
        except:
            return []

    def read_file(self, lexicon_file_path, comment=u"#", delimiter=u"\t", indices=(0, 1)):
        """Load entries from a delimited UTF-8 text file.

        Lines starting with ``comment`` are skipped; ``indices`` selects
        which columns become the (key, value) raw fields.
        """
        if (lexicon_file_path is None) or (not os.path.isfile(lexicon_file_path)):
            raise ValueError("The lexicon file path must exist. (Got '%s')" % lexicon_file_path)
        comment = to_unicode_string(comment)
        delimiter = to_unicode_string(delimiter)
        with io.open(lexicon_file_path, "r", encoding="utf-8") as lexicon_file:
            for line in lexicon_file:
                line = line.strip()
                if (comment is not None) and line.startswith(comment):
                    # commented line, skip
                    pass
                else:
                    acc = line.split(delimiter)
                    if len(acc) > 0:
                        self.entries.append(
                            self.ENTRY_TYPE(raw_values=[acc[i] for i in indices], lowercase=self.lowercase)
                        )
        self._update_ordered_dict()

    def shuffle(self):
        """Return a new, shuffled copy of this lexicon."""
        copy = list(self.entries)
        random.shuffle(copy)
        cls = type(self)
        # Bug fix: the original passed an ``entry_type`` keyword that
        # __init__ does not accept, raising TypeError on every call.
        return cls(entries=copy, lowercase=self.lowercase)

    def shuffle_and_partition(self, size=0.9, store=False):
        """Shuffle and split into (train, test) lexica.

        ``size`` is either an absolute entry count or a fraction in
        [0.0, 1.0]. With ``store`` the split is kept on the instance
        instead of being returned.
        """
        copy = list(self.entries)
        random.shuffle(copy)
        if isinstance(size, int) and size > len(copy):
            raise ValueError(u"The given size (%d) exceeds the number of entries (%d)." % (size, len(copy)))
        if isinstance(size, float):
            if (size < 0.0) or (size > 1.0):
                raise ValueError(u"The size, when expressed as a fraction, must be in [0.0, 1.0]. (Got: '%.3f')" % size)
            size = int(len(copy) * size)
        cls = type(self)
        ret = (
            cls(entries=copy[0:size], lowercase=self.lowercase),
            cls(entries=copy[size:], lowercase=self.lowercase),
        )
        if not store:
            return ret
        self.train_lexicon, self.test_lexicon = ret
| pettarin/wiktts | wiktts/lexicon.py | lexicon.py | py | 5,575 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "collections.MutableSequence",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "ipapy.compatibility.to_str",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 139,
"usage_type": "call"
},
{
"api_name":... |
26025847015 | from inspect import getcomments
from pickle import FALSE, TRUE
import ee
import geemap
import datetime
import pandas as pd
import shapely.wkt
import multiprocessing
ee.Initialize()
# NOTE(review): `min`/`max` here are the Python builtins used as dict
# keys — for Earth Engine visualization params they should almost
# certainly be the strings 'min'/'max'; confirm (this dict is unused in
# the visible code path).
visParamsTrue = {'bands': ['B4', 'B3', 'B2'], min: 0, max: 2000}
# s2cloudless thresholds (percent / reflectance / km / pixels).
CLOUD_FILTER = 50
CLD_PRB_THRESH = 50
NIR_DRK_THRESH = 0.15
CLD_PRJ_DIST = 1
BUFFER = 5
def transformPolygon(polygon):
    """Convert a WKT POLYGON string into a list of [lon, lat] float pairs.

    Round-trips the text through shapely to normalise the formatting,
    strips the 'POLYGON ((' prefix and '))' suffix, then parses the
    comma-separated coordinate list.
    """
    normalized = shapely.wkt.dumps(shapely.wkt.loads(polygon))
    coord_text = normalized[10:-2]
    pairs = []
    for token in coord_text.split(","):
        fields = [f for f in token.split(" ") if f]
        pairs.append([float(f) for f in fields])
    return pairs
def get_s2_sr_cld_col(geom, start_date, end_date):
    # Build a Sentinel-2 surface-reflectance collection over `geom` and
    # the date window, joined with the matching s2cloudless probability
    # image (attached as the 'l2cloudless' property of each image).
    s2_sr_col = (ee.ImageCollection('COPERNICUS/S2_SR')
                 .filterBounds(geom)
                 .filterDate(start_date, end_date)
                 .sort('CLOUD_COVERAGE_ASSESSMENT'))
    # .filter(ee.Filter.lte('CLOUDY_PIXEL_PERCENTAGE', CLOUD_FILTER)))
    s2_cloudless_col = (ee.ImageCollection('COPERNICUS/S2_CLOUD_PROBABILITY')
                        .filterBounds(geom)
                        .filterDate(start_date, end_date))
    # Join on system:index so each SR image carries its cloud-probability
    # counterpart under the 's2cloudless' property.
    return ee.ImageCollection(ee.Join.saveFirst('s2cloudless').apply(
        primary=s2_sr_col,
        secondary=s2_cloudless_col,
        condition=ee.Filter.equals(
            leftField='system:index',
            rightField='system:index'
        )
    ))
def add_cloud_bands(img):
    # Add the s2cloudless 'probability' band and a binary 'clouds' band
    # (probability > CLD_PRB_THRESH) to the image.
    cld_prb = ee.Image(img.get('s2cloudless')).select('probability')
    is_cloud = cld_prb.gt(CLD_PRB_THRESH).rename('clouds')
    return img.addBands(ee.Image([cld_prb, is_cloud]))
def add_shadow_bands(img):
    # Identify potential cloud shadows: dark non-water NIR pixels that lie
    # along the cloud projection direction (opposite the solar azimuth).
    not_water = img.select('SCL').neq(6)
    SR_BAND_SCALE = 1e4
    dark_pixels = img.select('B8').lt(
        NIR_DRK_THRESH*SR_BAND_SCALE).multiply(not_water).rename('dark_pixels')
    # Direction clouds project their shadows (degrees clockwise from N).
    shadow_azimuth = ee.Number(90).subtract(
        ee.Number(img.get('MEAN_SOLAR_AZIMUTH_ANGLE')))
    cld_proj = (img.select('clouds').directionalDistanceTransform(shadow_azimuth, CLD_PRJ_DIST*10)
                # .reproject({'crs': img.select(0).projection(), 'scale': 100})
                .select('distance')
                .mask()
                .rename('cloud_transform'))
    # Shadows = dark pixels intersecting the projected cloud footprint.
    shadows = cld_proj.multiply(dark_pixels).rename('shadows')
    return img.addBands(ee.Image([dark_pixels, cld_proj, shadows]))
def add_cld_shdw_mask(img):
    # Combine cloud and shadow bands into a single buffered 'cloudmask'
    # band (1 = cloud or shadow).
    img_cloud = add_cloud_bands(img)
    img_cloud_shadow = add_shadow_bands(img_cloud)
    is_cld_shdw = img_cloud_shadow.select('clouds').add(
        img_cloud_shadow.select('shadows')).gt(0)
    # Morphological clean-up: erode speckles, then dilate by BUFFER.
    is_cld_shdw2 = (is_cld_shdw.focal_min(2).focal_max(BUFFER*2/20)
                    # .reproject({'crs': img.select([0]).projection(), 'scale': 20})
                    .rename('cloudmask'))
    return img_cloud_shadow.addBands(is_cld_shdw2)
def apply_cld_shdw_mask(img):
    # Keep only the reflectance bands (B*), masked to cloud/shadow-free
    # pixels according to the 'cloudmask' band.
    not_cld_shdw = img.select('cloudmask').Not()
    return img.select('B.*').updateMask(not_cld_shdw)
def calculate_ndvi_ndwi(image_masked, geom):
    """Compute NDWI/NDVI means and flood/drought/vegetation areas (m^2)
    for a cloud-free Sentinel-2 image over `geom`.

    Thresholds: NDWI >= 0 -> flood, < 0 -> drought;
    NDVI < 0.17 -> flood, 0.17..0.45 -> drought, > 0.45 -> vegetation.
    Returns a dict of the six area totals plus the two region means.
    """
    ndwi = image_masked.normalizedDifference(['B3', 'B8']).rename('NDWI')
    ndvi = image_masked.normalizedDifference(['B8', 'B4']).rename('NDVI')
    # print('NDWI')
    avg_ndwi_mosaic = ndwi.reduceRegion(
        ee.Reducer.mean(), geom, 10).get('NDWI').getInfo()
    # max = ndwi.reduceRegion(ee.Reducer.max(), geom, 10).get('NDWI').getInfo()
    # min = ndwi.reduceRegion(ee.Reducer.min(), geom, 10).get('NDWI').getInfo()
    # print('avg', avg_ndwi_mosaic, 'min', min, 'max', max)
    # NOTE(review): `min`/`max` below are Python builtins used as dict
    # keys — visualization params normally use 'min'/'max' strings; this
    # dict is unused, confirm before relying on it.
    ndwiParams = {min: -1, max: 1, 'palette': ['8E3200', '#A64B2A', 'D7A86E',
                  '#FFEBC1', '#E3CAA5', '#d7efa9', '#9ecae1', '#4292c6', '#2171b5', '#08519c']}
    # if (avg_ndwi_mosaic >= -1 and avg_ndwi_mosaic <= -0.3):
    #     print("NDWI result: Drought")
    # elif(avg_ndwi_mosaic >= -0.3 and avg_ndwi_mosaic <= 0.0):
    #     print("NDWI result: Moderately Drought")
    # elif(avg_ndwi_mosaic >= 0.0 and avg_ndwi_mosaic <= 0.2):
    #     print("NDWI result: Flood")
    # else:
    #     print("NDWI result: Water surface")
    # Flooded area by NDWI: pixels at/above the water threshold.
    flood_threshold_ndwi = 0
    flood_ndwi = ndwi.gte(flood_threshold_ndwi).selfMask()
    flood_pixel_area_ndwi = flood_ndwi.multiply(ee.Image.pixelArea())
    flood_area_ndwi = flood_pixel_area_ndwi.reduceRegion(**{
        'reducer': ee.Reducer.sum(),
        'geometry': geom,
        'scale': 10
    })
    # print(flood_area_ndwi.getInfo(), 'flood area ndwi')
    drought_threshold1_ndwi = 0
    drought_ndwi = ndwi.lt(drought_threshold1_ndwi).selfMask()
    drought_pixel_area_ndwi = drought_ndwi.multiply(ee.Image.pixelArea())
    drought_area_ndwi = drought_pixel_area_ndwi.reduceRegion(**{
        'reducer': ee.Reducer.sum(),
        'geometry': geom,
        'scale': 10
    })
    # print(drought_area_ndwi.getInfo(), 'drought area ndwi')
    # print('NDVI')
    avg_ndvi_mosaic = ndvi.reduceRegion(
        ee.Reducer.mean(), geom, 10).get('NDVI').getInfo()
    # max = ndvi.reduceRegion(ee.Reducer.max(), geom, 10).get('NDVI').getInfo()
    # min = ndvi.reduceRegion(ee.Reducer.min(), geom, 10).get('NDVI').getInfo()
    # print('avg', avg_ndvi_mosaic, 'min', min, 'max', max)
    ndviParams = {min: -1, max: 1, 'palette': ['#08306b', '#0850d1', '#2171b5',
                  '#4292c6', '#9ecae1', '#b8d4e1', '#d7a86e', '#e9efc0', '#83bd75', '#4e944f']}
    # if (avg_ndvi_mosaic >= 0.1 and avg_ndvi_mosaic <= 0.4):
    #     print("NDVI result: Drought")
    # elif(avg_ndvi_mosaic > 0.4 and avg_ndvi_mosaic <= 1):
    #     print("NDVI result: Vegetation")
    # elif(avg_ndvi_mosaic >= -1 and avg_ndvi_mosaic < 0.1):
    #     print("NDVI result: Flood")
    flood_threshold_ndvi = 0.17
    flood_ndvi = ndvi.lt(flood_threshold_ndvi).selfMask()
    flood_pixel_area_ndvi = flood_ndvi.multiply(ee.Image.pixelArea())
    flood_area_ndvi = flood_pixel_area_ndvi.reduceRegion(**{
        'reducer': ee.Reducer.sum(),
        'geometry': geom,
        'scale': 10
    })
    # print(flood_area_ndvi.getInfo(), 'flood area ndvi')
    drought_threshold1_ndvi = 0.17
    drought_threshold2_ndvi = 0.45
    drought_ndvi = ndvi.gte(drought_threshold1_ndvi).And(
        ndvi.lte(drought_threshold2_ndvi)).selfMask()
    drought_pixel_area_ndvi = drought_ndvi.multiply(ee.Image.pixelArea())
    drought_area_ndvi = drought_pixel_area_ndvi.reduceRegion(**{
        'reducer': ee.Reducer.sum(),
        'geometry': geom,
        'scale': 10
    })
    # print(drought_area_ndvi.getInfo(), 'drought area ndvi')
    vegetation_threshold1_ndvi = 0.45
    vegetation_ndvi = ndvi.gt(vegetation_threshold1_ndvi).selfMask()
    vegetation_pixel_area_ndvi = vegetation_ndvi.multiply(ee.Image.pixelArea())
    vegetation_area_ndvi = vegetation_pixel_area_ndvi.reduceRegion(**{
        'reducer': ee.Reducer.sum(),
        'geometry': geom,
        'scale': 10
    })
    # print(vegetation_area_ndvi.getInfo(), 'vegetation area ndvi')
    return {'flood_area_ndwi': flood_area_ndwi.getInfo()['NDWI'], 'drought_area_ndwi': drought_area_ndwi.getInfo()['NDWI'], 'avg_ndwi_mosaic':avg_ndwi_mosaic, 'flood_area_ndvi': flood_area_ndvi.getInfo()['NDVI'], 'drought_area_ndvi': drought_area_ndvi.getInfo()['NDVI'], 'vegetation_area_ndvi': vegetation_area_ndvi.getInfo()['NDVI'],'avg_ndvi_mosaic':avg_ndvi_mosaic}
def compute_NDVI_NDWI(geom, input_date):
    """Compute the NDVI/NDWI metric dict for a WKT polygon and date.

    Returns (metrics_dict, epoch_ms) on a fully cloud-free image,
    (0, 0) when the best image within 5 days is partially cloudy, and
    ('error', 0) on any exception.
    NOTE(review): the bare ``except`` clauses swallow everything,
    including programming errors — consider narrowing.
    """
    try:
        rawPolygon = ee.Geometry.Polygon(transformPolygon(geom))
        START_DATE = datetime.datetime.strptime(input_date, "%Y-%m-%d")
        START_DATE = ee.Date(START_DATE)
        END_DATE = START_DATE.advance(5, 'days')
        s2_sr_cld_col_eval = get_s2_sr_cld_col(
            rawPolygon, START_DATE, END_DATE)
        img = s2_sr_cld_col_eval
        try:
            # Acquisition timestamp of the last image in the window.
            epochtime = img.aggregate_array('system:time_start').getInfo()[-1]
        except:
            epochtime = 0
        img = s2_sr_cld_col_eval.map(add_cld_shdw_mask)
        img = img.first()
        image_masked = apply_cld_shdw_mask(img)
        # Compare pixel sums before/after cloud masking: equality means no
        # pixel was masked, i.e. the scene is fully cloud-free over geom.
        area_after_masking = image_masked.reduceRegion(
            ee.Reducer.sum(), rawPolygon, 10).get('B1').getInfo()
        # print(area_after_masking, 'Area after masking within 5 days')
        area_before_masking = img.reduceRegion(
            ee.Reducer.sum(), rawPolygon, 10).get('B1').getInfo()
        # print(area_before_masking, 'Area before masking within 5 days')
        if(area_after_masking == area_before_masking):
            calc_ndvi_ndwi = calculate_ndvi_ndwi(image_masked, rawPolygon)
            return calc_ndvi_ndwi, epochtime
        else:
            return 0, 0
    except:
        return 'error', 0
def compute(dataframe):
    """Fill the NDVI/NDWI metric columns and `sat_time` for every row.

    Each row supplies a WKT polygon ('geom') and a damage date
    ('warranty_dmgdate'). Metric cells get the stringified value on
    success and "-" when the scene was cloudy or errored; `sat_time`
    gets the acquisition date, "Cloudy", or "Calculation Error".
    Returns the mutated dataframe (used by multiprocessing.Pool.map).
    """
    # All metric columns written per row — collapses the previously
    # hand-duplicated at-assignments into one loop.
    metric_keys = (
        'flood_area_ndwi', 'drought_area_ndwi', 'avg_ndwi_mosaic',
        'flood_area_ndvi', 'drought_area_ndvi', 'vegetation_area_ndvi',
        'avg_ndvi_mosaic',
    )
    for index, row in dataframe.iterrows():
        print("Working On index: ", index)
        result, epochtime = compute_NDVI_NDWI(
            row['geom'], row['warranty_dmgdate'])
        if result != 0 and result != 'error':
            for key in metric_keys:
                dataframe.at[index, key] = str(result[key])
        else:
            for key in metric_keys:
                dataframe.at[index, key] = "-"
        if epochtime != 0 and result != 'error':
            # epochtime is in milliseconds since the epoch.
            normalTime = datetime.datetime.fromtimestamp(
                epochtime/1000).strftime("%Y-%m-%d")
            dataframe.at[index, 'sat_time'] = normalTime
        elif result == 'error':
            dataframe.at[index, 'sat_time'] = "Calculation Error"
        else:
            dataframe.at[index, 'sat_time'] = "Cloudy"
    return dataframe
# geom = ee.Geometry.Polygon([[102.5633930333, 15.3911126506452], [102.5633412905, 15.3911006621863], [102.5632589747, 15.3910814856279], [102.5630058721, 15.3910283104296], [
# 102.5628613992, 15.3909717388604], [102.5629181022, 15.3884164751931], [102.5631823462, 15.3885207260593], [102.5635976384, 15.3886884700794], [102.5633930333, 15.3911126506452]])
# START_DATE = ee.Date('2021-5-5')
if __name__ == "__main__":
df = pd.read_csv('./raw_data_800.csv', header=0)
num_processes = multiprocessing.cpu_count()
chunk_size = df.shape[0] if(
int(df.shape[0]/num_processes) == 0) else int(df.shape[0]/num_processes)
chunks = [df.iloc[df.index[i:i + chunk_size]]
for i in range(0, df.shape[0], chunk_size)]
pool = multiprocessing.Pool(processes=num_processes)
result = pool.map(compute, chunks)
df2 = pd.DataFrame()
for i in range(len(result)):
df2 = pd.concat([df2, result[i]], axis=0)
df2.to_csv(r'C:\Users\geoma\Downloads\ee\ee\testndvindwi.csv',
index=False, header=True)
| aakashthapa22/Parcel-Level-Flood-and-Drought-Detection-using-AI | NDVI and NDWI on Sentinel-2A images/NDVINDWI.py | NDVINDWI.py | py | 11,646 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "ee.Initialize",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "shapely.wkt.wkt.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "shapely.wkt.wkt",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "shapely.wkt",... |
10121361892 | import socket
import numpy as np
from gym import spaces
import pickle
#import paramiko
import copy
import os
class DCS_env:
    """Gym-style environment that talks to a remote DCS flight-sim client.

    Observations and actions are exchanged through two shared text files
    (``observation.txt`` / ``action.txt``) that each side polls; the TCP
    socket path below is kept only as commented-out history.
    """
    def __init__(self, host_ip='192.168.3.37', host_port=30000, size=1024):
        # Tunable dimensions of the observation/action vectors.
        self.state_dim = 12
        self.action_dim = 7
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(self.state_dim,), dtype=np.float32)
        self.action_space = spaces.Box(low=-1, high=1, shape=(self.action_dim,), dtype=np.float32)
        self.HOST = host_ip
        self.PORT = host_port
        self.BufferSize = size
        # socket.AF_INET (IPV4)
        # socket.SOCK_STREAM (TCP)
        #s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Bind the client IP address and port number.
        #s.bind((self.HOST, self.PORT))
        # Maximum number of queued connections.
        #s.listen(3)
        # Block until a client request completes the connection, then
        # obtain the data and the client's address information.
        #self.conn, self.addr = s.accept()
        #client = paramiko.SSHClient
        # Files shared with the simulator client for message passing.
        self.recvfile_path = '/home/tete/work/RLP2021/server/transfile/observation.txt'
        self.sendfile_path = '/home/tete/work/RLP2021/server/transfile/action.txt'
        # mtime of the last observation read; used to detect fresh data.
        self.timestamp = None
    def read_trans(self):
        """Busy-wait until observation.txt holds new, non-empty data; return its text."""
        while True:
            timeinfo = os.stat(self.recvfile_path).st_mtime
            # New data = the file was modified since our last read AND is non-empty.
            if timeinfo != self.timestamp and os.path.getsize(self.recvfile_path) > 0:
                #print('size',os.path.getsize(self.recvfile_path))
                self.timestamp = timeinfo
                #print('timeinfo', timeinfo)
                trans_file = open(self.recvfile_path,'r+')
                valid_trans = trans_file.read()
                trans_file.close()
                #print('valid_trans', valid_trans)
                break
        return valid_trans
    def send_trans(self, data):
        """Overwrite action.txt with *data* (a str), encoded as bytes."""
        file = open(self.sendfile_path, 'wb+')
        file.write(data.encode())
        file.close()
    def reset(self):
        """Signal the client to reset and block until the episode has started.

        An episode is considered started once our plane's third field
        (presumably altitude — TODO confirm against the client protocol)
        exceeds 50; the next observation after that is returned.
        """
        #self.conn.sendall('Ready'.encode('gb2312'))
        self.send_trans('Ready')
        while True:
            trans = self.read_trans()
            _, ourplane, _, _, _ = self.obs_parser(trans)
            #print('ourplane', ourplane)
            if ourplane[2] > 50: break
        next_obserstr = self.read_trans()
        initial_obs, _, _, _, _ = self.obs_parser(next_obserstr)
        print('successfully send reset command to client')
        return initial_obs
    def step(self, a):
        """Send action vector *a* (7 floats) and return (obs, reward, done, 0)."""
        #a = [1,2,3,4,5,6,7]
        # Wire format: "1@v0,v1,v2,v3,v4,v5,v6" — leading '1' marks an action message.
        action = '1' + '@' + str(a[0]) + ',' + str(a[1]) + ',' + str(a[2]) + ',' + str(a[3]) + ',' + str(a[4]) + ',' + str(a[5]) + ',' + str(a[6])
        #print(action)
        self.send_trans(action)
        #print('successfully send action to client')
        next_obserstr = self.read_trans()
        #print('next_obserstr', next_obserstr)
        full_next_obs, next_ourplane, _, _, _ = self.obs_parser(next_obserstr)
        rwd = self.reward(next_obserstr)
        # Episode ends when our plane's third field drops below 20.
        done = 1 if next_ourplane[2] < 20 else 0
        # Close the client connection.
        # conn.colse()
        return full_next_obs, rwd, done, 0
    def reward(self, obsstr):
        """Reward = negative distance to the nearest enemy plane (closer is better)."""
        _, ourplane, allyplane, enemyplane1, enemyplane2 = self.obs_parser(obsstr)
        #print('duudwdwd', np.linalg.norm(ourplane - enemyplane1))
        #print('debug', min(np.linalg.norm(ourplane - enemyplane1, 2), np.linalg.norm(ourplane - enemyplane2, 2)))
        rwd = -min(np.linalg.norm(np.array(ourplane) - np.array(enemyplane1), 2), np.linalg.norm(np.array(ourplane) - np.array(enemyplane2), 2))
        return rwd
    def obs_parser(self, obsstr):
        """Parse an '@'-separated observation string into per-plane [x, y, z] triples.

        Returns (flat 12-float observation, ourplane, allyplane, enemyplane1,
        enemyplane2).
        NOTE(review): enemyplane1 reads '@'-field [1] — identical to allyplane —
        while enemyplane2 reads field [2]. This looks like a copy-paste slip;
        presumably enemyplane1 should read field [2] and enemyplane2 field [3].
        Confirm against the client's message layout before changing.
        """
        #obsstr_sib = copy.deepcopy(obsstr)
        #obsstr = obsstr.decode('gb2312')
        #print("obsstr", obsstr)
        ourplane = [float(str(obsstr).split('@', 4)[0].split(':', 3)[0]), float(str(obsstr).split('@', 4)[0].split(':', 3)[1]), float(str(obsstr).split('@', 4)[0].split(':', 3)[2])]
        allyplane = [float(str(obsstr).split('@', 4)[1].split(':', 3)[0]), float(str(obsstr).split('@', 4)[1].split(':', 3)[1]), float(str(obsstr).split('@', 4)[1].split(':', 3)[2])]
        enemyplane1 = [float(str(obsstr).split('@', 4)[1].split(':', 3)[0]), float(str(obsstr).split('@', 4)[1].split(':', 3)[1]), float(str(obsstr).split('@', 4)[1].split(':', 3)[2])]
        enemyplane2 = [float(str(obsstr).split('@', 4)[2].split(':', 3)[0]), float(str(obsstr).split('@', 4)[2].split(':', 3)[1]), float(str(obsstr).split('@', 4)[2].split(':', 3)[2])]
        observation = ourplane.copy()
        observation.extend(allyplane)
        observation.extend(enemyplane1)
        observation.extend(enemyplane2)
        #print('observation', observation)
        return observation, ourplane, allyplane, enemyplane1, enemyplane2
| BillChan226/RL_Plane_Strategy | rl_algorithms/algos/ppo/DCS_environment.py | DCS_environment.py | py | 4,736 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "gym.spaces.Box",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_nu... |
19802050508 | import os
import argparse
import joblib
import pandas as pd
from sklearn import metrics
from sklearn import tree
import config
import model_dispatcher
def run(fold, model):
    """Train *model* on every fold except *fold* and report validation accuracy.

    The fitted estimator is persisted to ``config.MODEL_OUTPUT`` as
    ``df_<fold>.bin``.
    """
    # Load the pre-folded training set.
    data = pd.read_csv(config.TRAINING_FILE)
    # Placeholder for real feature engineering: drop text/categorical
    # columns and any rows with missing values.
    data = data.drop(['Name', 'Sex', 'Embarked', 'Ticket', 'Fare', 'Cabin'], axis=1)
    data.dropna(inplace=True)
    # Hold out the rows whose kfold column matches the requested fold.
    train_part = data[data.kfold != fold].reset_index(drop=True)
    valid_part = data[data.kfold == fold].reset_index(drop=True)
    # Separate features from the Survived label, as plain arrays.
    features_train = train_part.drop('Survived', axis=1).values
    features_valid = valid_part.drop('Survived', axis=1).values
    target_train = train_part.Survived.values
    target_valid = valid_part.Survived.values
    # Look the estimator up by name and fit it.
    clf = model_dispatcher.models[model]
    clf.fit(features_train, target_train)
    preds = clf.predict(features_valid)
    accuracy = metrics.accuracy_score(target_valid, preds)
    print(f'Fold={fold}, Accuracy={accuracy}')
    # Persist the fitted model for later inference.
    joblib.dump(clf, os.path.join(config.MODEL_OUTPUT, f'df_{fold}.bin'))
if __name__ == '__main__':
    # Command-line interface: which fold to hold out and which model to train.
    parser = argparse.ArgumentParser()
    parser.add_argument('--fold', type=int)
    parser.add_argument('--model', type=str)
    cli = parser.parse_args()
    # Train on the requested fold with the requested model.
    run(fold=cli.fold, model=cli.model)
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "config.TRAINING_FILE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "model_dispatcher.models",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name":... |
29614390408 | import librosa, librosa.display
import os
import pandas as pd
import numpy as np
def normalize_volume(file_path):
    """Load an audio file and peak-normalize its amplitude.

    Returns a ``(samples, sample_rate)`` tuple with the samples
    normalized along the time axis.
    """
    samples, sample_rate = librosa.load(file_path)
    return librosa.util.normalize(samples, axis=0), sample_rate
def extract_features(y_norm, sr):
    """Build a flat feature vector from a normalized audio signal.

    Layout: [tempo, beat mean/var, zero-crossing mean/var,
    spectral-centroid mean/var, spectral-rolloff mean/var,
    then mean/var for each of 40 MFCC coefficients].
    """
    tempo, beat_frames = librosa.beat.beat_track(y=y_norm, sr=sr)
    crossings = librosa.zero_crossings(y=y_norm, pad=False)
    centroids = librosa.feature.spectral_centroid(y=y_norm, sr=sr)[0]
    rolloff = librosa.feature.spectral_rolloff(y=y_norm, sr=sr)[0]
    # Tempo first, then mean/variance summaries in a fixed order.
    features = [tempo, beat_frames.mean(), beat_frames.var()]
    for series in (crossings, centroids, rolloff):
        features.append(series.mean())
        features.append(series.var())
    # 40 mel-frequency cepstral coefficients, each summarized as a
    # (mean, variance) pair.
    for coeff in librosa.feature.mfcc(y=y_norm, sr=sr, n_mfcc=40):
        features.append(coeff.mean())
        features.append(coeff.var())
    return features
| rsmassey/mcats | mcats/wav_extraction/feature_extraction.py | feature_extraction.py | py | 1,454 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "librosa.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "librosa.util.normalize",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "librosa.util",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "librosa.beat.beat_... |
11324625878 | from __future__ import annotations
from typing import Any, List, Literal, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Callable, Union, Dict, overload
from contextvars import ContextVar
import inspect
import os
from .item import Item, ItemCallbackType
from ..enums import ChannelType, ComponentType
from ..partial_emoji import PartialEmoji
from ..emoji import Emoji
from ..utils import MISSING
from ..components import (
SelectOption,
SelectMenu,
)
from ..app_commands.namespace import Namespace
__all__ = (
'Select',
'UserSelect',
'RoleSelect',
'MentionableSelect',
'ChannelSelect',
'select',
)
if TYPE_CHECKING:
from typing_extensions import TypeAlias, Self
from .view import View
from ..types.components import SelectMenu as SelectMenuPayload
from ..types.interactions import SelectMessageComponentInteractionData
from ..app_commands import AppCommandChannel, AppCommandThread
from ..member import Member
from ..role import Role
from ..user import User
from ..interactions import Interaction
# The component types that share the BaseSelect machinery.
ValidSelectType: TypeAlias = Literal[
    ComponentType.string_select,
    ComponentType.user_select,
    ComponentType.role_select,
    ComponentType.channel_select,
    ComponentType.mentionable_select,
]
# Every value a select interaction may resolve to, across all select kinds.
PossibleValue: TypeAlias = Union[
    str, User, Member, Role, AppCommandChannel, AppCommandThread, Union[Role, Member], Union[Role, User]
]
# TypeVars used to keep the ``select`` decorator overloads precise.
V = TypeVar('V', bound='View', covariant=True)
BaseSelectT = TypeVar('BaseSelectT', bound='BaseSelect[Any]')
SelectT = TypeVar('SelectT', bound='Select[Any]')
UserSelectT = TypeVar('UserSelectT', bound='UserSelect[Any]')
RoleSelectT = TypeVar('RoleSelectT', bound='RoleSelect[Any]')
ChannelSelectT = TypeVar('ChannelSelectT', bound='ChannelSelect[Any]')
MentionableSelectT = TypeVar('MentionableSelectT', bound='MentionableSelect[Any]')
SelectCallbackDecorator: TypeAlias = Callable[[ItemCallbackType[V, BaseSelectT]], BaseSelectT]
# Per-interaction cache of selected values keyed by custom_id; a ContextVar
# is used so concurrently dispatched interactions don't clobber each other.
selected_values: ContextVar[Dict[str, List[PossibleValue]]] = ContextVar('selected_values')
class BaseSelect(Item[V]):
    """The base Select model that all other Select models inherit from.
    This class inherits from :class:`Item` and implements the common attributes.
    The following implement this class:
    - :class:`~discord.ui.Select`
    - :class:`~discord.ui.ChannelSelect`
    - :class:`~discord.ui.RoleSelect`
    - :class:`~discord.ui.MentionableSelect`
    - :class:`~discord.ui.UserSelect`
    .. versionadded:: 2.1
    Attributes
    ------------
    row: Optional[:class:`int`]
        The relative row this select menu belongs to. A Discord component can only have 5
        rows. By default, items are arranged automatically into those 5 rows. If you'd
        like to control the relative positioning of the row then passing an index is advised.
        For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
        ordering. The row number must be between 0 and 4 (i.e. zero indexed).
    """
    __slots__ = ('_provided_custom_id', '_underlying', 'row', '_values')
    # Attributes echoed by Item.__repr__ for all select flavours.
    __item_repr_attributes__: Tuple[str, ...] = (
        'placeholder',
        'min_values',
        'max_values',
        'disabled',
    )
    def __init__(
        self,
        type: ValidSelectType,
        *,
        custom_id: str = MISSING,
        row: Optional[int] = None,
        placeholder: Optional[str] = None,
        min_values: Optional[int] = None,
        max_values: Optional[int] = None,
        disabled: bool = False,
        options: List[SelectOption] = MISSING,
        channel_types: List[ChannelType] = MISSING,
    ) -> None:
        super().__init__()
        # Track whether the caller supplied a custom_id; persistent views
        # need to distinguish user-provided IDs from auto-generated ones.
        self._provided_custom_id = custom_id is not MISSING
        # Fall back to a random 32-hex-char ID when none was given.
        custom_id = os.urandom(16).hex() if custom_id is MISSING else custom_id
        if not isinstance(custom_id, str):
            raise TypeError(f'expected custom_id to be str not {custom_id.__class__.__name__}')
        # The raw component payload object; all property accessors below
        # simply proxy to this instance.
        self._underlying = SelectMenu._raw_construct(
            type=type,
            custom_id=custom_id,
            placeholder=placeholder,
            min_values=min_values,
            max_values=max_values,
            disabled=disabled,
            channel_types=[] if channel_types is MISSING else channel_types,
            options=[] if options is MISSING else options,
        )
        self.row = row
        self._values: List[PossibleValue] = []
    @property
    def values(self) -> List[PossibleValue]:
        # Prefer the values recorded for the current interaction context;
        # fall back to the last values stored on the instance itself.
        values = selected_values.get({})
        return values.get(self.custom_id, self._values)
    @property
    def custom_id(self) -> str:
        """:class:`str`: The ID of the select menu that gets received during an interaction."""
        return self._underlying.custom_id
    @custom_id.setter
    def custom_id(self, value: str) -> None:
        if not isinstance(value, str):
            raise TypeError('custom_id must be a str')
        self._underlying.custom_id = value
        # An explicitly assigned ID counts as user-provided.
        self._provided_custom_id = True
    @property
    def placeholder(self) -> Optional[str]:
        """Optional[:class:`str`]: The placeholder text that is shown if nothing is selected, if any."""
        return self._underlying.placeholder
    @placeholder.setter
    def placeholder(self, value: Optional[str]) -> None:
        if value is not None and not isinstance(value, str):
            raise TypeError('placeholder must be None or str')
        self._underlying.placeholder = value
    @property
    def min_values(self) -> int:
        """:class:`int`: The minimum number of items that must be chosen for this select menu."""
        return self._underlying.min_values
    @min_values.setter
    def min_values(self, value: int) -> None:
        self._underlying.min_values = int(value)
    @property
    def max_values(self) -> int:
        """:class:`int`: The maximum number of items that can be chosen for this select menu."""
        return self._underlying.max_values
    @max_values.setter
    def max_values(self, value: int) -> None:
        self._underlying.max_values = int(value)
    @property
    def disabled(self) -> bool:
        """:class:`bool`: Whether the select is disabled or not."""
        return self._underlying.disabled
    @disabled.setter
    def disabled(self, value: bool) -> None:
        self._underlying.disabled = bool(value)
    @property
    def width(self) -> int:
        # A select menu always occupies a full action row (all 5 slots).
        return 5
    def to_component_dict(self) -> SelectMenuPayload:
        # Serialize to the raw payload sent to Discord.
        return self._underlying.to_dict()
    def _refresh_component(self, component: SelectMenu) -> None:
        # Adopt the component state echoed back by Discord.
        self._underlying = component
    def _refresh_state(self, interaction: Interaction, data: SelectMessageComponentInteractionData) -> None:
        # Record this interaction's selections both on the instance and in
        # the context-local cache so ``values`` is correct during dispatch.
        values = selected_values.get({})
        payload: List[PossibleValue]
        try:
            # Non-string selects carry a 'resolved' map of full objects.
            resolved = Namespace._get_resolved_items(interaction, data['resolved'])
            payload = list(resolved.values())
        except KeyError:
            # String selects have no 'resolved' key; use the raw values.
            payload = data.get("values", [])  # type: ignore
        self._values = values[self.custom_id] = payload
        selected_values.set(values)
    def is_dispatchable(self) -> bool:
        return True
    @classmethod
    def from_component(cls, component: SelectMenu) -> Self:
        # Rebuild an item from a raw component, copying only the attributes
        # this select flavour advertises in __item_repr_attributes__.
        return cls(
            **{k: getattr(component, k) for k in cls.__item_repr_attributes__},
            row=None,
        )
class Select(BaseSelect[V]):
    """Represents a UI select menu with a list of custom options. This is represented
    to the user as a dropdown menu.
    .. versionadded:: 2.0
    Parameters
    ------------
    custom_id: :class:`str`
        The ID of the select menu that gets received during an interaction.
        If not given then one is generated for you.
    placeholder: Optional[:class:`str`]
        The placeholder text that is shown if nothing is selected, if any.
    min_values: :class:`int`
        The minimum number of items that must be chosen for this select menu.
        Defaults to 1 and must be between 0 and 25.
    max_values: :class:`int`
        The maximum number of items that must be chosen for this select menu.
        Defaults to 1 and must be between 1 and 25.
    options: List[:class:`discord.SelectOption`]
        A list of options that can be selected in this menu.
    disabled: :class:`bool`
        Whether the select is disabled or not.
    row: Optional[:class:`int`]
        The relative row this select menu belongs to. A Discord component can only have 5
        rows. By default, items are arranged automatically into those 5 rows. If you'd
        like to control the relative positioning of the row then passing an index is advised.
        For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
        ordering. The row number must be between 0 and 4 (i.e. zero indexed).
    """
    __item_repr_attributes__ = BaseSelect.__item_repr_attributes__ + ('options',)
    def __init__(
        self,
        *,
        custom_id: str = MISSING,
        placeholder: Optional[str] = None,
        min_values: int = 1,
        max_values: int = 1,
        options: List[SelectOption] = MISSING,
        disabled: bool = False,
        row: Optional[int] = None,
    ) -> None:
        super().__init__(
            self.type,
            custom_id=custom_id,
            placeholder=placeholder,
            min_values=min_values,
            max_values=max_values,
            disabled=disabled,
            options=options,
            row=row,
        )
    @property
    def values(self) -> List[str]:
        """List[:class:`str`]: A list of values that have been selected by the user."""
        return super().values  # type: ignore
    @property
    def type(self) -> Literal[ComponentType.string_select]:
        """:class:`.ComponentType`: The type of this component."""
        return ComponentType.string_select
    @property
    def options(self) -> List[SelectOption]:
        """List[:class:`discord.SelectOption`]: A list of options that can be selected in this menu."""
        return self._underlying.options
    @options.setter
    def options(self, value: List[SelectOption]) -> None:
        # Validate eagerly so payload construction cannot fail later.
        if not isinstance(value, list):
            raise TypeError('options must be a list of SelectOption')
        if not all(isinstance(obj, SelectOption) for obj in value):
            raise TypeError('all list items must subclass SelectOption')
        self._underlying.options = value
    def add_option(
        self,
        *,
        label: str,
        value: str = MISSING,
        description: Optional[str] = None,
        emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
        default: bool = False,
    ) -> None:
        """Adds an option to the select menu.
        To append a pre-existing :class:`discord.SelectOption` use the
        :meth:`append_option` method instead.
        Parameters
        -----------
        label: :class:`str`
            The label of the option. This is displayed to users.
            Can only be up to 100 characters.
        value: :class:`str`
            The value of the option. This is not displayed to users.
            If not given, defaults to the label. Can only be up to 100 characters.
        description: Optional[:class:`str`]
            An additional description of the option, if any.
            Can only be up to 100 characters.
        emoji: Optional[Union[:class:`str`, :class:`.Emoji`, :class:`.PartialEmoji`]]
            The emoji of the option, if available. This can either be a string representing
            the custom or unicode emoji or an instance of :class:`.PartialEmoji` or :class:`.Emoji`.
        default: :class:`bool`
            Whether this option is selected by default.
        Raises
        -------
        ValueError
            The number of options exceeds 25.
        """
        option = SelectOption(
            label=label,
            value=value,
            description=description,
            emoji=emoji,
            default=default,
        )
        self.append_option(option)
    def append_option(self, option: SelectOption) -> None:
        """Appends an option to the select menu.
        Parameters
        -----------
        option: :class:`discord.SelectOption`
            The option to append to the select menu.
        Raises
        -------
        ValueError
            The number of options exceeds 25.
        """
        # Discord caps a string select at 25 options. The previous check
        # (``> 25``) only raised once 26 options were already present,
        # letting a 26th slip through; ``>= 25`` enforces the documented cap.
        if len(self._underlying.options) >= 25:
            raise ValueError('maximum number of options already provided')
        self._underlying.options.append(option)
class UserSelect(BaseSelect[V]):
    """A select menu pre-populated by Discord with the guild's members.

    In a private message only the client user and the invoking user can be
    picked, and every selection resolves to a :class:`discord.User`.

    .. versionadded:: 2.1

    Parameters
    ------------
    custom_id: :class:`str`
        The ID received during an interaction; auto-generated when omitted.
    placeholder: Optional[:class:`str`]
        Text shown while nothing is selected, if any.
    min_values: :class:`int`
        Minimum number of selections; defaults to 1, must be 0-25.
    max_values: :class:`int`
        Maximum number of selections; defaults to 1, must be 1-25.
    disabled: :class:`bool`
        Whether the select is disabled.
    row: Optional[:class:`int`]
        Explicit row index (0-4). ``None`` (the default) lets rows be
        assigned automatically; a component can have at most 5 rows.
    """
    def __init__(
        self,
        *,
        custom_id: str = MISSING,
        placeholder: Optional[str] = None,
        min_values: int = 1,
        max_values: int = 1,
        disabled: bool = False,
        row: Optional[int] = None,
    ) -> None:
        # All behaviour lives in BaseSelect; this subclass only pins the type.
        super().__init__(
            self.type,
            custom_id=custom_id,
            row=row,
            placeholder=placeholder,
            disabled=disabled,
            min_values=min_values,
            max_values=max_values,
        )
    @property
    def type(self) -> Literal[ComponentType.user_select]:
        """:class:`.ComponentType`: Always :attr:`.ComponentType.user_select`."""
        return ComponentType.user_select
    @property
    def values(self) -> List[Union[Member, User]]:
        """List[Union[:class:`discord.Member`, :class:`discord.User`]]: The members/users selected.

        Resolves to :class:`discord.Member` in guilds; in private messages
        every selection resolves to :class:`discord.User`.
        """
        return super().values  # type: ignore
class RoleSelect(BaseSelect[V]):
    """A select menu pre-populated by Discord with the guild's roles.

    In a private message no roles are available, so the menu will be empty.

    .. versionadded:: 2.1

    Parameters
    ------------
    custom_id: :class:`str`
        The ID received during an interaction; auto-generated when omitted.
    placeholder: Optional[:class:`str`]
        Text shown while nothing is selected, if any.
    min_values: :class:`int`
        Minimum number of selections; defaults to 1, must be 0-25.
    max_values: :class:`int`
        Maximum number of selections; defaults to 1, must be 1-25.
    disabled: :class:`bool`
        Whether the select is disabled.
    row: Optional[:class:`int`]
        Explicit row index (0-4). ``None`` (the default) lets rows be
        assigned automatically; a component can have at most 5 rows.
    """
    def __init__(
        self,
        *,
        custom_id: str = MISSING,
        placeholder: Optional[str] = None,
        min_values: int = 1,
        max_values: int = 1,
        disabled: bool = False,
        row: Optional[int] = None,
    ) -> None:
        # All behaviour lives in BaseSelect; this subclass only pins the type.
        super().__init__(
            self.type,
            custom_id=custom_id,
            row=row,
            placeholder=placeholder,
            disabled=disabled,
            min_values=min_values,
            max_values=max_values,
        )
    @property
    def type(self) -> Literal[ComponentType.role_select]:
        """:class:`.ComponentType`: Always :attr:`.ComponentType.role_select`."""
        return ComponentType.role_select
    @property
    def values(self) -> List[Role]:
        """List[:class:`discord.Role`]: The roles selected by the user."""
        return super().values  # type: ignore
class MentionableSelect(BaseSelect[V]):
    """A select menu pre-populated by Discord with the guild's members and roles.

    In a private message only the client user and the invoking user can be
    picked (no roles), and every selection resolves to a
    :class:`discord.User`.

    .. versionadded:: 2.1

    Parameters
    ------------
    custom_id: :class:`str`
        The ID received during an interaction; auto-generated when omitted.
    placeholder: Optional[:class:`str`]
        Text shown while nothing is selected, if any.
    min_values: :class:`int`
        Minimum number of selections; defaults to 1, must be 0-25.
    max_values: :class:`int`
        Maximum number of selections; defaults to 1, must be 1-25.
    disabled: :class:`bool`
        Whether the select is disabled.
    row: Optional[:class:`int`]
        Explicit row index (0-4). ``None`` (the default) lets rows be
        assigned automatically; a component can have at most 5 rows.
    """
    def __init__(
        self,
        *,
        custom_id: str = MISSING,
        placeholder: Optional[str] = None,
        min_values: int = 1,
        max_values: int = 1,
        disabled: bool = False,
        row: Optional[int] = None,
    ) -> None:
        # All behaviour lives in BaseSelect; this subclass only pins the type.
        super().__init__(
            self.type,
            custom_id=custom_id,
            row=row,
            placeholder=placeholder,
            disabled=disabled,
            min_values=min_values,
            max_values=max_values,
        )
    @property
    def type(self) -> Literal[ComponentType.mentionable_select]:
        """:class:`.ComponentType`: Always :attr:`.ComponentType.mentionable_select`."""
        return ComponentType.mentionable_select
    @property
    def values(self) -> List[Union[Member, User, Role]]:
        """List[Union[:class:`discord.Role`, :class:`discord.Member`, :class:`discord.User`]]: The selections.

        Resolves to :class:`discord.Member` in guilds; in private messages
        every selection resolves to :class:`discord.User`.
        """
        return super().values  # type: ignore
class ChannelSelect(BaseSelect[V]):
    """A select menu pre-populated by Discord with the guild's channels.

    In a private message no channels are available, so the menu will be
    empty.

    .. versionadded:: 2.1

    Parameters
    ------------
    custom_id: :class:`str`
        The ID received during an interaction; auto-generated when omitted.
    channel_types: List[:class:`~discord.ChannelType`]
        The channel types to offer; defaults to all channel types.
    placeholder: Optional[:class:`str`]
        Text shown while nothing is selected, if any.
    min_values: :class:`int`
        Minimum number of selections; defaults to 1, must be 0-25.
    max_values: :class:`int`
        Maximum number of selections; defaults to 1, must be 1-25.
    disabled: :class:`bool`
        Whether the select is disabled.
    row: Optional[:class:`int`]
        Explicit row index (0-4). ``None`` (the default) lets rows be
        assigned automatically; a component can have at most 5 rows.
    """
    # channel_types participates in repr and in from_component copying.
    __item_repr_attributes__ = BaseSelect.__item_repr_attributes__ + ('channel_types',)
    def __init__(
        self,
        *,
        custom_id: str = MISSING,
        channel_types: List[ChannelType] = MISSING,
        placeholder: Optional[str] = None,
        min_values: int = 1,
        max_values: int = 1,
        disabled: bool = False,
        row: Optional[int] = None,
    ) -> None:
        # BaseSelect holds all the state; this subclass pins the type and
        # forwards the channel-type filter.
        super().__init__(
            self.type,
            custom_id=custom_id,
            row=row,
            channel_types=channel_types,
            placeholder=placeholder,
            disabled=disabled,
            min_values=min_values,
            max_values=max_values,
        )
    @property
    def type(self) -> Literal[ComponentType.channel_select]:
        """:class:`.ComponentType`: Always :attr:`.ComponentType.channel_select`."""
        return ComponentType.channel_select
    @property
    def channel_types(self) -> List[ChannelType]:
        """List[:class:`~discord.ChannelType`]: The channel types offered for selection."""
        return self._underlying.channel_types
    @channel_types.setter
    def channel_types(self, value: List[ChannelType]) -> None:
        # Validate eagerly so payload construction cannot fail later.
        if not isinstance(value, list):
            raise TypeError('channel_types must be a list of ChannelType')
        if not all(isinstance(obj, ChannelType) for obj in value):
            raise TypeError('all list items must be a ChannelType')
        self._underlying.channel_types = value
    @property
    def values(self) -> List[Union[AppCommandChannel, AppCommandThread]]:
        """List[Union[:class:`~discord.app_commands.AppCommandChannel`, :class:`~discord.app_commands.AppCommandThread`]]: The channels selected by the user."""
        return super().values  # type: ignore
@overload
def select(
*,
cls: Type[SelectT] = Select[V],
options: List[SelectOption] = MISSING,
channel_types: List[ChannelType] = ...,
placeholder: Optional[str] = ...,
custom_id: str = ...,
min_values: int = ...,
max_values: int = ...,
disabled: bool = ...,
row: Optional[int] = ...,
) -> SelectCallbackDecorator[V, SelectT]:
...
@overload
def select(
*,
cls: Type[UserSelectT] = UserSelect[V],
options: List[SelectOption] = MISSING,
channel_types: List[ChannelType] = ...,
placeholder: Optional[str] = ...,
custom_id: str = ...,
min_values: int = ...,
max_values: int = ...,
disabled: bool = ...,
row: Optional[int] = ...,
) -> SelectCallbackDecorator[V, UserSelectT]:
...
@overload
def select(
*,
cls: Type[RoleSelectT] = RoleSelect[V],
options: List[SelectOption] = MISSING,
channel_types: List[ChannelType] = ...,
placeholder: Optional[str] = ...,
custom_id: str = ...,
min_values: int = ...,
max_values: int = ...,
disabled: bool = ...,
row: Optional[int] = ...,
) -> SelectCallbackDecorator[V, RoleSelectT]:
...
@overload
def select(
*,
cls: Type[ChannelSelectT] = ChannelSelect[V],
options: List[SelectOption] = MISSING,
channel_types: List[ChannelType] = ...,
placeholder: Optional[str] = ...,
custom_id: str = ...,
min_values: int = ...,
max_values: int = ...,
disabled: bool = ...,
row: Optional[int] = ...,
) -> SelectCallbackDecorator[V, ChannelSelectT]:
...
@overload
def select(
*,
cls: Type[MentionableSelectT] = MentionableSelect[V],
options: List[SelectOption] = MISSING,
channel_types: List[ChannelType] = MISSING,
placeholder: Optional[str] = ...,
custom_id: str = ...,
min_values: int = ...,
max_values: int = ...,
disabled: bool = ...,
row: Optional[int] = ...,
) -> SelectCallbackDecorator[V, MentionableSelectT]:
...
def select(
*,
cls: Type[BaseSelectT] = Select[V],
options: List[SelectOption] = MISSING,
channel_types: List[ChannelType] = MISSING,
placeholder: Optional[str] = None,
custom_id: str = MISSING,
min_values: int = 1,
max_values: int = 1,
disabled: bool = False,
row: Optional[int] = None,
) -> SelectCallbackDecorator[V, BaseSelectT]:
"""A decorator that attaches a select menu to a component.
The function being decorated should have three parameters, ``self`` representing
the :class:`discord.ui.View`, the :class:`discord.Interaction` you receive and
the chosen select class.
To obtain the selected values inside the callback, you can use the ``values`` attribute of the chosen class in the callback. The list of values
will depend on the type of select menu used. View the table below for more information.
+----------------------------------------+-----------------------------------------------------------------------------------------------------------------+
| Select Type | Resolved Values |
+========================================+=================================================================================================================+
| :class:`discord.ui.Select` | List[:class:`str`] |
+----------------------------------------+-----------------------------------------------------------------------------------------------------------------+
| :class:`discord.ui.UserSelect` | List[Union[:class:`discord.Member`, :class:`discord.User`]] |
+----------------------------------------+-----------------------------------------------------------------------------------------------------------------+
| :class:`discord.ui.RoleSelect` | List[:class:`discord.Role`] |
+----------------------------------------+-----------------------------------------------------------------------------------------------------------------+
| :class:`discord.ui.MentionableSelect` | List[Union[:class:`discord.Role`, :class:`discord.Member`, :class:`discord.User`]] |
+----------------------------------------+-----------------------------------------------------------------------------------------------------------------+
| :class:`discord.ui.ChannelSelect` | List[Union[:class:`~discord.app_commands.AppCommandChannel`, :class:`~discord.app_commands.AppCommandThread`]] |
+----------------------------------------+-----------------------------------------------------------------------------------------------------------------+
.. versionchanged:: 2.1
Added the following keyword-arguments: ``cls``, ``channel_types``
Example
---------
.. code-block:: python3
class View(discord.ui.View):
@discord.ui.select(cls=ChannelSelect, channel_types=[discord.ChannelType.text])
async def select_channels(self, interaction: discord.Interaction, select: ChannelSelect):
return await interaction.response.send_message(f'You selected {select.values[0].mention}')
Parameters
------------
cls: Union[Type[:class:`discord.ui.Select`], Type[:class:`discord.ui.UserSelect`], Type[:class:`discord.ui.RoleSelect`], \
Type[:class:`discord.ui.MentionableSelect`], Type[:class:`discord.ui.ChannelSelect`]]
The class to use for the select menu. Defaults to :class:`discord.ui.Select`. You can use other
select types to display different select menus to the user. See the table above for the different
values you can get from each select type. Subclasses work as well, however the callback in the subclass will
get overridden.
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is selected, if any.
custom_id: :class:`str`
The ID of the select menu that gets received during an interaction.
It is recommended not to set this parameter to prevent conflicts.
row: Optional[:class:`int`]
The relative row this select menu belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
min_values: :class:`int`
The minimum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 0 and 25.
max_values: :class:`int`
The maximum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
options: List[:class:`discord.SelectOption`]
A list of options that can be selected in this menu. This can only be used with
:class:`Select` instances.
channel_types: List[:class:`~discord.ChannelType`]
The types of channels to show in the select menu. Defaults to all channels. This can only be used
with :class:`ChannelSelect` instances.
disabled: :class:`bool`
Whether the select is disabled or not. Defaults to ``False``.
"""
def decorator(func: ItemCallbackType[V, BaseSelectT]) -> ItemCallbackType[V, BaseSelectT]:
if not inspect.iscoroutinefunction(func):
raise TypeError('select function must be a coroutine function')
callback_cls = getattr(cls, '__origin__', cls)
if not issubclass(callback_cls, BaseSelect):
supported_classes = ", ".join(["ChannelSelect", "MentionableSelect", "RoleSelect", "Select", "UserSelect"])
raise TypeError(f'cls must be one of {supported_classes} or a subclass of one of them, not {cls!r}.')
func.__discord_ui_model_type__ = callback_cls
func.__discord_ui_model_kwargs__ = {
'placeholder': placeholder,
'custom_id': custom_id,
'row': row,
'min_values': min_values,
'max_values': max_values,
'disabled': disabled,
}
if issubclass(callback_cls, Select):
func.__discord_ui_model_kwargs__['options'] = options
if issubclass(callback_cls, ChannelSelect):
func.__discord_ui_model_kwargs__['channel_types'] = channel_types
return func
return decorator # type: ignore
| Rapptz/discord.py | discord/ui/select.py | select.py | py | 32,941 | python | en | code | 13,719 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing_extensions.TypeAlias",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "typing.Literal",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "enums... |
19235227239 | from IAF.flows.iaf import IAF_mod
from torch import nn
import torch
class IAF_flow(nn.Module):
    """Stack of IAF transforms applied in sequence, accumulating log-determinants.

    When ``tanh_flag`` is set, the output is squashed into (-C, C) with a
    scaled tanh; otherwise the raw transformed sample is returned.
    """

    def __init__(self, dim, n_flows, tanh_flag, C=100):
        super().__init__()
        # One IAF module per flow step, all sharing the same dimensionality.
        self.flow = nn.ModuleList([IAF_mod(dim, dim, dim) for _ in range(n_flows)])
        self.C = C
        self.tanh_flag = tanh_flag

    def forward(self, z0, h=None):
        """Push z0 through every flow step; return (z_K, total log|det J|)."""
        zk, log_det = z0, 0
        for step in self.flow:
            zk, step_log_det = step(zk, h)
            log_det = log_det + step_log_det
        if self.tanh_flag:
            # Bounded output: C * tanh(z / C) keeps values inside (-C, C).
            return self.C * torch.tanh(zk / self.C), log_det
        return zk, log_det
| MrHuff/DIF-NLDL | IAF/IAF.py | IAF.py | py | 621 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
21087590741 | import argparse
import matplotlib.pyplot as plt
import numpy as np
import h5py
import gusto_dataset
global_vars = dict(num_files=0, file_index=0)
def plot_faces(filename, args):
    """Draw every face segment of the mesh in the x1/x3 plane."""
    dset = gusto_dataset.GustoDataset(filename)
    for segment in dset.get_face_segments():
        plt.plot(segment[:, 0], segment[:, 2], '-o', c='k')
    plt.axis('equal')
    plt.margins(0.1)
def plot_1d(filename, args):
    """Plot one row of a cell variable against z, shaded by file order."""
    dset = gusto_dataset.GustoDataset(filename)
    abscissa = dset.get_cell_variable('x3', row=args.row)
    ordinate = dset.get_cell_variable(args.data, row=args.row)
    # Grey level encodes where this file sits in the input sequence.
    shade = float(global_vars['file_index']) / global_vars['num_files']
    plt.plot(abscissa, ordinate, '-o', c=[shade] * 3)
    plt.xlabel('z')
    plt.ylabel(args.data)
def triangle_variable_plot(filename, args):
    """Pseudocolor plot of a cell variable on the triangulated mesh.

    Fix: the original hard-coded the 'dg' field, ignoring --data; it now
    honors args.data (whose argparse default is 'dg', so the default
    behavior is unchanged).
    """
    dset = gusto_dataset.GustoDataset(filename)
    x = dset.get_cell_variable('x1')
    z = dset.get_cell_variable('x3')
    f = dset.get_cell_variable(args.data)
    plt.tripcolor(x, z, f)
    plt.axis('equal')
    plt.colorbar()
def triangle_mesh_plot(filename, args):
    """Wireframe plot of the cell triangulation."""
    dset = gusto_dataset.GustoDataset(filename)
    plt.triplot(dset.get_cell_variable('x1'), dset.get_cell_variable('x3'))
    plt.axis('equal')
def triangle_vert_plot(filename, args):
    """Pseudocolor plot of a vertex variable on the triangulated mesh."""
    dset = gusto_dataset.GustoDataset(filename)
    xv = dset.get_vert_variable('x1')
    zv = dset.get_vert_variable('x3')
    field = dset.get_vert_variable(args.data)
    plt.tripcolor(xv, zv, field)
    plt.axis('equal')
def mesh_plot(filename, args):
    """Render the cell polygons colored by a cell variable via a PolyCollection."""
    from matplotlib.collections import PolyCollection
    dset = gusto_dataset.GustoDataset(filename)
    polygons = dset.get_cell_polygons()
    field = dset.get_cell_variable(args.data, log=args.log)
    cells = PolyCollection(polygons, array=field, cmap=args.cmap,
                           linewidths=0.0, antialiased=True)
    fig = plt.figure()
    axis = fig.add_subplot(1, 1, 1)
    axis.add_collection(cells)
    axis.autoscale_view()
    axis.set_aspect('equal')
    fig.colorbar(cells)
    plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('command')
    parser.add_argument('filenames', nargs='+')
    parser.add_argument('-d', '--data', default='dg')
    parser.add_argument('--cmap', default='jet')
    parser.add_argument('--row', default=0, type=int)
    parser.add_argument('--log', action='store_true', default=False)
    args = parser.parse_args()

    # Dispatch table: command name -> plotting routine.
    plots = {'1d': plot_1d,
             'vert': triangle_vert_plot,
             'trimesh': triangle_mesh_plot,
             'triplot': triangle_variable_plot,
             'mesh': mesh_plot,
             'faces': plot_faces}

    global_vars['num_files'] = len(args.filenames)
    for index, filename in enumerate(args.filenames):
        global_vars['file_index'] = index
        plots[args.command](filename, args)
    plt.show()
| jzrake/gusto | plot.py | plot.py | py | 3,290 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gusto_dataset.GustoDataset",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "m... |
8558487936 | from gensim.models.doc2vec import TaggedDocument
from utils import ExecutionTime
def tagging(cleaned_text: list):
    """Wrap each tokenized document in a TaggedDocument tagged with its index.

    Reports start/finish and elapsed time on stdout, as before.
    """
    print('Tagging started...')
    timer = ExecutionTime()
    timer.start()
    tagged_text = [TaggedDocument(tokens, [str(position)])
                   for position, tokens in enumerate(cleaned_text)]
    timer.end()
    print(f'Tagging completed in {timer.get_exec_time():.2f} sec')
    return tagged_text
| Cashaqu/wine_advisor | tagging.py | tagging.py | py | 467 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.ExecutionTime",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gensim.models.doc2vec.TaggedDocument",
"line_number": 12,
"usage_type": "call"
}
] |
12922555819 | # De Django
from django.urls import path
# Propios
from . import views
# App URL routes: each entry maps a URL path to its view function and a
# reversible route name (usable with {% url %} / reverse()).
urlpatterns = [
    path('',views.home,name='home'),
    path('manual',views.manual,name='manual'),
    path('login',views.login_page,name='login-page'),
    path('register',views.register,name='register'),
    path('logout',views.logout_staff,name='logout'),
    path('personal',views.staff_page,name='staff'),
    path('clientes',views.clients,name='clients'),
    # NOTE(review): the view is named `invetory` (likely a typo for
    # `inventory`); the route name spells it correctly.
    path('inventario',views.invetory,name='inventory'),
    path('settings',views.settings,name='settings')
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
14152913313 | # -*- coding: utf-8 -*-
import re
from flask import redirect, url_for
id_check = re.compile('([0-9a-f]+)')
genre_check = re.compile('([0-9a-z_]+)')
zip_check = re.compile('([0-9a-zA-Z_.-]+.zip)')
fb2_check = re.compile('([ 0-9a-zA-ZА-Яа-я_,.:!-]+.fb2)')
def unurl(s: str):
    """Decode the small set of percent-escapes this app cares about.

    Only %22, %27, %2E and %2F are translated (double quote, apostrophe,
    dot, slash); any other escape is passed through untouched. A None
    input is returned unchanged.
    """
    if s is None:
        return s
    decoded = s
    for escape, char in (('%22', '"'), ('%27', "'"), ('%2E', '.'), ('%2F', '/')):
        decoded = decoded.replace(escape, char)
    return decoded
def redir_invalid(redir_name):
    """302-redirect (reader-friendly code) to the named endpoint for bad input."""
    target = url_for(redir_name)
    return redirect(target, 302, Response=None)
def validate_id(s: str):
    """Return s when it begins with lowercase-hex characters, else None."""
    return s if id_check.match(s) else None
# simple prefix validation in .../sequenceindes and .../authorsindex
def validate_prefix(s: str):
    """Sanitize an index prefix.

    Both quote characters are mapped to a backtick (the database holds
    neither); prefixes longer than 10 characters are rejected with None.
    """
    sanitized = s.replace('"', '`').replace("'", '`')
    return None if len(sanitized) > 10 else sanitized
def validate_genre(s: str):
    """Return s when it starts like a lowercase alnum/underscore slug, else None."""
    return s if genre_check.match(s) else None
def validate_genre_meta(s: str):
    """Same check as validate_genre: lowercase alnum/underscore slug or None."""
    return s if genre_check.match(s) else None
# search pattern some normalization
def validate_search(s: str):
    """Normalize a search pattern.

    Decodes the known percent-escapes, neutralizes quotes (to backticks)
    and semicolons, and caps the length at 128 characters. None becomes
    the empty string.
    """
    if s is None:
        return ""
    pattern = unurl(s).replace('"', '`').replace("'", '`').replace(';', '')
    return pattern[:128]
def validate_zip(s: str):
    """Return s when it looks like a .zip archive name, else None."""
    return s if zip_check.match(s) else None
def validate_fb2(s: str):
    """Decode the known escapes and return the name when it matches the
    .fb2 filename pattern, else None.

    The pattern check deliberately runs against the raw (still-escaped)
    input, while the decoded form is what gets returned.
    """
    decoded = unurl(s)
    return decoded if fb2_check.match(s) else None
| stanislavvv/fb2_srv_pseudostatic | app/validate.py | validate.py | py | 1,643 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 9,
... |
24253712581 | import maze
from PIL import Image, ImageDraw
import time
from sys import argv
def print_maze(side, mymaze):
    """Render a square maze as text: each slot prints its value, None prints X."""
    print("|-" * side + "|")
    for position, slot in enumerate(mymaze):
        print('X' if slot is None else slot, end=' ')
        # Break the row after every `side` cells.
        if (position + 1) % side == 0:
            print("")
    print("|-" * side + "|")
def draw_polygon(size, draw, ind, block_size, color):
    """Fill one grid cell (flat index ind in a size x size grid) as a square."""
    left = (ind % size) * block_size
    top = (ind // size) * block_size
    right = left + block_size
    bottom = top + block_size
    # Flat coordinate sequence: the four corners clockwise from top-left.
    draw.polygon([left, top, right, top, right, bottom, left, bottom],
                 fill=color)
def paint_solution_manhattan(array, size, block_size, path):
    """Paint per-cell visit counts as a greyscale bitmap saved as <path>.bmp.

    Higher counts paint darker (pixel value 255 - count); progress and
    elapsed time are reported on stdout.
    """
    start_time = time.time()
    img = Image.new('L', (size * block_size, size * block_size), color=255)
    draw = ImageDraw.Draw(img)
    for index, count in enumerate(array):
        draw_polygon(size, draw, index, block_size, 255 - count)
        print("Painting solution... {}%".format((index * 100) // (len(array) + 1)), end='\r')
    print("Painting solution... 100%")
    img.save("{}.bmp".format(path), quality=100, subsampling=0)
    elapsed_time = time.time() - start_time
    print("Elapsed time painting solution : {}".format(elapsed_time))
def main(side):
    """Solve 255 random mazes of the given side, count how often each cell
    lies on a solution path, and paint the totals as a greyscale map."""
    maze_ = maze.Maze(side)
    mazeSum = [0] * (side * side)
    for iteration in range(255):
        maze_.generate_new_maze()
        maze_.solve_maze()
        # Cells marked 2 belong to this maze's solution path.
        mazeSum = [mazeSum[i] + (1 if cell == 2 else 0)
                   for i, cell in enumerate(maze_.pathing)]
        print("Working... {}%".format((iteration * 100) // 255), end='\r')
    print()
    paint_solution_manhattan(mazeSum, side, 15, "grey_scale")
if __name__ == "__main__":
    try:
        # First CLI argument is the maze side length.
        main(int(argv[1]))
    except (ValueError, IndexError):
        # Missing or non-numeric argument: print usage instead of a traceback.
        print("Usage is: python3 {} line_size".format(argv[0]))
| Araggar/INE5417-MazeRunner | greyscale.py | greyscale.py | py | 2,073 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_numb... |
35627619001 | from pytube import YouTube
def checkVideoResolution(tmp):
    """Return the subset of known video resolutions available as adaptive
    mp4 streams for the YouTube URL ``tmp``, highest first."""
    known = ["2160p", "1440p", "1080p", "720p", "480p", "360p", "240p", "144p"]
    video = YouTube(tmp)
    available = []
    for resolution in known:
        streams = video.streams.filter(adaptive=True, file_extension='mp4', res=resolution)
        if len(streams) > 0:
            available.append(resolution)
    return available
def checkAudioResolution(tmp):
    """Return the subset of known audio bitrates available as mp4 audio
    streams for the YouTube URL ``tmp``."""
    known = ["128kbps", "48kbps"]
    video = YouTube(tmp)
    available = []
    for bitrate in known:
        streams = video.streams.filter(file_extension='mp4', type="audio", abr=bitrate)
        if len(streams) > 0:
            available.append(bitrate)
    return available
| RamkaTheRacist/python-RTR | PY_les10_s/HW/ytdl/dloadlogic/resolutions.py | resolutions.py | py | 699 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytube.YouTube",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 14,
"usage_type": "call"
}
] |
36112486215 | import sys
import getopt
import numpy as np
import scipy
from scipy import ndimage
from os import listdir
import matplotlib.pyplot as plt
def Read_CSV(fname):
    """Load (image_url, label) rows from a CSV file into data matrices.

    Each line is "url,label". Every image is downloaded, resized to
    128x128 RGB, scaled to [0, 1], and stored as one column of X.

    Returns:
        X: ndarray of shape (128*128*3, m)
        Y: ndarray of shape (1, m) of labels

    Fixes over the original: `urllib.request` and `io` are now imported
    (they were referenced but never imported), labels are collected in a
    Python list instead of calling .append on a NumPy row (which raised
    AttributeError), X is allocated as 128*128*3 to match the resize
    below (it was mistakenly 64*64*3), and the file handle is closed.
    """
    import io
    import urllib.request

    labels = []
    columns = []
    with open(fname, "r") as f:
        for line in f:
            data = line.split(',')
            with urllib.request.urlopen(data[0]) as response:
                buf = io.BytesIO(response.read())
            a = ndimage.imread(buf, flatten=False)
            b = scipy.misc.imresize(a, size=(128, 128))
            b = b / 255  # normalize: max pixel value is 255
            columns.append(b.reshape(128 * 128 * 3, 1))
            labels.append(int(data[1]))
    X = np.hstack(columns) if columns else np.empty((128 * 128 * 3, 0))
    Y = np.array(labels).reshape(1, -1)
    print(Y)
    return X, Y
def Read_File(fname):
    """Read one image, resize to 128x128, normalize to [0, 1], and return
    it flattened as a (128*128*3, 1) column vector."""
    image = ndimage.imread(fname, flatten=False)
    resized = scipy.misc.imresize(image, size=(128, 128))
    # Normalize data: 255 because the max pixel value is 255.
    normalized = resized / 255
    return normalized.reshape(128 * 128 * 3, 1)
def collect_data(dirname):
    """Build the design matrix X and label row Y from a directory of images.

    Files whose names contain "cat" are labeled 1, everything else 0.
    """
    files = listdir(dirname)
    Y = np.empty((1, len(files)))
    X = np.empty((128 * 128 * 3, 0))
    for index, name in enumerate(files):
        column = Read_File(dirname + "/" + name)
        Y[0][index] = 0 if name.find("cat") == -1 else 1
        X = np.hstack((X, column))
    return X, Y
def initialize_hyperparams():
    """Return the default hyperparameter dictionary for training."""
    return {
        "learning_rate": 0.01,   # gradient-descent step size
        "num_iterations": 4000,  # epochs of propagation
        "dims": [4, 4, 3, 1],    # nodes per hidden/output layer
        "lamba": 0.01,           # L2 regularization strength
        "beta1": 0.9,            # exponential-weighted-average coefficient
        "beta2": 0.999,          # RMSProp coefficient
        "epsilon": 10 ** -8,     # Adam zero-division guard
        "minibatch": 100,        # minibatch size (0 = no minibatching)
    }
# Initialize the parameters
# lists_dims["n_x=l0", "l1", "l2" ....]
def initialize_parameters(lists_dims):
    """He-initialize weights plus zeroed optimizer state for every layer.

    lists_dims is [n_x, n_1, ..., n_L]; one (W, b, Vw, Vb, Sw, Sb) tuple
    is returned per layer, where V*/S* are the momentum / second-moment
    accumulators.
    """
    parameters = []
    for prev_n, cur_n in zip(lists_dims, lists_dims[1:]):
        # 'He' initialization keeps activation variance stable across layers.
        W = np.random.randn(cur_n, prev_n) * np.sqrt(np.divide(2, prev_n))
        b = np.zeros((cur_n, 1))
        parameters.append((W, b,
                           np.zeros(W.shape), np.zeros(b.shape),
                           np.zeros(W.shape), np.zeros(b.shape)))
    return parameters
#Activation Functions
def sigmoid(Z, derivative):
    """Elementwise logistic s(Z) = 1/(1+e^-Z); returns s*(1-s) when
    derivative is requested."""
    s = np.divide(1, 1 + np.exp(-Z))
    return s * (1 - s) if derivative else s
def relu(Z, derivative):
    """Elementwise ReLU: max(Z, 0); its subgradient (1 where Z > 0, else 0)
    when derivative is True.

    Rewritten as vectorized NumPy: the original iterated elementwise in
    Python (quadratic cost) and mutated Z in place, which also altered
    cached pre-activations when called during backprop. This version
    returns a new array and leaves Z untouched.
    """
    if derivative:
        return np.where(Z > 0, 1.0, 0.0)
    return np.maximum(Z, 0)
def forward_activation(Z, func):
    """Apply the named activation ('sigmoid' or 'relu') in forward mode."""
    dispatch = {"sigmoid": sigmoid, "relu": relu}
    assert func in dispatch
    return dispatch[func](Z, False)
# Calculate the Z and A parameters of forward propagation for each layer
def forward_propagate(X, parameters, N):
    """Run the forward pass through N layers.

    Returns the final activation AL and a per-layer cache of
    (A_prev, Z, W, b) tuples used by backprop. Every layer uses the
    sigmoid activation — both branches of the original selected it.
    """
    forward_cache = []
    AL = X
    for layer in range(N):
        A_prev = AL
        W, b = parameters[layer][0], parameters[layer][1]
        Z = np.dot(W, A_prev) + b
        AL = forward_activation(Z, "sigmoid")
        forward_cache.append((A_prev, Z, W, b))
    return AL, forward_cache
def regularize_cost(cost, m, lamba, parameters):
    """Add the L2 penalty of the output layer's weights to the cost.

    Penalty is lamba/(2m) times the Frobenius norm of the final W; only
    the last layer is penalized, matching the original. With lamba == 0
    the cost passes through untouched.
    """
    if lamba == 0:
        return cost
    W = parameters[-1][0]
    return cost + np.divide(lamba, 2 * m) * np.linalg.norm(W)
def compute_cost(AL, Y):
    """Mean binary cross-entropy between predictions AL and labels Y."""
    m = Y.shape[1]
    losses = np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL))
    return np.squeeze(-np.sum(losses) / m)
def back_propagate_linear(dZ, forward_cache):
    """Gradients of the linear step Z = W.A + b.

    Given dZ and the cached (A, Z, W, b), returns (dA_prev, dW, db),
    averaging over the m examples in the batch.
    """
    A, Z, W, b = forward_cache
    m = A.shape[1]
    dW = np.dot(dZ, A.T) / m
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)
    # Shape sanity checks, as in the original.
    assert W.shape == dW.shape
    assert b.shape == db.shape
    assert dA_prev.shape == A.shape
    return dA_prev, dW, db
def back_propagate_activation(dA, forward_cache, activation_func):
    """Convert dA into dZ through the activation's derivative, then run the
    linear backward step."""
    A, Z, W, b = forward_cache
    if activation_func == "relu":
        dZ = dA * relu(Z, True)
    elif activation_func == "sigmoid":
        dZ = dA * sigmoid(Z, True)
    else:
        assert 0  # unknown activation name
    return back_propagate_linear(dZ, forward_cache)
def back_propagate(AL, Y, forward_cache):
    """Walk the cached layers in reverse, collecting (dW, db) per layer.

    The returned list is ordered output-layer first. Every layer uses the
    sigmoid derivative, matching forward_propagate.
    """
    grads = []
    # Derivative of binary cross-entropy with respect to AL.
    dA = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    for cache in reversed(forward_cache):
        dA, dW, db = back_propagate_activation(dA, cache, "sigmoid")
        grads.append((dW, db))
    return grads
def regularize_weights(W, m, learning_rate, lamba):
    """Apply L2 weight decay: shrink W by learning_rate*lamba/m.

    No-op (same object returned) when lamba == 0.
    """
    if lamba == 0:
        return W
    decay = np.divide(np.multiply(learning_rate, lamba), m)
    return W - decay * W
# A combination of Exponential Weighted Average and RMSProp with bias correction
# To be used with mini-batches
def momentum(dW, db, Vw, Vb, hyperparams, layer):
    """Exponentially weighted average of gradients with bias correction.

    Returns the bias-corrected velocities followed by the updated raw
    state (Vw, Vb).
    """
    beta1 = hyperparams["beta1"]
    correction = 1 - np.power(beta1, layer)
    Vw = beta1 * Vw + (1 - beta1) * dW
    Vb = beta1 * Vb + (1 - beta1) * db
    return Vw / correction, Vb / correction, Vw, Vb
def rms_prop(dW, db, Sw, Sb, hyperparams, layer):
    """RMSProp scaling with bias correction.

    Gradients are divided by sqrt(S_corrected + epsilon) — epsilon sits
    under the square root here, exactly as in the original. Returns the
    scaled gradients followed by the updated raw state (Sw, Sb).
    """
    beta2 = hyperparams["beta2"]
    epsilon = hyperparams["epsilon"]
    correction = 1 - np.power(beta2, layer)
    Sw = beta2 * Sw + (1 - beta2) * np.power(dW, 2)
    Sb = beta2 * Sb + (1 - beta2) * np.power(db, 2)
    dW_optimized = dW / np.sqrt(Sw / correction + epsilon)
    db_optimized = db / np.sqrt(Sb / correction + epsilon)
    return dW_optimized, db_optimized, Sw, Sb
def adams_optimization(dW, db, Vw, Vb, Sw, Sb, hyperparams, layer):
    """Adam update: momentum plus RMSProp, both bias-corrected.

    Here epsilon is added outside the square root (unlike rms_prop above),
    as in the original. Returns the scaled gradients followed by the
    updated raw state (Vw, Vb, Sw, Sb).
    """
    beta1 = hyperparams["beta1"]
    beta2 = hyperparams["beta2"]
    epsilon = hyperparams["epsilon"]
    c1 = 1 - np.power(beta1, layer)
    c2 = 1 - np.power(beta2, layer)
    Vw = beta1 * Vw + (1 - beta1) * dW
    Vb = beta1 * Vb + (1 - beta1) * db
    Sw = beta2 * Sw + (1 - beta2) * np.power(dW, 2)
    Sb = beta2 * Sb + (1 - beta2) * np.power(db, 2)
    dW_optimized = (Vw / c1) / (np.sqrt(Sw / c2) + epsilon)
    db_optimized = (Vb / c1) / (np.sqrt(Sb / c2) + epsilon)
    return dW_optimized, db_optimized, Vw, Vb, Sw, Sb
def update_parameters(m, parameters, grads, hyperparams):
    """One gradient-descent step over every layer.

    grads is ordered output-layer first while parameters is input-layer
    first, hence the reversed index j. Gradients are smoothed with
    momentum (the Adam / RMSProp variants present in the original were
    commented out and are not used), and W additionally receives L2
    decay via regularize_weights.
    """
    learning_rate = hyperparams["learning_rate"]
    new_params = []
    j = len(parameters) - 1
    for i in range(len(parameters)):
        W, b, Vw, Vb, Sw, Sb = parameters[i]
        dW, db = grads[j]
        dW, db, Vw, Vb = momentum(dW, db, Vw, Vb, hyperparams, j + 1)
        j = j - 1
        W = W - learning_rate * dW
        W = regularize_weights(W, m, learning_rate, hyperparams["lamba"])
        b = b - learning_rate * db
        new_params.append((W, b, Vw, Vb, Sw, Sb))
    return new_params
def calculate_success(Y, AL):
    """Fraction of predictions (AL rounded to 0/1) that match the labels Y."""
    predictions = np.around(AL)
    # Rewrite each slot to 1 on a match with the label, 0 otherwise.
    for i in range(len(predictions[0])):
        predictions[0][i] = 1 if predictions[0][i] == Y[0][i] else 0
    print("Success Cal: ", predictions)
    return np.squeeze(np.sum(predictions, axis=1, keepdims=1) / len(predictions[0]))
def create_mini_batches(X, Y, batch_size, seed):
    """Split (X, Y) column-wise into shuffled mini-batches.

    batch_size == 0 means "no mini-batching": the full (X, Y) pair is
    returned as the single batch. The seed makes the per-epoch shuffle
    reproducible.

    Fix: the original computed shuffled copies (X_shuffle / Y_shuffle)
    but then sliced the *unshuffled* X and Y, so the shuffle never took
    effect; the slices now come from the shuffled arrays, keeping X and
    Y columns aligned.
    """
    np.random.seed(seed)
    mini_batches = []
    if batch_size == 0:
        mini_batches.append((X, Y))
        return mini_batches
    m = X.shape[1]
    permutate = np.random.permutation(m)
    X_shuffle = X[:, permutate]
    Y_shuffle = Y[:, permutate]
    num_batches = int(np.floor(m / batch_size))
    for k in range(num_batches):
        mini_batches.append((X_shuffle[:, batch_size * k:batch_size * (k + 1)],
                             Y_shuffle[:, batch_size * k:batch_size * (k + 1)]))
    if m % batch_size > 0:
        # Remainder batch with the leftover columns.
        mini_batches.append((X_shuffle[:, batch_size * num_batches:],
                             Y_shuffle[:, batch_size * num_batches:]))
    return mini_batches
def run_one_epoch(seed, X, Y, parameters, hyperparams, costs, m):
    """One pass over all mini-batches: forward, cost, backward, update.

    Appends each batch's regularized cost to `costs`; returns the updated
    parameters, the costs list, and the final batch's activations.
    """
    N = len(parameters)
    batches = create_mini_batches(X, Y, hyperparams["minibatch"], seed)
    for batch_X, batch_Y in batches:
        AL, forward_cache = forward_propagate(batch_X, parameters, N)
        batch_cost = regularize_cost(compute_cost(AL, batch_Y), m,
                                     hyperparams["lamba"], parameters)
        costs.append(batch_cost)
        grads = back_propagate(AL, batch_Y, forward_cache)
        parameters = update_parameters(m, parameters, grads, hyperparams)
    return parameters, costs, AL
def train_model(X, Y, parameters, hyperparams):
    """Run the full training loop.

    Reports the cost every 100 epochs and the final training-set success
    rate; returns the trained parameters and the cost history.
    """
    m = X.shape[1]
    costs = []
    for epoch in range(hyperparams["num_iterations"]):
        parameters, costs, AL = run_one_epoch(epoch, X, Y, parameters,
                                              hyperparams, costs, m)
        if epoch % 100 == 0:
            print("Cost after ", epoch, " iterations:", costs[-1])
    AL, forward_cache = forward_propagate(X, parameters, len(parameters))
    print("Trained model success rate: " + str(calculate_success(Y, AL) * 100) + "%")
    return parameters, costs
def test_model(X, Y, parameters):
    """Forward-propagate the test set and return its success rate."""
    AL, _ = forward_propagate(X, parameters, len(parameters))
    return calculate_success(Y, AL)
def print_help():
    """Print the command-line usage string."""
    print("classifier.py -l <learn_dir> -t <test_dir> -r <learn_rate> -i <num iterations> -n \"<comma separated num nodes in each layer>\"")
def plot_cost_gradient(cost):
    """Show the recorded cost history as a line plot."""
    plt.plot(cost)
    plt.ylabel("Cost")
    plt.xlabel("Per 100 iterations")
    plt.title("Cost gradient")
    plt.show()
def main(argv):
    """Parse CLI options, load data, train the classifier, and report the
    test-set accuracy.

    Fixes over the original: -t used a comparison (`test_dir == arg`)
    instead of an assignment, so the test directory could never be
    changed; and -r / -i stored their values in unused locals instead of
    updating the hyperparameter dictionary actually used for training.
    """
    np.random.seed(1)
    learn_dir = "./images"
    test_dir = "./test"
    hyperparams = initialize_hyperparams()
    dims = hyperparams["dims"]
    try:
        opts, args = getopt.getopt(argv, "hl:t:r:i:n:",
                                   ["help", "learndir=", "testdir=", "learnrate=", "iters=", "net="])
    except getopt.GetoptError:
        print_help()
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print_help()
            sys.exit(2)
        elif opt == "-l":
            learn_dir = arg
        elif opt == "-t":
            test_dir = arg  # was `test_dir == arg`, a no-op comparison
        elif opt == "-r":
            hyperparams["learning_rate"] = float(arg)  # was an unused local
        elif opt == "-i":
            hyperparams["num_iterations"] = int(arg)  # was an unused local
        elif opt == "-n":
            dims = [int(num) for num in arg.split(",")]
        else:
            print_help()
            sys.exit(2)
    XL, YL = collect_data(learn_dir)
    XT, YT = collect_data(test_dir)
    # The input layer size comes from the data; the rest from -n / defaults.
    lists_dims = [XL.shape[0]] + list(dims)
    print("NN Dimensions: ", lists_dims)
    # Train the model.
    parameters = initialize_parameters(lists_dims)
    parameters, cost = train_model(XL, YL, parameters, hyperparams)
    # Check the success rate with a test run.
    success_rate = test_model(XT, YT, parameters)
    print("Test Success rate: " + str(success_rate * 100) + "%")
    plot_cost_gradient(cost)
# Script entry point: forward the command-line arguments (minus argv[0]) to main().
if __name__ == "__main__" :
    main(sys.argv[1:])
| palafrank/simpleNN | classifier.py | classifier.py | py | 12,102 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.empty",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.imread",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"lin... |
12288030546 | #!/usr/bin/env python2.7
import numpy as np
import cv2
import glob
import json
from copy import copy
import os
class Configure():
    """Computes camera calibration parameters from chessboard sample
    images using OpenCV, for undistorting the pitch camera feeds."""
    def __init__(self):
        # Accumulated 3D object points and matching 2D image points,
        # one entry per sample image where the chessboard was found.
        self.objpoints = []
        self.imgpoints = []
        # Output frame dimensions used for the optimal new camera matrix.
        self.height = 480
        self.width = 640
    def getCalibrationParameters(self):
        """Detect an 8x5 chessboard in every pitch1 sample PNG, calibrate
        the camera, and return {0: pitch0, 1: pitch1} dicts of matrices.

        Each detected board is shown for 1 s as a visual sanity check.
        """
        dim = (8,5)
        # Planar chessboard model: z = 0, (x, y) on a unit grid.
        objp = np.zeros((dim[0]*dim[1], 3), np.float32)
        objp[:,:2] = np.mgrid[0:dim[0], 0:dim[1]].T.reshape(-1,2)
        images = glob.glob('../samples/pitch1/*.png')
        for fname in images:
            img = cv2.imread(fname)
            gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, dim, None)
            # If found, add object points, image points (after refining them)
            if ret == True:
                self.objpoints.append(objp)
                corners2 = copy(corners)
                # Sub-pixel corner refinement: stop after 30 iterations or eps 0.001.
                criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
                _ = cv2.cornerSubPix(gray,corners2,(11,11),(-1,-1),criteria)
                self.imgpoints.append(corners2)
                # Draw and display the corners
                # Comment this out to skip showing sample images!
                _ = cv2.drawChessboardCorners(img, dim, corners2, ret)
                cv2.imshow('img',img)
                cv2.waitKey(1000)
        # NOTE(review): `gray` below is whatever the last loop iteration left —
        # assumes at least one sample image exists; confirm before relying on it.
        ret, camera_matrix, dist, _, _ = cv2.calibrateCamera(self.objpoints, self.imgpoints, gray.shape[::-1],None,None)
        new_camera_matrix, roi=cv2.getOptimalNewCameraMatrix(camera_matrix, dist,(self.width,self.height),0,(self.width,self.height))
        # Both pitches currently receive the same calibration data.
        pitch1 = {'new_camera_matrix' : new_camera_matrix.tolist(),
              'camera_matrix' : camera_matrix.tolist(),
              'dist' : dist.tolist()}
        pitch0 = {'new_camera_matrix' : new_camera_matrix.tolist(),
              'camera_matrix' : camera_matrix.tolist(),
              'dist' : dist.tolist()}
        data = {0 : pitch0, 1: pitch1}
        return data
if __name__ == "__main__":
    # Write the calibration parameters next to the scripts directory,
    # at ../config/undistort_pitch1.json.
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../config/", "undistort_pitch1.json")
    C = Configure()
    data = C.getCalibrationParameters()
    with open(path, 'w') as f:
        f.write(json.dumps(data))
| pbsinclair42/SDP-2016 | vision/scripts/get_camera_configuration.py | get_camera_configuration.py | py | 2,294 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.mgrid",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line... |
72337035875 | from bs4 import BeautifulSoup
import requests
import json
import time
import random
# Spoof a mobile browser user agent so the Bundestag site serves the pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36'
}
# GetAllLinksAndNamesFromPolitics
# Stage 1: page through the members list (20 entries per request) and
# append every raw HTML response to AllData.html.
res = requests.get(f'https://www.bundestag.de/ajax/filterlist/de/abgeordnete/biografien/862712-862712?limit=20&noFilterSet=true&offset=0',headers=headers)
with open('AllData.html','a',encoding='utf-8') as f:
    f.write(res.text)
i = 12
while(i <= 732):
    res2 = requests.get(f'https://www.bundestag.de/ajax/filterlist/de/abgeordnete/biografien/862712-862712?limit=20&noFilterSet=true&offset={i}', headers=headers)
    with open('AllData.html','a',encoding='utf-8') as f:
        f.write(res2.text)
    del res2
    print(i)
    # Throttle requests to be polite to the server.
    time.sleep(5)
    i+=20
# FormJsonFromHTML
# Stage 2: parse the accumulated HTML into {name: biography URL} and dump
# it as PoliticLinkJson.json.
with open('AllData.html','r',encoding='utf-8') as f:
    fileRaw = f.read()
soup = BeautifulSoup(fileRaw,'html.parser')
names = soup.find_all('div',class_ = 'bt-bild-info-text')
politicNameForHref = []
politicHref = []
for name in names:
    politicNameForHref.append(name.find('p').text.strip())
PoliticLinkJson = {}
namesHref = soup.find_all('a',href=True)
for href in namesHref:
    politicHref.append(href['href'])
a = 0
# NOTE(review): this assumes names and hrefs were extracted in matching
# order and quantity — confirm against the page structure.
while(a < len(namesHref)):
    PoliticLinkJson[politicNameForHref[a]] = 'https://www.bundestag.de' + politicHref[a]
    a+=1
with open('PoliticLinkJson.json','w',encoding='utf-8') as f:
    json.dump(PoliticLinkJson,f,indent=4,ensure_ascii=False)
# PoliticsData
# Stage 3: fetch each biography page and collect name, occupation, and
# social-media links into PoliticData.json.
socialNetworks ={}
Person ={}
Persons = []
Name = []
Occupation = []
count = 0
while(count<len(politicHref)):
    res3 = requests.get('https://www.bundestag.de'+politicHref[count] , headers=headers)
    soup2 = BeautifulSoup(res3.text,'html.parser')
    Facebook = soup2.find('a', {'title' : 'Facebook'})
    socialNetworks['Facebook'] = '' if Facebook is None else Facebook['href']
    Homepage = soup2.find('a', {'title' : 'Homepage'})
    socialNetworks['Homepage'] = '' if Homepage is None else Homepage['href']
    Youtube = soup2.find('a', {'title' : 'Youtube'})
    socialNetworks['Youtube'] = '' if Youtube is None else Youtube['href']
    Twitter= soup2.find('a', {'title' : 'Twitter'})
    socialNetworks['Twitter'] = '' if Twitter is None else Twitter['href']
    Instagram = soup2.find('a', {'title' : 'Instagram'})
    socialNetworks['Instagram'] = '' if Instagram is None else Instagram['href']
    # NOTE(review): this looks up title 'Instagram' for the LinkedIn slot —
    # likely a copy-paste mistake; confirm intended title is 'LinkedIn'.
    LinkedIn= soup2.find('a', {'title' : 'Instagram'})
    socialNetworks['LinkedIn'] = '' if LinkedIn is None else LinkedIn['href']
    Name = soup2.find('div' , 'bt-biografie-name').find('h3')
    Occupation = soup2.find('div',class_= 'bt-biografie-beruf')
    Person['name'] = '' if Name is None else Name.text.strip()
    Person['socialNetworks'] = socialNetworks.copy()
    Person['Occupation'] = '' if Occupation is None else Occupation.text.strip()
    Persons.append(Person.copy())
    time.sleep(1)
    print(count)
    count+=1
with open('PoliticData.json','w',encoding='utf-8') as f:
    json.dump(Persons,f,indent=4,ensure_ascii=False)
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_nu... |
39560557801 | import streamlit as st
import numpy as np
import pandas as pd
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
from datetime import datetime
import db
# Streamlit dashboard for exploring product-review sentiment data.
st.title("Product Review Analysis")
st.sidebar.title("Select Your Choices")
st.set_option('deprecation.showPyplotGlobalUse', False)
data_path = ("Yelp.csv")
def load_data():
    """Read the review CSV and parse its 'date' column to datetimes."""
    data = pd.read_csv(data_path)
    data['date'] = pd.to_datetime(data['date'])
    return data
data = load_data()
st.markdown("")
# Collapsible raw-data viewer.
see_data = st.expander('Click here to see the dataset')
with see_data:
    st.dataframe(data.reset_index(drop=True))
st.text('')
# Section: show up to 5 random reviews of the chosen sentiment.
st.sidebar.subheader("Random Reviews")
random_tweet = st.sidebar.radio('Select the Sentiment',('positive','negative','neutral'))
if st.sidebar.checkbox("Show", False, key="1"):
    st.subheader("Here are some of random reviews according to your choice!")
    for i in range(len(data['date'])):
        if i ==5:
            break
        else:
            st.markdown(str(i+1) +"." + data.query("sentiments == @random_tweet")[['text']].sample(n=1).iat[0,0])
# Section: sentiment counts as a histogram or pie chart.
st.sidebar.markdown("### Visualization of Reviews")
select = st.sidebar.selectbox('Select type of visualization',['Histogram','PieChart'])
sentiment_count = data['sentiments'].value_counts()
sentiment_count = pd.DataFrame({'Sentiments':sentiment_count.index,'Reviews':sentiment_count.values})
if st.sidebar.checkbox('Show',False,key='0'):
    st.markdown("### No. of reviews by sentiments ")
    if select=='Histogram':
        fig = px.bar(sentiment_count,x='Sentiments',y='Reviews',color='Reviews',height=500)
        st.plotly_chart(fig)
    else:
        fig = px.pie(sentiment_count,values='Reviews',names='Sentiments')
        st.plotly_chart(fig)
# Section: per-city sentiment breakdown (faceted histogram).
st.sidebar.subheader("Breakdown Sentiments by city")
choice = st.sidebar.multiselect("Pick City", tuple(pd.unique(data["city"])))
if st.sidebar.checkbox("Show", False, key="5"):
    if len(choice) > 0:
        chosen_data = data[data["city"].isin(choice)]
        fig = px.histogram(chosen_data, x="city", y="sentiments",
                            histfunc="count", color="sentiments",
                            facet_col="sentiments", labels={"sentiments": "sentiment"})
        st.plotly_chart(fig)
# Word cloud
# Section: word cloud of all review text for the chosen sentiment.
st.sidebar.subheader("Word Cloud")
word_sentiment = st.sidebar.radio("Which Sentiment to Display?", tuple(pd.unique(data["sentiments"])))
if st.sidebar.checkbox("Show", False, key="6"):
    st.subheader(f"Word Cloud for {word_sentiment.capitalize()} Sentiment")
    df = data[data["sentiments"]==word_sentiment]
    words = " ".join(df["text"])
    #processed_words = " ".join([word for word in words.split() if "http" not in word and not word.startswith() and word != "RT"])
    processed_words = " ".join([word for word in words.split()])
    wordcloud = WordCloud(stopwords=STOPWORDS, background_color="white", width=600, height=500).generate(processed_words)
    plt.imshow(wordcloud)
    plt.xticks([])
    plt.yticks([])
    st.pyplot()
{
"api_name": "streamlit.title",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.title",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "stream... |
39601964882 | import datetime
import json
import requests
from django.conf import settings
from django.db import models
class Bill(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
comment = models.CharField(max_length=45, default='')
amount = models.FloatField(default=1.00)
status = models.CharField(max_length=15, null=True, blank=True)
site = models.CharField(max_length=45)
def get_url(self):
try:
headers = {
'Authorization': f'Bearer {settings.QIWI_SECRET_KEY}',
'Content-Type': 'application/json',
'Accept': 'application/json',
}
expirationDateTime = self.created_at + datetime.timedelta(days=2)
expirationDateTime = expirationDateTime.strftime('%Y-%m-%dT%H:%M:%S+03:00')
request_data = {
'amount': {
'currency': 'RUB',
'value': str(round(self.amount, 2))
},
'comment': str(self.comment),
'expirationDateTime': str(expirationDateTime),
}
request_data = json.dumps(request_data)
response = requests.put(f'https://api.qiwi.com/partner/bill/v1/bills/{settings.QIWI_DB_VERSION}_{self.id}/', headers=headers, data=request_data)
data = response.json()
self.status = data['status']['value']
return data['payUrl']
except:
return None
def reject(self):
try:
headers = {
'Authorization': f'Bearer {settings.QIWI_SECRET_KEY}',
'Content-Type': 'application/json',
'Accept': 'application/json',
}
response = requests.post(f'https://api.qiwi.com/partner/bill/v1/bills/{settings.QIWI_DB_VERSION}_{self.id}/reject/', headers=headers)
data = response.json()
self.status = data['status']['value']
except:
return
def success(self):
import hmac
import hashlib
try:
secret_key = bytes(settings.OPLATA_KEY)
message = bytes(f'{self.id}|{self.status}|RUB|{self.amount}')
signature = hmac.new(secret_key, message, hashlib.sha256).hexdigest()
headers = {
'Content-Type': 'application/json',
'X-Api-Signature-SHA256': signature,
}
request_data = {
'id': self.id,
'status': self.status,
'amount': {
'currency': 'RUB',
'value': str(self.amount)
},
'comment': self.comment,
}
request_data = json.dumps(request_data)
response = requests.post(self.site, data=request_data, headers=headers)
return response
except:
return None
| AlexFire-Dev/Billing | apps/bills/models.py | models.py | py | 2,918 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 11,
"usage_type": "call"
},
{
"api_n... |
43819454476 | from sqlalchemy import and_
from sqlalchemy import delete
from sqlalchemy import desc
from sqlalchemy import select
from sqlalchemy.orm import Session
from danswer.db.models import ConnectorCredentialPair
from danswer.db.models import DeletionAttempt
from danswer.db.models import DeletionStatus
from danswer.db.models import IndexingStatus
def check_deletion_attempt_is_allowed(
    connector_credential_pair: ConnectorCredentialPair,
) -> bool:
    """Return True when the pair may be deleted.

    Deletion is allowed only when the connector is disabled AND there is
    no index attempt currently planned or running.
    """
    if not connector_credential_pair.connector.disabled:
        return False
    last_status = connector_credential_pair.last_attempt_status
    return last_status not in (
        IndexingStatus.IN_PROGRESS,
        IndexingStatus.NOT_STARTED,
    )
def create_deletion_attempt(
    connector_id: int,
    credential_id: int,
    db_session: Session,
) -> int:
    """Persist a new NOT_STARTED deletion attempt and return its id."""
    attempt = DeletionAttempt(
        connector_id=connector_id,
        credential_id=credential_id,
        status=DeletionStatus.NOT_STARTED,
    )
    db_session.add(attempt)
    db_session.commit()
    return attempt.id
def get_not_started_index_attempts(db_session: Session) -> list[DeletionAttempt]:
    """Fetch every deletion attempt still waiting to be picked up.

    NOTE(review): the name says "index" but the query is over DeletionAttempt.
    """
    pending = db_session.scalars(
        select(DeletionAttempt).where(
            DeletionAttempt.status == DeletionStatus.NOT_STARTED
        )
    )
    return list(pending.all())
def get_deletion_attempts(
    db_session: Session,
    connector_ids: list[int] | None = None,
    statuses: list[DeletionStatus] | None = None,
    ordered_by_time_updated: bool = False,
    limit: int | None = None,
) -> list[DeletionAttempt]:
    """Query deletion attempts, optionally filtered, ordered and limited.

    Filters are only applied when truthy (an empty list means "no filter").
    Ordering is most-recently-updated first.
    """
    stmt = select(DeletionAttempt)
    if connector_ids:
        stmt = stmt.where(DeletionAttempt.connector_id.in_(connector_ids))
    if statuses:
        stmt = stmt.where(DeletionAttempt.status.in_(statuses))
    if ordered_by_time_updated:
        stmt = stmt.order_by(desc(DeletionAttempt.time_updated))
    if limit:
        stmt = stmt.limit(limit)
    return list(db_session.scalars(stmt).all())
def delete_deletion_attempts(
    db_session: Session, connector_id: int, credential_id: int
) -> None:
    """Remove every deletion attempt recorded for the given connector/credential.

    Note: does not commit — the caller owns the transaction.
    """
    db_session.execute(
        delete(DeletionAttempt).where(
            and_(
                DeletionAttempt.connector_id == connector_id,
                DeletionAttempt.credential_id == credential_id,
            )
        )
    )
| wuzhiping/danswer | backend/danswer/db/deletion_attempt.py | deletion_attempt.py | py | 2,567 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "danswer.db.models.ConnectorCredentialPair",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "danswer.db.models.IndexingStatus.IN_PROGRESS",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "danswer.db.models.IndexingStatus",
"line_number":... |
15072056728 | from os import path
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from ..errors import (
AttachmentTooLargeError,
AuthenticationError,
)
from ..signals import email_received, email_received_unacceptable
from ..views import receive_inbound_email, _log_request
from .test_files.sendgrid_post import test_inbound_payload as sendgrid_payload
from .test_files.mailgun_post import test_inbound_payload as mailgun_payload
from .test_files.mandrill_post import post_data as mandrill_payload
from .test_files.mandrill_post import (
post_data_with_attachments as mandrill_payload_with_attachments
)
# don't read it out of the settings - fix it here so we know what we're using
DEFAULT_TEST_PARSER = "inbound_email.backends.sendgrid.SendGridRequestParser"
MANDRILL_REQUEST_PARSER = "inbound_email.backends.mandrill.MandrillRequestParser"
SENDGRID_REQUEST_PARSER = "inbound_email.backends.sendgrid.SendGridRequestParser"
MAILGUN_REQUEST_PARSER = "inbound_email.backends.mailgun.MailgunRequestParser"
class ViewFunctionTests(TestCase):
    """Tests for the inbound view function receive_inbound_email.
    The view function is responsible for loading the correct backend, and
    firing the signal once the email is parsed. This test suite contains no
    parsing tests - these are covered in the relevant backend tests - just tests
    for the signals.
    """
    def setUp(self):
        # Every test needs access to the request factory.
        self.factory = RequestFactory()
        self.url = reverse('receive_inbound_email')
        self.test_upload_txt = path.join(path.dirname(__file__), 'test_files/test_upload_file.txt')
    def _get_payloads_and_parsers(self, with_attachments=False):
        # Return (parser dotted path, sample POST payload) pairs so that each
        # test can be run once per supported backend.
        mpwa = mandrill_payload_with_attachments
        mp = mandrill_payload
        return [
            (MANDRILL_REQUEST_PARSER, mpwa if with_attachments else mp),
            (SENDGRID_REQUEST_PARSER, sendgrid_payload),
            (MAILGUN_REQUEST_PARSER, mailgun_payload),
        ]
    def test_log_inbound_requests(self):
        """Test the internal log function."""
        # just to exercise the function - it doesn't 'do' anything other than
        # log to the console, but is good to know that it doesn't break.
        for klass, payload in self._get_payloads_and_parsers():
            settings.INBOUND_EMAIL_PARSER = klass
            request = self.factory.post(self.url, data=payload)
            _log_request(request)
    def test_inbound_request_HEAD_200(self):
        """Return 200 OK to a HEAD request."""
        request = self.factory.head(self.url)
        response = receive_inbound_email(request)
        self.assertEqual(response.status_code, 200)
    def test_valid_request(self):
        """A parseable POST returns HTTP 200 for every backend."""
        for klass, payload in self._get_payloads_and_parsers():
            settings.INBOUND_EMAIL_PARSER = klass
            request = self.factory.post(self.url, data=payload)
            response = receive_inbound_email(request)
            self.assertContains(response, "Successfully parsed", status_code=200)
    def test_parse_error_response_200(self):
        """Test the RequestParseErrors are handled correctly, and return HTTP 200."""
        settings.INBOUND_EMAIL_RESPONSE_200 = True
        for klass, payload in self._get_payloads_and_parsers():
            settings.INBOUND_EMAIL_PARSER = klass
            # an empty POST body is unparseable for every backend
            request = self.factory.post(self.url, data={})
            response = receive_inbound_email(request)
            self.assertContains(response, "Unable to parse", status_code=200)
    def test_parse_error_response_400(self):
        """Test the RequestParseErrors are handled correctly, and return HTTP 400."""
        settings.INBOUND_EMAIL_RESPONSE_200 = False
        request = self.factory.post(self.url, data={})
        response = receive_inbound_email(request)
        self.assertContains(response, "Unable to parse", status_code=400)
    def test_email_received_signal(self):
        """Test that a valid POST fires the email_received signal."""
        # define handler
        for klass, payload in self._get_payloads_and_parsers():
            # handler is invoked synchronously inside receive_inbound_email,
            # so closing over the loop variable `klass` is safe here
            def on_email_received(sender, **kwargs):
                self.on_email_received_fired = True
                self.assertEqual(sender.__name__, klass.split('.')[-1])
                request = kwargs.pop('request', None)
                email = kwargs.pop('email', None)
                self.assertIsNotNone(email)
                self.assertIsInstance(email, EmailMultiAlternatives)
                self.assertIsNotNone(request)
            email_received.connect(on_email_received)
            settings.INBOUND_EMAIL_PARSER = klass
            request = self.factory.post(self.url, data=payload)
            # connect handler
            self.on_email_received_fired = False
            # fire a request in to force the signal to fire
            receive_inbound_email(request)
            self.assertTrue(self.on_email_received_fired)
            email_received.disconnect(on_email_received)
    def test_email_received_unacceptable_signal_fired_for_too_large_attachment(self):
        # set a zero allowed max attachment size
        settings.INBOUND_EMAIL_ATTACHMENT_SIZE_MAX = 0
        for klass, payload in self._get_payloads_and_parsers(with_attachments=True):
            settings.INBOUND_EMAIL_PARSER = klass
            _payload = payload.copy()
            # SendGrid and Mailgun post attachments as separate form fields
            if klass == SENDGRID_REQUEST_PARSER:
                _payload['attachment'] = open(self.test_upload_txt, 'r')
            if klass == MAILGUN_REQUEST_PARSER:
                _payload['attachment-1'] = open(self.test_upload_txt, 'r')
            # define handler
            def on_email_received(sender, **kwargs):
                self.on_email_received_fired = True
                request = kwargs.pop('request', None)
                email = kwargs.pop('email', None)
                exception = kwargs.pop('exception', None)
                self.assertEqual(sender.__name__, klass.split('.')[-1])
                self.assertIsNotNone(request)
                self.assertIsInstance(email, EmailMultiAlternatives)
                self.assertIsInstance(exception, AttachmentTooLargeError)
            email_received_unacceptable.connect(on_email_received)
            self.on_email_received_fired = False
            request = self.factory.post(self.url, data=_payload)
            receive_inbound_email(request)
            self.assertTrue(self.on_email_received_fired, klass)
            email_received_unacceptable.disconnect(on_email_received)
    @override_settings(INBOUND_MANDRILL_AUTHENTICATION_KEY='mandrill_key')
    def test_email_received_unacceptable_signal_fired_for_mandrill_mistmatch_signature(self):
        # NOTE(review): "mistmatch" typo kept — renaming would change the
        # test id reported by the runner.
        parser = MANDRILL_REQUEST_PARSER
        payload = mandrill_payload
        settings.INBOUND_EMAIL_PARSER = parser
        _payload = payload.copy()
        # define handler
        def on_email_received(sender, **kwargs):
            self.on_email_received_fired = True
            request = kwargs.pop('request', None)
            email = kwargs.pop('email', None)
            exception = kwargs.pop('exception', None)
            self.assertEqual(sender.__name__, parser.split('.')[-1])
            self.assertIsNotNone(request)
            self.assertIsNone(email,)
            self.assertIsInstance(exception, AuthenticationError)
        email_received_unacceptable.connect(on_email_received)
        self.on_email_received_fired = False
        request = self.factory.post(
            self.url,
            data=_payload,
            HTTP_X_MANDRILL_SIGNATURE='invalid_signature',
        )
        receive_inbound_email(request)
        self.assertTrue(self.on_email_received_fired, parser)
        email_received_unacceptable.disconnect(on_email_received)
| yunojuno-archive/django-inbound-email | inbound_email/tests/test_views.py | test_views.py | py | 8,035 | python | en | code | 67 | github-code | 1 | [
{
"api_name": "django.test.TestCase",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.test.client.RequestFactory",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 43,
"usage_type": "call"
},
{
"api_na... |
25108675279 | #!/usr/bin/env python
# xml parsing help from https://www.geeksforgeeks.org/reading-and-writing-xml-files-in-python/?ref=lbp
# to run you need to do
# `pip3 install beautifulsoup4`
# `pip3 install lxml`
import argparse
import datetime
from bs4 import BeautifulSoup
class Card(object):
    """A card built from a Cockatrice XML ``<card>`` element, exportable in
    Magic Set Editor (MSE) set-file format.

    BUG FIX: this class previously defined TWO ``__init__`` methods. Python
    does not overload, so the first (keyword-based) definition was dead code
    shadowed by the second — and it used inconsistent camelCase attribute
    names (``timeCreated``) that ``exportAsString`` never read. It has been
    removed; construction is ``Card(cockatrice_card)``.
    """

    def __init__(self, cockatrice_card):
        """Build a card from a Cockatrice ``<card>`` tag.

        Args:
            cockatrice_card: an iterable of child tags (e.g. a bs4 Tag);
                each child exposes ``.name``, ``.string`` and — for the
                ``set`` tag — item access for the ``rarity`` attribute.
        """
        self.styling = False
        self.notes = ""
        dt_date = datetime.datetime.now()
        current_time_str = dt_date.strftime('%y-%m-%d %H:%M:%S')
        self.time_created = current_time_str
        self.time_modified = current_time_str
        # defaults — overwritten below from the XML children when present
        self.name = ""
        self.casting_cost = ""
        self.supertype = ""
        self.subtype = ""
        self.rule_text = ""
        self.flavor_text = ""
        self.power = ""
        self.toughness = ""
        self.rarity = ""
        for tag in cockatrice_card:
            # bs4 children include bare strings whose .name is None; those
            # simply fall through every branch
            if tag.name == 'name':
                self.name = tag.string
            elif tag.name == 'manacost':
                self.casting_cost = tag.string
            elif tag.name == 'type':
                # format: "<supertype> - <subtype>", e.g. "Pokémon - Water";
                # the subtype part is optional (e.g. plain "Land")
                values = tag.string.split('-')
                self.supertype = values[0].strip()
                if len(values) > 1:
                    self.subtype = values[1].strip()
            elif tag.name == 'pt':
                # format: "<power>/<toughness>", e.g. "1/2"
                values = tag.string.split('/')
                self.power = values[0]
                self.toughness = values[1]
            elif tag.name == 'text':
                self.rule_text = tag.string
            elif tag.name == 'set':
                self.rarity = tag['rarity']
                # MSE spells this rarity out in full
                if tag['rarity'] == "mythic":
                    self.rarity = "mythic rare"
        # Cockatrice files carry no image data; these stay blank and can be
        # merged in from an existing MSE set file by future tooling.
        self.watermark = ""
        self.card_code_text = ""
        self.image = ""
        self.image2 = ""
        self.mainframe_image1 = ""
        self.mainframe_image2 = ""

    def __repr__(self):
        # debug representation doubles as the MSE serialization
        return self.exportAsString()

    def exportAsString(self):
        """Serialize this card as an MSE set-file ``card:`` entry.

        The output is a tab-indented block, e.g.::

            card:
            \thas styling: false
            \tname: Bulbasaur
            \tcasting cost: G
            \t...
        """
        string = "card:\n"
        string += f"\thas styling: {str(self.styling).lower()}\n"
        string += "\tnotes:\n"
        string += f"\ttime created: {self.time_created}\n"
        string += f"\ttime modified: {self.time_modified}\n"
        string += f"\tname: {self.name}\n"
        string += f"\tcasting cost: {self.casting_cost}\n"
        string += f"\timage: {self.image}\n"
        string += f"\tsuper type: <word-list-type>{self.supertype}</word-list-type>\n"
        string += "\tsub type:\n"
        string += f"\t\t{self.subtype}\n"
        string += "\trule text:\n"
        string += f"\t\t{self.rule_text}\n"
        string += f"\tflavor text: <i-flavor>{self.flavor_text}</i-flavor>\n"
        string += f"\twatermark: {self.watermark}\n"
        string += f"\tpower: {self.power}\n"
        string += f"\ttoughness: {self.toughness}\n"
        string += f'\trarity: {self.rarity}\n'
        string += f"\tcard code text: {self.card_code_text}\n"
        string += f"\timage 2: {self.image2}\n"
        string += f"\tmainframe image: {self.mainframe_image1}\n"
        string += f"\tmainframe image 2: {self.mainframe_image2}\n"
        return string
def main():
    """Convert a Cockatrice card XML file into a Magic Set Editor set file.

    Usage:
    - python set_editor_script.py [options] -i input_file -o output_file
    Options:
    -p preserve pre-set image data from the input file
    Example:
    python set_editor_script.py -i ../ptcg_cockatrice.xml -o example_set2.set
    """
    # TODO: we need a version of this functionality that smashes a cockatrice file w/
    # a mtg set file to populate images etc but update the mtg set file with the updated
    # cockatrice info
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--preserve', action='store_true', help='preserves image information in the input file')
    parser.add_argument('-i', '--input', type=str, required=True, help='input file')
    parser.add_argument('-o', '--output', type=str, required=True, help='output file, cannot be the same as input file')
    args = parser.parse_args()
    # validate args
    # NOTE(review): --preserve is parsed but never used below — confirm intent
    if (args.input == args.output):
        print('Error: input file cannot be the same as output file.')
        return
    # open the input file as XML, input is assumed to be a cockatrice card file
    with open(args.input, 'r') as f:
        data = f.read()
        xmlObjects = BeautifulSoup(data, "xml")
    # grab the list of cards out of the input cockatrice xml file
    allCards = xmlObjects.findAll("card")
    print(f'Retrieved {len(allCards)} cards')
    ripped_cards = []
    for index in range(len(allCards)):
        print(f'card {index}:')
        # grab the info and transform this into the Card instance format above
        cockatrice_card = allCards[index]
        ripped_card = Card(cockatrice_card)
        print(ripped_card.name)
        ripped_cards.append(ripped_card)
    # then open the output file
    with open(args.output, 'w') as output_file:
        # write the header (fixed MSE preamble; keep verbatim)
        output_file.write("""
mse version: 0.3.8
game: magic
stylesheet: m15
set info:
\tsymbol: symbol5.mse-symbol
\tmasterpiece symbol:
styling:
\tmagic-m15:
\t\ttext box mana symbols: magic-mana-small.mse-symbol-font
\t\tinverted common symbol: no
\t\toverlay:
""")
        # write each card in the list
        for card in ripped_cards:
            output_file.write(card.exportAsString())
        # write the footer
        output_file.write("""
version control:
\ttype: none
apprentice code:
""")
    print('Done')
if __name__ == "__main__":
    main()
| brooks42/pkto | scripts/set_editor_script.py | set_editor_script.py | py | 9,191 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": ... |
26721674595 | import json
from flask import Flask, render_template, jsonify, request, make_response, current_app
from random import *
from flask_cors import CORS
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
import logging
from logging.handlers import RotatingFileHandler
from flask_httpauth import HTTPTokenAuth
from app.license_util import license_generate, license_verify
# -----------------------------------setting begin--------------------------------------------
# Log everything at DEBUG and mirror records into a rotating 'logs' file
# (1 MiB per file, 10 backups).
logging.basicConfig(level=logging.DEBUG)
file_log_handler = RotatingFileHandler('logs', maxBytes=1024 * 1024, backupCount=10)
formatter = logging.Formatter('%(levelname)s %(filename)s %(lineno)d %(message)s')
file_log_handler.setFormatter(formatter)
# attach the file handler to the root logger
logging.getLogger().addHandler(file_log_handler)
app = Flask(__name__,
            static_folder="../dist/static",  # static assets directory
            template_folder="../dist")  # Vue build output ("dist") doubles as the Flask template dir
app.config.from_object('config')
CORS(app, supports_credentials=True)
# 跨域配置
@app.after_request
def after_request(resp):
    """Attach CORS headers to every outgoing response.

    NOTE(review): the allowed origin is hard-coded to the local dev server.
    """
    response = make_response(resp)
    cors_headers = {
        'Access-Control-Allow-Origin': 'http://127.0.0.1:5000',
        'Access-Control-Allow-Methods': 'GET,POST',
        'Access-Control-Allow-Headers': 'content-type,Authorization',
    }
    for header, value in cors_headers.items():
        response.headers[header] = value
    return response
# -----------------------------------setting end--------------------------------------------
# -----------------------------------auth begin--------------------------------------------
auth = HTTPTokenAuth(scheme='Bearer')
@auth.verify_token
def verify_token(token):
    # Delegate bearer-token validation to TokenTool; flask_httpauth treats a
    # falsy return value as "unauthorized".
    return TokenTool.verify_auth_token(token)
# -----------------------------------auth end--------------------------------------------
# -----------------------------------routes begin--------------------------------------------
# 登录态检查
@app.route('/api/authCheck')
@auth.login_required
def authCheck():
    """Lightweight probe the frontend uses to check whether it is logged in."""
    return jsonify({'msg': 'ok'})
# license验证登录
@app.route('/api/login', methods=['POST'])
def login():
    """Validate the local license file and hand back a short-lived token.

    Returns HTTP 403 when the license cannot be verified.
    """
    try:
        user = license_verify.license_verify()
    except Exception as ex:
        return forbidden('Invalid license. ' + str(ex))
    # token is valid for 600 seconds
    return {
        'token': TokenTool.generate_auth_token(600, user),
    }
# license生成
@app.route('/api/generate', methods=['POST'])
def generate():
    """Create a license file from the POSTed username/validity/modules.

    Returns HTTP 401 with a message when generation fails.
    """
    payload = json.loads(request.get_data())['data']
    username = payload['username']
    valid_seconds = payload['valid_seconds']
    modules = payload['modules']
    try:
        license_generate.license_generate(username, valid_seconds, modules)
    except Exception as ex:
        return unauthorized('Generate license failed. ' + str(ex))
    return jsonify({'msg': 'ok'})
# 前端获取license信息
@app.route('/api/licenseMassage', methods=['GET'])
@auth.login_required
def get_license_massage():
    """Return the modules and expiry date stored in the caller's token.

    NOTE(review): "Massage"/"massage" is a historical typo for "message",
    kept because the route and function name are part of the public API.
    """
    token = request.headers.get('Authorization')[7:]  # strip the "Bearer " prefix
    try:
        user = TokenTool.get_token_message(token)
    except Exception:
        return unauthorized('token is needed')
    return {
        'modules': user['modules'],
        'valid_date': user['valid_date'],
    }
# 模块鉴权
@app.route('/api/moduleAccess', methods=['POST'])
@auth.login_required
def module_access_check():
    """Report whether the module named in the POST body is licensed."""
    token = request.headers.get('Authorization')[7:]  # strip the "Bearer " prefix
    try:
        user = TokenTool.get_token_message(token)
    except Exception:
        return unauthorized('token is needed')
    module_name = json.loads(request.get_data())['data']['moduleName']
    return {'access': module_name in user['modules']}
# 能力测试接口——生成随机数
@app.route('/api/random')
@auth.login_required
def random_number():
    """Capability-demo endpoint: return a random integer in [1, 100]."""
    return jsonify({'randomNumber': randint(1, 100)})
# url redirect 所有的url将被重定向至index.html,由前端vue-router进行分发
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
# @auth_login
def catch_all(path):
    """Serve index.html for every path; vue-router handles routing client-side."""
    # Render index.html via the template plugin; Flask looks it up in the
    # configured template folder (the Vue "dist" output) automatically.
    return render_template("index.html", name="index")
# -----------------------------------routes end--------------------------------------------
# -----------------------------------error begin--------------------------------------------
def forbidden(message):
    """Build a 403 JSON error response with the given message."""
    body = jsonify({'error': 'forbidden', 'message': message})
    body.status_code = 403
    return body
def unauthorized(message):
    """Build a 401 JSON error response with the given message."""
    body = jsonify({'error': 'unauthorized', 'message': message})
    body.status_code = 401
    return body
# -----------------------------------error end --------------------------------------------
# -----------------------------------Data Base begin--------------------------------------------
# token生成验证工具 后续应存储于数据库
class TokenTool:
    """Helpers around itsdangerous timed JSON web signature tokens.

    NOTE(review): generate_auth_token reads ``app.config`` while the other
    two helpers read ``current_app.config`` — presumably the same app;
    confirm before unifying.
    """

    @staticmethod
    def generate_auth_token(expiration=1800, user=None):
        """Serialize *user* (default empty dict) into a token that expires
        after *expiration* seconds."""
        payload = {} if user is None else user
        serializer = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
        return serializer.dumps(payload).decode('utf-8')

    @staticmethod
    def verify_auth_token(token):
        """Return True when the token deserializes (valid signature, not expired)."""
        serializer = Serializer(current_app.config['SECRET_KEY'])
        try:
            serializer.loads(token)
        except Exception:
            return False
        return True

    @staticmethod
    def get_token_message(token):
        """Deserialize and return the token payload; re-raises on any failure."""
        serializer = Serializer(current_app.config['SECRET_KEY'])
        try:
            return serializer.loads(token)
        except Exception as ex:
            raise ex
# -----------------------------------end----------------------------------------------------
# if __name__ == '__main__':
# app.debug = True
# app.run()
| Ciscol/license_backend | run.py | run.py | py | 6,038 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.handlers.RotatingFileHandler",
"line_number": 14,
"usage_type": "call"
},
{
"api_n... |
43184453169 | import cv2
import numpy as np
import os
# from .myEdgeDetector import myCanny
from myHoughLines import Handwrite_HoughLines
from myHoughTransform import Handwrite_HoughTransform
# parameters
# NOTE(review): sigma is never used below — confirm whether the custom edge
# detector (commented-out myCanny import) was meant to consume it.
sigma = 2
threshold = 0.03
rhostep = 2
thetastep = np.pi / 90
num_lines = 15
# end of parameters
img0 = cv2.imread('img02.jpg')
# collapse to grayscale if the image has color channels
if (img0.ndim == 3):
    img = cv2.cvtColor(img0,cv2.COLOR_BGR2GRAY)
else:
    img = img0
img_edge = cv2.Canny(img, 50, 150, apertureSize = 3)
# normalize the 0/255 Canny output to [0, 1] before thresholding
img_edge = np.float32(img_edge) / 255
img_threshold = np.float32(img_edge > threshold)
# hand-written Hough transform: accumulator plus the rho/theta axes
[img_houghtrans, rhoList, thetaList] = Handwrite_HoughTransform(img_threshold, rhostep, thetastep)
cv2.imwrite('edge02.png', 255 * np.sqrt(img_edge / img_edge.max()))
cv2.imwrite('thres02.png', 255 * img_threshold)
cv2.imwrite('hough02.png', 255 * img_houghtrans / img_houghtrans.max())
# indices of the num_lines strongest accumulator peaks
[rhos, thetas] = Handwrite_HoughLines(img_houghtrans, num_lines)
# display your line segment results in red
for k in np.arange(num_lines):
    # convert (rho, theta) back to two far-apart endpoints on the line
    a = np.cos(thetaList[thetas[k]])
    b = np.sin(thetaList[thetas[k]])
    x0 = a*rhoList[rhos[k]]
    y0 = b*rhoList[rhos[k]]
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    cv2.line(img0,(x1,y1),(x2,y2),(0,0,255),1)
lines = cv2.HoughLinesP(np.uint8(255 * img_threshold), rhostep, thetastep, \
        50, minLineLength = 20, maxLineGap = 5)
# display line segment results from cv2.HoughLinesP in green
for line in lines:
    coords = line[0]
    cv2.line(img0, (coords[0], coords[1]), (coords[2], coords[3]), (0, 255, 0), 1)
cv2.imwrite('line02.png', img0)
| TaikiShuttle/ECE4880J | hw3/houghScript.py | houghScript.py | py | 1,668 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.pi",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_... |
15359754817 | import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import yaml
class Map:
'''
Wraps ros2 map into a class
'''
    def __init__(self):
        # Paths to the backing yaml/image files (populated by initialize()).
        self.yaml_path = ''
        self.image_path = ''
        # Occupancy image as a 2-D grayscale array (loaded by initialize()).
        self.image = None
        # Meters per pixel.
        self.resolution = None
        # Map origin [x, y, yaw] in cartesian/world coordinates.
        self.origin = [0.0, 0.0, 0.0]
        # Remaining fields mirror the standard ROS map_server yaml keys.
        self.negate = 0
        self.occupied_thresh = 0.65
        self.free_thresh = 0.196
        # Image dimensions in pixels (set once the image is loaded).
        self.height = None
        self.width = None
def initialize(self, path):
'''
Initializes the map object from a yaml file
Args:
- path: path to the yaml file
'''
# read yaml file into a dictionary
params_dict = {}
params_dict = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
self.yaml_path = path
self.image_path = os.path.join(os.path.dirname(path),
params_dict['image'])
self.resolution = params_dict['resolution']
self.origin = params_dict['origin']
self.negate = params_dict['negate']
self.occupied_thresh = params_dict['occupied_thresh']
self.free_thresh = params_dict['free_thresh']
# read image
self.image = cv2.imread(self.image_path, -1)#cv2.IMREAD_GRAYSCALE)
# if image has > 1 channel, convert to grayscale
if len(self.image.shape) > 2:
self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
self.height, self.width = self.image.shape
def show_map(self):
'''
Shows the map
'''
plt.figure()
print(self.image)
plt.imshow(self.image, cmap='gray')
plt.show()
    def show_coordinates(self, xs_cart, ys_cart, color=None, size=600):
        '''
        Shows the map with a coordinate marked in cartesian coordinates
        Args:
        - xs_cart: ndarray (or scalar) of x coordinates in meters
        - ys_cart: ndarray (or scalar) of y coordinates in meters
        - color: ndarray of per-point colors (defaults to point index)
        - size: size of the map in pixels
        '''
        # if x, y are not iterables, convert them to lists
        if not hasattr(xs_cart, '__iter__'):
            xs_cart = np.array([xs_cart])
            ys_cart = np.array([ys_cart])
        half_size = int(size / 2)
        plt.figure(figsize=(30, 30))
        # use initial coordinates to center the map
        xs, ys = self.cartesian_to_pixel(xs_cart, ys_cart)
        # NOTE(review): x0/y0 are only used by the commented-out recentering
        # scatter calls below.
        x0 = xs[0]
        y0 = ys[0]
        # shows a square around the initial coordinates
        plt.imshow(self.image, cmap='gray')
        if color is not None:
            #plt.scatter(xs-x0+half_size, ys-y0+half_size, s=100, c=color, cmap='rainbow')
            plt.scatter(xs, ys, s=100, c=color, cmap='rainbow')
        else:
            #plt.scatter(xs-x0+half_size, ys-y0+half_size, s=100, c=np.arange(len(xs)), cmap='rainbow')
            plt.scatter(xs, ys, s=100, c=np.arange(len(xs)), cmap='rainbow')
        # equal aspect ratio
        plt.gca().set_aspect('equal', adjustable='box')
        step = 1 # meters
        step = step / self.resolution # convert to pixels
        # build tick positions every meter outward from the first point;
        # only consumed by the commented-out xticks/yticks calls below
        xtick_origin = half_size + xs_cart[0] / self.resolution
        ytick_origin = half_size + ys_cart[0] / self.resolution
        xtick_neg = np.arange(xtick_origin, xtick_origin-half_size, -step)
        xtick_pos = np.arange(xtick_origin, xtick_origin+half_size, step)
        ytick_neg = np.arange(ytick_origin, ytick_origin-half_size, -step)
        ytick_pos = np.arange(ytick_origin, ytick_origin+half_size, step)
        xticks = np.concatenate((xtick_neg[::-1][:-1], xtick_pos))
        yticks = np.concatenate((ytick_neg[::-1][:-1], ytick_pos))
        xlabels_pos = np.arange(0, len(xtick_pos))
        xlabels_neg = np.arange(0, -len(xtick_neg), -1)
        ylabels_pos = np.arange(0, -len(ytick_pos),-1)
        ylabels_neg = np.arange(0, len(ytick_neg))
        xlabels = np.concatenate((xlabels_neg[::-1][:-1], xlabels_pos))
        ylabels = np.concatenate((ylabels_neg[::-1][:-1], ylabels_pos))
        #plt.xticks(xticks, xlabels, rotation=45, fontsize=20)
        #plt.yticks(yticks, ylabels, rotation=0, fontsize=20)
        #plt.grid()
        plt.show()
def cartesian_to_pixel(self, x, y):
'''
Converts cartesian coordinates to pixel coordinates
Args:
- x: ndarray of x coordinates
- y: ndarray of y coordinates
'''
print(self.origin)
x_pixel = np.round((x - self.origin[0]) / self.resolution).astype(int)
y_pixel = self.height - np.round((y - self.origin[1]) / self.resolution).astype(int)
return x_pixel, y_pixel
def pixel_to_cartesian(self, x_pixel, y_pixel):
'''
Converts pixel coordinates to cartesian coordinates
Args:
- x_pixel: ndarray of x coordinates
- y_pixel: ndarray of y coordinates
'''
x = x_pixel * self.resolution + self.origin[0]
y = (self.height - y_pixel) * self.resolution + self.origin[1]
return x, y | JChunX/libf1tenth | libf1tenth/planning/map.py | map.py | py | 5,213 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yaml.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
487845027 | from PIL import Image
import colorsys
import numpy as np
# Key-image dimensions in pixels (width x height).
nx, ny = 128, 128
def createHSVimage(hue, sat, val, alpha):
    """Build an RGBA PIL image from flat HSV + alpha channel arrays.

    Each channel is a flat array of length nx*ny with values in [0, 1];
    the result is flipped vertically so increasing y maps to "up".
    """
    def to_rgba(h, s, v, a):
        # colorsys works in [0, 1]; PIL wants 8-bit channel values.
        r, g, b = colorsys.hsv_to_rgb(h, s, v)
        return (int(255 * r), int(255 * g), int(255 * b), int(255 * a))

    pixels = [to_rgba(h, s, v, a)
              for h, s, v, a in zip(hue.tolist(), sat.tolist(),
                                    val.tolist(), alpha.tolist())]
    # new image for color composite
    image = Image.new('RGBA', (nx, ny))
    image.putdata(pixels)
    return image.transpose(Image.FLIP_TOP_BOTTOM)
# --- Build the colour-key images for the magnetic field visualisation -------
# Grid of points spanning [-1.2, 1.2] in both directions.
x = np.linspace(-1.2, 1.2, nx)[np.newaxis, :]
y = np.linspace(-1.2, 1.2, ny)[:, np.newaxis]
rr = np.sqrt(x**2 + y**2)

phase = 0.0
boost = 2.0  # exaggerate the field angle
# In-plane field angle, normalised to [0, 1] for use as an HSV hue.
phi = np.remainder(phase + boost*np.arctan2(x, y),
                   2.0*np.pi)/(2.0*np.pi)  # should be in range [0, 1]

# Out-of-plane "dip": grows quadratically inside the unit circle, 1 outside.
# dip = np.where(rr <= 1.0, 1.0 - np.sqrt(1.0 - rr**2), 1.0)
dip = np.where(rr <= 1.0, rr**2, 1.0)

# hue is in-plane angle
hue = phi.flatten()
# saturation is out-of-plane angle
sat = dip.flatten()
# alpha channel to smoothly cut out the circle (sigmoid edge near r = 1.05)
sharpness = 20.0
alpha = 1.0/(1.0 + np.exp(sharpness*(rr-1.05))).flatten()

for field_strength in [0.5, 1.0]:
    # value is field strength
    val = np.array([field_strength]*nx*ny)
    imrgb = createHSVimage(hue, sat, val, alpha)
    imrgb.save("mhdcuts-Bkey-%2.2i.png" % (int(10*field_strength)))

# and another one, with saturation going down in the middle but value too
val = np.where(rr <= 1.0, 0.8 + 0.2*rr**2, 1.0).flatten()
imrgb = createHSVimage(hue, sat, val, alpha)
imrgb.save("mhdcuts-Bkey-sat.png")

# now do another one, with full saturation but value proportional to radius
# np.float was removed in NumPy 1.20+; the builtin float is the replacement.
sat = np.ones(nx*ny, float)
val = np.where(rr <= 1.0, rr, 1.0).flatten()
imrgb = createHSVimage(hue, sat, val, alpha)
imrgb.save("mhdcuts-Bkey-val.png")

# and finally one that combines everything....
peak = 0.7  # fully saturated, value=1 appears here
val = np.where(rr >= peak, (peak/rr)**2, 1.0).flatten()
sat = np.where(rr <= peak, (rr/peak)**3, 1.0).flatten()
imrgb = createHSVimage(hue, sat, val, alpha)
imrgb.save("mhdcuts-Bkey-both.png")
| will-henney/phabc2-post | mhdcuts-Bkey.py | mhdcuts-Bkey.py | py | 2,188 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "colorsys.hsv_to_rgb",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PIL.Image.FLIP_TOP_BOTTOM... |
29123925476 | import glob
import os
import shutil
import subprocess
from conftest import aws_credentials_required
# External modules
import pytest
def pyinstaller_exists():
    """Return True if a ``pyinstaller`` executable is on the PATH."""
    found = shutil.which('pyinstaller')
    return found is not None
# PyTest doesn't let you place skipif markers on fixures. Otherwise,
# we'd ideally be able to do that and all the dependent tests would be
# skipped automatically.
@pytest.fixture(scope='session')
def pyinstaller_flintrock():
    """Build the standalone Flintrock package once per test session.

    Returns the path to the generated executable after sanity-checking
    that the build produced both the zip archive and the binary.
    """
    executable = './dist/flintrock/flintrock'
    build = subprocess.run(['python', 'generate-standalone-package.py'])
    assert build.returncode == 0
    assert glob.glob('./dist/*.zip')
    assert os.path.isfile(executable)
    return executable
@pytest.mark.skipif(not pyinstaller_exists(), reason="PyInstaller is required")
def test_pyinstaller_flintrock_help(pyinstaller_flintrock):
    """Smoke test: the packaged executable runs and exits successfully."""
    # Without explicitly setting the locale here, Click will complain
    # when this test is run via GitHub Desktop that the locale is
    # misconfigured.
    command = """
        export LANG=en_US.UTF-8
        {flintrock_executable}
        """.format(
            flintrock_executable=pyinstaller_flintrock
        )
    result = subprocess.run(command, shell=True)
    assert result.returncode == 0
@pytest.mark.skipif(not pyinstaller_exists(), reason="PyInstaller is required")
@aws_credentials_required
def test_pyinstaller_flintrock_describe(pyinstaller_flintrock):
    """Run ``flintrock describe`` against AWS.

    This test picks up some PyInstaller packaging issues that are not
    exposed by the help test.
    """
    # Without explicitly setting the locale here, Click will complain
    # when this test is run via GitHub Desktop that the locale is
    # misconfigured.
    command = """
        export LANG=en_US.UTF-8
        {flintrock_executable} describe
        """.format(
            flintrock_executable=pyinstaller_flintrock
        )
    result = subprocess.run(command, shell=True)
    assert result.returncode == 0
| nchammas/flintrock | tests/test_pyinstaller_packaging.py | test_pyinstaller_packaging.py | py | 1,977 | python | en | code | 629 | github-code | 1 | [
{
"api_name": "shutil.which",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_numb... |
25810988721 | import json
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from drawing import draw_pitch
def import_xtvalues():
    """Load the 12x8 expected-threat (xT) grid from disk as a numpy array."""
    with open('input_data/open_xt_12x8_v1.json', 'r') as source:
        raw_grid = json.load(source)
    return np.array(raw_grid)
def offset_df(df, dx, dy):
    """Return a copy of ``df`` with its ``x``/``y`` columns shifted by (dx, dy).

    The input frame is left untouched.
    """
    shifted = df.copy()
    shifted['x'] = shifted['x'] + dx
    shifted['y'] = shifted['y'] + dy
    return shifted
def create_multipoints(df):
    """Plot the xT (expected threat) grid for both attacking directions.

    Builds two densified point clouds from the on-disk xT grid, scales them
    to a 0-100 pitch coordinate system, and scatters them over the pitch
    figure produced by draw_pitch().

    Returns (fig, dfxT, rgba_colors).

    NOTE(review): the ``df`` parameter is only used for its ``from_records``
    method (i.e. it is expected to behave like ``pandas.DataFrame``); the
    frame's own contents are never read — confirm against callers.
    """
    # Unstack the 2-D grid into long form: one row per (x, y, xT) cell.
    dfxT = df.from_records(import_xtvalues()).unstack().reset_index()
    dfxT.columns = ['x', 'y', 'xT']
    dfxT2= df.from_records(import_xtvalues()).unstack().reset_index()
    dfxT2.columns = ['x', 'y', 'xT']
    # Replicate each cell at four sub-cell offsets to densify the cloud.
    dfxT = pd.concat([offset_df(dfxT, dx, dy)
                    for dx, dy
                    in [(0.25, 0.25), (0.25, 0.75), (0.75, 0.25), (0.75, 0.75)]
                    ])
    dfxT2= pd.concat([offset_df(dfxT2, dx, dy)
                    for dx, dy
                    in [(0.25, 0.25), (0.25, 0.75), (0.75, 0.25), (0.75, 0.75)]
                    ])
    # Rescale grid indices to pitch percentages (0-100 on both axes).
    ny, nx = import_xtvalues().shape
    dfxT.x = dfxT.x*100/nx
    dfxT.y = dfxT.y*100/ny
    #only consider locations with better than median threat (i.e. ignore defensive positions)
    dfxT.xT = np.clip(dfxT.xT-dfxT.xT.median(), 0, 1)
    #flip axis if necessary (to align with attacking team)
    # NOTE(review): dfxT2's coordinates are derived from the already-rescaled
    # dfxT columns, and dfxT2.xT is never recentred like dfxT.xT — verify
    # this asymmetry is intentional.
    dfxT2.x = 100-dfxT.x
    dfxT2.y = 100-dfxT.y
    fig, ax = draw_pitch()
    #plotting pitch with probability
    # NOTE(review): rgba_colors is assigned twice; the second assignment
    # (sized to dfxT2) overwrites the first, and the same colour array is
    # reused for both scatters below — confirm this is intended.
    rgba_colors = np.zeros((dfxT.shape[0],4))
    rgba_colors = np.zeros((dfxT2.shape[0],4))
    rgba_colors[:,0] = 1.0
    rgba_colors[:,3] = dfxT.xT.values/dfxT.xT.max()
    rgba_colors[:,3] = dfxT2.xT.values/dfxT2.xT.max()
    plt.scatter(dfxT['x'], dfxT['y'], c=rgba_colors)
    plt.scatter(dfxT2['x'], dfxT2['y'], c=rgba_colors)
    return fig,dfxT,rgba_colors
| omarkorim98/Football-Data-Analysis-master | data_statistics/threat_Potential/threat_values.py | threat_values.py | py | 1,704 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": ... |
69895816035 | """empty message
Revision ID: 7a0cb1100d0a
Revises: 3c79ca63799e
Create Date: 2022-06-19 23:56:15.292951
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '7a0cb1100d0a'       # this migration's id
down_revision = '3c79ca63799e'  # migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: move pictures from solutions onto steps.

    Drops ``solution.picture`` and adds a nullable ``step.picture_id``
    foreign key referencing ``picture.picture_id``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('solution', 'picture')
    op.add_column('step', sa.Column('picture_id', sa.Integer(), nullable=True))
    # Unnamed FK constraint (name=None): the backend generates the name.
    op.create_foreign_key(None, 'step', 'picture', ['picture_id'], ['picture_id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop ``step.picture_id`` (and its foreign key)
    and restore the ``solution.picture`` varchar column.

    NOTE(review): dropping a constraint with name ``None`` relies on the
    backend being able to resolve the auto-generated name — confirm this
    works against the target MySQL database.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'step', type_='foreignkey')
    op.drop_column('step', 'picture_id')
    op.add_column('solution', sa.Column('picture', mysql.VARCHAR(length=128), nullable=True))
    # ### end Alembic commands ###
| luvyingying/IntelligentFaultHandlingSystem | migrations/versions/7a0cb1100d0a_.py | 7a0cb1100d0a_.py | py | 969 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "alembic.op.drop_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.add_column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
... |
25793358290 | import numpy as np
from scipy.sparse import csr_matrix, vstack, isspmatrix_csr
from tqdm import tqdm
def tfidf_with_dates_to_weekly_term_counts(term_value_array, uspto_week_dates):
    """Collapse a per-document term matrix into per-week document counts.

    Args:
        term_value_array: (num_docs x num_terms) matrix of term values; rows
            must be ordered by date (same order as ``uspto_week_dates``).
        uspto_week_dates: per-row week labels; the code treats the last two
            decimal digits as the week-of-year (``% 100``) and the higher
            digits as the year — presumably a YYWW-style encoding, TODO
            confirm with callers.

    Returns:
        (week_counts_csr, week_totals, week_dates): a (num_weeks x
        num_terms) CSR matrix counting, per week, the documents that contain
        each term; the number of documents per week; and the week labels.
    """
    number_of_rows, number_of_terms = term_value_array.shape
    week_counts_csr = None
    # The accumulation below relies on CSR arithmetic; densify-to-CSR once.
    if not isspmatrix_csr(term_value_array):
        term_value_array = csr_matrix(term_value_array)
    current_week = int(uspto_week_dates[0])
    current_week_counts_csr = csr_matrix((1, number_of_terms), dtype=np.int32)
    week_totals = []
    week_dates = []
    week_total = 0
    for current_row_index in tqdm(range(number_of_rows), 'Counting terms per week', unit='patent'):
        new_week = int(uspto_week_dates[current_row_index])
        # Flush completed weeks until we catch up with this row's week,
        # emitting an all-zero row for any week without documents.
        while new_week > current_week:
            if ((current_week % 100) == 53) and (week_total == 0):
                current_week += 100 - 53 + 1  # next year, so add 100 but remove the "used" weeks and move on by 1
            else:
                week_counts_csr = vstack([week_counts_csr, current_week_counts_csr],
                                         format='csr') if week_counts_csr is not None else current_week_counts_csr
                week_totals.append(week_total)
                week_dates.append(current_week)
                current_week_counts_csr = csr_matrix((1, number_of_terms), dtype=np.int32)
                current_week += 1
                if (current_week % 100) > 53:
                    current_week += 100 - 53  # next year, so add 100 but remove the "used" weeks
                week_total = 0
        # Binarise the row (term present or not) and add it to this week.
        current_row_as_counts = term_value_array[current_row_index, :] > 0
        current_week_counts_csr += current_row_as_counts
        week_total += 1
    # Flush the final (possibly partial) week.
    week_counts_csr = vstack([week_counts_csr, current_week_counts_csr],
                             format='csr') if week_counts_csr is not None else current_week_counts_csr
    week_totals.append(week_total)
    week_dates.append(current_week)
    return week_counts_csr, week_totals, week_dates
| Haydn8/pyGrams | scripts/utils/datesToPeriods.py | datesToPeriods.py | py | 2,011 | python | en | code | null | github-code | 1 | [
{
"api_name": "scipy.sparse.isspmatrix_csr",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 14,
"usage_type": "call"
},
{
"api_n... |
74525946592 | from collections import namedtuple
import dgl
from dgl.data.tree import SSTDataset
# Container for one batch of trees plus the per-node tensors the model needs.
SSTBatch = namedtuple('SSTBatch', ['graph', 'mask', 'wordid', 'label'])

# Load the tiny Stanford Sentiment Treebank split shipped with DGL.
trainset = SSTDataset(mode='tiny')
tiny_sst = trainset.trees
num_vocabs = trainset.num_vocabs
num_classes = trainset.num_classes

vocab = trainset.vocab  # vocabulary dict: key -> id
inv_vocab = {v: k for k, v in vocab.items()}  # inverted vocabulary dict: id -> word

# Print the sentence encoded by the first tree (skipping padding tokens).
a_tree = tiny_sst[0]
for token in a_tree.ndata['x'].tolist():
    if token != trainset.PAD_WORD:
        print(inv_vocab[token], end=" ")
##############################################################################
# Step 1: Batching
# ----------------
#
# Add all the trees to one graph, using
# the :func:`~dgl.batched_graph.batch` API.
#
import networkx as nx
import matplotlib.pyplot as plt
# Merge all the trees into one batched graph so levels across trees can be
# processed simultaneously.
graph = dgl.batch(tiny_sst)
def plot_tree(g):
    """Draw a networkx tree top-down via graphviz (requires pygraphviz)."""
    layout = nx.nx_agraph.graphviz_layout(g, prog='dot')
    nx.draw(g, layout,
            with_labels=False,
            node_size=10,
            node_color=[[.5, .5, .5]],
            arrowsize=4)
# plt.show()
# plot_tree(graph.to_networkx())
# Step 2: Tree-LSTM cell with message-passing APIs
# ------------------------------------------------
#
# Researchers have proposed two types of Tree-LSTMs: Child-Sum
# Tree-LSTMs, and :math:`N`-ary Tree-LSTMs. In this tutorial you focus
# on applying *Binary* Tree-LSTM to binarized constituency trees. This
# application is also known as *Constituency Tree-LSTM*. Use PyTorch
# as a backend framework to set up the network.
#
# In `N`-ary Tree-LSTM, each unit at node :math:`j` maintains a hidden
# representation :math:`h_j` and a memory cell :math:`c_j`. The unit
# :math:`j` takes the input vector :math:`x_j` and the hidden
# representations of the child units: :math:`h_{jl}, 1\leq l\leq N` as
# input, then update its new hidden representation :math:`h_j` and memory
# cell :math:`c_j` by:
#
# .. math::
#
# i_j & = & \sigma\left(W^{(i)}x_j + \sum_{l=1}^{N}U^{(i)}_l h_{jl} + b^{(i)}\right), & (1)\\
# f_{jk} & = & \sigma\left(W^{(f)}x_j + \sum_{l=1}^{N}U_{kl}^{(f)} h_{jl} + b^{(f)} \right), & (2)\\
# o_j & = & \sigma\left(W^{(o)}x_j + \sum_{l=1}^{N}U_{l}^{(o)} h_{jl} + b^{(o)} \right), & (3) \\
# u_j & = & \textrm{tanh}\left(W^{(u)}x_j + \sum_{l=1}^{N} U_l^{(u)}h_{jl} + b^{(u)} \right), & (4)\\
# c_j & = & i_j \odot u_j + \sum_{l=1}^{N} f_{jl} \odot c_{jl}, &(5) \\
# h_j & = & o_j \cdot \textrm{tanh}(c_j), &(6) \\
#
# It can be decomposed into three phases: ``message_func``,
# ``reduce_func`` and ``apply_node_func``.
#
# .. note::
# ``apply_node_func`` is a new node UDF that has not been introduced before. In
# ``apply_node_func``, a user specifies what to do with node features,
# without considering edge features and messages. In a Tree-LSTM case,
# ``apply_node_func`` is a must, since there exists (leaf) nodes with
# :math:`0` incoming edges, which would not be updated with
# ``reduce_func``.
#
import torch as th
import torch.nn as nn
class TreeLSTMCell(nn.Module):
    """Binary (N=2) Tree-LSTM cell for DGL message passing.

    Implements equations (1)-(6) of the Constituency Tree-LSTM: shared
    W/U/b projections for the input, output and update gates (``iou``)
    plus a per-child forget-gate projection ``U_f``.
    """

    def __init__(self, x_size, h_size):
        super(TreeLSTMCell, self).__init__()
        # Input projection and (2-child) hidden projection for i, o, u.
        self.W_iou = nn.Linear(x_size, 3 * h_size, bias=False)
        self.U_iou = nn.Linear(2 * h_size, 3 * h_size, bias=False)
        self.b_iou = nn.Parameter(th.zeros(1, 3 * h_size))
        # Forget-gate projection: one gate per child.
        self.U_f = nn.Linear(2 * h_size, 2 * h_size)

    def message_func(self, edges):
        """Send each child's hidden state and memory cell to its parent."""
        return {'h': edges.src['h'], 'c': edges.src['c']}

    def reduce_func(self, nodes):
        """Combine the children's states: equations (2) and (5), term 2."""
        child_h = nodes.mailbox['h']
        child_c = nodes.mailbox['c']
        # Concatenate h_j1 and h_j2 for equations (1), (2), (3), (4).
        h_cat = child_h.view(child_h.size(0), -1)
        # Equation (2): per-child forget gates.
        forget = th.sigmoid(self.U_f(h_cat)).view(*child_h.size())
        # Second term of equation (5).
        c_sum = th.sum(forget * child_c, 1)
        return {'iou': self.U_iou(h_cat), 'c': c_sum}

    def apply_node_func(self, nodes):
        """Finish the node update: equations (1), (3), (4), (5), (6)."""
        iou = nodes.data['iou'] + self.b_iou
        i, o, u = th.chunk(iou, 3, 1)
        i = th.sigmoid(i)
        o = th.sigmoid(o)
        u = th.tanh(u)
        # Equation (5): new memory cell.
        c = i * u + nodes.data['c']
        # Equation (6): new hidden state.
        h = o * th.tanh(c)
        return {'h': h, 'c': c}
##############################################################################
# Step 3: Define traversal
# ------------------------
#
# After you define the message-passing functions, induce the
# right order to trigger them. This is a significant departure from models
# such as GCN, where all nodes are pulling messages from upstream ones
# *simultaneously*.
#
# In the case of Tree-LSTM, messages start from leaves of the tree, and
# propagate/processed upwards until they reach the roots. A visualization
# is as follows:
#
# .. figure:: https://i.loli.net/2018/11/09/5be4b5d2df54d.gif
# :alt:
#
# DGL defines a generator to perform the topological sort, each item is a
# tensor recording the nodes from bottom level to the roots. One can
# appreciate the degree of parallelism by inspecting the difference of the
# followings:
#
# to heterogenous graph
# Convert one tree to a homogeneous DGLGraph and show its bottom-up
# traversal order: each printed tensor is one "level" of nodes.
trv_a_tree = dgl.graph(a_tree.edges())
print('Traversing one tree:')
print(dgl.topological_nodes_generator(trv_a_tree))

# Same for the whole batched graph: corresponding levels of all trees are
# grouped together, which is where the parallelism comes from.
trv_graph = dgl.graph(graph.edges())
print('Traversing many trees at the same time:')
print(dgl.topological_nodes_generator(trv_graph))

import dgl.function as fn
import torch as th

# Demonstrate message propagation along the topological order.
trv_graph.ndata['a'] = th.ones(graph.number_of_nodes(), 1)
traversal_order = dgl.topological_nodes_generator(trv_graph)
trv_graph.prop_nodes(traversal_order,
                     message_func=fn.copy_src('a', 'a'),
                     reduce_func=fn.sum('a', 'a'))
class TreeLSTM(nn.Module):
    """Constituency Tree-LSTM sentiment classifier.

    Embeds word ids, runs TreeLSTMCell message passing bottom-up over the
    batched trees, and classifies every node from its final hidden state.
    """

    def __init__(self,
                 num_vocabs,
                 x_size,
                 h_size,
                 num_classes,
                 dropout,
                 pretrained_emb=None):
        super(TreeLSTM, self).__init__()
        self.x_size = x_size
        self.embedding = nn.Embedding(num_vocabs, x_size)
        if pretrained_emb is not None:
            # Warm-start from pretrained vectors but keep them trainable.
            print('Using glove')
            self.embedding.weight.data.copy_(pretrained_emb)
            self.embedding.weight.requires_grad = True
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(h_size, num_classes)
        self.cell = TreeLSTMCell(x_size, h_size)

    def forward(self, batch, h, c):
        '''
        Compute tree-lstm prediction given a batch.

        :param batch: dgl.data.SSTBatch
        :param h: Tensor initial hidden state
        :param c: Tensor initial cell state
        :return: logits : Tensor
            The prediction of each node.
        '''
        # Rebuild as a homogeneous graph for message passing.
        g = dgl.graph(batch.graph.edges())
        # Embed word ids; the mask zeroes internal (non-word) nodes.
        embeds = self.embedding(batch.wordid * batch.mask)
        g.ndata['iou'] = self.cell.W_iou(self.dropout(embeds)) * batch.mask.float().unsqueeze(-1)
        g.ndata['h'] = h
        g.ndata['c'] = c
        # Propagate bottom-up in topological order.
        dgl.prop_nodes_topo(g,
                            message_func=self.cell.message_func,
                            reduce_func=self.cell.reduce_func,
                            apply_node_func=self.cell.apply_node_func)
        # Classify each node from its final hidden state.
        hidden = self.dropout(g.ndata.pop('h'))
        return self.linear(hidden)
##############################################################################
# Main Loop
# ---------
#
# Finally, you could write a training paradigm in PyTorch.
#
from torch.utils.data import DataLoader
import torch.nn.functional as F
# Train on CPU; the tiny dataset does not need a GPU.
device = th.device('cpu')
# hyper parameters
x_size = 256        # word-embedding size
h_size = 256        # Tree-LSTM hidden size
dropout = 0.5
lr = 0.05
weight_decay = 1e-4
epochs = 10

# create the model
model = TreeLSTM(trainset.num_vocabs,
                 x_size,
                 h_size,
                 trainset.num_classes,
                 dropout)
print(model)

# create the optimizer
optimizer = th.optim.Adagrad(model.parameters(),
                         lr=lr,
                         weight_decay=weight_decay)
def batcher(dev):
    """Return a DataLoader collate_fn that merges trees into an SSTBatch.

    NOTE(review): the inner function moves tensors to the module-level
    ``device``, not to the ``dev`` argument — confirm which is intended.
    """
    def collate(batch):
        merged = dgl.batch(batch)
        return SSTBatch(graph=merged,
                        mask=merged.ndata['mask'].to(device),
                        wordid=merged.ndata['x'].to(device),
                        label=merged.ndata['y'].to(device))
    return collate
train_loader = DataLoader(dataset=tiny_sst,
                          batch_size=5,
                          collate_fn=batcher(device),
                          shuffle=False,
                          num_workers=0)

# training loop
for epoch in range(epochs):
    for step, batch in enumerate(train_loader):
        g = batch.graph
        n = g.number_of_nodes()
        # Initial hidden/cell states are zero for every node.
        h = th.zeros((n, h_size))
        c = th.zeros((n, h_size))
        logits = model(batch, h, c)
        # Per-node negative log-likelihood, summed over the batch.
        logp =F.log_softmax(logits, 1)
        loss = F.nll_loss(logp, batch.label, reduction='sum')
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Node-level accuracy for monitoring.
        pred = th.argmax(logits, 1)
        acc = float(th.sum(th.eq(batch.label, pred))) / len(batch.label)
        print("Epoch {:05d} | Step {:05d} | Loss {:.4f} | Acc {:.4f} |".format(
            epoch, step, loss.item(), acc))
{
"api_name": "collections.namedtuple",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "dgl.data.tree.SSTDataset",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dgl.batch",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "networkx.nx_a... |
16780787848 | import datetime
class exception():
    """
    A scheduled override for a scheme.

    exception(name: str, starttime: datetime, stoptime: datetime,
    value: bool)

    Used when schemes should be overridden during a time window.

    Methods:
        check: if the current time is strictly between starttime and
        stoptime, return value; otherwise return None.
    """
    def __init__(self,
                 name: str,
                 starttime: datetime.datetime,
                 stoptime: datetime.datetime,
                 value: bool):
        self.name = name            # human-readable label for this override
        self.starttime = starttime  # window start (exclusive)
        self.stoptime = stoptime    # window end (exclusive)
        self.value = value          # value to apply while the window is active

    def check(self) -> bool:
        """Return ``value`` while now is inside the window, else None.

        The clock is read once so the two comparisons cannot observe
        different instants and straddle a window boundary.
        """
        now = datetime.datetime.now()
        if self.starttime < now < self.stoptime:
            return self.value
        return None
| gurkslask/tiddur | app/time_cmd/exception.py | exception.py | py | 738 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "d... |
39594408669 | # editor: Wang Zhixiong
from typing import TypeVar, Generic, List, Iterator
from typing import Any
from typing import Union
from typing import Generator
from typing import Callable
from typing import Optional
# Key type accepted by the tree.
K = Union[str, int, float]
# Value type stored at a node (None allowed).
D = Union[None, str, int, float]
class TreeNode:
    """A single node of the binary search tree.

    Nodes link through ``leftChild``/``rightChild``; iterating a node
    delegates to the module-level ``tolist`` and yields keys and values
    interleaved in preorder.
    """

    def __init__(self, k: K, v: D,
                 left: object = None,
                 right: object = None) -> None:
        self.key = k  # key
        self.val = v  # value
        self.leftChild = left  # left pointer (TreeNode or None)
        self.rightChild = right  # right pointer (TreeNode or None)

    def __iter__(self) -> Iterator[Any]:
        # Preorder [key, val, key, val, ...] flattening via tolist().
        return iter(tolist(self))
def size(b: Union[TreeNode, None]) -> int:
    """Return the number of nodes in the (sub)tree rooted at ``b``."""
    if b is None:
        return 0
    left_count = size(b.leftChild)
    right_count = size(b.rightChild)  # type: ignore
    return 1 + left_count + right_count
def insert(bst: Union[TreeNode, None], key: K,
           val: D) -> Optional[TreeNode]:
    """Insert ``(key, val)`` into the tree rooted at ``bst``; return the root.

    String keys are ordered by the sum of their character ordinals, so
    distinct strings can collide onto the same ordering position.  A None
    key or value is parked in the first free child slot of the root (and
    silently dropped if both slots are taken) — NOTE(review): confirm this
    is the intended handling of None entries.
    """
    if bst is None:
        # Empty tree: the new node becomes the root.
        bst = TreeNode(key, val)
    elif key is None or val is None:
        # None key or value: stash in the first free child of the root.
        if bst.leftChild is None:
            bst.leftChild = TreeNode(key, val)
        else:
            if bst.rightChild is None:
                bst.rightChild = TreeNode(key, val)
            # raise AttributeError("The element is wrong.")
    elif bst.key is None:
        # Root holds a None key: same first-free-slot placement as above.
        if bst.leftChild is None:
            bst.leftChild = TreeNode(key, val)
        else:
            if bst.rightChild is None:
                bst.rightChild = TreeNode(key, val)
    else:
        # Reduce string keys to a comparable number (sum of ordinals).
        if isinstance(key, str):
            key_num = 0
            for i in range(len(key)):
                key_num = key_num + ord(key[i])
        else:
            key_num = key  # type: ignore
        if isinstance(bst.key, str):
            bstkey_num = 0
            for i in range(len(bst.key)):
                bstkey_num = bstkey_num + ord(bst.key[i])
        else:
            bstkey_num = bst.key  # type: ignore
        # Standard BST descent: ties and smaller keys go left.
        if key_num <= bstkey_num:
            if bst.leftChild is None:
                bst.leftChild = TreeNode(key, val)
            else:
                insert(bst.leftChild, key, val)
        else:
            if bst.rightChild is None:
                bst.rightChild = TreeNode(key, val)  # type: ignore
            else:
                insert(bst.rightChild, key, val)
    return bst
def get(bst: Union[TreeNode, None], key: K) -> Union[TreeNode, None]:
    """Return the first node holding ``key``, or None if it is absent.

    Fixes the original, which compared ``type(bst.key)`` against the key
    value (true for virtually every input) and recursed into each subtree
    twice (once to test, once to return).
    """
    if bst is None:
        return None
    if key == bst.key:
        return bst
    # Entries with None keys are parked outside BST order (see insert), so
    # search both subtrees rather than relying on the ordering.
    found = get(bst.leftChild, key)
    if found is not None:
        return found
    return get(bst.rightChild, key)
def find(bst: Union[TreeNode, None],
         key: K) -> Union[D, bool]:
    """Return the value stored under ``key``, or False when it is absent.

    Performs a single lookup; the original called ``get`` twice per call.
    """
    node = get(bst, key)
    if node is None:
        return False
    return node.val
def parent(bst: Union[TreeNode, None],
           key: K) -> Union[TreeNode, None]:
    """Return the parent of the node holding ``key``.

    Returns None for the root key or a miss.  Guards against missing
    children: the original dereferenced ``bst.leftChild.key`` and
    ``bst.rightChild.key`` unconditionally and raised AttributeError on
    any node with fewer than two children.
    """
    if bst is None or bst.key == key:
        return None
    left, right = bst.leftChild, bst.rightChild
    if (left is not None and left.key == key) or \
            (right is not None and right.key == key):
        return bst
    # Descend by BST order (keys must be mutually comparable).
    if key < bst.key:
        return parent(left, key)
    return parent(right, key)
def is_member(bst: Union[TreeNode, None],
              k: K, v: D) -> bool:
    """True when key ``k`` is present and maps to value ``v``."""
    return find(bst, k) == v
def delete(bst: Union[TreeNode, None],
           key: K) -> Union[TreeNode, None, bool]:
    """Return a new tree rebuilt without ``key`` (its last occurrence).

    Raises AttributeError when ``key`` is not present.  The original's
    ``-> None`` annotation was wrong (a rebuilt tree is returned) and it
    computed an unused ``res[1::2]`` slice.
    """
    res = tolist(bst)
    keys = res[0::2]
    index = None
    # Remember the position of the (last) matching key.
    for i in range(0, len(keys)):
        if key == keys[i]:
            index = i
    if index is None:
        raise AttributeError("The element does not exist.")
    # Drop the value first so the key's index stays valid.
    del res[index * 2 + 1]
    del res[index * 2]
    return fromlist(res)
def tolist(bst: Union[TreeNode, None]) -> List:
    """Flatten the tree to ``[key, val, key, val, ...]`` in preorder."""
    flat = []  # type: ignore

    def walk(node):
        if node is None:
            return
        flat.append(node.key)
        flat.append(node.val)
        walk(node.leftChild)
        walk(node.rightChild)

    walk(bst)
    return flat
def fromlist(lst: List) -> Union[TreeNode, None, bool]:
    """Build a tree from a flat ``[key, val, key, val, ...]`` list.

    Returns None for an empty list and False for an odd-length (malformed)
    one.  Duplicate keys are first unified in place so every occurrence
    carries the value of the key's last occurrence; the list is then
    inserted pairwise.
    """
    bst = None
    if len(lst) == 0:
        return None
    elif len(lst) % 2 == 1:
        return False
    # Unify duplicate keys: each key position i takes the value of the last
    # position j holding an equal key (mutates ``lst`` in place).
    for i in range(0, len(lst)):
        for j in range(0, len(lst)):
            if lst[i] == lst[j] and i % 2 == 0 and j % 2 == 0:
                lst[i + 1] = lst[j + 1]
    # for/else: the loop never breaks, so this branch always runs.
    else:
        for i in range(0, len(lst), 2):
            bst = insert(bst, lst[i], lst[i + 1])
    return bst
def map(bst: Union[TreeNode, None],
        f: Callable[[float], float]) -> TreeNode:
    """Apply ``f`` to every value in place and return the same tree.

    Subtrees rooted at a None key are skipped entirely.
    """
    if bst is None or bst.key is None:
        return bst
    bst.val = f(bst.val)
    map(bst.leftChild, f)
    map(bst.rightChild, f)
    return bst
def func(bst: Union[TreeNode, None],
         f: Callable[[float], float]) -> int:
    """Fold ``f`` over every value in preorder, starting from 0.

    ``f`` is called as ``f(accumulator, node_value)`` for each node.
    """
    acc = [0]

    def fold(node):
        if node is None:
            return
        acc[0] = f(acc[0], node.val)
        fold(node.leftChild)
        fold(node.rightChild)

    fold(bst)
    return acc[0]
def filter(tree: Union[TreeNode, None],
           rule: Generator[str, int, float]) -> Union[TreeNode, None]:
    """Build a new tree keeping only entries where ``rule(key)`` is exactly
    ``False`` (i.e. the rule marks which entries to drop).
    """
    def rebuild(acc, node):
        if node is None:
            return acc
        if rule(node.key) is False:
            acc = insert(acc, node.key, node.val)
        acc = rebuild(acc, node.leftChild)
        return rebuild(acc, node.rightChild)

    return rebuild(None, tree)
def mconcat(bst1: Union[TreeNode, None],
            bst2: Union[TreeNode, None]) -> Union[TreeNode, None, bool]:
    """Merge two trees by concatenating their flattened forms and rebuilding."""
    combined = tolist(bst1) + tolist(bst2)
    return fromlist(combined)
def mempty() -> None:
    """Return the monoid identity for trees: the empty tree (None)."""
    return None
def iterator(bst: Union[TreeNode, None]) -> List:
    """Create iterator state: the flattened tree plus a cursor at 0."""
    return [tolist(bst), 0]
def next_item(it_lst: List) -> Callable[[], Any]:
    """Return a zero-argument "next" function over iterator state ``it_lst``.

    ``it_lst`` is ``[items, cursor]`` as produced by ``iterator``.  Each
    call yields the next element, advancing both the closure's own cursor
    and the shared state, and raises StopIteration once exhausted.
    """
    items, position = it_lst[0], it_lst[1]

    def advance():
        nonlocal position
        if position >= len(items) or items == []:
            raise StopIteration
        current = items[position]
        position += 1
        it_lst[1] = it_lst[1] + 1
        return current

    return advance
def display(bst: Union[TreeNode, None]) -> dict:
    """Render the tree as a plain ``{key: value}`` dictionary."""
    if bst is None:
        return {}
    flat = tolist(bst)
    # Pair up even-index keys with odd-index values.
    return dict(zip(flat[0::2], flat[1::2]))
| Zhixiong-Wang/CPO-Dreams-of-billion-girs_lab2_varliant6 | immutable.py | immutable.py | py | 6,679 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_numbe... |
39998519394 | import unittest
import os
import shutil
import copy
import config as cfg
from analyzer import Analyzer
class ConfigTestCase(unittest.TestCase):
    """Unit tests for the cfg configuration objects (Analyzer, Component,
    Config, Sequence) and their pickling behaviour.

    NOTE(review): this module appears to target Python 2 — pickle files
    are opened in text mode and Sequence results are compared against
    ``range(...)`` — confirm before running under Python 3.
    """

    def test_analyzer(self):
        """An Analyzer's generated name must be usable as a directory name."""
        ana1 = cfg.Analyzer(
            Analyzer,
            toto = '1',
            tata = 'a'
        )
        # checking that the analyzer name does not contain a slash,
        # to make sure the output directory name does not contain a subdirectory
        self.assertTrue( '/' not in ana1.name )

    def test_MCComponent(self):
        """cfg.MCComponent accepts the usual MC metadata at construction."""
        DYJets = cfg.MCComponent(
            name = 'DYJets',
            files ='blah_mc.root',
            xSection = 3048.,
            nGenEvents = 34915945,
            triggers = ['HLT_MC'],
            vertexWeight = 1.,
            effCorrFactor = 1 )
        # NOTE(review): only checks that construction does not raise.
        self.assertTrue(True)

    def test_config(self):
        """A full cfg.Config can be assembled from components and analyzers.

        NOTE(review): no assertion — only verifies construction does not raise.
        """
        ana1 = cfg.Analyzer(
            Analyzer,
            toto = '1',
            tata = 'a'
        )
        comp1 = cfg.Component(
            'comp1',
            files='*.root',
            triggers='HLT_stuff'
        )
        from heppy.framework.eventstext import Events
        config = cfg.Config( components = [comp1],
                             sequence = [ana1],
                             services = [],
                             events_class = Events )

    def test_copy(self):
        """copy.copy of an Analyzer yields an independent, relabelled clone."""
        ana1 = cfg.Analyzer(
            Analyzer,
            instance_label = 'inst1',
            toto = '1',
        )
        ana2 = copy.copy(ana1)
        ana2.instance_label = 'inst2'
        ana2.toto2 = '2'
        self.assertTrue(ana2.name.endswith('analyzer.Analyzer_inst2'))
        self.assertEqual(ana2.toto2, '2')

    def test_sequence(self):
        """cfg.Sequence flattens its arguments into a list of analyzers.

        NOTE(review): the comparisons against range(...) only hold on
        Python 2, where range returns a list.
        """
        seq = cfg.Sequence( 0, 1, 2 )
        self.assertEqual(seq, range(3))
        seq = cfg.Sequence( range(3) )
        self.assertEqual(seq, range(3))
        seq = cfg.Sequence( range(3), 3)
        self.assertEqual(seq, range(4))
        seq = cfg.Sequence( 'blah' )
        self.assertEqual(seq, ['blah'])
        self.assertRaises(ValueError, cfg.Sequence, dict(a=1) )

    def test_pickle(self):
        '''Test that a config object can be pickled and unpickled'''
        import pickle
        import shelve
        import tempfile
        ana = cfg.Analyzer(
            Analyzer,
            'test_pickle',
            var=1,
            fun=lambda x: x  # works because dill is imported in config.py
        )
        # NOTE(review): pickle files are opened in text mode ('w' / default
        # 'r'), which only works on Python 2; Python 3 needs 'wb'/'rb'.
        fd, tmpfname = tempfile.mkstemp()
        os.close(fd)
        with open(tmpfname, 'w') as out:
            pickle.dump(ana, out)
        self.assertTrue(True)
        with open(tmpfname) as infile:
            ana2 = pickle.load(infile)
        self.assertEqual(ana2.fun(1), 1)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| cbernet/heppy | heppy/framework/test_config.py | test_config.py | py | 2,810 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "config.Analyzer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "analyzer.Analyzer",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "config.MC... |
25646551717 | # -*- coding: utf-8 -*-
# @Time : 2021/3/2 11:25
# @Author : Zoey
# @File : add_member_page.py
# @describe:
from selenium.webdriver.common.by import By
from seleniumPO.pyse.pyselenium import PySelenium
class AddMemberPage:
    """Page object for the "add member" screen of the contacts admin."""

    def __init__(self, driver):
        self.driver = driver
        self.element = PySelenium(self.driver)

    def add_member(self, username, account, phone):
        """
        Add a contact by filling in the form and saving it.

        :param username: display name of the new member
        :param account: account id of the new member
        :param phone: phone number of the new member
        :return: True once the save button has been clicked
        """
        # Enter the user name
        self.element.find(By.ID, "username").send_keys(username)
        # Enter the account id
        self.element.find(By.ID, "memberAdd_acctid").send_keys(account)
        # Enter the phone number
        self.element.find(By.ID, "memberAdd_phone").send_keys(phone)
        # Click "save"
        self.element.find(By.CSS_SELECTOR, ".js_btn_save").click()
        return True

    def get_member(self):
        """
        Collect the names of all listed contacts.

        :return: list of member names read from the table cells'
            ``title`` attributes
        """
        # Wait until the member table is clickable before reading it.
        locator = (By.CSS_SELECTOR, ".member_colRight_memberTable_th_Checkbox")
        self.element.wait_for_click(10, locator)
        # A leftover debug print of the element list was removed here.
        eles_list = self.element.finds(By.CSS_SELECTOR, ".member_colRight_memberTable_td:nth-child(2)")
        names = [ele.get_attribute("title") for ele in eles_list]
        return names
| ZhangYi8326/Selenium_Zoey | seleniumPO/page/add_member_page.py | add_member_page.py | py | 1,338 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "seleniumPO.pyse.pyselenium.PySelenium",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 22,
"usage... |
42548147202 | from django.core.paginator import Paginator
from django.http import Http404
from django.shortcuts import render
from .converters import DateConverter
from books.models import Book
def books_view(request):
    """Render the full catalogue of books."""
    return render(
        request,
        'books/books_list.html',
        {'books': Book.objects.all()},
    )
def books_on_date(request, pub_date):
    """Render the books published on ``pub_date`` with prev/next navigation.

    ``pub_date`` is the raw string captured from the URL; an unparseable
    value leaves ``current_date`` (and the other context values) as None.
    """
    template = 'books/books_on_date.html'
    converter = DateConverter()
    books = None
    current_date = None
    next_date = None
    prev_date = None
    try:
        # Queryset evaluation stays inside the try so a bad date string
        # short-circuits cleanly via ValueError.
        ordered_books = Book.objects.order_by('pub_date')
        current_date = converter.to_python(value=pub_date).date()
        distinct_dates = ordered_books.values_list('pub_date', flat=True).distinct()
        later_dates = distinct_dates.filter(pub_date__gt=current_date)
        earlier_dates = distinct_dates.filter(pub_date__lt=current_date).order_by('-pub_date')
        if later_dates:
            next_date = later_dates[0]
        if earlier_dates:
            prev_date = earlier_dates[0]
        books = ordered_books.filter(pub_date=current_date)
    except ValueError:
        # The URL fragment was not a valid date.
        current_date = None
    context = {
        'books': books,
        'current_date': current_date,
        'next_date': next_date,
        'prev_date': prev_date,
    }
    return render(request, template, context)
{
"api_name": "books.models.Book.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "books.models.Book.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "books.models.Book",
"line_number": 11,
"usage_type": "name"
},
{
"ap... |
21168992917 | """
Question:
3. Create a python script that parses jmeter log files in CSV format,
and in the case if there are any non-successful endpoint responses recorded in the log,
prints out the label, response code, response message, failure message,
and the time of non-200 response in human-readable format in PST timezone
(e.g. 2021-02-09 06:02:55 PST).
Please use Jmeter_log1.jtl, Jmeter_log2.jtl as input files for testing out your script
(the files have .jtl extension but the format is CSV).
"""
import os
from datetime import timezone
import pandas as pd
import pytz
def filter_failure_responses(file_name: str):
    """
    Filter non-200 responses from the given JMeter CSV log.

    Keeps only the relevant columns, converts the epoch-millisecond timestamp
    to a human-readable US/Pacific (PST) datetime, prints the result and saves
    it to ``failed_responses_<file_name>`` in the current directory.

    :param file_name: file with csv extension, file saved in cwd
    :type file_name: str
    """
    # Suppress chained assignment warnings
    pd.options.mode.chained_assignment = None  # default='warn'
    # Columns required in the output
    filter_columns = ['label', 'responseCode', 'responseMessage', 'failureMessage', 'timeStamp']
    try:
        # Read data from input csv file, filter by required columns.
        # Context managers guarantee the handles are closed even on error.
        with open(file_name, 'r') as file:
            csv_data = pd.read_csv(file, usecols=filter_columns)
        print(f'Number of rows in input csv file: {len(csv_data)}')
        # Rearrange columns into the order of filter_columns
        csv_data = csv_data.reindex(columns=filter_columns)
        # Filter non-200 responses
        filter_data = csv_data[csv_data['responseCode'] != 200]
        count = len(filter_data)
        print(f'Number of non-200 responses: {count}')
        if count == 0:
            print(f'NO non-200 responses found in the log. Exiting')
            return
        # Convert epoch time (ms) to a timezone-aware datetime in US/Pacific.
        # pandas accepts IANA timezone names directly, so pytz is not required.
        filter_data['timeStamp'] = pd.to_datetime(filter_data['timeStamp'], unit='ms')
        filter_data['timeStamp'] = filter_data['timeStamp'].dt.tz_localize('UTC')
        filter_data['timeStamp'] = filter_data['timeStamp'].dt.tz_convert('US/Pacific')
        print(filter_data)
        # Save output in new csv file
        output_filename = f'failed_responses_{file_name}'
        with open(f'./{output_filename}', 'w') as output_file:
            filter_data.to_csv(output_file)
        print(f'Non-200 Responses are saved in {os.getcwd()}/{output_filename}')
    except Exception as e:
        # The old code printed e.args[1], which raised IndexError for
        # single-argument exceptions; print the exception itself instead.
        print(f'Exception: {e}')
# Run the filter on both sample JMeter logs (CSV content despite the .jtl
# extension mentioned in the module docstring).
filter_failure_responses('Jmeter_log1.csv')
filter_failure_responses('Jmeter_log2.csv')
| ujwalnitha/qa-python-exercises | exercise3/filter_responses.py | filter_responses.py | py | 2,635 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.options",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.timez... |
28548701178 | #!/usr/bin/python
# Python
import logging
from datetime import datetime
from time import mktime
# Libraries
import feedparser
# Local
import lifestream
logger = logging.getLogger('Atom')
# CLI arguments: the entry type tag and the Atom/RSS feed URL to ingest.
lifestream.arguments.add_argument('type')
lifestream.arguments.add_argument('url')
args = lifestream.arguments.parse_args()
Lifestream = lifestream.Lifestream()
logger.info('Grabbing %s' % args.url)
fp = feedparser.parse(args.url)
for i in range(len(fp['entries'])):
    o_item = fp['entries'][i]
    id = o_item['guid']
    # feedparser exposes updated_parsed as a struct_time; format as "YYYY-MM-DD HH:MM".
    dt = datetime.fromtimestamp(mktime(o_item['updated_parsed']))
    updated = dt.strftime("%Y-%m-%d %H:%M")
    logger.info("Adding new %s item: %s" % (args.type, o_item['title']))
    # Store each feed entry in the lifestream backend, keyed by guid.
    Lifestream.add_entry(
        type=args.type,
        id=id,
        title=o_item['title'],
        source=args.type,
        date=updated,
        url=o_item['links'][0]['href'])
| aquarion/Lifestream | imports/atom.py | atom.py | py | 884 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lifestream.arguments.add_argument",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "lifestream.arguments",
"line_number": 15,
"usage_type": "attribute"
},
{
"api... |
28557952081 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``docsalias`` table mapping pairs of aliased doc pages."""
    dependencies = [
        ('docs', '0002_drop_doccomments'),
    ]
    operations = [
        migrations.CreateModel(
            name='DocPageAlias',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('file1', models.CharField(unique=True, max_length=64)),
                ('file2', models.CharField(unique=True, max_length=64)),
            ],
            options={
                'db_table': 'docsalias',
                'verbose_name_plural': 'Doc page aliases',
            },
        ),
        # Enforce uniqueness of the unordered pair (file1, file2).
        migrations.RunSQL("CREATE UNIQUE INDEX docsalias_unique ON docsalias (LEAST(file1, file2), GREATEST(file1, file2))"),
    ]
| postgres/pgweb | pgweb/docs/migrations/0003_docs_alias.py | 0003_docs_alias.py | py | 880 | python | en | code | 66 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 14,
"usage_type": "call"
},
... |
29476078580 | from itertools import count
from numpy import poly1d
import streamlit as st
from streamlit_option_menu import option_menu
import pandas as pd
import re
# Data Viz Pkgs
import plotly
import plotly.express as px
import plotly.graph_objs as go
from db_fxns import add_data, create_table, view_all_data, get_name, view_unique_name, edit_patient_data, delete_data, create_usertable, add_userdata, login_user, view_user
def show_database_page():
    """Streamlit page with login/signup and CRUD views over the patient DB.

    Sidebar handles authentication (Login/Signup/Logout); a verified login
    unlocks add/view/update/delete operations plus plotly summaries.
    """
    st.title("Explore database page")
    st.info("You need to be logged in as qualified medical personnel to access database services.")
    with st.sidebar:
        auth = option_menu(
            menu_title=None,
            options= [ "Login","Signup","Logout"] ,
            icons =["person-check","person-plus","person-x"],
            menu_icon = "cast",
            default_index=0,
        )
    if auth == "Login":
        st.sidebar.write(" # Login Here #")
        username = st.sidebar.text_input("User Name")
        password = st.sidebar.text_input("Password" ,type="password")
        authstatus = "verified"
        if st.sidebar.checkbox("Login"):
            create_usertable()
            # Only accounts whose status is "verified" can log in.
            resultss = login_user(username,password,authstatus)
            #if password == "1234":
            if resultss:
                st.sidebar.success("Succesfully logged in as {}".format(username))
                st.write(
                    """
                    ### View And Modify Patients' Database
                    """
                )
                choice = option_menu(
                    menu_title="Database Menu",
                    options= ["Add New Patient Details","View All Patients Details","Update Patient Details","Delete Patient Details"] ,
                    icons =["folder-plus","folder2-open","folder-symlink","folder-x"],
                    menu_icon = "hdd-stack-fill",
                    default_index=0,
                    orientation="horizontal",
                )
                create_table()
                # --- Create ---
                if choice == "Add New Patient Details":
                    st.subheader("Add Patient Details")
                    col1,col2 = st.columns(2)
                    col3,col4,col5 = st.columns(3)
                    col6,col7,col8 = st.columns(3)
                    with col1:
                        name = st.text_input("Patient's Full Name")
                    with col2:
                        id = st.text_input("Patient's ID Number")
                    with col3:
                        diabetis = st.selectbox("Diabetis Status" , ("Not Tested","Positive", "Negative"))
                    with col4:
                        heart = st.selectbox("Heart Disease Status" , ("Not Tested","Positive", "Negative"))
                    with col5:
                        parkinsons = st.selectbox("Parkinson's Disease Status" , ("Not Tested","Positive", "Negative"))
                    with col6:
                        Hospital = st.text_input("Hosipital Name")
                    with col8:
                        date = st.date_input("Date of last testing")
                    with col7:
                        county = st.selectbox("Patient's County" ,("Mombasa","Kwale","Kilifi","Tana River","Lamu","Taita/Taveta","Garissa","Wajir",
                    "Mandera","Marsabit","Isiolo","Meru","Tharaka-Nithi","Embu","Kitui", "Machakos", "Makueni","Nyandarua","Nyeri","Kirinyaga",
                    "Murang'a","Kiambu","Turkana","West Pokot","Samburu","Trans Nzoia","Uasin Gishu","Elgeyo/Marakwet","Nandi","Baringo","Laikipia",
                    "Nakuru","Narok","Kajiado","Kericho","Bomet","Kakamega","Vihiga","Bungoma","Busia","Siaya","Kisumu","Homa Bay","Migori","Kisii","Nyamira","Nairobi City"))
                    add = st.button("Add Patient to database")
                    if add:
                        add_data(name,id,diabetis,heart,parkinsons,Hospital,date,county)
                        st.success("sucessfully added :: {} :: to database".format(name))
                # --- Read / visualize ---
                elif choice == "View All Patients Details":
                    st.subheader("View Database")
                    result = view_all_data()
                    #st.write(result)
                    df = pd.DataFrame(result,columns=['Name of patient','ID Number.','Diabetis Status','Heart Status','Parkinsons Status','Hospital Name','Date of checking','Patients County'])
                    df.index += 1
                    with st.expander("View all Data"):
                        st.dataframe(df)
                    with st.expander("Diabetis Distribution Summary"):
                        diabetis_df= df['Diabetis Status'].value_counts().to_frame()
                        diabetis_df = diabetis_df.reset_index()
                        st.dataframe(diabetis_df)
                        p1 = px.pie(diabetis_df,names='index',values='Diabetis Status')
                        st.plotly_chart(p1,use_container_width=True)
                    with st.expander("heart Disease Distribution Summary"):
                        heart_df= df['Heart Status'].value_counts().to_frame()
                        heart_df = heart_df.reset_index()
                        st.dataframe(heart_df)
                        p1 = px.pie(heart_df,names='index',values='Heart Status')
                        st.plotly_chart(p1,use_container_width=True)
                    with st.expander("Parkinson's Disease Distribution Summary"):
                        parkinson_df= df['Parkinsons Status'].value_counts().to_frame()
                        parkinson_df = parkinson_df.reset_index()
                        st.dataframe(parkinson_df)
                        p1 = px.pie(parkinson_df,names='index',values='Parkinsons Status')
                        st.plotly_chart(p1,use_container_width=True)
                    # Per-county stacked bar charts; the selectbox labels below
                    # differ slightly so each Streamlit widget gets a unique key.
                    with st.expander("Diabetes Disease Distribution Graph By County In Kenya "):
                        county= df['Patients County']
                        countysort = st.selectbox("Please Select County" ,("Mombasa","Kwale","Kilifi","Tana River","Lamu","Taita/Taveta","Garissa","Wajir",
                    "Mandera","Marsabit","Isiolo","Meru","Tharaka-Nithi","Embu","Kitui", "Machakos", "Makueni","Nyandarua","Nyeri","Kirinyaga",
                    "Murang'a","Kiambu","Turkana","West Pokot","Samburu","Trans Nzoia","Uasin Gishu","Elgeyo/Marakwet","Nandi","Baringo","Laikipia",
                    "Nakuru","Narok","Kajiado","Kericho","Bomet","Kakamega","Vihiga","Bungoma","Busia","Siaya","Kisumu","Homa Bay","Migori","Kisii","Nyamira","Nairobi City"))
                        pos_responses = df[df['Diabetis Status'] == 'Positive'][df['Patients County'] == countysort]['Diabetis Status'].value_counts()
                        neg_responses = df[df['Diabetis Status'] == 'Negative'][df['Patients County'] == countysort]['Diabetis Status'].value_counts()
                        non_responses = df[df['Diabetis Status'] == 'Not Tested'][df['Patients County'] == countysort]['Diabetis Status'].value_counts()
                        date =df['Date of checking']
                        trace1 = go.Bar(
                            x=pos_responses.index,
                            y=pos_responses.values,
                            name='Positive'
                        )
                        trace2 = go.Bar(
                            x=neg_responses.index,
                            y=neg_responses.values,
                            name='Negative'
                        )
                        trace3 = go.Bar(
                            x=non_responses.index,
                            y=non_responses.values,
                            name='Not Tested'
                        )
                        data = [trace1, trace2 ,trace3]
                        layout = go.Layout(
                            barmode='stack'
                        )
                        fig = go.Figure(data=data, layout=layout)
                        st.plotly_chart(fig,use_container_width=True)
                    with st.expander("Heart Disease Distribution Graph By County In Kenya "):
                        county= df['Patients County']
                        countysort = st.selectbox("Select County" ,("Mombasa","Kwale","Kilifi","Tana River","Lamu","Taita/Taveta","Garissa","Wajir",
                    "Mandera","Marsabit","Isiolo","Meru","Tharaka-Nithi","Embu","Kitui", "Machakos", "Makueni","Nyandarua","Nyeri","Kirinyaga",
                    "Murang'a","Kiambu","Turkana","West Pokot","Samburu","Trans Nzoia","Uasin Gishu","Elgeyo/Marakwet","Nandi","Baringo","Laikipia",
                    "Nakuru","Narok","Kajiado","Kericho","Bomet","Kakamega","Vihiga","Bungoma","Busia","Siaya","Kisumu","Homa Bay","Migori","Kisii","Nyamira","Nairobi City"))
                        pos_responses = df[df['Heart Status'] == 'Positive'][df['Patients County'] == countysort]['Heart Status'].value_counts()
                        neg_responses = df[df['Heart Status'] == 'Negative'][df['Patients County'] == countysort]['Heart Status'].value_counts()
                        non_responses = df[df['Heart Status'] == 'Not Tested'][df['Patients County'] == countysort]['Heart Status'].value_counts()
                        date =df['Date of checking']
                        trace1 = go.Bar(
                            x=pos_responses.index,
                            y=pos_responses.values,
                            name='Positive'
                        )
                        trace2 = go.Bar(
                            x=neg_responses.index,
                            y=neg_responses.values,
                            name='Negative'
                        )
                        trace3 = go.Bar(
                            x=non_responses.index,
                            y=non_responses.values,
                            name='Not Tested'
                        )
                        data = [trace1, trace2 ,trace3]
                        layout = go.Layout(
                            barmode='stack'
                        )
                        fig = go.Figure(data=data, layout=layout)
                        st.plotly_chart(fig,use_container_width=True)
                    with st.expander("Parkinson's Disease Distribution Graph By County In Kenya "):
                        county= df['Patients County']
                        countysort = st.selectbox("Select County here" ,("Mombasa","Kwale","Kilifi","Tana River","Lamu","Taita/Taveta","Garissa","Wajir",
                    "Mandera","Marsabit","Isiolo","Meru","Tharaka-Nithi","Embu","Kitui", "Machakos", "Makueni","Nyandarua","Nyeri","Kirinyaga",
                    "Murang'a","Kiambu","Turkana","West Pokot","Samburu","Trans Nzoia","Uasin Gishu","Elgeyo/Marakwet","Nandi","Baringo","Laikipia",
                    "Nakuru","Narok","Kajiado","Kericho","Bomet","Kakamega","Vihiga","Bungoma","Busia","Siaya","Kisumu","Homa Bay","Migori","Kisii","Nyamira","Nairobi City"))
                        pos_responses = df[df['Parkinsons Status'] == 'Positive'][df['Patients County'] == countysort]['Parkinsons Status'].value_counts()
                        neg_responses = df[df['Parkinsons Status'] == 'Negative'][df['Patients County'] == countysort]['Parkinsons Status'].value_counts()
                        non_responses = df[df['Parkinsons Status'] == 'Not Tested'][df['Patients County'] == countysort]['Parkinsons Status'].value_counts()
                        date =df['Date of checking']
                        trace1 = go.Bar(
                            x=pos_responses.index,
                            y=pos_responses.values,
                            name='Positive'
                        )
                        trace2 = go.Bar(
                            x=neg_responses.index,
                            y=neg_responses.values,
                            name='Negative'
                        )
                        trace3 = go.Bar(
                            x=non_responses.index,
                            y=non_responses.values,
                            name='Not Tested'
                        )
                        data = [trace1, trace2 ,trace3]
                        layout = go.Layout(
                            barmode='stack'
                        )
                        fig = go.Figure(data=data, layout=layout)
                        st.plotly_chart(fig,use_container_width=True)
                # --- Update ---
                elif choice == "Update Patient Details":
                    st.subheader("Edit / Update Patient Details")
                    with st.expander("View Patient Current Data"):
                        result = view_all_data()
                        # NOTE(review): column 'Patients county' (lower-case c) differs
                        # from 'Patients County' used in the view section above.
                        df = pd.DataFrame(result,columns=['Name of patient','ID Number.','Diabetis Status','Heart Status','Parkinsons Status','Hospital Name','Date of checking','Patients county'])
                        df.index += 1
                        st.dataframe(df)
                    list_of_name = [i [0] for i in view_unique_name()]
                    selected_name = st.selectbox("Patient's Detail To Edit",list_of_name)
                    selected_result = get_name(selected_name)
                    if selected_result:
                        # Unpack the current row to pre-fill the edit widgets.
                        name = selected_result[0][0]
                        id = selected_result[0][1]
                        diabetis = selected_result[0][2]
                        heart = selected_result[0][3]
                        parkinsons = selected_result[0][4]
                        Hospital = selected_result[0][5]
                        date = selected_result[0][6]
                        col1,col2 = st.columns(2)
                        col3,col4,col5 = st.columns(3)
                        col6,col7 = st.columns(2)
                        with col1:
                            new_name = st.text_input("Patient's Full Name",name)
                        with col2:
                            new_id = st.text_input("Patient's ID Number",id)
                        with col3:
                            new_diabetis = st.selectbox("Diabetis Status" , ["Not Tested","Positive", "Negative"])
                        with col4:
                            new_heart = st.selectbox("Heart Disease Status" , ["Not Tested","Positive", "Negative"])
                        with col5:
                            new_parkinsons = st.selectbox("Parkinson's Disease Status" , ["Not Tested","Positive", "Negative"])
                        with col6:
                            new_Hospital = st.text_input("Hosipital Name",Hospital)
                        with col7:
                            new_date = st.date_input("Date of last testing")
                        add = st.button("Update Patient details")
                        if add:
                            edit_patient_data(new_name,new_id,new_diabetis,new_heart,new_parkinsons,new_Hospital,new_date,name,id,diabetis,heart,parkinsons,Hospital,date)
                            st.success("sucessfully updated :: {} :: details to :: {} ".format(name,new_name))
                            with st.expander("View Patient Updated Data"):
                                result2 = view_all_data()
                                df2 = pd.DataFrame(result2,columns=['Name of patient','ID Number.','Diabetis Status','Heart Status','Parkinsons Status','Hospital Name','Date of checking','Patients county'])
                                df2.index += 1
                                st.dataframe(df2)
                # --- Delete ---
                elif choice == "Delete Patient Details":
                    st.subheader("Delete Patient Details")
                    with st.expander("View Patient's Current Data"):
                        result = view_all_data()
                        df = pd.DataFrame(result,columns=['Name of patient','ID Number.','Diabetis Status','Heart Status','Parkinsons Status','Hospital Name','Date of checking','Patients county'])
                        df.index += 1
                        st.dataframe(df)
                    list_of_name = [i [0] for i in view_unique_name()]
                    selected_name = st.selectbox("Patient's Detail To Delete",list_of_name)
                    st.warning("Do You Want To Delete Patient :: {} Details?".format(selected_name))
                    if st.button("Delete Patient's Details"):
                        delete_data(selected_name)
                        st.success("Patient Details Successfully deleted")
                        with st.expander("View Patient Updated Data"):
                            result3 = view_all_data()
                            df2 = pd.DataFrame(result3,columns=['Name of patient','ID Number.','Diabetis Status','Heart Status','Parkinsons Status','Hospital Name','Date of checking','Patients county'])
                            df2.index += 1
                            st.dataframe(df2)
            else:
                st.sidebar.warning("Incorrect Username/Password Combination Or Your Account Maybe Unverifed")
    elif auth == "Signup":
        st.sidebar.write(" # SignUp Here #")
        new_username = st.sidebar.text_input("User Name")
        new_email = st.sidebar.text_input("Email Address")
        new_regnumber = st.sidebar.text_input("Registration Number")
        confirm_password = st.sidebar.text_input("Password" ,type="password")
        new_password = st.sidebar.text_input("Confirm Password" ,type="password")
        # New accounts start unverified; an admin must flip the status.
        new_authstatus = "Pending"
        regex = re.compile(r'([A-Za-z0-9]+[.-_])*[A-Za-z0-9]+@[A-Za-z0-9-]+(\.[A-Z|a-z]{2,})+')
        vals = view_user(new_username)
        if st.sidebar.checkbox("SignUp"):
            if vals:
                st.sidebar.warning("This user is already regestered!")
                st.sidebar.info("If you are already verified by administrator please proceed to login")
            else:
                if re.fullmatch(regex, new_email):
                    if confirm_password == new_password:
                        create_usertable()
                        add_userdata(new_username,new_password,new_email,new_regnumber,new_authstatus)
                        st.sidebar.success("Successfully Signed Up")
                        st.sidebar.info("You Will Be notified Once Your Account Is Verified To Access Other Features Of The App")
                    else:
                        st.sidebar.warning("SignUp Unsuccessful!")
                        st.sidebar.info("Make sure the passwords entered match each other")
                else:
                    st.sidebar.warning("Invalid email format, Please check your eamil and try again")
    elif auth == "Logout":
        st.sidebar.info("Successfully Logged out")
st.write("You are currently logged out") | kinteog/sam-diseaseprediction | database_page.py | database_page.py | py | 19,402 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.title",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.info",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "streamlit_optio... |
38543312863 | import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
import time
total_starttime = time.time()
# Load the trained GAN generator (Windows paths written as raw strings).
generator = load_model(r'C:\Licenta\GAN_IMAGES\model_16_batch\generator_model_99.h5')
damaged_directory = r"C:\Licenta\GAN_IMAGES\damaged\test"
# batch_size=1 and shuffle=False keep output files aligned with directory order.
damaged_dataset = tf.keras.utils.image_dataset_from_directory(damaged_directory,
                                                              label_mode=None,
                                                              batch_size=1,
                                                              image_size=(256, 256),
                                                              shuffle=False)
### Define a normalization layer ###
normalization_layer = tf.keras.layers.Rescaling(scale=1. / 127.5, offset=-1)
### Normalize the datasets ###
damaged_dataset = damaged_dataset.map(lambda x: normalization_layer(x))
i = 0
for img in (damaged_dataset):
    generated_image = generator(img)
    # Map generator output from [-1, 1] back to [0, 255] uint8 before saving.
    generated_image = (generated_image[0].numpy() * 127.5 + 127.5).astype(np.uint8)
    tf.keras.preprocessing.image.save_img(r"C:\Licenta\GAN_IMAGES\results_GAN_bs32\16_batch_size_100_epochs/" + str(i) + ".jpg", generated_image)
    i = i + 1
total_endtime = time.time()
print(f'Total execution time: {total_endtime - total_starttime} s')
... |
22611037359 | # coding: utf-8
import time
from admin.models import PaymentAccountInfo
from common.models import SalesManUser
from common.serializers import CommonNoticeSerializer
from coupon.models import UserCoupon
from rest_framework.response import Response
from rest_framework.views import APIView
from authentication.models import User
from authentication.models import UserInfo
from order.models import Order, UserCourse
from utils.functions import get_key_verbose_data, handle_mongodb_cursor_data
from utils.mongodb import stu_db
class GlobalEnumsViewSet(APIView):
    """Return every choice enum used across the admin/order/user modules,
    mapped to key/verbose pairs for the frontend."""

    def get(self, request):
        # (response key, model choices) pairs, in the order the client expects.
        sources = [
            ('account_payment', PaymentAccountInfo.PAYMENT),
            ('account_currency', PaymentAccountInfo.CURRENCY),
            ('user_info_gender', UserInfo.GENDER),
            ('user_grade', UserInfo.GRADE),
            ('order_payment', Order.PAYMENT),
            ('order_currency', Order.CURRENCY),
            ('order_status', Order.STATUS),
            ('user_status', SalesManUser.STATUS),
            ('course_credit_switch', UserCourse.CREDIT_SWITCH_STATUS),
            ('user_course_status', UserCourse.STATUS),
            ('coupon_status', UserCoupon.STATUS),
            ('user_role', User.ROLE),
        ]
        res = {key: get_key_verbose_data(dict(choices)) for key, choices in sources}
        return Response(res)
class CommonNoticeViewSet(APIView):
    """Read and acknowledge auto-generated notice messages stored in MongoDB."""
    def get(self, request):
        # Unread notices for the current user from the last 3 days,
        # newest first, paginated by the mongo helper.
        notice_message = handle_mongodb_cursor_data(
            stu_db.find(
                collection_name='message_auto_notice',
                pagination=True,
                sort_field=("_id", -1),
                search_data={
                    'user_id': request.user.id,
                    'read': {'$ne': True},
                    'create_time': {'$gte': int(time.time() - 3 * 24 * 3600)}
                }
            )
        )
        return Response(notice_message)
    def put(self, request):
        # Mark every notice of the given module as read for this user.
        serializer = CommonNoticeSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        stu_db.update_many(collection_name='message_auto_notice',
                           search_data={'user_id': request.user.id, 'module_name': validated_data['module_name']},
                           update_data={"$set": {'read': True}})
        return Response({'msg': '操作成功'})
class OrderCurrencyPaymentViewSet(APIView):
    """Group the configured payment accounts by currency and channel
    for display on the order payment page."""
    def get(self, request):
        payment = PaymentAccountInfo.objects.all()
        # Each queryset below pulls only the fields its channel needs.
        inland_bank = payment.filter(payment='BANK', currency='RMB'). \
            values('payment', 'currency', 'account_number', 'account_name', 'opening_bank', )
        international_bank = payment.filter(payment='BANK', currency='FOREIGN_CURRENCY'). \
            values('payment', 'currency', 'account_number', 'account_name', 'pay_link')
        ali_pay = payment.filter(payment='ALI_PAY'). \
            values('payment', 'currency', 'account_number', 'account_name', )
        pay_pal = payment.filter(payment='PAY_PAL'). \
            values('payment','bank_name', 'opening_bank', 'currency', 'account_name', 'swift_code', 'routing_number_paper',
                   'swift_code_foreign_currency', 'company_address', 'account_number', 'routing_number_wires')
        # Nested structure: currency -> list of payment channels -> accounts.
        res = [
            {
                'key': 'FOREIGN_CURRENCY',
                'verbose': dict(PaymentAccountInfo.CURRENCY).get('FOREIGN_CURRENCY'),
                'payment': [
                    {
                        'key': 'BANK',
                        'verbose': dict(PaymentAccountInfo.PAYMENT).get('BANK'),
                        'payment_information': international_bank
                    },
                    {
                        'key': 'PAY_PAL',
                        'verbose': dict(PaymentAccountInfo.PAYMENT).get('PAY_PAL'),
                        'payment_information': pay_pal
                    }
                ]
            },
            {
                'key': 'RMB',
                'verbose': dict(PaymentAccountInfo.CURRENCY).get('RMB'),
                'payment': [
                    {
                        'key': 'BANK',
                        'verbose': dict(PaymentAccountInfo.PAYMENT).get('BANK'),
                        'payment_information': inland_bank
                    },
                    {
                        'key': 'ALI_PAY',
                        'verbose': dict(PaymentAccountInfo.PAYMENT).get('ALI_PAY'),
                        'payment_information': ali_pay
                    },
                ]
            }
        ]
        return Response(res)
{
"api_name": "rest_framework.views.APIView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "utils.functions.get_key_verbose_data",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "admin.models.PaymentAccountInfo.PAYMENT",
"line_number": 20,
"usage_typ... |
74543466592 | import unittest
from pathlib import Path
import lindenmayer_system as ls
from PIL import Image, ImageDraw
class TurtleInterpreterTestCase(unittest.TestCase):
    """Exercise TurtleInterpreter against several classic L-systems.

    Each test derives an L-system string, interprets it into drawing
    instructions, renders the line segments onto a PIL image and saves the
    result under ``tests/testing_outputs`` for visual inspection.
    """

    DRAW_COLOR = (0, 0, 0)  # all tests draw in black

    def setUp(self):
        self.image = Image.new("RGB", (1280, 1280), "white")
        self.draw = ImageDraw.Draw(self.image)
        # (row, col, heading); start at the centre of the canvas.
        self.pos = (1280 / 2, 1280 / 2, 180)
        self.step_length = 10
        self.turn_angle = 90
        self.turtle = ls.TurtleInterpreter(
            self.pos[0], self.pos[1], self.pos[2], self.step_length, self.turn_angle
        )
        tests_dir = Path(__file__).resolve().parent
        self.output_dir = tests_dir / "testing_outputs"
        self.alphabet = {"F", "f", "+", "-"}

    def _render(self, alphabet, axiom, production_rules, derivation_length,
                output_name, offset=0):
        """Derive the L-system, draw its 'line' instructions (shifted down by
        ``offset`` pixels) and save the image as ``output_name``.

        The derive/draw/save sequence was duplicated across all three tests;
        this helper centralises it.
        """
        lsystem = ls.DOLSystem(alphabet, axiom, production_rules)
        lsystem_string = lsystem.apply_production_rules(derivation_length)[-1]
        for instruction in self.turtle.interpret(lsystem_string):
            # Instructions are (kind, (row1, col1), (row2, col2)); PIL wants (x, y).
            point1 = (instruction[1][1], instruction[1][0] + offset)
            point2 = (instruction[2][1], instruction[2][0] + offset)
            if instruction[0] == "line":
                self.draw.line((point1, point2), fill=self.DRAW_COLOR, width=2)
        self.image.save(self.output_dir / output_name)

    def test_interpret(self):
        production_rules = {"F": "F+f-FF+F+FF+Ff+FF-f+FF-F-FF-Ff-FFF", "f": "fffff"}
        self._render(self.alphabet, "F-F-F-F", production_rules, 2,
                     "interpret_test_output.jpg")

    def test_interpret_FASS(self):
        # Space-filling curve with two-character symbols.
        alphabet = {"Fl", "Fr", "+", "-"}
        production_rules = {
            "Fl": "FlFl+Fr+Fr-Fl-Fl+Fr+FrFl-Fr-FlFlFr+Fl-Fr-FlFl-Fr+FlFr+Fr+Fl-Fl-FrFr+",
            "Fr": "-FlFl+Fr+Fr-Fl-FlFr-Fl+FrFr+Fl+Fr-FlFrFr+Fl+FrFl-Fl-Fr+Fr+Fl-Fl-FrFr",
        }
        self._render(alphabet, "Fl", production_rules, 3,
                     "interpret_fass_test_output.jpg")

    def test_interpret_bracketed(self):
        # Plant-like bracketed L-system; narrower branching angle.
        self.turtle.turn_angle = 25.7
        self._render({"F", "+", "-", "[", "]"}, "F", {"F": "F[+F]F[-F]F"}, 5,
                     "interpret_bracketed_test_output.jpg", offset=600)
# Allow running this test module directly with `python test_turtle_interpreter.py`.
if __name__ == "__main__":
    unittest.main()
| lucas-escobar/botany-lab | tests/test_turtle_interpreter.py | test_turtle_interpreter.py | py | 3,503 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.new",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
... |
20383117487 | from django import forms
from .models import Post
from urllib import request
from django.core.files.base import ContentFile
from django.utils.text import slugify
class PostCreateForm(forms.ModelForm):
    """ModelForm for creating a Post with an image and description."""

    def save(self, force_insert=False, force_update=False, commit=True):
        """Return the Post instance, persisting it only when ``commit`` is True."""
        posts = super().save(commit=False)
        # The old code computed slugify(posts.title) into an unused local; the
        # slug was never stored, so the dead assignment is removed.
        if commit:
            posts.save()
        return posts

    class Meta:
        model = Post
        fields = ('image', 'description')
class ProfileEditForm(forms.ModelForm):
    # NOTE(review): identical field set to PostCreateForm; confirm whether this
    # form should expose profile-specific fields instead.
    class Meta:
        model = Post
        fields = ('image', 'description')
{
"api_name": "django.forms.ModelForm",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.utils.text.slugify",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mode... |
30915139901 | import pytest as pytest
from selenium import webdriver
driver = None
@pytest.fixture()
def setup_and_teardown(request):
    """Start a maximized Chrome driver for the requesting test class and
    guarantee it is quit on teardown."""
    # Keep the module-level ``driver`` assignment: other framework code may
    # read it (it is initialised to None above).
    global driver
    driver = webdriver.Chrome()
    try:
        driver.maximize_window()
        # Hybrid-framework style: expose the driver on the test class.
        request.cls.driver = driver
        yield
    finally:
        # Quit even if maximize/attribute setup raised, avoiding a leaked browser.
        driver.quit()
| VardhiniMohan/SeleniumPythonHybridFramework2 | tests/conftest.py | conftest.py | py | 262 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "call"
}
] |
7217036142 | import pendulum
from airflow import models
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
# Schedule times are interpreted in Korea Standard Time.
KST = pendulum.timezone("Asia/Seoul")
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime(2021, 8, 9, tzinfo=KST),
    'email': ['airflow@airflow.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}
with models.DAG(
        dag_id='test', description='write date and, hello airflow!',
        schedule_interval='*/10 * * * *', # repeat once every 10 minutes
        default_args=default_args,
        catchup=False
) as dag:
    # Append the current timestamp to the test file.
    t1 = BashOperator(
        task_id='write_date',
        bash_command='echo "data now : $(date +%Y)-$(date +%m)-$(date +%d) $(date +%H):$(date +%M):$(date +%S)" >> ~/airflow/test.txt',
        dag=dag
    )
    # Append a greeting after the timestamp task succeeds.
    t2 = BashOperator(
        task_id='write_hello_airflow',
        bash_command='echo "hello airflow!" >> ~/airflow/test.txt',
        dag=dag
    )
t1 >> t2 | J-TKim/airflow_tutorial | main.py | main.py | py | 1,065 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pendulum.timezone",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "airflow.models.... |
10808815511 | import requests
import sys
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
MAX_RETRY_NUM = 5
BACKOFF_FACTOR = 1
STATUS_FORCELIST = [502, 503, 504]
def get_request(url, timeout=None):
    """
    Attempt to fetch the given URL with an HTTP GET request, retrying up to
    MAX_RETRY_NUM times (with exponential backoff) on 502/503/504 responses.

    :param url: URL to fetch
    :param timeout: optional requests timeout in seconds; None preserves the
        previous behaviour of waiting indefinitely
    :return: the response object on success, None when a handled error occurred
    """
    try:
        session = requests.Session()
        retries = Retry(total=MAX_RETRY_NUM, backoff_factor=BACKOFF_FACTOR, status_forcelist=STATUS_FORCELIST)
        adapter = HTTPAdapter(max_retries=retries)
        # Mount the retry adapter for both schemes; the original mounted only
        # 'http://', so https:// URLs silently got no retry behaviour.
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        request_page = session.get(url, timeout=timeout)
        return request_page
    except requests.exceptions.ConnectionError as errc:
        # ConnectionError occurs in the event of a network problem (DNS failure, refused connection)
        print(f"Connection error: {errc}")
    except requests.exceptions.HTTPError as errh:
        # HTTPError occurs in the event of a rare invalid HTTP response
        print(f"HTTP error: {errh}")
    except requests.exceptions.Timeout as errt:
        # Timeout exception occurs whenever a request times out
        print(f"Request timeout error: {errt}")
    except requests.exceptions.TooManyRedirects as errm:
        # TooManyRedirects exception occurs if a request exceeds the configured number of maximum redirections
        print(f"URL was bad: {errm}")
    except requests.exceptions.RequestException as err:
        # Catch-all for any other requests failure; only this path aborts the
        # program, matching the original behaviour.
        print(f"Error during requests to {url}: {err}")
        sys.exit(1)
    return None  # explicit: handled errors fall through without a page
| vonniewu/nga-artists | nga_artists/html_request.py | html_request.py | py | 1,651 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.Session",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.packages.urllib3.util.retry.Retry",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.adapters.HTTPAdapter",
"line_number": 22,
"usage_type": "call"
},
... |
25516931292 | import numpy as np
import numpy.typing as npt
import pandas as pd
from scipy import special
from typing import Any
from linearlab.lik.base import Likelihood
from linearlab.link import Link, LogitLink, logit
class _BinomialBase(Likelihood[npt.NDArray[np.int_]]):
    """Shared behaviour for binomial likelihoods: single parameter ``p`` and
    conversion of raw outcomes into (successes, trials) arrays."""
    def params(self) -> list[str]:
        return ["p"]
    def prepare_y(self, y: pd.Series | pd.DataFrame) -> tuple[tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]], float]:
        # A Series is treated as Bernoulli outcomes: k in {0, 1}, n = 1.
        if isinstance(y, pd.Series):
            k = y.to_numpy(dtype=np.bool_).astype(np.int_)
            n = np.ones(y.shape[0], dtype=np.int_)
            logZ = 0.0
        elif isinstance(y, pd.DataFrame) and (y.shape[1] == 2):
            # Two columns: (n_success, n_failure); trials n is their row sum.
            y = y.to_numpy(dtype=np.int_)
            k = y[:,0]
            n = y.sum(axis=1)
            # log of the binomial coefficients C(n, k), summed over rows.
            logZ = np.sum(special.gammaln(n+1) - special.gammaln(k+1) - special.gammaln(n-k+1))
        else:
            raise ValueError("binomial needs either bool or n_success,n_failure")
        return (k, n), logZ
class Binomial(_BinomialBase):
    """Binomial likelihood under an arbitrary link function for p."""

    def __init__(self, link:Link) -> None:
        # Link mapping the linear predictor eta to the probability p.
        self.link = link

    def __call__(
        self,
        y: tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]],
        eta: npt.NDArray[np.float64],
        out_g: None | npt.NDArray[np.float64],
        out_h: None | npt.NDArray[np.float64],
    ) -> float:
        """Evaluate the binomial log-likelihood kernel at linear predictor eta.

        When provided, out_g is filled with the gradient w.r.t. eta and
        out_h with the Fisher information term (sign convention per the
        Likelihood interface — confirm against base class).
        """
        k, n = y
        # Assumed contract of Link.inv: returns (p, dp/deta) — confirm in link module.
        p, dp = self.link.inv(eta[0])
        # log-lik kernel: k*log(p) + (n-k)*log(1-p) == k*logit(p) + n*log1p(-p).
        f = np.sum(
            (k * special.logit(p)) +
            (n * special.log1p(-p))
        )
        if out_g is not None:
            # Chain rule: d/deta = dp * (k - n*p) / (p*(1-p)).
            out_g[0] = dp * (k - (n * p)) / p / (1 - p)
        if out_h is not None:
            # Expected information: dp^2 * n / (p*(1-p)).
            out_h[0,0] = (dp**2) * n / p / (1 - p)
        return f

    def __repr__(self) -> str:
        return f"binomial likelihood ({self.link})"
class BinomialLogit(_BinomialBase):
    """Binomial likelihood specialized to the canonical logit link.

    With eta = logit(p) the log-likelihood and its derivatives simplify,
    avoiding the generic chain-rule terms used by ``Binomial``.
    """

    def __call__(
        self,
        y: tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]],
        eta: npt.NDArray[np.float64],
        out_g: None | npt.NDArray[np.float64],
        out_h: None | npt.NDArray[np.float64],
    ) -> float:
        """Evaluate the log-likelihood kernel; optionally fill gradient/information buffers."""
        k, n = y
        eta = eta[0]
        p = special.expit(eta)
        # log-lik kernel: k*log(p) + (n-k)*log(1-p) == k*eta + n*log(sigmoid(-eta)).
        f = np.sum(
            (k * eta) +
            (n * special.log_expit(-eta))
        )
        if out_g is not None:
            # Score w.r.t. eta: observed minus expected successes.
            out_g[0] = k - (n * p)
        if out_h is not None:
            # Fisher information of the binomial under the logit link.
            out_h[0,0] = n * p * (1 - p)
        return f

    def __repr__(self) -> str:
        return "binomial likelihood (logit link)"
def binomial(link: Link = logit) -> Likelihood:
    """Construct a binomial likelihood for the given link.

    The canonical logit link gets the specialized ``BinomialLogit``
    implementation; any other link falls back to the generic ``Binomial``.
    """
    use_fast_path = isinstance(link, LogitLink)
    return BinomialLogit() if use_fast_path else Binomial(link)
| dschulman/linearlab | linearlab/lik/_binomial.py | _binomial.py | py | 2,597 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "linearlab.lik.base.Likelihood",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.typing.NDArray",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.typing",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "... |
18810182845 | """
Orthogonal Distance Regression using Monte Carlo to estimate errors
i.e. 1 fit using ODR with 10000 MC samples
Isaac Cheng - January 2021
"""
import sys
from pathlib import Path
import sqlite3
from contextlib import closing
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import pandas as pd
import scipy.odr as odr
# Want to add my own programs as package:
# Make a $PATH to coop2021 (twice parent folder of this file)
_SCRIPT_DIR = str(Path.cwd() / Path(__file__).parent.parent)
# Add coop2021 to $PATH
sys.path.append(_SCRIPT_DIR)
import mytransforms as trans
from universal_rotcurve import urc_odr
# Universal rotation curve parameters (Persic et al. 1996)
_A_TWO = 0.96 # (Reid et al. 2019)
_A_THREE = 1.62 # (Reid et al. 2019)
# Sun's distance from galactic centre
# NOTE(review): _RSUN is not referenced elsewhere in this script — confirm
# whether it is still needed or is a leftover from a sibling script.
_RSUN = 8.15 # kpc (Reid et al. 2019)
def get_coords(db_file):
    """
    Retrieve source coordinates from the Parallax table of an SQLite database.

    :param db_file: path to the SQLite database file
    :return: pandas DataFrame with columns
        ra (deg), dec (deg), glong (deg), glat (deg), plx (mas), e_plx (mas)
    """
    # Close BOTH the connection and the cursor. The previous
    # closing(sqlite3.connect(db_file).cursor()) only closed the cursor and
    # leaked the underlying connection.
    with closing(sqlite3.connect(db_file)) as conn, closing(conn.cursor()) as cur:
        cur.execute("SELECT ra, dec, glong, glat, plx, e_plx FROM Parallax")
        # Column names come from the cursor description, so the DataFrame
        # stays in sync with the SELECT list.
        coords = pd.DataFrame(cur.fetchall(), columns=[desc[0] for desc in cur.description])
    return coords
def get_vels(db_file):
    """
    Retrieve proper motions and LSR velocities from the Parallax table.

    :param db_file: path to the SQLite database file
    :return: pandas DataFrame with columns
        mux (mas/yr), muy (mas/yr), vlsr (km/s), e_mux, e_muy, e_vlsr
        (x & y proper motions are in the J2000 equatorial frame)
    """
    # Close BOTH the connection and the cursor. The previous
    # closing(sqlite3.connect(db_file).cursor()) only closed the cursor and
    # leaked the underlying connection.
    with closing(sqlite3.connect(db_file)) as conn, closing(conn.cursor()) as cur:
        cur.execute("SELECT mux, muy, vlsr, e_mux, e_muy, e_vlsr FROM Parallax")
        # Column names come from the cursor description, so the DataFrame
        # stays in sync with the SELECT list.
        vels = pd.DataFrame(cur.fetchall(), columns=[desc[0] for desc in cur.description])
    return vels
def main():
    """Fit the universal rotation curve to masers with ODR + Monte Carlo errors.

    Pipeline: load coordinates/velocities from the database, reject bad
    sources, Monte-Carlo sample the measured quantities, transform samples to
    galactocentric cylindrical (R, Theta), fit the URC with orthogonal
    distance regression, then plot and save the rotation curve.
    """
    # # Specifying database file name & folder
    # filename = Path("data/hii_v2_20201203.db")
    # # Database folder in parent directory of this script (call .parent twice)
    # db = Path(__file__).parent.parent / filename
    # Specifying absolute file path instead
    # (allows file to be run in multiple locations as long as database location does not move)
    db = Path("/home/chengi/Documents/coop2021/data/hii_v2_20201203.db")
    # Get data + put into DataFrame
    coords = get_coords(db) # coordinates
    vels = get_vels(db) # velocities
    # print(coords.to_markdown())
    # Create condition to filter data
    all_radii = trans.get_gcen_cyl_radius(coords["glong"], coords["glat"], coords["plx"])
    # Bad data criteria (N.B. casting to array prevents "+" not supported warnings)
    # Reject sources inside R < 4 kpc or with fractional parallax error > 20%.
    bad = (np.array(all_radii) < 4.0) + (np.array(coords["e_plx"]/coords["plx"]) > 0.2)
    # Slice data into components
    r_asc = coords["ra"][~bad] # deg
    dec = coords["dec"][~bad] # deg
    glon = coords["glong"][~bad] # deg
    glat = coords["glat"][~bad] # deg
    plx = coords["plx"][~bad] # mas
    e_plx = coords["e_plx"][~bad] # mas
    eqmux = vels["mux"][~bad] # mas/yr (equatorial frame)
    e_eqmux = vels["e_mux"][~bad] # mas/y (equatorial frame)
    eqmuy = vels["muy"][~bad] # mas/y (equatorial frame)
    e_eqmuy = vels["e_muy"][~bad] # mas/y (equatorial frame)
    vlsr = vels["vlsr"][~bad] # km/s
    e_vlsr = vels["e_vlsr"][~bad] # km/s
    _NUM_SAMPLES = 10000 # number of Monte Carlo samples of each parameter for each trial
    num_sources = len(r_asc) # number of entries in each (and every) dataframe
    print("Number of data points included:", num_sources)
    # Sample measurements _NUM_SAMPLES times (e.g. 10000 times)
    # Rows = Samples of 1 source, columns = distinct samples
    # e.g. all samples from 1st parallax source: plx_mc[:,0]
    plx_mc = np.random.normal(loc=plx, scale=e_plx, size=(_NUM_SAMPLES, num_sources))
    eqmux_mc = np.random.normal(loc=eqmux, scale=e_eqmux, size=(_NUM_SAMPLES, num_sources))
    eqmuy_mc = np.random.normal(loc=eqmuy, scale=e_eqmuy, size=(_NUM_SAMPLES, num_sources))
    vlsr_mc = np.random.normal(loc=vlsr, scale=e_vlsr, size=(_NUM_SAMPLES, num_sources))
    # Positions have no quoted uncertainty, so their "samples" are repeats.
    r_asc_mc = np.array([r_asc, ] * _NUM_SAMPLES) # _NUM_SAMPLES by num_sources
    dec_mc = np.array([dec, ] * _NUM_SAMPLES) # _NUM_SAMPLES by num_sources
    glon_mc = np.array([glon, ] * _NUM_SAMPLES) # _NUM_SAMPLES by num_sources
    glat_mc = np.array([glat, ] * _NUM_SAMPLES) # _NUM_SAMPLES by num_sources
    # Transform raw data into galactocentric cylindrical distance & circular velocity
    radius_mc, v_circ_mc = trans.eq_and_gal_to_gcen_cyl(
        r_asc_mc, dec_mc, plx_mc, glon_mc, glat_mc, eqmux_mc, eqmuy_mc, vlsr_mc
    )
    # Store results
    # Per-source mean/std over the Monte Carlo samples (columns = sources).
    radii_mc = np.mean(radius_mc, axis=0) # mean of each column (axis=0)
    e_radii_mc = np.std(radius_mc, axis=0) # std of each column (axis=0)
    v_circs_mc = np.mean(v_circ_mc, axis=0) # mean of each column (axis=0)
    e_v_circs_mc = np.std(v_circ_mc, axis=0) # std of each column (axis=0)
    # Create model for fitting
    urc_model = odr.Model(urc_odr)
    # Create RealData object using galactocentric cylindrical radius & circular velocity
    model_data = odr.RealData(x=radii_mc, y=v_circs_mc, sx=e_radii_mc, sy=e_v_circs_mc)
    # Set up ODR with model and data
    my_odr = odr.ODR(model_data, urc_model, beta0=[_A_TWO, _A_THREE])
    # Run regression
    my_output = my_odr.run()
    # Print results
    my_output.pprint()
    # Get optimal parameters
    a2_opt = my_output.beta[0]
    a3_opt = my_output.beta[1]
    e_a2_opt = my_output.sd_beta[0]
    e_a3_opt = my_output.sd_beta[1]
    print(f"a2: {a2_opt} +/- {e_a2_opt}")
    print(f"a3: {a3_opt} +/- {e_a3_opt}")
    # Create and plot dashed line for rotation curve using optimal parameters
    fig1, ax1 = plt.subplots()
    Rvals = np.linspace(0, 17, 101)
    Vvals = urc_odr((a2_opt, a3_opt), Rvals)
    ax1.plot(Rvals, Vvals, "r-.", linewidth=0.5)
    # Plot data
    ax1.errorbar(x=radii_mc, y=v_circs_mc,
                 xerr=e_radii_mc, yerr=e_v_circs_mc,
                 fmt="o", linewidth=1, markersize=2, capsize=2)
    # ax1.plot(radii_mc, v_circs_mc, "o", markersize=2)
    # Set title and labels. Then save figure
    plt.suptitle("Galactic Rotation Curve using ODR and MC Errors", y=0.96)
    ax1.set_title(f"(errors derived using N={_NUM_SAMPLES} random samples)",
                  fontsize=8)
    ax1.set_xlabel("R (kpc)")
    ax1.set_ylabel(r"$\Theta$ (km $\mathrm{s}^{-1})$")
    ax1.set_xlim(0, 17)
    ax1.set_ylim(0, 300)
    # Create legend to display current parameter values
    legend_elements = [
        Line2D(
            [0], [0],
            marker="o", color="w", markerfacecolor="k", markersize=0,
            label=fr"a2 = {round(a2_opt, 2)} $\pm$ {round(e_a2_opt, 2)}",
        ),
        Line2D(
            [0], [0],
            marker="o", color="w", markerfacecolor="k", markersize=0,
            label=fr"a3 = {round(a3_opt, 4)} $\pm$ {round(e_a3_opt, 4)}",
        ),
    ]
    ax1.legend(handles=legend_elements, handlelength=0, handletextpad=0)
    fig1.savefig(
        Path(__file__).parent / "odr_MC_errors.jpg",
        format="jpg",
        dpi=300,
        bbox_inches="tight",
    )
    plt.show()
if __name__ == "__main__":
main()
| tvwenger/coop2021 | rot_curve/odr_MC_errors.py | odr_MC_errors.py | py | 7,351 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path.cwd",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sys.path.append",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_nu... |
161448674 |
import os
import pytest
from path import Path
from . import initialize_git_repo_and_commit, prepare_project
# Directory holding previously-built distributions: ../dist relative to this file.
DIST_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../dist'))
# Test if package can be imported to allow testing on
# conda-forge where ``pytest-virtualenv`` is not available.
try:
    import pytest_virtualenv  # noqa: F401
    HAS_PYTEST_VIRTUALENV = True
except ImportError:
    HAS_PYTEST_VIRTUALENV = False
@pytest.mark.skipif(not HAS_PYTEST_VIRTUALENV,
                    reason="pytest_virtualenv not available. See #228")
def test_source_distribution(virtualenv):
    """Install the built sdist into a fresh virtualenv, then build a sample project with it."""
    dist_dir = Path(DIST_DIR)
    sdists = dist_dir.files(match="*.tar.gz") if dist_dir.exists() else []
    if not sdists:
        pytest.skip("no source distribution available")
    # Exactly one sdist is expected in dist/.
    assert len(sdists) == 1
    virtualenv.run("pip install %s" % sdists[0])
    assert "scikit-build" in virtualenv.installed_packages()
    workspace = virtualenv.workspace
    prepare_project("hello-no-language", workspace, force=True)
    initialize_git_repo_and_commit(workspace, verbose=False)
    virtualenv.run("python setup.py bdist_wheel")
@pytest.mark.skipif(not HAS_PYTEST_VIRTUALENV,
                    reason="pytest_virtualenv not available. See #228")
def test_wheel(virtualenv):
    """Install the built wheel into a fresh virtualenv, then build a sample project with it."""
    dist_dir = Path(DIST_DIR)
    wheels = dist_dir.files(match="*.whl") if dist_dir.exists() else []
    if not wheels:
        pytest.skip("no wheel available")
    # Exactly one wheel is expected in dist/.
    assert len(wheels) == 1
    virtualenv.run("pip install %s" % wheels[0])
    assert "scikit-build" in virtualenv.installed_packages()
    workspace = virtualenv.workspace
    prepare_project("hello-no-language", workspace, force=True)
    initialize_git_repo_and_commit(workspace, verbose=False)
    virtualenv.run("python setup.py bdist_wheel")
| jem0101/BigSwag-SQA2022-AUBURN | TestOrchestrator4ML-main/resources/Data/supervised/GITHUB_REPOS/scikit-build@scikit-build/tests/test_distribution.py | test_distribution.py | py | 1,736 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_nu... |
25463104297 | import argparse
import os
import os.path
import pandas
def main():
    """Print the mean model accuracy stored in one or more CSV files.

    Each input CSV must have a boolean ``model_correct`` column; for each
    file the script prints the absolute path (extension stripped) followed
    by the accuracy as a percentage, or "EOF" for truncated/empty inputs.
    """
    parser = argparse.ArgumentParser(description='Quick helper for getting model accuracies', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('inputs', nargs = '+', help='input CSVs')
    parser.add_argument('--nrows', help='num lines', type = int, default=None)
    args = parser.parse_args()
    for p in args.inputs:
        # os.path.splitext strips only the final extension. The previous
        # abspath(p).split('.')[0] truncated at the FIRST dot anywhere in
        # the path (e.g. /home/user.name/x.csv -> /home/user).
        label = os.path.splitext(os.path.abspath(p))[0]
        try:
            df = pandas.read_csv(p, nrows = args.nrows)
        except (EOFError, pandas.errors.EmptyDataError):
            # EOFError: truncated compressed input; EmptyDataError: empty file.
            print(f"{label} EOF")
        else:
            print(f"{label} {df['model_correct'].mean() * 100:.2f}%")
if __name__ == "__main__":
main()
| CSSLab/maia-individual | 3-analysis/get_accuracy.py | get_accuracy.py | py | 727 | python | en | code | 18 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.