seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
37320137463 | from projects.generalize_ising_model.tools.utils import save_file,ks_test
from projects.phi.tools.utils import load_matrix
from projects.phi.utils import *
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# Root folder holding one results sub-folder per parcellation.
main_path = '/home/brainlab/Desktop/Popiel/Ising_HCP/'
parcels = ['Aud', 'CinguloOperc', 'CinguloParietal', 'DMN', 'Dorsal', 'FrontoParietal', 'Retrosplenial', 'SMhand',
           'SMmouth', 'Ventral', 'Visual']
# For each parcellation: load the Ising and empirical transition-probability
# matrices, compute a KS statistic per simulated temperature, and plot.
for parcel in parcels:
    print('Running', parcel)
    parcel_path = main_path + parcel + '/'
    #results_path = sub_path + 'results/'
    tpm_path = parcel_path + '/results/'
    # NOTE(review): `np` is never imported directly in this file — presumably it
    # arrives via the star import from projects.phi.utils; verify.
    tpm_ising = np.squeeze(load_matrix(tpm_path+'tpm_ising.npy'))
    tpm_tc = np.squeeze(load_matrix(tpm_path+'tpm_tc.npy'))
    ts = np.squeeze(load_matrix(tpm_path + 'time_series.csv'))
    crit_temp = np.squeeze(load_matrix(tpm_path + 'crit_temp.csv'))
    # KS statistic between Ising TPM and empirical TPM at each temperature index.
    ks_temp = []
    for temp in range(tpm_ising.shape[0]):
        ks_temp.append(ks_test(tpm_ising[temp,...],tpm_tc[temp,...]))
    # Index (or indices) of the temperature with the largest KS value.
    t_star = np.where(np.asarray(ks_temp) == np.max(ks_temp))[0]
    #save_file(np.asarray(ks_temp),tpm_path,'ks_results_'+parcel)
    plt.figure()
    plt.plot(ks_temp,marker='o',color='IndianRed')
    # NOTE(review): np.where(...) returns a tuple of arrays, but axvline expects
    # a scalar x position — confirm this vertical line lands where intended.
    plt.axvline(np.where(ts==crit_temp),ymax=20,ymin=-20,color='k')
    plt.axvline(t_star,color='purple')
    plt.show()
| jrudascas/brain_lab | projects/phi/HCP_Ising/find_tpm_tstar.py | find_tpm_tstar.py | py | 1,348 | python | en | code | 2 | github-code | 13 |
22249719026 | """
to convert pickle file from python 2 to python 3
this script should be run with python 2
"""
from __future__ import print_function
import argparse
import os
import pickle
from _data_io import ITCExperiment
parser = argparse.ArgumentParser()
parser.add_argument("--exper_info_dir", type=str, default="05.exper_info")
parser.add_argument("--inp", type=str, default="experimental_information.pickle")
parser.add_argument("--out", type=str, default="experimental_information_dict.pickle")
parser.add_argument("--experiments", type=str, default=" ")
args = parser.parse_args()
def _convert(exper_info_obj):
    """Copy the experimental quantities out of an ITCExperiment object into a
    plain dict so the result can later be unpickled without the class."""
    field_names = (
        "target_temperature_kelvin",
        "number_injections",
        "cell_volume_liter",
        "injection_volumes_liter",
        "syringe_concentration_milli_molar",
        "cell_concentration_milli_molar",
    )
    # Each dict key maps to the value returned by the matching get_* accessor.
    return {name: getattr(exper_info_obj, "get_" + name)() for name in field_names}
experiments = args.experiments.split()
print("experiments:", experiments)
exper_info_dir = os.path.abspath(args.exper_info_dir)
print("exper_info_dir:", exper_info_dir)
# Convert each experiment's class-based pickle into a plain-dict pickle.
for exper in experiments:
    print("Processing " + exper)
    inp_file = os.path.join(exper_info_dir, exper, args.inp)
    out_file = os.path.join(exper_info_dir, exper, args.out)
    # Refuse to overwrite an existing converted pickle.
    if os.path.exists(out_file):
        raise ValueError("File exists: " + out_file)
    exper_info_obj = ITCExperiment(inp_file)
    exper_info_dict = _convert(exper_info_obj)
    # NOTE(review): the output handle is never closed explicitly; a `with` block
    # would guarantee the flush/close.
    pickle.dump(exper_info_dict, open(out_file, "wb"))
print("DONE")
| nguyentrunghai/bayesian_itc_racemic | scripts/run_convert_exper_info.py | run_convert_exper_info.py | py | 1,856 | python | en | code | 0 | github-code | 13 |
70361346899 | ##Make Data
#
#take background picture
#choose random number of objects between MIN_OBJ_PLACED and MAX_OBJ_PLACED
#paste objects to background and monitor/log their location and size
#save new picture
#save picture location and each object bbox(xmin,ymin,xmAx,ymAx) and class number to txt file
#
# print("xxx : ",xxx)
from PIL import Image
import os
import numpy as np
import cv2
#from random import randrange, randint
Total_Pics = 25 #Total Number of Pictures to Generate
# Source folders for background pictures and per-class object crops, plus the
# destination for the generated training images.
Path_Background = 'C:/Users\Bee/Desktop/_Background Items/'
Path_Objects = 'C:/Users/Bee/Desktop/taco_obj/'
Path_Save_Loc = 'C:/B_Folder/Desktop/Education/WPI/RBE_594_Capstone_Experience/Trash_Detection/Code_Research/TensorFlow-2.x-YOLOv3-master/train_testingDataGen/'
Generic_Img_Name = 'Generated_Trash_Img_'
Img_Format = '.jpg'
Images_Backgound = [Path_Background+img for img in os.listdir(Path_Background) if (img.endswith(".jpg") or img.endswith(".png"))]
Objects_SubDir = [Path_Objects+SubDir+"/" for SubDir in os.listdir(Path_Objects)]
#print("Objects_SubDir : ", Objects_SubDir)
#Images_Objects = [img for img in os.listdir(Objects_SubDir) if (img.endswith(".jpg") or img.endswith(".png"))]
# Flat list of every object image across all class sub-directories.
Images_Objects = []
for directory in Objects_SubDir:
    Sub_Obj_List = [directory+img for img in os.listdir(directory) if (img.endswith(".jpg") or img.endswith(".png"))]
    Images_Objects.extend(Sub_Obj_List)
#print("\n\nImages_Objects : ", Images_Objects)
#print("\n\n")
# Per-image object count range and sizing parameters for pasted objects.
MIN_OBJ_PLACED = 3
MAX_OBJ_PLACED = 9
MAX_OBJ_COUNT = len(Images_Objects)
SIZE_IMG_OUTPUT = 416
MAX_PERCENT_OBJ_SIZE = 0.4
#resize_percentage_list
RESIZE_PERCENTAGE_LIST = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
MAX_OBJ_SIZE = SIZE_IMG_OUTPUT * MAX_PERCENT_OBJ_SIZE
#dictionary matching object class name and id
OBJ_CLASS_ID = {
    "Aluminium_Foil" : 0,
    "Battery" : 1,
    "Aluminium_Blister_Pack" : 2,
    "Carded_Blister_Pack" : 3,
    "Other_Plastic_Bottle" : 4,
    "Clear_Plastic_Bottle" : 5,
    "Glass_Bottle" : 6,
    "Plastic_Bottle_Cap" : 7,
    "Metal_Bottle_Cap" : 8,
    "Broken_Glass" : 9,
    "Food_Can" : 10,
    "Aerosol" : 11,
    "Drink_Can" : 12,
    "Toilet_Tube" : 13,
    "Other_Carton" : 14,
    "Egg_Carton" : 15,
    "Drink_Carton" : 16,
    "Corrugated_Carton" : 17,
    "Meal_Carton" : 18,
    "Pizza_Box" : 19,
    "Paper_Cup" : 20,
    "Disposable_Plastic_Cup" : 21,
    "Foam_Cup" : 22,
    "Glass_Cup" : 23,
    "Other_Plastic_Cup" : 24,
    "Food_Waste" : 25,
    "Glass_Jar" : 26,
    "Plastic_Lid" : 27,
    "Metal_Lid" : 28,
    "Other_Plastic" : 29,
    "Magazine_Paper" : 30,
    "Tissues" : 31,
    "Wrapping_Paper" : 32,
    "Normal_Paper" : 33,
    "Paper_Bag" : 34,
    "Plastified_Paper_Bag" : 35,
    "Plastic_Film" : 36,
    "Six_Pack_Rings" : 37,
    "Garbage_Bag" : 38,
    "Other_Plastic_Wrapper" : 39,
    "Single-Use_Carrier_Bag" : 40,
    "Polypropylene_Bag" : 41,
    "Crisp_Packet" : 42,
    "Spread_Tub" : 43,
    "Tupperware" : 44,
    "Disposable_Food_Container" : 45,
    "Foam_Food_Container" : 46,
    "Other_Plastic_Container" : 47,
    "Plastic_Glooves" : 48,
    "Plastic_Utensils" : 49,
    "Pop_Tab" : 50,
    "Rope_&_Strings" : 51,
    "Scrap_Metal" : 52,
    "Shoe" : 53,
    "Squeezable_Tube" : 54,
    "Plastic_Straw" : 55,
    "Paper_Straw" : 56,
    "Styrofoam_Piece" : 57,
    "Unlabeled_Litter" : 58,
    "Cigarette" : 59,
    "Place_Holder_1" : 60,
    "Place_Holder_2" : 61,
    "Place_Holder_3" : 62,
    "Place_Holder_4" : 63,
    "Place_Holder_5" : 64,
    "Place_Holder_6" : 65,
    "Place_Holder_7" : 66,
    "Place_Holder_8" : 67,
    "Place_Holder_9" : 68,
    "Place_Holder_10" : 69
}
#Open Text File
Text_File = open('Generated_Data_Train.txt', 'w')
#masking values
# Pixels brighter than `threshold` on all channels, with channel differences
# below `dist`, are treated as "white background" and made transparent.
threshold=225
dist=6
#counter for potential statistics
total_num_Objects_placed = 0
# Compose Total_Pics training images: paste random objects onto backgrounds and
# write one YOLO annotation line per image to Text_File.
for i in range(Total_Pics):
    # Open a background picture and resize to the standardized YOLO input size.
    img_primary = Image.open(Images_Backgound[i])
    img_primary = img_primary.resize((SIZE_IMG_OUTPUT, SIZE_IMG_OUTPUT))
    # Random number of objects to paste into this background.
    rand_num_objs_placed = np.random.randint(MIN_OBJ_PLACED, MAX_OBJ_PLACED)
    Annotations = ""
    Objects_List = []
    Objects_Max_Dim_List = []
    # Pass 1: pick, rotate, scale and position each object (no pasting yet).
    for q in range(rand_num_objs_placed):
        # Randomly select an object image from the flat list.
        rand_obj_num = np.random.randint(0, MAX_OBJ_COUNT)
        img_obj = Image.open(Images_Objects[rand_obj_num]).convert('RGBA')
        # Random rotation; expand=True keeps the whole rotated object visible.
        rand_rot = np.random.randint(0, 360)
        img_obj = img_obj.rotate(rand_rot, expand=True)
        # Scale so the larger dimension equals MAX_OBJ_SIZE (the baseline size).
        img_width, img_height = img_obj.size
        if img_width >= img_height:
            standardized_sizing = MAX_OBJ_SIZE / img_width
        else:
            standardized_sizing = MAX_OBJ_SIZE / img_height
        standardized_width = int(standardized_sizing * img_width)
        standardized_height = int(standardized_sizing * img_height)
        img_obj = img_obj.resize((standardized_width, standardized_height))
        # Shrink randomly from the baseline (5% - 100% of the baseline size).
        img_width, img_height = img_obj.size
        random_resize_percentage = np.random.uniform(0.05, 1.0)
        new_width = int(random_resize_percentage * img_width)
        new_height = int(random_resize_percentage * img_height)
        img_obj = img_obj.resize((new_width, new_height))
        # Random top-left corner so the object fits fully inside the background.
        xmin = np.random.randint(0, (SIZE_IMG_OUTPUT - new_width))
        ymin = np.random.randint(0, (SIZE_IMG_OUTPUT - new_height))
        xmax = xmin + new_width
        ymax = ymin + new_height
        # Remember the object so pass 2 can paste largest-first; otherwise a big
        # object pasted later could completely cover a smaller one ("erasing" it).
        Objects_List.append([img_obj, xmin, ymin, xmax, ymax, rand_obj_num])
        Objects_Max_Dim_List.append(max(new_width, new_height))
    # Pass 2: paste objects from largest to smallest and build the annotations.
    for q in range(rand_num_objs_placed):
        # Pop the remaining object with the largest dimension.
        Max_Obj_Index = Objects_Max_Dim_List.index(max(Objects_Max_Dim_List))
        Objects_Max_Dim_List.pop(Max_Obj_Index)
        img_obj, xmin, ymin, xmax, ymax, rand_obj_num = Objects_List.pop(Max_Obj_Index)
        img_spot = (xmin, ymin)
        # Build an alpha mask hiding white-ish pixels so only the object itself
        # is pasted, not its white background.
        arr = np.array(np.asarray(img_obj))
        r, g, b, a = np.rollaxis(arr, axis=-1)
        # NOTE(review): r, g, b are uint8, so r - g wraps around when g > r;
        # confirm the mask behaves as intended or cast to int before subtracting.
        mask = ((r > threshold)
                & (g > threshold)
                & (b > threshold)
                & (np.abs(r - g) < dist)
                & (np.abs(r - b) < dist)
                & (np.abs(g - b) < dist)
                )
        arr[mask, 3] = 0
        mask_img = Image.fromarray(arr, mode='RGBA')
        # Paste the object into the background using the transparency mask.
        img_primary.paste(img_obj, img_spot, mask_img)
        # Class id comes from the object's folder name (path component 5 of
        # 'C:/Users/Bee/Desktop/taco_obj/<class>/<file>').
        parse_obj_path = Images_Objects[rand_obj_num].split('/')
        class_id = OBJ_CLASS_ID[parse_obj_path[5]]
        # BUG FIX: the original did `Annotations += Annotations + ...`, doubling
        # the string every iteration, and wrote xmin four times. YOLO expects one
        # " xmin,ymin,xmax,ymax,class_id" entry per object.
        Annotations += " " + str(xmin) + "," + str(ymin) + "," + str(xmax) + "," + str(ymax) + "," + str(class_id)
        # Monitor number of objects placed across all images (for statistics).
        total_num_Objects_placed += 1
    # Save the composed picture and append its training line to the txt file.
    Path_Save = Path_Save_Loc + Generic_Img_Name + str(i) + Img_Format
    img_primary.save(Path_Save)
    Text_File.writelines(Path_Save + Annotations + '\n')
Text_File.close()
print("\n\n", total_num_Objects_placed, " objects(s) were placed in ", Total_Pics, " background images\n") | amielf/RakerOne | Trash-Classifier/dataGen.py | dataGen.py | py | 9,753 | python | en | code | 0 | github-code | 13 |
33287870398 | import string
# data type:
numero = 3
print("El tipo es: " + str(type(numero)))
# size of the data
print(len("es un texto")) # 11
print(len("345345")) # 6
print(len([34, 564, 23])) # 3
#string indexing
nombre = "marianito"
print(nombre[0]) # m
print(nombre[1:3]) # ar
print(nombre[-1]) # o
#iterate over a string, option 1 (by index)
#nombre2 = "pepe";
#for i in range(0, len(nombre2)):
#print(nombre2[i]);
#iterate over a string, option 2 (by character)
#nombre3 = "pepe";
#for letra3 in nombre3:
#print(letra3);
#modify strings (these calls return new strings; results are discarded here)
"texto".capitalize() # Texto
"TEXTO".lower() # texto
"texto".upper() # TEXTO
"Texto".swapcase() # tEXTO
#fill in a string # Mi nombre es Pepe y tengo 90 años.
print ("Mi nombre es %s y tengo %i años." % ("Pepe", 90))
# Hola Pepe
texto = "Hola {0}"
print(texto.format("Pepe"))
# We declare placeholders without type or position; they are filled in order
# with the arguments at the end
"Esto es un texto {} y un numero {}".format("asd", 2)
# We get 2, the number of 't' characters in the range (excludes the last char)
"texto".count("t", 0, -1)
# We get 0, which is the index of the first 't'
"texto".find("t", 0, -1) #the -1 always means the last position
# Scan the string from the START; True if it begins with 't' / Boolean
"texto".startswith("t", 0, -1)
# Scan the string from the END (of the sliced range); True if it ends with 't'
"texto".endswith("t", 0, -1)
#CHECKS
"texto".isalnum() # True if the string content is alphanumeric
#"texto".Isalpha() # True if the string content is alphabetic
#"texto".Isdigit() # True if the string content is numeric
#REPLACE
"Esto es una prueba".replace("e", "A") # Esto As una pruAba
#JOIN
# Manzana - Pera - Plátano
simbolo = " - "
secuenciaTexto = ("Manzana", "Pera", "Plátano")
print(simbolo.join(secuenciaTexto))
#SPLIT
print("Esto es un texto con espacios".split(" "))
["Esto", "es", "un", "texto", "con", "espacios"]
| lorena112233/pythonDay1 | opStrings.py | opStrings.py | py | 1,880 | python | es | code | 0 | github-code | 13 |
def testing_while():
    """Print 1..29, then announce the break once the counter reaches 30."""
    counter = 1
    while counter < 100:
        print(counter)
        counter += 1
        if counter == 30:
            print("breaking ... while")
            break
def testing_color_for():
    """Print each colour; echo "Continue" after non-blue ones, stop at blue."""
    for shade in ("white", "red", "blue"):
        print(shade)
        if shade != "blue":
            print("Continue")
        else:
            print("for will break whe the its color blue")
            break
def testing_numbers_for():
    """Print the integers 1 through 10, one per line."""
    for value in range(1, 11):
        print(value)
def testing_string_for():
    """Print every character of the sample text on its own line."""
    message = "some text"
    for symbol in message:
        print(symbol)
def main():
    # Script entry point: currently runs only the string-iteration demo.
    testing_string_for()
if __name__ == '__main__':
main()
| lmokto/CoursePythonSummer | Fundamentals/iterations1.py | iterations1.py | py | 590 | python | en | code | 0 | github-code | 13 |
3658383992 | """Задача 2. 15 баллов
Тема Dict
Написать программу, которая подсчитывает количество символов в строке
и формирует dict в котором key = буква, value= количество их в слове:
Входная строка : 'Hillel school'
Результат : {'H': 1, 'i': 1, 'l': 3, 'e': 1, ' ': 1, 's': 1, 'c': 1, 'h': 1, 'o': 2}"""
# ask the user to enter text and create a list based on this
user_text: list = list(input("Print you sting"))
# create an empty dictionary
our_dictionary: dict = {}
# create an iterable variable based on input
for i in user_text:
# loop through the interpreted variables and the number of occurrences in them
our_dictionary[i] = int(user_text.count(i))
# output the final dictionary
print(our_dictionary)
| artiushenkoartem/hillel_hw_4_python | hillel_leson_4_task_2.py | hillel_leson_4_task_2.py | py | 864 | python | ru | code | 0 | github-code | 13 |
36322787583 |
from PIL import Image,ImageGrab
import time,pyautogui
# Reference colour: pure white RGBA. When the sampled pixel matches it, the
# screen region is considered frozen/stuck ('kadun') and clicks are issued.
initcolor=(255,255,255,255)
time.sleep(5)
# Infinite watch loop: sample a pixel every 2 seconds.
while 1:
    time.sleep(2)
    # Screen region to capture (left, top, right, bottom).
    box = (200,500,300,600)
    img = ImageGrab.grab(box)
    img = img.load()
    # Pixel at the centre of the 100x100 grab.
    color =img[50,50]
    if color==initcolor:
        print('kadun',time.time())
        # NOTE(review): hard-coded screen coordinates — presumably two UI
        # buttons for this specific setup; verify on the target machine.
        pyautogui.click(58, 703, 1, button='left')
        time.sleep(1)
        pyautogui.click(67,777,1, button='left')
    else:
        # Track the latest non-matching colour as the new baseline.
        initcolor = color
37488902771 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 10:53:35 2017
@author: tih
"""
# ----------------------------- Method 1 --------------------------------
import pandas as pd
# Define the start en enddate
Startdate = '2016-12-01'
Enddate = '2016-12-31'
# Define the daily date range
Dates = pd.date_range(Startdate, Enddate, freq = 'D')
for Date in Dates:
Year = Date.year
Month = Date.month
Day = Date.day
name_CHIRPS_tif = 'chirps-v2.0.%d.%02d.%02d.tif' %(Year, Month, Day)
print(name_CHIRPS_tif)
# ----------------------------- Show glob -------------------------------
import os
import glob
# directory to the CHIRPS data
Input_folder = r'D:\Python_Training\CHIRPS'
# Change working directory
os.chdir(Input_folder)
# Get all files in directory
CHIRPSAllFiles = glob.glob('*.tif')
# Get all files starting with chirps
CHIRPSAllFiles1 = glob.glob('chirps*')
# Get all files with v2.0 in middle
CHIRPSAllFiles2 = glob.glob('*v2.0*')
# Get all files ending with .gz
CHIRPSAllFiles3 = glob.glob('*.gz')
# ----------------------------- Method 2 --------------------------------
import os
import glob
# directory to the CHIRPS data
Input_folder = r'D:\Python_Training\CHIRPS'
# Change working directory
os.chdir(Input_folder)
Files = glob.glob('*.tif')
for File in Files:
print(File)
| Olsthoorn/IHE-python-course-2017 | exercises/Mar14/Tim_Hessels/2_Variable_Names.py | 2_Variable_Names.py | py | 1,418 | python | en | code | 5 | github-code | 13 |
22890598259 | import sys
import pandas as pd
# Input binding elements from shell
input_file = sys.argv[1]
input_type = input_file.split(".")[-1]
input_len = len(input_file.split("/"))
assert input_type == "tab", "input file for found binding elements should be in tab format"
assert input_len != 1, "full path to an input file is required"
input_base = input_file.split(".")[0]
# Coordinates of QAPA 3`UTRs
inpath = "/path/to/qapa_hg19_3utrs.bed"
qapa_annotation = pd.read_csv(inpath, sep='\t', header = None)
bedfile = open(input_base + ".bed", "w")
with open(input_file, "r") as f:
i = 0
for l in f:
# Sequence ID of 3`UTR isoform
l_split = l.split(",")
sequence_id = ','.join(l_split[:-6])
motif_id = ','.join(l_split[-6:])
# Strand
strand = sequence_id.split(",")[-1].split("_")[6]
if i % 1000 == 0:
print(i)
# 3`UTR information: chromosome, coordinates
selector = qapa_annotation.iloc[:,3] == sequence_id
utr_info = qapa_annotation[selector].values[0]
chrom = utr_info[0]
utr_start = int(utr_info[1])
utr_end = int(utr_info[2])
# RBP element information: start, width
element_start = int(motif_id.split(",")[1])
element_width = len(motif_id.split(",")[4])
# Write coordinates of a binding element. Keep in mind
# bed files are in 0-based format.
if strand == "+":
coordinate_A = utr_start + element_start
coordinate_B = utr_start + element_start + element_width
bedfile.write("{0} {1} {2} {3}".format(chrom, coordinate_A, coordinate_B, l))
else:
coordinate_A = utr_end - element_start - element_width
coordinate_B = utr_end - element_start
bedfile.write("{0} {1} {2} {3}".format(chrom, coordinate_A, coordinate_B, l))
# Update index
i += 1
bedfile.close()
| JellisLab/translatome-neurodevo | binding_elements/conservation_cleaning/prepare_elements_bed.py | prepare_elements_bed.py | py | 1,924 | python | en | code | 2 | github-code | 13 |
30687396295 | '''
Given an integer array nums, find the contiguous subarray (containing at least one number)
which has the largest sum and return its sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
'''
'''
Intuition:
At first, I think the sub problem should look like: maxSubArray(int A[], int i, int j),
which means the maxSubArray for A[i: j]. In this way, our goal is to figure out
what maxSubArray(A, 0, A.length - 1) is. However, if we define the format of the sub
problem in this way, it's hard to find the connection from the sub problem to the
original problem(at least for me). In other words, I can't find a way to divided
the original problem into the sub problems and use the solutions of the sub problems
to somehow create the solution of the original one.
So I change the format of the sub problem into something like:
maxSubArray(int A[], int i), which means the maxSubArray for A[0:i ]
which must has A[i] as the end element. Note that now the sub problem's format
is less flexible and less powerful than the previous one because
there's a limitation that A[i] should be contained in that sequence and
we have to keep track of each solution of the sub problem to update the
global optimal value. However, now the connect between the sub problem &
the original one becomes clearer:
maxSubArray(A, i) = maxSubArray(A, i - 1) > 0 ? maxSubArray(A, i - 1) : 0 + A[i];
'''
class Solution(object):
    def maxSubArray(self, nums):
        """
        Return the largest sum of any contiguous subarray of `nums`.

        Kadane's algorithm: scan once, tracking the best sum of a subarray
        ending at the current element; the answer is the best of those.
        Improvement over the original: O(1) extra space instead of a dict
        holding every intermediate value; behavior is unchanged (an empty
        input still yields 0).

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            # Preserve the original's convention for an empty input.
            return 0
        best = current = nums[0]
        for value in nums[1:]:
            # Either extend the previous best-ending subarray or restart here.
            current = max(value, current + value)
            best = max(best, current)
        return best
| kevinsu628/study-note | leetcode-notes/easy/array/53_maximum_subarray.py | 53_maximum_subarray.py | py | 2,079 | python | en | code | 0 | github-code | 13 |
6627740499 | import collections
from abc import ABC
from typing import Tuple, List, Dict
import numpy as np
class StepInformationProvider(ABC):
    """
    This class calculates certain values which are used frequently in reward generators.
    A single instance of this class can be shared between a set of (sub)generators
    to prevent multiple calculations of costly intermediate results.
    :param maze: (np.ndarray) two dimensional array defining the maze where 0 indicates passable terrain and 1 indicates an obstacle
    :param goal: (list) A point coordinate in form [x, y] ([column, row]) defining the goal.
    :param goal_range: (int) Range around the goal position which should be treated as 'goal reached'.
    :param n_particles: (int) Total number of robots.
    :param action_map: (dict) Map containing allowed actions.
    :param relative: (bool) If True, per-episode start values (total/max start cost, episode length estimate) are recomputed from the actual start locations on every reset.
    """
    def __init__(
        self,
        maze: np.ndarray,
        goal: Tuple[int, int],
        goal_range: int,
        n_particles: int,
        action_map: Dict[int, Tuple[int, int]],
        relative: bool = False,
    ):
        self.goal_range = goal_range
        self.n_particles = n_particles
        self.initial_robot_locations = None
        self.action_map = action_map
        self.maze = maze
        self.goal = goal
        self.relative = relative
        self.last_locations = None
        self.last_action = None
        # Lazily-computed caches; `None` means "not computed yet".
        self._cost = None
        self._max_start_cost = None
        self._max_cost = None
        self._particle_cost = None
        self._total_start_cost = None
        self._total_cost = None
        self._unique_particles = None
        self._done = False
        self._step_reward = 0.0
        self._mean_cost = None
        self._ep_len_estimate = None
        self._convex_corners = None
    def reset(self, locations):
        """Start a new episode from the given particle locations."""
        self.initial_robot_locations = np.copy(locations)
        self.last_locations = locations
        self._done = False
        if self.relative:
            # Episode-dependent values must be recomputed for the new start.
            self._ep_len_estimate = None
            self._total_start_cost = None
            self._max_start_cost = None
    def step(self, action, locations):
        """Record the latest action/locations and invalidate per-step caches."""
        self._step_reward = 0.0
        self.last_locations = locations
        self.last_action = action
        self._step_reset()
    def stepped_generator(self, done, reward):
        """Accumulate a (sub)generator's reward; latch `done` once any generator signals it."""
        self._step_reward += reward
        if done:
            self._done = True
    def _step_reset(self):
        # Invalidate the caches that depend on the current particle locations.
        self._max_cost = None
        self._particle_cost = None
        self._total_cost = None
        self._unique_particles = None
    def _calculate_cost_map(self, maze, goal) -> np.ndarray:
        """
        Calculates the cost map based on a given goal position via bfs.
        Each reachable free cell gets its shortest step distance to `goal`
        under the moves allowed by `action_map`.
        """
        queue = collections.deque([goal]) # [x, y] pairs in point notation order!
        seen = np.zeros(maze.shape, dtype=int)
        seen[goal[1], goal[0]] = 1
        cost = np.zeros(maze.shape, dtype=int)
        height, width = maze.shape
        while queue:
            x, y = queue.popleft()
            for action in self.action_map.values():
                # NOTE(review): actions appear to be stored as [dy, dx] — x is
                # offset by action[1] and y by action[0]; confirm the convention.
                x2, y2 = x + action[1], y + action[0]
                if (
                    0 <= x2 < width
                    and 0 <= y2 < height
                    and maze[y2, x2] != 1
                    and seen[y2, x2] != 1
                ):
                    queue.append([x2, y2])
                    seen[y2, x2] = 1
                    cost[y2, x2] = cost[y, x] + 1
        return cost
    def _count_convex_corners(self) -> Tuple[int, int, int, int]:
        """
        Calculates the number of convex corners
        :return: (Tuple[int, int, int, int]) Tuple containing the number of convex corners for nw, ne, sw and se convex corners.
        """
        nw = ne = sw = se = 0
        # NOTE(review): the ix±1 / iy±1 indexing assumes the maze has a wall
        # border (no free cells on the outermost rows/columns) — confirm.
        for ix, iy in np.ndindex(self.maze.shape):
            if self.maze[ix, iy] == 0:
                if self.maze[ix + 1, iy] == 1 and self.maze[ix, iy + 1] == 1:
                    sw += 1
                elif self.maze[ix + 1, iy] == 1 and self.maze[ix, iy - 1] == 1:
                    se += 1
                elif self.maze[ix - 1, iy] == 1 and self.maze[ix, iy + 1] == 1:
                    nw += 1
                elif self.maze[ix - 1, iy] == 1 and self.maze[ix, iy - 1] == 1:
                    ne += 1
        return (nw, ne, sw, se)
    def _count_freespace(self):
        """Count free cells whose local window is also entirely free."""
        free = 0
        idxes = np.argwhere(self.maze == 0)
        for iy, ix in idxes:
            # NOTE(review): this slice spans a 2x2 window (iy-1:iy+1 excludes
            # iy+1); a full 3x3 neighborhood would need iy-1:iy+2 — confirm intent.
            if (self.maze[iy - 1 : iy + 1, ix - 1 : ix + 1] == 0).all():
                free += 1
        return free
    def set_particle_count(self, n_particles):
        """Change the particle count and invalidate the dependent start cost."""
        self.n_particles = n_particles
        self._total_start_cost = None
    @property
    def convex_corners(self) -> Tuple[int, int, int, int]:
        """(nw, ne, sw, se) convex-corner counts of the maze (cached)."""
        if self._convex_corners is None:
            self._convex_corners = self._count_convex_corners()
        return self._convex_corners
    @property
    def costmap(self) -> np.ndarray:
        """BFS distance-to-goal for every cell of the maze (cached)."""
        if self._cost is None:
            self._cost = self._calculate_cost_map(self.maze, self.goal)
        return self._cost
    @property
    def max_start_cost(self) -> float:
        """Worst cost at episode start (actual particles if relative, else the global maximum)."""
        if self._max_start_cost is None:
            if self.relative:
                self._max_start_cost = np.max(self.particle_cost)
            else:
                self._max_start_cost = np.max(self.costmap)
        return self._max_start_cost
    @property
    def particle_cost(self) -> np.ndarray:
        """Cost-to-goal of each particle at its current location (cached per step)."""
        if self._particle_cost is None:
            # NOTE(review): flat indexing uses locations[:, 1] + locations[:, 0] * width,
            # i.e. locations appear to be stored as [row, col] — confirm.
            self._particle_cost = self.costmap.ravel()[
                (
                    self.last_locations[:, 1]
                    + self.last_locations[:, 0] * self.costmap.shape[1]
                )
            ]
        return self._particle_cost
    @property
    def episode_length_estimate(self) -> int:
        """Heuristic upper bound on episode length, derived from maze extent, mean cost and corner count (cached)."""
        if self._ep_len_estimate is None:
            points = np.argwhere(self.maze == 0)
            extremes = np.argmax(points, axis=0)
            if np.sum(points[extremes[0]]) > np.sum(points[extremes[1]]):
                extreme = points[extremes[0]]
            else:
                extreme = points[extremes[1]]
            # Cost map from an extreme free cell approximates the maze diameter.
            costmap = self._calculate_cost_map(self.maze, (extreme[1], extreme[0]))
            self._ep_len_estimate = int(
                0.75
                * np.max(costmap)
                * np.log(self.mean_cost * np.min(self.convex_corners))
            )
        return self._ep_len_estimate
    @property
    def mean_cost(self) -> int:
        """Mean of the non-zero cost-map entries (cached)."""
        if self._mean_cost is None:
            self._mean_cost = np.ma.masked_equal(self.costmap, 0).mean()
        return self._mean_cost
    @property
    def total_start_cost(self) -> float:
        """Summed particle cost at episode start (actual sum if relative, else mean_cost * n_particles)."""
        if self._total_start_cost is None:
            if self.relative:
                self._total_start_cost = np.sum(self.particle_cost)
            else:
                self._total_start_cost = self.mean_cost * self.n_particles
        return self._total_start_cost
    @property
    def total_cost(self) -> float:
        """Sum of all particles' current cost-to-goal (cached per step)."""
        if self._total_cost is None:
            self._total_cost = np.sum(self.particle_cost)
        return self._total_cost
    @property
    def max_cost(self) -> float:
        """Largest current cost-to-goal among the particles (cached per step)."""
        if self._max_cost is None:
            self._max_cost = np.max(self.particle_cost)
        return self._max_cost
    @property
    def unique_particles(self) -> np.ndarray:
        """Deduplicated particle locations for the current step (cached per step)."""
        if self._unique_particles is None:
            self._unique_particles = np.unique(self.last_locations, axis=0)
        return self._unique_particles
    @property
    def is_relative(self):
        # Whether per-episode start values are recomputed each reset.
        return self.relative
    @property
    def is_done(self):
        # True once any generator reported `done` this episode.
        return self._done
    @property
    def step_reward(self):
        # Reward accumulated by all generators during the current step.
        return self._step_reward
class RewardGenerator(ABC):
    """
    Base Class for reward generators for the maze environments.
    Holds a shared StepInformationProvider and an optional tree of
    sub-generators whose per-step rewards are accumulated into the
    provider's step total.
    """
    def __init__(
        self, information_provider: StepInformationProvider = None, scale: float = 1.0
    ):
        self.calculator = None
        self.generators = [] # type: List[RewardGenerator]
        self.scale = scale
        if information_provider:
            self.set_information_provider(information_provider)
    def set_information_provider(self, calculator: StepInformationProvider):
        """Share a single provider instance with this generator and all children."""
        self.calculator = calculator
        for generator in self.generators:
            generator.set_information_provider(calculator)
    def set_particle_count(self, n_particles):
        """Forward a particle-count change to the shared provider."""
        self.calculator.set_particle_count(n_particles=n_particles)
    def add_sub_generator(self, generator):
        """Attach a child generator, wiring it to the shared provider."""
        generator.set_information_provider(self.calculator)
        self.generators.append(generator)
    def reset(self, locations):
        """Reset provider, self and all children for a new episode."""
        self.calculator.reset(locations)
        self._reset(locations)
        self._reset_generators(locations)
    def _reset_generators(self, locations):
        for generator in self.generators:
            generator._reset(locations)
    def _reset(self, locations):
        # Hook for subclasses; no per-episode state in the base class.
        pass
    def step(self, action, locations) -> Tuple[bool, float]:
        """Advance one step: collect this generator's and all children's rewards,
        then the end-of-episode bonuses if any generator signalled done.
        Returns (done, accumulated step reward)."""
        self.calculator.step(action, locations)
        self.calculator.stepped_generator(*self._step(action, locations))
        self._step_generators(action, locations)
        if self.calculator.is_done:
            end_reward = self._on_done()
            self.calculator.stepped_generator(False, end_reward)
            self._on_done_generators()
        return self.calculator.is_done, self.calculator.step_reward
    def _step(self, action, locations) -> Tuple[bool, float]:
        # Hook for subclasses: return (done, reward) for this step.
        return False, 0.0
    def _on_done(self) -> float:
        # Hook for subclasses: extra reward granted when the episode ends.
        return 0.0
    def _step_generators(self, action, locations) -> None:
        # Accumulate every child's (done, reward) into the shared provider.
        for generator in self.generators:
            self.calculator.stepped_generator(*generator._step(action, locations))
    def _on_done_generators(self) -> None:
        # Accumulate every child's end-of-episode bonus into the provider.
        for generator in self.generators:
            end_reward = generator._on_done()
            self.calculator.stepped_generator(False, end_reward)
| NeoExtended/gym-gathering | gym_gathering/rewards/base_reward_generator.py | base_reward_generator.py | py | 9,960 | python | en | code | 1 | github-code | 13 |
9064571234 | # Assignment 4. Sage Hourihan
import hashlib, os
# Creating a function to collect input on the file name
def filename():
    """Prompt the user for the name of the file to hash and return it."""
    return input("Type file name: ")
def get_hash_of_binary_file_contents(file_path, algorithm='MD5'):
    """Hash the raw bytes of a file.

    :param file_path: Path of the file whose contents will be hashed.
    :type file_path: str.
    :param algorithm: Name of the hashing algorithm (default 'MD5').
    :type algorithm: str.
    :returns: str -- hex digest of the file contents.
    """
    # Read the whole file and delegate the hashing to get_hash_of_string.
    return get_hash_of_string(read_binary_file(file_path), algorithm)
def get_hash_of_string(string, algorithm='MD5'):
    """Return the hex digest of *string* (bytes) using *algorithm*.

    Generalized from the original: any algorithm accepted by hashlib.new()
    now works (e.g. 'SHA256'), not just 'MD5'. An unknown algorithm still
    yields '' so the original fallback behavior is preserved.

    :param string: Bytes to hash.
    :type string: bytes.
    :param algorithm: Case-insensitive hashlib algorithm name (default 'MD5').
    :type algorithm: str.
    :returns: str -- hex digest, or '' if the algorithm is not available.
    """
    try:
        hash_object = hashlib.new(algorithm.lower())
    except (ValueError, TypeError):
        # Unknown/unsupported algorithm name: keep the original '' fallback.
        return ''
    hash_object.update(string)
    return hash_object.hexdigest()
def read_binary_file(file_path):
    """Read a file in binary mode and return its raw bytes.

    Fixed a resource leak: the original opened the file without ever closing
    it; a context manager now guarantees the handle is released. Any I/O
    error still propagates to the caller, matching the original's re-raise.

    :param file_path: Path of the file to read.
    :type file_path: str.
    :returns: bytes -- the complete file contents.
    """
    with open(file_path, 'rb') as file_object:
        return file_object.read()
def print_hash():
    """Ask the user whether to print the hash of the file chosen earlier."""
    printHash = input("Would you like to print the hash?: ")
    if printHash == "Yes" or printHash == "yes":
        # NOTE(review): depends on the module-level global `x` assigned in the
        # script body below — confirm call order.
        print(get_hash_of_binary_file_contents(x))
    else:
        print("Goodbye")
# Calling the functions
x = filename()
# NOTE(review): the next two calls discard their results; the file is hashed
# again inside print_hash(), so these lines appear redundant.
get_hash_of_binary_file_contents(x)
#get_hash_of_string(b"insert string here")
read_binary_file(x)
print_hash()
| SageHourihan/File-hasher | hash.py | hash.py | py | 1,454 | python | en | code | 0 | github-code | 13 |
35594106928 | """
1.11. Naming a Slice
slice(start, stop, step)创建了一个分割器
slice.indices(len)限定stop的长度
"""
li = range(20)
# res0和res1等价
res0 = li[slice(1, 15)]
res1 = li[1 : 15]
#print(res)
#print(li[SLICE])
sl = slice(1, 10, 2)
#print(sl.start, sl.stop, sl.step)
#
s = "HelloWorld"
sl.indices(len(s))
"""
1.12. Determining the Most Frequently Occurring Items in a Sequence
# Collections.Counter is a dictionary that maps the items to the number of occurrences.
"""
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the', 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
from collections import Counter
word_counts = Counter(words)
top_three = word_counts.most_common(3)
#print(top_three)
#
morewords = ['why','are','you','not','looking','in','my','eyes']
#for word in morewords:
# word_counts[word] += 1
# or
#word_counts.update(morewords)
#
a = Counter(words)
b = Counter(morewords)
c = a + b
d = a - b
"""
1.13. Sorting a List of Dictionaries by a Common Key
You have a list of dictionaries and you would like to sort the entries according to one or more of the dictionary values.
# 根据一个或多个特定的关键字进行排序
# 使用operator.itemgetter()
"""
rows = [
{'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},
{'fname': 'David', 'lname': 'Beazley', 'uid': 1002},
{'fname': 'John', 'lname': 'Cleese', 'uid': 1001},
{'fname': 'Big', 'lname': 'Jones', 'uid': 1004}
]
from operator import itemgetter
rows_by_fname = sorted(rows, key=itemgetter('fname'))
rows_by_uid = sorted(rows, key=itemgetter('uid'))
#print(rows_by_fname)
#print(rows_by_uid)
"""
1.14. Sorting Objects Without Native Comparison Support
You want to sort objects of the same class, but they don’t natively support comparison operations.
"""
class User:
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return 'User({})'.format(self.user_id)
users = [User(23), User(3), User(99)]
from operator import attrgetter
sorted(users, key=attrgetter('user_id'))
# 与itemgetter相似,也支持多关键字比较。
# by_name = sorted(users, key=attrgetter('last_name', 'first_name'))
"""
1.15. Grouping Records Together Based on a Field
通过某个字段将记录分组
"""
rows = [
{'address': '5412 N CLARK', 'date': '07/01/2012'},
{'address': '5148 N CLARK', 'date': '07/04/2012'},
{'address': '5800 E 58TH', 'date': '07/02/2012'},
{'address': '2122 N CLARK', 'date': '07/03/2012'},
{'address': '5645 N RAVENSWOOD', 'date': '07/02/2012'},
{'address': '1060 W ADDISON', 'date': '07/02/2012'},
{'address': '4801 N BROADWAY', 'date': '07/01/2012'},
{'address': '1039 W GRANVILLE', 'date': '07/04/2012'},
]
from operator import itemgetter
from itertools import groupby
rows.sort(key = itemgetter('date'))
#for date, items in groupby(rows, key = itemgetter('date')):
# print(date)
# for i in items:
# print(' ', i)
"""
1.16. Filtering Sequence Elements
"""
mylist = [1, 4, -5, 10, -7, 2, 3, -1]
# list comprehension
res = [i for i in mylist if i > 0]
# generator expression
pos = (i for i in mylist if i > 0)
#for x in pos:
# print(x)
# filter()
values = ['1', '2', '-3', '-', '4', 'N/A', '5']
def is_int(val):
try:
x = int(val)
return True
except ValueError:
return False
ivals = list(filter(is_int, values))
#print(ivals)
# itertools.compress()
addresses = [
'5412 N CLARK',
'5148 N CLARK',
'5800 E 58TH',
'2122 N CLARK'
'5645 N RAVENSWOOD',
'1060 W ADDISON',
'4801 N BROADWAY',
'1039 W GRANVILLE',
]
counts = [ 0, 3, 10, 4, 1, 7, 6, 1]
from itertools import compress
more5 = [n > 5 for n in counts]
#print(list(compress(addresses, more5)))
"""
1.17. Extracting a Subset of a Dictionary
dictionary comprehension
"""
prices = {'ACME': 45.23, 'AAPL': 612.78, 'IBM': 205.55, 'HPQ': 37.20, 'FB': 10.75}
p1 = {key : value for key, value in prices.items() if value < 200}
tech_names = { 'AAPL', 'IBM', 'HPQ', 'MSFT' }
p2 = { key:value for key,value in prices.items() if key in tech_names }
"""
1.18. Mapping Names to Sequence Elements
# collections.namedtuple() is actually a factory method that returns a subclass of the standard Python tuple type.
# reflection
# One possible use of a namedtuple is as a replacement for a dictionary, which requires
more space to store. Thus, if you are building large data structures involving dictionaries,
use of a namedtuple will be more efficient. However, be aware that unlike a dictionary,
_replace() method makes an entirely new namedtuple with specified values replaced.
a namedtuple is immutable.
"""
from collections import namedtuple
Subscriber = namedtuple('Subscriber', ['addr', 'joined'])
sub = Subscriber('jonesy@example.com', '2012-10-19')
#print(sub.addr, sub.joined)
"""
1.19. Transforming and Reducing Data at the Same Time
You need to execute a reduction function (e.g., sum(), min(), max()), but first need to
transform or filter the data.
"""
nums = [1, 2, 3, 4, 5]
s = sum((x * x for x in nums)) # Pass generator-expr as argument
s = sum([x * x for x in nums]) # generate a temporary list as argument
s = sum(x * x for x in nums) # More elegant syntax
# Certain reduction functions such as min() and max() accept a key argument that might
# be useful in situations where you might be inclined to use a generator.
portfolio = [
{'name':'GOOG', 'shares': 50},
{'name':'YHOO', 'shares': 75},
{'name':'AOL', 'shares': 20},
{'name':'SCOX', 'shares': 65}
]
# Original: Returns 20
min_shares = min(s['shares'] for s in portfolio)
# Alternative: Returns {'name': 'AOL', 'shares': 20}
min_shares = min(portfolio, key=lambda s: s['shares'])
"""
1.20. Combining Multiple Mappings into a Single Mapping
# collections.ChainMap takes multiple mappings and makes them logically appear as one. However,
the mappings are not literally merged together.
# the duplicate key would always refer to the first dict which contains the key
"""
from collections import ChainMap
a = {'x': 1, 'z': 3 }
b = {'y': 2, 'z': 4 }
c = ChainMap(a, b)
#print(c['x'], c['y'], c['z'])
# ChainMap.new_child() add a new mapping
# ChainMap.parents discard last mapping
values = ChainMap()
values['x'] = 1
values = values.new_child()
values['x'] = 2
values = values.new_child()
values['x'] = 3
#print(values)
values = values.parents
#print(values)
# different from dict.update()
# if any of the original dictionaries mutate, the changes don’t get reflected
# in the merged dictionary. A ChainMap uses the original dictionaries, so it
# doesn’t have this behavior
| hanhansoul/PythonCookbook | sec_chapter01/chpt11.py | chpt11.py | py | 6,773 | python | en | code | 0 | github-code | 13 |
270861704 | from glob import glob
def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
import keras.backend as K
print('----- activations -----')
activations = []
inp = model.input
model_multi_inputs_cond = True
if not isinstance(inp, list):
# only one input! let's wrap it in a list.
inp = [inp]
model_multi_inputs_cond = False
outputs = [layer.output for layer in model.layers if
layer.name == layer_name or layer_name is None] # all layer outputs
funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs] # evaluation functions
if model_multi_inputs_cond:
list_inputs = []
list_inputs.extend(model_inputs)
list_inputs.append(1.)
else:
list_inputs = [model_inputs, 1.]
# Learning phase. 1 = Test mode (no dropout or batch normalization)
# layer_outputs = [func([model_inputs, 1.])[0] for func in funcs]
layer_outputs = [func(list_inputs)[0] for func in funcs]
for layer_activations in layer_outputs:
activations.append(layer_activations)
if print_shape_only:
print(layer_activations.shape)
else:
print(layer_activations)
return activations
if __name__ == '__main__':
checkpoints = glob('checkpoints/*.h5')
# pip3 install natsort
from natsort import natsorted
from keras.models import load_model
if len(checkpoints) > 0:
checkpoints = natsorted(checkpoints)
assert len(checkpoints) != 0, 'No checkpoints found.'
checkpoint_file = checkpoints[-1]
print('Loading [{}]'.format(checkpoint_file))
model = load_model(checkpoint_file)
model.compile(optimizer='adam',
loss='mse ',
metrics=['accuracy'])
print(model.summary())
get_activations(model, x_test[0:1], print_shape_only=True) # with just one sample.
get_activations(model, x_test[0:200], print_shape_only=True) # with 200 samples. | akash13singh/lstm_anomaly_thesis | print_activations.py | print_activations.py | py | 2,053 | python | en | code | 221 | github-code | 13 |
7313219835 | MOD = 998244353
n = int(input())
cards = [list(map(int, input().split())) for _ in range(n)]
dp = [[0, 0] for _ in range(n)]
dp[0] = [1, 1]
for i in range(1, n):
for pre in range(2):
for nex in range(2):
if cards[i - 1][pre] != cards[i][nex]: # 前のカードのオモテウラが違うなら
dp[i][nex] += dp[i - 1][pre] # 次のdpに配っていく
dp[i][0] %= MOD
dp[i][1] %= MOD
print((dp[n - 1][1] + dp[n - 1][0]) % MOD) # オモテウラの場合の数を足し合わせる
| sugimotoyuuki/kyopro | contest/ABC/291/d.py | d.py | py | 536 | python | en | code | 0 | github-code | 13 |
34003249107 | from products.models import mobiles
# print(len(mobiles))
# print([mob.get("name") for mob in mobiles])
# print([mob.get("brand") for mob in mobiles])
# mobiles.sort(key=lambda m:m.get("price"),reverse=True)
# print(mobiles)
# costly_mobiles=max(mobiles,key=lambda m:m.get("price"))
# print(costly_mobiles)
# cheap=min(mobiles,key=lambda m:m.get("price"))
# print(cheap)
lst=[
[10,11],
[20,30],
[40,50]
]
a=max([n for sublist in lst for n in sublist])
print(a) | mhdsulaimzed/Pycharm-Practice | pythonDjango/products/views.py | views.py | py | 473 | python | en | code | 0 | github-code | 13 |
1955094724 | """Module with grid utils"""
import numpy as np
def read_grid(file_path):
"""Read grid from file"""
return np.loadtxt(file_path, delimiter=",", dtype=int)
def get_grid_size(grid):
"""Get grid size"""
return len(grid)
def get_grid_min_path_sum(grid):
"""Get grid minimum path sum"""
size = get_grid_size(grid)
min_path_sum = np.inf
min_matrix = np.zeros((size, size), dtype=int)
# Dynamic Programming
# We can move in four directions
for i in range(size):
min_matrix[i][0] = grid[i][0]
for j in range(1, size):
for i in range(size):
min_matrix[i][j] = min_matrix[i][j - 1] + grid[i][j]
for i in range(1, size):
min_matrix[i][j] = min(
min_matrix[i][j], min_matrix[i - 1][j] + grid[i][j]
)
for i in range(size - 2, -1, -1):
min_matrix[i][j] = min(
min_matrix[i][j], min_matrix[i + 1][j] + grid[i][j]
)
# find minimum path sum
for i in range(size):
min_path_sum = min(min_path_sum, min_matrix[i][size - 1])
return min_path_sum
| KubiakJakub01/ProjectEuler | src/utils/grid/grid.py | grid.py | py | 1,126 | python | en | code | 0 | github-code | 13 |
6820648365 | """
Title: Driver
Description: For running the NEML drivers
Author: Janzen Choi
"""
# Libraries
from neml import drivers
from moga_neml.helper.experiment import NEML_FIELD_CONVERSION
from moga_neml.helper.general import BlockPrint
from moga_neml.optimise.curve import Curve
from moga_neml.models.__model__ import __Model__
# General Driver Constants
TIME_HOLD = 11500.0 * 3600.0
NUM_STEPS = 500
REL_TOL = 1e-6
ABS_TOL = 1e-10
MAX_STRAIN = 1.0
VERBOSE = False
NUM_STEPS_UP = 50
DAMAGE_TOL = 0.95 # 0.95
STRESS_RATE = 0.0001
CYCLIC_RATIO = -1
# Driver class
class Driver:
def __init__(self, curve:Curve, model:__Model__) -> None:
"""
Initialises the driver class
Parameters:
* `curve`: The curve the driver is being used on
* `model`: The model to be run
"""
self.curve = curve
self.exp_data = curve.get_exp_data()
self.type = self.exp_data["type"]
self.model = model
self.conv_dict = NEML_FIELD_CONVERSION[self.type]
def run(self) -> dict:
"""
Runs the driver based on the experimental curve type;
returns the results
"""
# Get the results
try:
with BlockPrint():
results = self.run_selected()
except:
return
# Convert results and return
converted_results = {}
for field in list(self.conv_dict.keys()):
if field in results.keys():
converted_results[self.conv_dict[field]] = results[field]
return converted_results
def run_selected(self) -> dict:
"""
Runs the driver depending on the data type;
returns the results
"""
# Runs custom driver if it is defined
custom_driver, custom_driver_kwargs = self.curve.get_custom_driver()
if custom_driver != None:
custom_driver = getattr(drivers, custom_driver)
results = custom_driver(self.model, **custom_driver_kwargs)
return results
# Runs driver based on data type
if self.type == "creep":
return self.run_creep()
elif self.type == "tensile":
return self.run_tensile()
elif self.type == "cyclic":
return self.run_cyclic()
raise ValueError(f"The data type '{self.type}' is not supported; use the 'custom_driver' function to define a custom driver")
def run_creep(self) -> dict:
"""
Runs the creep driver;
returns the results
"""
results = drivers.creep(self.model, self.exp_data["stress"], STRESS_RATE, TIME_HOLD,
T=self.exp_data["temperature"], verbose=VERBOSE, check_dmg=True,
dtol=DAMAGE_TOL, nsteps_up=NUM_STEPS_UP, nsteps=NUM_STEPS, logspace=False)
return results
def run_tensile(self) -> dict:
"""
Runs the tensile driver;
returns the results
"""
results = drivers.uniaxial_test(self.model, erate=self.exp_data["strain_rate"], T=self.exp_data["temperature"],
emax=MAX_STRAIN, check_dmg=True, dtol=DAMAGE_TOL, nsteps=NUM_STEPS,
verbose=VERBOSE, rtol=REL_TOL, atol=ABS_TOL)
return results
def run_cyclic(self) -> dict:
"""
Runs the cyclic driver;
returns the results
"""
num_cycles = int(self.exp_data["num_cycles"])
results = drivers.strain_cyclic(self.model, T=self.exp_data["temperature"], emax=self.exp_data["max_strain"],
erate=self.exp_data["strain_rate"], verbose=VERBOSE, R=CYCLIC_RATIO,
ncycles=num_cycles, nsteps=NUM_STEPS)
return results
| ACME-MG/moga_neml | moga_neml/optimise/driver.py | driver.py | py | 4,042 | python | en | code | 0 | github-code | 13 |
71984728017 | #SWEA 문제 해결 기본 5176번 이진탐색
'''
1-N까지의 자연수를 이진탐색 트리에 저장
이진탐색 트리의 특성을 이용해서 풀어보기
2**n 으로 커지는 개수 특성
'''
import sys
sys.stdin = open("input.txt", "r")
def inorder(node):
global order
if node != 0:
# visit
inorder(tree[node][0])
order.append(node)
inorder(tree[node][1])
T = int(input())
for test_case in range(1,T+1):
N = int(input()) # 정점의 개수
tree = [[0,0,0] for i in range(N+1)] # [왼쪽 자식 노드, 오른쪽 자식 노드, 부모 노드]
name = 1
order = [0] # 1-N까지 들어가는순서 & 인덱스 사용할거라 0을 넣어줌
# 완전 이진 트리 제작
for i in range(1, N+1):
if tree[i][0] == 0: # i의 왼쪽 자식 노드가 0이라면
name += 1 # name 1 더함
if name > N: # name이 목표치를 넘으면 for문 끝냄
break
tree[i][0] = name # tree에 왼쪽에 더함
tree[name][2] = i # 부모노드는 i
if tree[i][1] == 0: # i의 오른쪽 자식 노드가 0이라면
name += 1 # name 1 더함
if name > N: # 목표치를 넘으면 for문 끝냄
break
tree[i][1] = name # tree의 오른쪽에 이름 추가
tree[name][2] = i # 부모노드는 i
inorder(1)
# print(tree)
# print(order) # [0, 8, 4, 2, 5, 1, 6, 3, 7]
# root는 order에서 1의 인덱스
# order에서 N//2 = 4 의 인덱스가 원래 노드
print(f'#{test_case}', order.index(1), order.index(N//2)) | euneuneunseok/TIL | SWEA/SWEA기본_5176_이진탐색_Tree.py | SWEA기본_5176_이진탐색_Tree.py | py | 1,796 | python | ko | code | 0 | github-code | 13 |
73857262096 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 11:52:05 2013
@author: Radek
"""
from Tkinter import *
hlavni=Tk()
hodnota=IntVar()
hodnota.set(100)
def Nastav(value):
l["text"]=str(value)
w = Scale(from_=0, to=1000, variable=hodnota,command=Nastav, label="Stupnice")
w.pack()
l= Label(hlavni,text="0")
l.pack()
mainloop()
"""
Úkol:
1) Pomocí tří posuvníků s rozsahem (0-255) a tlačítka
nastavte barvu komponenty Frame.
2) Přidejte tlačítko, které nastaví na posuvnících
náhodnou barvu.
""" | AskoldH/PRG | Stejskal's notes/79 Tkinter - Scale.py | 79 Tkinter - Scale.py | py | 567 | python | cs | code | 0 | github-code | 13 |
20415362392 | import time
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
if __name__ == '__main__':
companyInformations = pd.DataFrame(columns=["Firma", "Na giełdzie od", "Liczba wyemitowanych akcji",
"Wartość rynkowa (mln zł)",
"Nazwa na GPW", "Skrót: 11B", "Nazwa pełna", "Adres siedziby",
"Województwo",
"Prezes Zarządu", "Numer telefonu", "Numer faksu",
"Strona www", "E-mail",
"Przynależność do indeksu", "Kurs ostatni", "Zmiana",
"Oferta kupna",
"Oferta sprzedaży", "Min. cena akcji", "Max. cena akcji",
"Wol. obrotu (szt.)",
"Wart. obrotu", "Data debiutu i kurs debiutu",
"Max historyczny (52 tyg.)",
"Min historyczny (52 tyg.)", "ISIN", "Rynek/Segment", "Sektor",
"Liczba wyemitowanych akcji", "Wartość rynkowa",
"Wartość księgowa", "C/WK", "C/Z",
"Stopa dywidendy (%)", "Akcjonariusze", "Dodatek"])
companiesData = pd.read_csv('dane.csv')
options = webdriver.ChromeOptions()
# options.headless = True
options.add_argument("disable-blink-features=AutomationControlled")
options.add_experimental_option("detach", True)
# options.add_argument ("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36")
service = Service('C:\Program Files (x86)\chromedriver.exe')
driver = webdriver.Chrome(service=service, options=options)
driver.implicitly_wait(0.5)
driver.get("https://www.gpw.pl/spolka?isin=PLAB00000019#")
# searchFied = driver.find_element(By.NAME,'searchText')
# searchFied.send_keys('AB SPÓŁKA AKCYJNA')
# searchFied.send_keys(Keys.RETURN)
company = 'AB SPÓŁKA AKCYJNA'
# time.sleep(10)
# company = driver.find_element(By.CLASS_NAME,'name')
# company.click()
allData = []
allData.append(company)
for i in range(1,7):
if i == 4 or i == 5:
continue
quotes = driver.find_element(By.XPATH,f'//*[@id="stateTabs"]/li[{i}]/a')
quotes.click()
# time.sleep(5)
driver.get(driver.current_url)
soup = BeautifulSoup(driver.page_source,'lxml')
table = soup.find('table','footable table')
if i == 6:
tds = table.find_all('td',class_='left')
shareholderList = ''
for shareholder in tds:
shareholderList += shareholder.text
shareholderList += ', '
allData.append(shareholderList)
else:
tds = table.find_all('td')
for element in tds:
if element != '\n':
element = element.text.replace('\t','').replace('\n','').replace(u'\xa0', u'').replace(' ','').strip()
allData.append(element)
allData = [allData]
companyInformations = pd.DataFrame(allData,columns=["Firma", "Na giełdzie od", "Liczba wyemitowanych akcji", "Wartość rynkowa (mln zł)",
"Nazwa na GPW", "Skrót: 11B", "Nazwa pełna", "Adres siedziby", "Województwo",
"Prezes Zarządu", "Numer telefonu", "Numer faksu", "Strona www", "E-mail",
"Przynależność do indeksu", "Kurs ostatni", "Zmiana", "Oferta kupna",
"Oferta sprzedaży", "Min. cena akcji", "Max. cena akcji", "Wol. obrotu (szt.)",
"Wart. obrotu", "Data debiutu i kurs debiutu", "Archiwalne (wg. cen zamknięcia sesji)","Max historyczny (52 tyg.)",
"Min historyczny (52 tyg.)", "ISIN", "Rynek/Segment", "Sektor",
"Liczba wyemitowanych akcji", "Wartość rynkowa", "Wartość księgowa", "C/WK", "C/Z",
"Stopa dywidendy (%)", "Akcjonariusze"],index=["Firma"])
# companyInformations.reset_index(inplace=True, drop=True)
# companiesData = pd.concat([companiesData, companyInformations], axis=0)
companyInformations.to_csv('dane.csv', encoding='utf-8-sig', mode='w', index=False)
# print(companiesData)
| przemekdan1/Forbes-web-scraping | scrapeData.py | scrapeData.py | py | 5,222 | python | pl | code | 0 | github-code | 13 |
35472669763 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from http import HTTPStatus
from posts.models import Post, Group
User = get_user_model()
class PostURLTest(TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.user = User.objects.create_user(username='auth')
cls.group = Group.objects.create(
title='Тестовый заголовок',
slug='Тестовый слаг',
description='Тестовое описание'
)
cls.post = Post.objects.create(
text='Тестовый текст поста',
pub_date='Тестовая дата публикации',
author=cls.user,
)
cls.post_id = cls.post.id
cls.urls = {
'/': HTTPStatus.OK,
f'/group/{cls.group.slug}/': HTTPStatus.OK,
f'/posts/{cls.post_id}/': HTTPStatus.OK,
f'/profile/{cls.user.username}/': HTTPStatus.OK,
'/create/': HTTPStatus.OK,
f'/posts/{cls.post_id}/edit/': HTTPStatus.OK
}
def setUp(self) -> None:
# Создание неавторизованного пользователя
self.user = User.objects.create_user(username='NoName')
# Создание авторизованного пользователя
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
def test_guest_client(self):
'''Какие страницы доступны неавторизованному пользователю '''
urls = self.urls
self.urls['/create/'] = HTTPStatus.FOUND
self.urls[f'/posts/{self.post_id}/edit/'] = HTTPStatus.FOUND
for url, response_code in urls.items():
with self.subTest(url=url):
status_code = self.client.get(url).status_code
self.assertEqual(response_code, status_code)
def test_authorized_client(self):
'''Какие страницы доступны авторизованному пользователю '''
# Если пользователь авторизован,
# и является автором поста - должны быть доступны все страницы
if self.post.author == self.user.username:
urls = self.urls
for url, response_code in urls.items():
with self.subTest(url=url):
status_code = self.authorized_client.get(url).status_code
self.assertEqual(response_code, status_code)
# Если пользователь не является автором поста
# должна быть недоступна страница редактирования поста
else:
self.urls[f'/posts/{self.post_id}/edit/'] = HTTPStatus.FOUND
urls = self.urls
for url, response_code in urls.items():
with self.subTest(url=url):
status_code = self.authorized_client.get(url).status_code
self.assertEqual(response_code, status_code)
def test_guest_redirect_login(self):
'''Направит ли неавторизованного пользователя
на страницу авторизации при попытке перейти на
'/create/', '/posts/<post_id>/edit/' '''
urls = {
'/create/': '/auth/login/?next=/create/',
f'/posts/{self.post_id}/edit/': '/auth/login/?next=/posts/1/edit/'
}
for url, template in urls.items():
with self.subTest(url=url):
response = self.client.get(url)
self.assertRedirects(response, template)
| Stanislav-Gutnikov/hw04_tests | yatube/posts/tests/test_urls.py | test_urls.py | py | 3,867 | python | ru | code | 0 | github-code | 13 |
37296086175 | # Sky Hoffert
# Utility stuff for ENEE623 Project.
import socket
import sys
import threading
PORT_TX_TO_CH = 5000
PORT_CH_TO_RX = 5001
Fs = 44100
def Log(s, end="\n"):
sys.stdout.write(s)
sys.stdout.write(end)
sys.stdout.flush()
def ConstellationToXY(c):
xs = []
ys = []
for pt in c:
xs.append(pt["I"])
ys.append(pt["Q"])
return (xs,ys) | skyhoffert/ENEE623_Project | util.py | util.py | py | 385 | python | en | code | 0 | github-code | 13 |
29860363860 | from ..database.db import Database
from .AiModel import ChatGPT
import json
class ChapterModel():
def fetch_all(story_id):
db = Database.open()
chapters = db.execute("SELECT * FROM chapter WHERE story_id = ? ORDER BY id", (story_id,)).fetchall()
return chapters
def fetch_one(id):
db = Database.open()
chapter = db.execute("SELECT * FROM chapter WHERE id = ?", (id,)).fetchone()
return chapter
def create_one(story_id, title, brief):
db = Database.open()
return_code = db.execute("INSERT INTO chapter (story_id, title, body, brief) VALUES (?, ?, ?, ?)", (story_id, title, brief, brief))
db.commit()
return return_code
def generate_one(story_id):
stories = ChapterModel.fetch_all(story_id=story_id)
book_summary = "BOOK START: {"
for story in stories:
book_summary += "\"" + str(story['id']) + "\":\"" + story['brief'] + "\","
book_summary += "} :BOOK END"
with open('src/static/prompt.txt', 'r') as file:
instruction = file.read()
prompt = book_summary + " " + instruction
response = ChatGPT.get_response(prompt=prompt)
response = response.replace("\n", " ")
response_dict = json.loads(response)
db = Database.open()
return_code = db.execute("INSERT INTO chapter (story_id, title, body, brief) VALUES (?, ?, ?, ?)", (story_id, response_dict['title'], response_dict['body'], response_dict['brief']))
db.commit()
return return_code
def update_one(id, title, body, brief):
db = Database.open()
return_code = db.execute("UPDATE chapter SET title = ?, body = ?, brief = ? WHERE id = ?", (title, body, brief, id))
db.commit()
return return_code
def delete_all(story_id):
db = Database.open()
return_code = db.execute("DELETE FROM chapter WHERE story_id = ?", (story_id,))
db.commit()
return return_code
def delete_one(id):
db = Database.open()
return_code = db.execute("DELETE FROM chapter WHERE id = ?", (id,))
db.commit()
return return_code | lundchristian/flask_api_v3 | src/model/ChapterModel.py | ChapterModel.py | py | 2,180 | python | en | code | 0 | github-code | 13 |
70061782099 | import socket
import sys
Dict = {}
#1. Insertion and update
#(a) The HTTP request method should be POST and the value to be inserted (or
#updated) constitutes the content body. There should also be a Content
#Length header indicating the number of bytes in the content body.
#(b) The server should respond with a 200 OK status code after inserting or up
#dating the value. The client always expects success statuses for insertions
#and updates.
def callPost(key, value):
Dict[key] = value
return "200 OK ".encode()
#2. Retrieval
#(a) The HTTP request method should be GET, and there is no content body.
#(b) If the key does not exist in the key-value store, the server returns a 404
#NotFound status code. Otherwise, the server should return a 200 OK code,
#the correct Content-Length header and the value data in the content body.
def callGet(key):
if(key in Dict):
contentBody = Dict[key]
contentLength = "content-length " + str(len(contentBody))
encodedmesg = "200 OK " + contentLength +" "
return encodedmesg.encode() + contentBody
else:
return "404 NotFound ".encode()
#3. Deletion
#(a) The HTTP request method is DELETE, and there is no content body.
#(b) If the key does not exist, the server returns a 404 NotFound code. Otherwise,
#it should delete the key-value pair from the store and respond with a 200 OK
#code and the deleted value string as the content body. The Content-Length
#header should also be sent accordingly.
def callDel(key):
if(key in Dict):
contentBody = Dict.pop(key,None)
contentLength = "content-length " + str(len(contentBody))
encodedmesg = "200 OK " + contentLength +" "
return encodedmesg.encode() + contentBody
else:
return "404 NotFound ".encode()
#gets user input
portNum = sys.argv[1]
#creates socket
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('',int(portNum))
print("starting up on" , server_address, "on port" , portNum)#for debugging
soc.bind(server_address)
# Listen for incoming connections
soc.listen(1)
connection, client_address = soc.accept()
sentence = connection.recv(1024)
while True:
# Wait for a connection
n=0
front = ""
while(sentence[n] != 32 or sentence[n+1] != 32 ): #gets the decodable part
n = n+1;
if(n == len(sentence)):
sentence = sentence + connection.recv(1024) #recover more in case it comes in 2 batches
front = (sentence[0:n]).decode() #decode it
print("front:",front)
data = front.split(" ") # 0 = httptype, 1= key, 2 = optional content header , 3 = numberofChar, 4 = message
method = (data[0]).lower() #ensure case insensitivity
keyPrefix = "/key/"
key = data[1].partition(keyPrefix)[2] #gets the key after prefix
print("The method: " + method)
print("key :", key)
#get rid of dummies here
if(method == "post"):
count = 2
while(count<len(data)):
if((data[count]).lower() == "content-length"):
lengthOfMessage = int(data[count + 1])
break
count = count +2 #skips pairs
print("The length: " + str(lengthOfMessage))
#serve the port
if(method == "get"):
returnData = callGet(key)
sentence = sentence[n+2:]
elif(method =="post"):
nextmsg = b''
sentenceMax = len(sentence)
if(sentenceMax - (n+2) > lengthOfMessage): #if it overshots
message = (sentence[n+2:n + 2 +lengthOfMessage])
sentence = sentence[n+2 +lengthOfMessage:] #incase the nextmsg stole the part of the header
else: #it does not overshot
message = (sentence[n+2:])
sentence = b''
while True:
if(len(message) < lengthOfMessage):
nextmsg = connection.recv(1024)
sentenceMax = len(nextmsg)
lenOfcurrent = len(message)
if(sentenceMax - (n+2) > lengthOfMessage): #if it overshots
message = message + (nextmsg[lenOfcurrent:lenOfcurrent +lengthOfMessage])
missingHeader = lenOfcurrent +lengthOfMessage
sentence = nextmsg[missingHeader:] #incase the nextmsg stole the part of the header
else: #it does not overshot
message = message + (nextmsg[lenOfcurrent:])
sentence = b''
else:
break
message = message + nextmsg #append the message
returnData = callPost(key,message) #submit post
elif(method == "delete"):
returnData = callDel(key)
sentence = sentence[n+2:]
else:
print("wrong comman FIAKED")
returnData = "404 NotFound".encode()
sentence = sentence[n+2:]
# Clean up the connection
print("THis is the leftover sentence" , sentence.decode())
connection.send(returnData)
if(len(sentence)==0): #will close socket if timeout error
check = connection.recv(1024) #check again
if(len(check) == 0):
connection.close()
print("connection close suc")
soc.listen(1)
connection, client_address = soc.accept()
print("connection openned suc")
sentence = connection.recv(1024)
else:
print("check is " , check.decode())
sentence = check | Deunitato/CS2105_Assignments | cs2105_assignment_1/test/WebServer-A0185403J.py | WebServer-A0185403J.py | py | 5,437 | python | en | code | 0 | github-code | 13 |
42045283758 | #!/usr/bin/env python3
import sys
import heapq
import collections
sys.setrecursionlimit(10 ** 8)
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
H, W, T = map(int, readline().split())
grid = []
start = goal = None
for r in range(H):
s = readline().decode("utf-8").rstrip()
assert len(s) == W
grid.append(s)
for c in range(W):
if s[c] == "S":
assert start is None
start = (r, c)
elif s[c] == "G":
assert goal is None
goal = (r, c)
def check(x):
q = []
heapq.heappush(q, (0, start))
mind = collections.defaultdict(lambda: T + 1)
while q:
d, pos = heapq.heappop(q)
if d > T:
return False
if pos == goal:
return True
if mind[pos] <= d:
continue
mind[pos] = d
r, c = pos
for (rdelta, cdelta) in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
rn = r + rdelta
cn = c + cdelta
if (0 <= rn < H) and (0 <= cn < W):
nxt = grid[rn][cn]
if nxt == "#":
dn = d + x
else:
dn = d + 1
if dn < mind[(rn, cn)]:
heapq.heappush(q, (dn, (rn, cn)))
return False
def solve():
ok, ng = 1, T + 1
while ng - ok > 1:
m = (ng + ok) // 2
if check(m):
ok = m
else:
ng = m
return ok
if __name__ == "__main__":
print(solve())
| keijak/comp-pub | atcoder/abc020/C/main.py | main.py | py | 1,562 | python | en | code | 0 | github-code | 13 |
37199477605 | import numpy as np
import pandas as pd
import random as rand
from tqdm import tqdm
from collections import Counter
# ======================================================================================================================
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
self.height = 1
self.counter = 0
class Tree:
def __init__(self, root):
self.root = root
self.nil = None
def insert(T, root, node):
if node.data < root.data:
if root.left:
insert(T, root.left, node)
else:
node.parent = root
root.left = node
else:
if root.right:
insert(T, root.right, node)
else:
node.parent = root
root.right = node
root.height = max(root.left.height if root.left else 0,
root.right.height if root.right else 0) + 1
# ======================================================================================================================
def generate_access_sequence():
keys = np.array(list(range(1, 1001)))
rand.shuffle(keys)
a = np.array([rand.randint(1, 100) for _ in range(1000)])
c = np.append(np.array([0]), np.cumsum(a))
s = []
A = c[-1]
for _ in range(10000):
j = rand.randint(1, A)
for i in range(1, len(c)):
if c[i - 1] < j <= c[i]:
s.append(keys[i - 1])
_keys, _ = zip(*sorted(list(Counter(s).items()), key=lambda x: x[0]))
_keys, _probabilities = zip(
*sorted(list(Counter(s).items()) + [(key, 0) for key in list(set(keys) - set(_keys))], key=lambda x: x[0]))
_probabilities = list(np.array(_probabilities, dtype=float))
res = {
'keys': keys,
'a': a,
'c': c,
's': s,
'p': _probabilities,
'A': A,
'n': 1000,
}
return res
# ======================================================================================================================
def left_rotate(T, x, cost):
cost['rotations'] += 1
y = x.right
x.right = y.left
if y.left != T.nil:
y.left.parent = x
y.parent = x.parent
if x.parent is T.nil:
T.root = y
elif x == x.parent.left:
x.parent.left = y
else:
x.parent.right = y
y.left = x
x.parent = y
x.height = max(x.left.height if x.left else 0,
x.right.height if x.right else 0) + 1
y.height = max(y.left.height if y.left else 0,
y.right.height if y.right else 0) + 1
def right_rotate(T, x, cost):
cost['rotations'] += 1
y = x.left
x.left = y.right
if y.right != T.nil:
y.right.parent = x
y.parent = x.parent
if x.parent == T.nil:
T.root = y
elif x == x.parent.left:
x.parent.left = y
else:
x.parent.right = y
y.right = x
x.parent = y
x.height = max(x.left.height if x.left else 0,
x.right.height if x.right else 0) + 1
y.height = max(y.left.height if y.left else 0,
y.right.height if y.right else 0) + 1
# splay.
# ======================================================================================================================
def splay(T, x, cost):
while x.parent is not None:
if x.parent.parent is None:
# L case
if x == x.parent.left:
right_rotate(T, x.parent, cost)
# R case
elif x == x.parent.right:
left_rotate(T, x.parent, cost)
# LL
elif x == x.parent.left and x.parent == x.parent.parent.left:
right_rotate(T, x.parent.parent, cost)
right_rotate(T, x.parent, cost)
# RR
elif x == x.parent.right and x.parent == x.parent.parent.right:
left_rotate(T, x.parent.parent, cost)
left_rotate(T, x.parent, cost)
# LR
elif x == x.parent.right and x.parent == x.parent.parent.left:
left_rotate(T, x.parent, cost)
right_rotate(T, x.parent, cost)
# RL
else:
right_rotate(T, x.parent, cost)
left_rotate(T, x.parent, cost)
def splay_search(T, root, data, cost):
if root:
cost['depth'] += 1
if root.data == data:
splay(T, root, cost)
return
if data < root.data:
splay_search(T, root.left, data, cost)
else:
splay_search(T, root.right, data, cost)
# obst.
# ======================================================================================================================
def obst(p, q, n):
p = np.array(p)
q = np.array(q)
e = np.zeros((n + 2, n + 1))
w = np.zeros((n + 2, n + 1))
root = np.zeros((n + 1, n + 1))
for i in range(1, n + 2):
e[i][i - 1] = q[i - 1]
w[i][i - 1] = q[i - 1]
for l in tqdm(range(1, n + 1), total=n, desc="OBST "):
for i in range(1, n - l + 2):
j = i + l - 1
e[i][j] = float('inf')
w[i][j] = w[i][j - 1] + p[j] + q[j]
for r in range(i, j + 1):
t = e[i][r - 1] + e[r + 1][j] + w[i][j]
if t < e[i][j]:
e[i][j] = t
root[i][j] = r
return e, root
# Move to root.
# ======================================================================================================================
def mtr(T, x, cost):
while x.parent is not None:
# L case
if x == x.parent.left:
right_rotate(T, x.parent, cost)
# R case
elif x == x.parent.right:
left_rotate(T, x.parent, cost)
def mtr_search(T, root, data, cost):
if root:
cost['depth'] += 1
if root.data == data:
mtr(T, root, cost)
return
if data < root.data:
mtr_search(T, root.left, data, cost)
else:
mtr_search(T, root.right, data, cost)
# dynamic monotone.
# ======================================================================================================================
def counter(x):
if x:
return x.counter
else:
return 0
def dm(T, x, cost):
while x.parent is not None:
# L case
if x == x.parent.left and counter(x.parent) < counter(x):
right_rotate(T, x.parent, cost)
# R case
elif x == x.parent.right and counter(x.parent) < counter(x):
left_rotate(T, x.parent, cost)
else:
break
def dm_search(T, root, data, cost):
if root:
cost['depth'] += 1
if root.data == data:
root.counter += 1
dm(T, root, cost)
return
if data < root.data:
dm_search(T, root.left, data, cost)
else:
dm_search(T, root.right, data, cost)
# ======================================================================================================================
def main():
all_costs = []
runs = 10
for run in range(runs):
print("=" * 25, " RUN {} ".format(run + 1), "=" * 25)
res = generate_access_sequence()
T_splay = Tree(Node(res['keys'][0]))
T_mtr = Tree(Node(res['keys'][0]))
T_dm = Tree(Node(res['keys'][0]))
for i in range(1, len(res['keys'])):
insert(T_splay, T_splay.root, Node(res['keys'][i]))
insert(T_mtr, T_mtr.root, Node(res['keys'][i]))
insert(T_dm, T_dm.root, Node(res['keys'][i]))
# OBST
p = res['p']
p.insert(0, 0)
e, root = obst(p, [0 for _ in range(1000 + 1)], res['n'])
# Splay.
splay_cost = {'depth': [], 'rotations': []}
cost = {'rotations': 0, 'depth': 0}
for i in tqdm(res['s'], total=len(res['s']), desc='SPLAY'):
splay_search(T_splay, T_splay.root, i, cost)
splay_cost['depth'].append(cost['depth'])
splay_cost['rotations'].append(cost['rotations'])
cost = {'rotations': 0, 'depth': 0}
# Move to root.
mtr_cost = {'depth': [], 'rotations': []}
cost = {'rotations': 0, 'depth': 0}
for i in tqdm(res['s'], total=len(res['s']), desc="MTR "):
mtr_search(T_mtr, T_mtr.root, i, cost)
mtr_cost['depth'].append(cost['depth'])
mtr_cost['rotations'].append(cost['rotations'])
cost = {'rotations': 0, 'depth': 0}
# Dynamic monotone.
dm_cost = {'depth': [], 'rotations': []}
cost = {'rotations': 0, 'depth': 0}
for i in tqdm(res['s'], total=len(res['s']), desc="DM "):
dm_search(T_dm, T_dm.root, i, cost)
dm_cost['depth'].append(cost['depth'])
dm_cost['rotations'].append(cost['rotations'])
cost = {'rotations': 0, 'depth': 0}
print("Static optimal BST cost = {}.".format(e[1][1000]))
print("Splay cost = {}.".format(
sum(splay_cost['depth'] + splay_cost['rotations'])))
print("Move to root cost = {}.".format(
sum(mtr_cost['depth'] + mtr_cost['rotations'])))
print("Dynamic monotone cost = {}.".format(
sum(dm_cost['depth'] + dm_cost['rotations'])))
all_costs.append([
e[1][1000],
sum(splay_cost['depth'] + splay_cost['rotations']),
sum(mtr_cost['depth'] + mtr_cost['rotations']),
sum(dm_cost['depth'] + dm_cost['rotations'])
])
all_costs.append([
"S.C.R",
sum(splay_cost['depth'] + splay_cost['rotations']) / e[1][1000],
sum(mtr_cost['depth'] + mtr_cost['rotations']) / e[1][1000],
sum(dm_cost['depth'] + dm_cost['rotations']) / e[1][1000]
])
pd.DataFrame(all_costs, columns=['OBST', 'Splay', 'MTR', 'DM']).to_csv(
'all_costs_02.csv', index=False)
if __name__ == '__main__':
main()
| hbatta/self-organizing-data-structures | source.py | source.py | py | 10,052 | python | en | code | 0 | github-code | 13 |
40306968202 | import tkinter as tk
from tkinter import simpledialog
from tkinter import filedialog
import numpy as np
import tables as pt
class PlotElement(tk.Frame):
def __init__(self, masterFrame, spwnd, r_var, val, comment, color, plot):
super().__init__(masterFrame.frame, highlightthickness=1, highlightbackground="blue")
self.color = color
self.is_hidden = False
self.spwnd = spwnd
self.plot = plot
self.masterFrame = masterFrame
self.comment = comment
self.grid_columnconfigure(0, weight=0)
self.grid_columnconfigure(1, weight=0)
self.grid_columnconfigure(2, weight=1)
self.grid_columnconfigure(3, weight=0)
self.btnHide = tk.Button(self, text=" ", command=self.hide, bg=color, activebackground=color)
self.btnHide.grid(column=0, row=0, padx=4, pady=0, sticky="ew")
self.rad = tk.Radiobutton(self, variable=r_var, value=val)
self.rad.grid(column=1, row=0, sticky="ew")
self.lbl = tk.Label(self, text=comment)
self.lbl.grid(column=2, row=0, padx=4, sticky="ew")
self.lbl.bind("<Button-3>", self.popup_menu)
self.btnExpand = tk.Button(self, text="X", command=self.expand)
self.btnExpand.grid(column=3, row=0, padx=4, pady=0, sticky="ew")
self.menu = tk.Menu(self, tearoff=0)
self.menu.add_command(label="Expand", command=self.expand)
self.menu.add_command(label="Hide", command=self.hide)
self.menu.add_command(label="Comment", command=self.menu_comment)
self.menu.add_command(label="Delete", command=self.menu_delete)
self.menu.add_command(label="Export", command=self.menu_export)
def expand(self):
if not self.is_hidden:
self.spwnd.expand_plot(self.plot)
def hide(self):
if self.is_hidden:
self.btnHide.config(bg=self.color, activebackground=self.color)
else:
self.btnHide.config(bg='white', activebackground='white')
self.is_hidden = not self.is_hidden
self.spwnd.hide_plot(self.plot, self.is_hidden)
def menu_comment(self):
comment = simpledialog.askstring("Comment", "Commentary", parent=self.masterFrame, initialvalue=self.comment)
if comment:
self.comment = comment
self.lbl['text'] = comment
def menu_delete(self):
self.grid_forget()
self.masterFrame.delete(self)
def menu_export(self):
f = filedialog.asksaveasfilename(parent=self, defaultextension=".csv", )
if f:
x, y = self.spwnd.xy_plot(self.plot)
np.savetxt(f, (x, y), delimiter=',')
def popup_menu(self, event):
self.menu.tk_popup(event.x_root, event.y_root, 0)
class TableRow(pt.IsDescription):
comment = pt.StringCol(128)
x = pt.Int32Col(256)
y = pt.Int32Col(256)
class PlotListWnd(tk.Frame):
def __init__(self, mainframe, spwnd):
super().__init__(mainframe, highlightthickness=1, highlightbackground="blue")
self.spwnd = spwnd
self.list = []
self.r_var = tk.IntVar()
self.canvas = tk.Canvas(self, borderwidth=0, background="#ffffff")
self.frame = tk.Frame(self.canvas, background="#ffffff")
self.vsb = tk.Scrollbar(self, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((4, 4), window=self.frame, anchor="nw", tags="self.frame")
self.frame.bind("<Configure>", self.on_frame_configure)
def on_frame_configure(self, event):
self.canvas.configure(scrollregion=self.canvas.bbox("all"), width=self.frame.winfo_width())
def add_plot(self, plot, color, comment="Plot",):
r = len(self.list)
p = PlotElement(self, self.spwnd, self.r_var, r, comment=comment, color=color, plot=plot)
p.grid(column=0, row=r, sticky="ew")
self.r_var.set(r)
self.list.append(p)
def delete(self, pl):
self.list.remove(pl)
self.spwnd.del_plot(pl.plot)
def save(self):
n = len(self.list)
h5file = pt.open_file("spw.h5", mode="w", title="spectrum")
group = h5file.create_group("/", "simple", "Single spectrum")
table = h5file.create_table(group, "sp", TableRow, "spectrum")
sprow = table.row
for i in range(n):
x, y = self.spwnd.xy_plot(self.list[i].plot)
sprow['x'] = x
sprow['y'] = y
sprow['comment'] = self.list[i].comment
sprow.append()
table.flush()
h5file.close()
def restore(self):
try:
h5file = pt.open_file("spw.h5", mode="r", title="spectrum")
table = h5file.root.simple.sp
for pl in table.iterrows():
p, c = self.spwnd.add_plot(pl['x'], pl['y'])
self.add_plot(p, c, pl['comment'])
h5file.close()
except ValueError:
pass
except OSError:
print("Cannot open file")
| optotekhnika/pySpectrRPi | plotlistwnd.py | plotlistwnd.py | py | 5,161 | python | en | code | 0 | github-code | 13 |
38263786866 | import scrapy
import csv
import traceback
from bs4 import BeautifulSoup
ship_file = open('equasis_ship', 'w')
ship_csv = csv.writer(ship_file, delimiter = ",")
company_file = open('equasis_company', 'w')
company_csv = csv.writer(company_file, delimiter = ",")
ship_cols = [
"IMO number :",
"Name of ship :",
"Call Sign :",
"MMSI :",
"Gross tonnage :",
"DWT :",
"Type of ship :",
"Year of build :",
"Flag :",
"Status of ship :",
"Last update :"
]
company_cols = [
"ship_imo",
"company_imo",
"role",
"company_name",
"company_address",
"date_of_effect"
]
class LoginSpider(scrapy.Spider):
name = 'equasis.org'
start_urls = ['http://www.equasis.org/EquasisWeb/public/HomePage']
download_delay = .5
concurrent_requests = 1
def request_next_imo(self):
if (len(self.imos)>0):
imo = self.imos.pop()
return scrapy.FormRequest.from_response(
self.front_page_response,
formdata = {'j_email': 'lyonwj@gmail.com', 'j_password': 'YEBp9azWtj'},
callback = self.after_login,
meta = {'imo': imo},
dont_filter=True
)
def parse(self, response):
print('STARTING!')
ship_csv.writerow(ship_cols)
company_csv.writerow(company_cols)
self.imos = self.get_imos("/Users/niccdias/Desktop/GSP2/columbia-shipping/data/current_nk_flags.csv")
self.front_page_response = response
return self.request_next_imo()
def after_login(self, response):
imo = response.meta['imo']
print("LOGGED IN for IMO " + imo)
if 'Please, try again' in str(response.body):
self.logger.error("Login failed")
return
else:
return scrapy.Request("http://www.equasis.org/EquasisWeb/restricted/ShipSearch?fs=ShipSearch",
callback=self.make_imo_request,
meta = {'imo': imo},
dont_filter=True,
)
def make_imo_request(self, response):
imo = response.meta['imo']
yield scrapy.FormRequest.from_response(
response,
formdata = {'P_IMO': imo, 'P_PAGE': '1', 'Submit': "SEARCH"},
callback = self.parse_ship,
meta = {'imo': imo},
dont_filter=True,
)
def parse_ship(self, response):
imo = response.meta['imo']
print("PARSING SHIP " + imo)
try:
soup = BeautifulSoup(response.text,'lxml')
cols = []
for col in ship_cols:
if soup.find(text=col) == None:
cols.append("")
else:
cols.append(soup.find(text=col).parent.findNext('td').contents[0].strip())
if cols[0]=="":
cols[0]=imo
ship_csv.writerow(cols)
self.scrape_manager_row(imo, soup, 'lignej')
self.scrape_manager_row(imo, soup, 'ligneb')
except Exception as e:
print(e)
traceback.print_exc()
print(response)
print(response.text)
return self.request_next_imo()
def scrape_manager_row(self, imo, soup, color):
company_table = soup.find(text=" Management detail").parent.findNext('table').contents[0]
yellow_rows = company_table.findAll('tr', {'class':color})
for row in yellow_rows:
yellow = []
yellow.append(imo)
yellow_cells = row.contents
for cell in yellow_cells[:5]:
yellow.append(cell.contents[0].string.strip())
company_csv.writerow(yellow)
def get_imos(self, fileurl):
imos = []
with open (fileurl) as csvfile:
reader = csv.reader(csvfile, delimiter=";")
for row in reader:
imos.append(row[0])
print("Running with " + str(len(imos)) + " imos")
return imos
| niccdias/scrapers | equasis/equasis_ship.py | equasis_ship.py | py | 3,302 | python | en | code | 1 | github-code | 13 |
18346268089 | user_agent_name = 'Python HTTP Server'
# server_ip = 'amadeus.local'
server_ip = '192.168.43.29'
ports = range(8080, 8090)
import console
args = console.process_args()
if 'port' in args.keys():
ports = [args['port']] + list(ports)
| aeirya/homework-winter2021 | network/hw1/q3/py/args.py | args.py | py | 254 | python | en | code | 0 | github-code | 13 |
20985346392 | import re
import openpyxl
# Define the log file path
log_file_path = ""
# Define the Excel file path
excel_file_path = "extract-excel.xlsx"
# Regular expression for extracting date and time
datetime_pattern = r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})' # Assuming datetime format is YYYY-MM-DD HH:MM:SS
# Regular expression for extracting a number before the word 'successful'
number_pattern = r'(\d+)\s+successful' # Assuming the number is followed by ' successful'
# Function to extract data from the log file based on TABLE_NAME column in Excel
def extract_data_from_log(log_file_path, excel_file_path):
with open(log_file_path, 'r') as file:
data = file.readlines()
first_record = re.search(datetime_pattern, data[0])
last_record = re.findall(datetime_pattern, data[-1])
decrypting_indexes = [i for i, line in enumerate(data) if 'Decrypting...' in line]
decrypting_record = None
for idx in decrypting_indexes:
if idx + 1 < len(data):
match = re.search(datetime_pattern, data[idx + 1])
if match:
decrypting_record = match.group(0)
break
last_successful = None
for line in reversed(data):
match = re.search(number_pattern, line)
if match:
last_successful = match.group(1)
break
return first_record.group(0), last_record[-1], decrypting_record, last_successful
# Function to update Excel file with extracted data
def update_excel_with_data(row_number,excel_file_path, first_datetime, last_datetime, decrypting_datetime, last_successful):
wb = openpyxl.load_workbook(excel_file_path)
sheet = wb.active
#sheet.cell(row=1, column=1).value = "First Record Date and Time"
sheet.cell(row=row_number, column=1).value = first_datetime
#sheet.cell(row=2, column=1).value = "Last Record Date and Time"
sheet.cell(row=row_number, column=7).value = last_datetime
#sheet.cell(row=3, column=1).value = "Decrypting Record Date and Time"
sheet.cell(row=row_number, column=6).value = decrypting_datetime
#sheet.cell(row=4, column=1).value = "Last Successful Number"
sheet.cell(row=row_number, column=5).value = last_successful
wb.save(excel_file_path)
print("Data successfully updated in Excel file.")
wb = openpyxl.load_workbook(excel_file_path)
sheet = wb.active
table_names = [sheet.cell(row=i, column=3).value for i in range(2, sheet.max_row + 1)]
print(table_names)
if 'REG_MAP' in table_names:
log_file_path = "example.log"
first_datetime, last_datetime, decrypting_datetime, last_successful = extract_data_from_log(log_file_path, excel_file_path)
# Update the Excel file with the extracted data
if first_datetime and last_datetime and decrypting_datetime and last_successful:
update_excel_with_data(2,excel_file_path, first_datetime, last_datetime, decrypting_datetime, last_successful)
if "asa" in table_names:
log_file_path = "example1.log"
first_datetime, last_datetime, decrypting_datetime, last_successful = extract_data_from_log(log_file_path, excel_file_path)
if first_datetime and last_datetime and decrypting_datetime and last_successful:
update_excel_with_data(3,excel_file_path, first_datetime, last_datetime, decrypting_datetime, last_successful)
if "start_map2" in table_names:
log_file_path = "example2.log"
first_datetime, last_datetime, decrypting_datetime, last_successful = extract_data_from_log(log_file_path, excel_file_path)
if first_datetime and last_datetime and decrypting_datetime and last_successful:
update_excel_with_data(4,excel_file_path, first_datetime, last_datetime, decrypting_datetime, last_successful)
if 'REG_MAP' not in table_names and "asa" not in table_names and "start_map2" not in table_names:
print("No records found with TABLE_NAME as REG_MAP in the Excel file.")
first_datetime, last_datetime, decrypting_datetime, last_successful= None, None, None, None
# Extract data from the log file based on TABLE_NAME column in Excel
| Thanushbj7/LogToExcel | final_extract.py | final_extract.py | py | 4,200 | python | en | code | 0 | github-code | 13 |
456861560 | N=int(raw_input())
a=(raw_input()).split()
b=[]
s=[]
for i in a:
b.append(int(i))
for i in range(0,len(b)):
s1=0
s2=0
c1=0
c2=0
for j in range(i+1,len(b)):
s1=s1+b[j]
c1=c1+1
if(c1>0):
s1=int(s1/(c1))
for k in range(0,i+1):
s2=s2+b[k]
c2=c2+1
if(c2>0):
s2=int(s2/(c2))
if(s1==s2):
print("yes")
break
else:
print("no")
| chanduvenkyteju/pythonprogramming | equal avg of 2 arrays .py | equal avg of 2 arrays .py | py | 427 | python | en | code | 0 | github-code | 13 |
31003301926 | from django.urls import path
from . import views
urlpatterns = [
path('', views.goal_list, name='goal_list'),
path('add/', views.goal_add, name='goal_add'),
path('<slug:slug>/detail/', views.goal_detail, name='goal_detail'),
path('<slug:slug>/edit/', views.goal_edit, name='goal_edit'),
path('<slug:slug>/delete/', views.goal_delete, name='goal_delete'),
path('<slug:slug>/task/add/', views.task_add, name='task_add'),
path('task/<int:task_id>/', views.task_delete, name='task_delete')
]
| alex1the1great/Roadmap | roadmap/urls.py | urls.py | py | 519 | python | en | code | 0 | github-code | 13 |
73011552978 | from os import getenv
# if env is prod read from env vars else read from local file
cred_dct = {}
ENV = getenv("ENV")
check_env = getenv("ORACLE_HOST")
if check_env:
cred_dct["HOST"] = check_env
cred_dct["USERNAME"] = getenv("ORACLE_USER")
cred_dct["PASSWORD"] = getenv("ORACLE_PASS")
cred_dct["SID"] = getenv("ORACLE_SID")
cred_dct["PORT"] = getenv("ORACLE_PORT")
print(f'connected to oracle db: {cred_dct["HOST"]}')
else:
cred_file = "/Users/spencer.trinhkinnate.com/Documents/security_files/oracle2"
with open(cred_file, "r") as f:
lines = f.readlines()
for line in lines:
str_split = line.split(",")
key = str_split[0].strip()
value = str_split[1].strip()
cred_dct[key] = value
if ENV == "DEV":
cred_dct["HOST"] = cred_dct["HOST-DEV"]
print(f'connected to oracle db: {cred_dct["HOST"] }')
| sktrinh12/dm-sar-view | backend/app/credentials.py | credentials.py | py | 908 | python | en | code | 0 | github-code | 13 |
21850272952 | import os
import multiprocessing as mp
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import cartopy.crs as ccrs
import cartowik.conventions as ccv
import cartowik.decorations as cde
import cartowik.naturalearth as cne
import cartowik.shadedrelief as csr
import absplots as apl
import pismx.open
# Color palette
# -------------
# set color cycle to colorbrewer Paired palette
plt.rc('axes', prop_cycle=plt.cycler(color=plt.get_cmap('Paired').colors))
# Plotting methods
# ----------------
def draw_natural_earth(ax, mode='gs'):
"""Add Natural Earth geographic data vectors."""
edgecolor = '#0978ab' if mode == 'co' else '0.25'
facecolor = '#c6ecff' if mode == 'co' else '0.95'
kwargs = dict(ax=ax, scale='50m', zorder=0)
cne.add_rivers(edgecolor=edgecolor, **kwargs)
cne.add_lakes(edgecolor=edgecolor, facecolor=facecolor, **kwargs)
cne.add_coastline(edgecolor=edgecolor, linestyle='dashed', **kwargs)
def plot_visual(ax, run, time, mode='gs'):
"""Plot interpolated map-plane model output."""
# get interpolated sea level
with pismx.open.dataset('~/pism/input/dsl/specmap.nc') as ds:
dsl = ds.delta_SL.interp(age=-time/1e3, method='linear')
# plot interpolated model output
with pismx.open.visual(
run+'/ex.{:07.0f}.nc',
'~/pism/input/boot/cordillera.etopo1bed.hus12.5km.nc',
'~/pism/input/boot/cordillera.etopo1bed.hus12.1km.nc',
ax=ax, time=time, shift=120000) as ds:
ds.topg.plot.imshow(
ax=ax, add_colorbar=False, zorder=-1,
cmap=(ccv.ELEVATIONAL if mode == 'co' else 'Greys'),
vmin=(-4500 if mode == 'co' else 0), vmax=4500)
csr.add_multishade(
ds.topg.where(ds.topg >= dsl)-dsl,
ax=ax, add_colorbar=False, zorder=-1)
ds.topg.plot.contour(
ax=ax, colors=('#0978ab' if mode == 'co' else '0.25'),
levels=[dsl], linestyles='solid', linewidths=0.25, zorder=0)
ds.usurf.plot.contour(
levels=[lev for lev in range(0, 5000, 200) if lev % 1000 == 0],
ax=ax, colors=['0.25'], linewidths=0.25)
ds.usurf.plot.contour(
levels=[lev for lev in range(0, 5000, 200) if lev % 1000 != 0],
ax=ax, colors=['0.25'], linewidths=0.1)
ds.velsurf_mag.notnull().plot.contour(
ax=ax, colors=['0.25'], levels=[0.5], linewidths=0.25)
mappable = ds.velsurf_mag.plot.imshow(
ax=ax, add_colorbar=False, cmap='Blues',
norm=mcolors.LogNorm(1e1, 1e3), alpha=0.75)
# return mappable for unique colorbar
return mappable
def plot_series(tsax, twax, run, time):
"""Plot model output time series."""
rec = run.split('.')[-2].upper()
dtfile = '.3222.'.join(run.split('.')[-2:])
color = 'C1' if 'grip' in run else 'C5'
# plot temperature forcing
with pismx.open.dataset('~/pism/input/dt/'+dtfile+'.nc') as ds:
data = ds.delta_T[ds.time <= time]
tsax.plot(data, data.age, color=color, alpha=0.25)
tsax.text(data[-1], -time/1e3, '{:.1f}°C'.format(float(data[-1])),
ha='center', va='bottom', clip_on=True, color=color,
alpha=0.25)
# plot ice volume time series
with pismx.open.mfdataset(run+'/ts.???????.nc') as ds:
data = ds.slvol[ds.age >= -time/1e3]
twax.plot(data, data.age, color=color, label=rec)
twax.text(data[-1], -time/1e3, '{:.1f} m'.format(float(data[-1])),
ha='center', va='bottom', clip_on=True, color=color)
def draw(time):
"""Plot complete figure for given time."""
# initialize figure (108*1500/2700=60)
fig, grid = apl.subplots_mm(
ncols=2, nrows=1, sharex=True, sharey=True, figsize=(192, 108),
gridspec_kw=dict(left=0, right=0, bottom=0, top=0, wspace=192-135),
subplot_kw=dict(projection=ccrs.LambertConformal(
central_longitude=-95, central_latitude=49,
standard_parallels=(49, 77))))
cax = fig.add_axes_mm([135/2+10, 108-10, 192-135-20, 5])
tsax = fig.add_axes_mm([135/2+5, 10, 192-135-20, 108-40])
twax = tsax.twiny()
# prepare map axes
for i, ax in enumerate(grid):
ax.set_extent([-2500e3, -1000e3, 100e3, 2500e3], crs=ax.projection)
ax.spines['geo'].set_ec('none')
ax.plot([1-i, 1-i], [0, 1], transform=ax.transAxes, color='k', lw=2)
# for each record
for i, rec in enumerate(['GRIP', 'EPICA']):
ax = grid[i]
offset = [6.2, 5.9][i]
run = '~/pism/output/0.7.2-craypetsc/ciscyc4.5km.{:s}.{:04d}'
run = run.format(rec.lower(), round(offset*100))
# plot model output
mappable = plot_visual(ax, run, time)
plot_series(tsax, twax, run, time)
# add map elements
draw_natural_earth(ax)
cde.add_subfig_label(rec, ax=ax, loc='ne')
# add unique colorbar
fig.colorbar(mappable, cax=cax, format='%g', orientation='horizontal',
label=r'surface velocity ($m\,a^{-1}$)', extend='both')
# set time series axes properties
tsax.set_ylim(120.0, 0.0)
tsax.set_xlim(-9.5, 1.5)
tsax.set_xlabel('temperature change (°C)', color='0.75')
tsax.yaxis.tick_right()
tsax.yaxis.set_label_position('right')
tsax.tick_params(axis='x', colors='0.75')
tsax.grid(axis='x')
# set twin axes properties
twax.set_xlim(-1.5, 9.5)
twax.set_xlabel('ice volume (m sea level equivalent)')
# legend appears after a bit
if time >= -105000:
twax.legend(loc='lower right')
# remove spines
tsax.spines['left'].set_visible(False)
tsax.spines['right'].set_visible(False)
twax.spines['left'].set_visible(False)
twax.spines['right'].set_visible(False)
# add cursor
tsax.axhline(-time/1e3, c='0.25', lw=0.5)
tsax.set_yticks([120, -(time+1)/1e3, 0]) # mpl confused with two 0 ticks
tsax.set_yticklabels([
r'120$\,$000' if time >= -110000 else '',
'{:,d}\nyears ago'.format(-time).replace(',', r'$\,$'),
'0' if time <= -10000 else ''])
# return figure
return fig
# Figure saving
# -------------
def save_animation_frame(func, outdir, time, *args, **kwargs):
"""Save figure produced by func as animation frame if missing."""
# check if file exists
fname = os.path.join(outdir, '{:06d}.png').format(time+120000)
if not os.path.isfile(fname):
# assemble figure and save
print('plotting {:s} ...'.format(fname))
fig = func(time, *args, **kwargs)
fig.savefig(fname)
plt.close(fig)
# Main program
# ------------
def main():
"""Main program for command-line execution."""
# start and end of animation
start, end, step = -120000, -0, 100
# output frame directories
outdir = os.path.join(os.environ['HOME'], 'anim', 'anim_cordillera_dual')
# iterable arguments to save animation frames
iter_args = [(draw, outdir, t) for t in range(start+step, end+1, step)]
# create frame directory if missing
if not os.path.isdir(outdir):
os.mkdir(outdir)
# plot all frames in parallel
with mp.Pool(processes=4) as pool:
pool.starmap(save_animation_frame, iter_args)
if __name__ == '__main__':
main()
| juseg/cordillera | movies/anim_cordillera_dual.py | anim_cordillera_dual.py | py | 7,310 | python | en | code | 0 | github-code | 13 |
74718310736 | import os
from copy import deepcopy
from decimal import Decimal
from typing import Optional, Dict, Tuple, List, Set
from dataclasses import dataclass, field
import boto3
import marshy
from boto3.dynamodb.conditions import Not as DynNot, ConditionBase, Key
from botocore.exceptions import ClientError
from marshy.types import ExternalItemType
from persisty.attr.attr import Attr
from persisty.attr.generator.attr_value_generator_abc import AttrValueGeneratorABC
from persisty.errors import PersistyError
from persisty.impl.dynamodb.partition_sort_index import PartitionSortIndex
from persisty.search_filter.and_filter import And
from persisty.search_filter.exclude_all import EXCLUDE_ALL
from persisty.search_filter.search_filter_abc import SearchFilterABC
from persisty.attr.attr_filter import AttrFilter, AttrFilterOp
from persisty.batch_edit import BatchEdit
from persisty.batch_edit_result import BatchEditResult
from persisty.result_set import ResultSet
from persisty.search_filter.include_all import INCLUDE_ALL
from persisty.search_order.search_order import SearchOrder
from persisty.store.store_abc import StoreABC, T
from persisty.store_meta import StoreMeta
from persisty.util import filter_none, get_logger
from persisty.util.undefined import UNDEFINED
logger = get_logger(__name__)
def catch_client_error(fn):
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except ClientError as e:
raise PersistyError(e) from e
return wrapper
# pylint: disable=R0902
@dataclass(frozen=True)
class DynamodbTableStore(StoreABC[T]):
"""Store backed by a dynamodb table. Does not do single table design or anything of that nature."""
meta: StoreMeta
table_name: str
index: PartitionSortIndex
global_secondary_indexes: Dict[str, PartitionSortIndex] = field(
default_factory=dict
)
aws_profile_name: Optional[str] = None
region_name: Optional[str] = field(
default_factory=lambda: os.environ.get("AWS_REGION")
)
decimal_format: str = "%.9f"
max_local_search_size: int = None
def __post_init__(self):
if self.max_local_search_size is None:
object.__setattr__(self, "max_local_search_size", self.meta.batch_size * 5)
def get_meta(self) -> StoreMeta:
return self.meta
@catch_client_error
def create(self, item: T) -> T:
item = self._dump_create(item)
self._dynamodb_table().put_item(
Item=item,
ConditionExpression=DynNot(self.index.to_condition_expression(item)),
)
loaded = self._load(item)
return loaded
@catch_client_error
def read(self, key: str) -> Optional[T]:
table = self._dynamodb_table()
if not isinstance(key, str):
key = str(key)
key_dict = self.meta.key_config.to_key_dict(key)
response = table.get_item(Key=key_dict)
loaded = self._load(response.get("Item"))
return loaded
@catch_client_error
def read_batch(self, keys: List[str]) -> List[Optional[T]]:
assert len(keys) <= self.meta.batch_size
key_config = self.meta.key_config
resource = self._dynamodb_resource()
kwargs = {
"RequestItems": {
self.table_name: {
"Keys": [key_config.to_key_dict(key) for key in set(keys)]
}
}
}
results_by_key = {}
response = resource.batch_get_item(**kwargs)
for item in response["Responses"][self.table_name]:
loaded = self._load(item)
key = key_config.to_key_str(loaded)
results_by_key[key] = loaded
assert not response.get(
"UnprocessedKeys"
) # Batch size would have been greater than 16 Mb
return [results_by_key.get(key) for key in keys]
@catch_client_error
def update(
self, updates: T, precondition: SearchFilterABC = INCLUDE_ALL
) -> Optional[T]:
if precondition is not INCLUDE_ALL:
search_filter = precondition.lock_attrs(self.meta.attrs)
item = self.read(self.meta.key_config.to_key_str(updates))
if not search_filter.match(item, self.meta.attrs):
return None
updates = self._dump_update(updates)
key_dict = self.index.to_dict(updates)
table = self._dynamodb_table()
updates = {**updates}
updates.pop(self.index.pk)
if self.index.sk:
updates.pop(self.index.sk)
update = _build_update(updates)
response = table.update_item(
Key=key_dict,
ConditionExpression=self.index.to_condition_expression(key_dict),
UpdateExpression=update["str"],
ExpressionAttributeNames=update["names"],
ExpressionAttributeValues=update["values"],
ReturnValues="ALL_NEW",
)
loaded = self._load(response.get("Attributes"))
return loaded
def _update(
self,
key: str,
item: T,
updates: T,
search_filter: SearchFilterABC = INCLUDE_ALL,
) -> Optional[T]:
return self.update(updates, search_filter)
@catch_client_error
def delete(self, key: str) -> bool:
table = self._dynamodb_table()
key_dict = self.meta.key_config.to_key_dict(key)
response = table.delete_item(Key=key_dict, ReturnValues="ALL_OLD")
attributes = response.get("Attributes")
return bool(attributes)
def _delete(self, key: str, item: T) -> bool:
return self.delete(key)
@catch_client_error
def search(
self,
search_filter: SearchFilterABC = INCLUDE_ALL,
search_order: Optional[SearchOrder] = None,
page_key: Optional[str] = None,
limit: Optional[int] = None,
) -> ResultSet[T]:
if limit is None:
limit = self.meta.batch_size
assert limit <= self.meta.batch_size
search_filter = search_filter.lock_attrs(self.meta.attrs)
if search_order:
search_order.validate_for_attrs(self.meta.attrs)
if search_filter is EXCLUDE_ALL:
return ResultSet([])
index_name, index = self._get_index_for_search(search_filter, search_order)
key_filter, other_filter = _separate_index_from_filter(index, search_filter)
if other_filter:
(
filter_expression,
search_filter_handled_natively,
) = other_filter.build_filter_expression(self.meta.attrs)
else:
filter_expression = None
search_filter_handled_natively = True
query_args = filter_none(
{
"KeyConditionExpression": self._to_key_condition_expression(key_filter),
"IndexName": index_name,
"Select": "SPECIFIC_ATTRIBUTES",
"ProjectionExpression": ",".join(
a.name for a in self.meta.attrs if a.readable
),
"FilterExpression": filter_expression,
"ScanIndexForward": _get_scan_index_forward(index, search_order),
}
)
search_order_handled_natively = _is_search_order_handled_natively(
index, search_order
)
if search_order_handled_natively:
return self._search_native_order(
query_args,
index,
other_filter,
search_filter_handled_natively,
page_key,
limit,
)
return self._search_local_order(
query_args,
index,
other_filter,
search_filter_handled_natively,
search_order,
page_key,
limit,
)
    # pylint: disable=R0913
    def _search_native_order(
        self,
        query_args: Dict,
        index: Optional[PartitionSortIndex],
        search_filter: SearchFilterABC,
        search_filter_handled_natively: bool,
        page_key: Optional[str],
        limit: int,
    ) -> ResultSet[T]:
        """Paginate when DynamoDB returns results already in the desired
        order: pages are streamed until *limit* items are collected or the
        table is exhausted; the last item's key becomes the next page key.
        """
        if page_key:
            query_args["ExclusiveStartKey"] = self.meta.key_config.to_key_dict(page_key)
        table = self._dynamodb_table()
        results = []
        while True:
            response = _get_search_response(table, index, query_args)
            items = self._load_items(
                response, search_filter, search_filter_handled_natively
            )
            results.extend(items)
            if len(results) >= limit:
                results = results[:limit]
                next_page_key = self.meta.key_config.to_key_str(results[-1])
                assert next_page_key != page_key  # Paranoid prevent infinite loop!
                return ResultSet(results, next_page_key)
            last_evaluated_key = response.get("LastEvaluatedKey")
            if not last_evaluated_key:
                # Table exhausted: final (possibly short) page, no next key.
                return ResultSet(results)
            query_args["ExclusiveStartKey"] = last_evaluated_key
    # pylint: disable=R0914
    def _search_local_order(
        self,
        query_args: Dict,
        index: Optional[PartitionSortIndex],
        search_filter: SearchFilterABC,
        search_filter_handled_natively: bool,
        search_order: SearchOrder,
        page_key: Optional[str],
        limit: int,
    ) -> ResultSet[T]:
        """Paginate when DynamoDB cannot produce the requested order: load
        ALL matching items, sort locally, then slice out the page after the
        given *page_key*.
        """
        table = self._dynamodb_table()
        results = []
        while True:
            response = _get_search_response(table, index, query_args)
            items = self._load_items(
                response, search_filter, search_filter_handled_natively
            )
            results.extend(items)
            # NOTE(review): this guards the size of a single page, not the
            # accumulated result set — confirm whether the cap was meant to
            # bound the total local sort.
            if len(items) > self.max_local_search_size:
                raise PersistyError("sort_failed")
            last_evaluated_key = response.get("LastEvaluatedKey")
            if not last_evaluated_key:
                results = list(search_order.sort(results))
                key_config = self.meta.key_config
                offset = 0
                if page_key:
                    # Resume one past the item whose key matches page_key.
                    offset = next(
                        i + 1
                        for i, result in enumerate(results)
                        if key_config.to_key_str(result) == page_key
                    )
                next_page_key = None
                if len(results) > offset + limit:
                    next_page_key = key_config.to_key_str(results[offset + limit - 1])
                results = results[offset : (offset + limit)]
                return ResultSet(results, next_page_key)
            query_args["ExclusiveStartKey"] = last_evaluated_key
    @catch_client_error
    def count(self, search_filter: SearchFilterABC = INCLUDE_ALL) -> int:
        """Count items matching *search_filter*.

        Uses DynamoDB's Select=COUNT when the whole filter can be evaluated
        natively; otherwise falls back to iterating search_all locally.
        """
        search_filter = search_filter.lock_attrs(self.meta.attrs)
        if search_filter is EXCLUDE_ALL:
            return 0
        index_name, index = self._get_index_for_search(search_filter, None)
        key_filter, other_filter = _separate_index_from_filter(index, search_filter)
        if other_filter:
            (
                filter_expression,
                search_filter_handled_natively,
            ) = other_filter.build_filter_expression(self.meta.attrs)
        else:
            filter_expression = None
            search_filter_handled_natively = True
        if not search_filter_handled_natively:
            # Part of the filter must run in Python: count by iterating.
            result = sum(1 for _ in self.search_all(search_filter))
            return result
        kwargs = filter_none(
            {
                "KeyConditionExpression": self._to_key_condition_expression(key_filter),
                "IndexName": index_name,
                "Select": "COUNT",
                "FilterExpression": filter_expression,
            }
        )
        table = self._dynamodb_table()
        count = 0
        # COUNT responses are still paginated: follow LastEvaluatedKey and
        # sum the per-page counts.
        while True:
            if index:
                response = table.query(**kwargs)
            else:
                response = table.scan(**kwargs)
            count += response["Count"]  # Items
            last_evaluated_key = response.get("LastEvaluatedKey")
            kwargs["ExclusiveStartKey"] = last_evaluated_key
            if not last_evaluated_key:
                return count
def _to_key_condition_expression(self, key_filter: Optional[AttrFilter]):
if not key_filter:
return
attr = next(a for a in self.meta.attrs if a.name == key_filter.name)
marshy.dump(key_filter.value, attr.schema.python_type)
value = marshy.dump(key_filter.value, attr.schema.python_type)
return Key(key_filter.name).eq(value)
    def _edit_batch(
        self, edits: List[BatchEdit], items_by_key: Dict[str, T]
    ) -> List[BatchEditResult]:
        """Apply one batch of create / update / delete edits via the boto3
        batch writer.

        *items_by_key* supplies the currently-stored items for updates so
        that unspecified attributes keep their existing values. The batch
        writer cannot report per-item failures, so every result is marked
        successful here.
        """
        assert len(edits) <= self.meta.batch_size
        results = []
        key_config = self.meta.key_config
        table = self._dynamodb_table()
        with table.batch_writer() as batch:
            for edit in edits:
                if edit.create_item:
                    item = self._dump_create(edit.create_item)
                    batch.put_item(Item=item)
                    results.append(BatchEditResult(edit, True))
                elif edit.update_item:
                    updates = edit.update_item
                    key = key_config.to_key_str(updates)
                    item = items_by_key[key]
                    to_put = {}
                    # Merge the update over the stored item attribute by
                    # attribute, letting update generators (e.g. timestamps)
                    # transform or supply values.
                    for attr in self.meta.attrs:
                        value = UNDEFINED
                        if attr.update_generator:
                            if attr.updatable:
                                value = attr.update_generator.transform(
                                    getattr(updates, attr.name), updates
                                )
                            else:
                                value = attr.update_generator.transform(
                                    UNDEFINED, updates
                                )
                        elif attr.updatable:
                            value = getattr(updates, attr.name)
                        if value is UNDEFINED:
                            # Not part of the update: keep the stored value.
                            value = getattr(item, attr.name)
                        else:
                            setattr(item, attr.name, value)
                        value = marshy.dump(value, attr.schema.python_type)
                        to_put[attr.name] = self._convert_to_decimals(value)
                    # NOTE(review): values were already converted above, so
                    # this second pass is redundant (harmless: idempotent).
                    to_put = self._convert_to_decimals(to_put)
                    batch.put_item(Item=to_put)
                    edit.update_item = deepcopy(item)  # In case of multi put
                    results.append(BatchEditResult(edit, True))
                else:
                    key = key_config.to_key_dict(edit.delete_key)
                    batch.delete_item(Key=key)
                    results.append(BatchEditResult(edit, True))
        return results
def _load(self, item) -> T:
if item is None:
return None
item = self._convert_from_decimals(item)
kwargs = {}
for attr in self.meta.attrs:
value = item.get(attr.name, UNDEFINED)
if attr.readable and value is not UNDEFINED:
# noinspection PyTypeChecker
kwargs[attr.name] = marshy.load(attr.schema.python_type, value)
result = self.meta.get_read_dataclass()(**kwargs)
return result
def _convert_from_decimals(self, item):
if isinstance(item, dict):
return {k: self._convert_from_decimals(v) for k, v in item.items()}
if isinstance(item, list):
return [self._convert_from_decimals(i) for i in item]
if isinstance(item, Decimal):
int_val = int(item)
if int_val == item:
return int_val
return float(item)
return item
def _dump_create(self, to_create: T):
result = {}
for attr in self.meta.attrs:
self._dump_attr(
to_create, attr, attr.creatable, attr.create_generator, result
)
return result
def _dump_update(self, to_update: T):
result = {}
for attr in self.meta.attrs:
self._dump_attr(
to_update, attr, attr.updatable, attr.update_generator, result
)
return result
def _dump_attr(
self,
item: T,
attr: Attr,
accepts_input: bool,
generator: Optional[AttrValueGeneratorABC],
target: ExternalItemType,
):
value = UNDEFINED
if accepts_input:
value = getattr(item, attr.name, UNDEFINED)
if generator:
value = generator.transform(value, item)
if value is not UNDEFINED:
value = marshy.dump(value, attr.schema.python_type)
target[attr.name] = self._convert_to_decimals(value)
def _convert_to_decimals(self, item):
if isinstance(item, dict):
return {k: self._convert_to_decimals(v) for k, v in item.items()}
if isinstance(item, list):
return [self._convert_to_decimals(i) for i in item]
if isinstance(item, float):
int_val = int(item)
if int_val == item:
return int_val
return Decimal(self.decimal_format % item)
return item
    def _get_index_for_search(
        self,
        search_filter: SearchFilterABC,
        search_order: Optional[SearchOrder],
    ) -> Tuple[Optional[str], Optional[PartitionSortIndex]]:
        """Choose the best index (main table index or a GSI) for a query.

        Each candidate is scored by _get_score_for_index against the
        top-level equality filters and the requested sort attributes; the
        highest score wins. Returns (None, None) when no index is usable,
        which forces a full scan. The main table index is returned with a
        None name since queries against it need no IndexName.
        """
        eq_attr_names = _get_top_level_eq_attrs(search_filter)
        sort_attr_names = (
            {s.attr for s in search_order.orders} if search_order else set()
        )
        name = None
        score = _get_score_for_index(self.index, eq_attr_names, sort_attr_names)
        index = self.index if score else None
        for gsi_name, gsi in self.global_secondary_indexes.items():
            gsi_score = _get_score_for_index(gsi, eq_attr_names, sort_attr_names)
            if gsi_score > score:
                name = gsi_name
                score = gsi_score
                index = gsi
        if not score:
            index = None
        return name, index
def _dynamodb_table(self):
if hasattr(self, "_table"):
return self._table
resource = self._dynamodb_resource()
table = resource.Table(self.table_name)
object.__setattr__(self, "_table", table)
return table
def _dynamodb_resource(self):
if hasattr(self, "_resource"):
return self._resource
kwargs = filter_none(
{"profile_name": self.aws_profile_name, "region_name": self.region_name}
)
session = boto3.Session(**kwargs)
resource = session.resource("dynamodb")
object.__setattr__(self, "_resource", resource)
return resource
def _load_items(self, response, search_filter, search_filter_handled_natively):
items = [self._load(item) for item in response["Items"]]
if not search_filter_handled_natively:
items = [
item for item in items if search_filter.match(item, self.meta.attrs)
]
return items
def _build_update(updates: dict):
update_str = "set "
update_list = []
names = {}
values = {}
for k, v in updates.items():
update_list.append(f"#n_{k} = :v_{k}")
names[f"#n_{k}"] = k
values[f":v_{k}"] = v
update_str += ", ".join(update_list)
return {"str": update_str, "names": names, "values": values}
def _get_top_level_eq_attrs(search_filter: SearchFilterABC) -> List[str]:
    """Names of attributes pinned by a top-level equality filter.

    Only a bare ``attr == value`` filter, or such filters sitting directly
    inside a top-level And, can be served by a DynamoDB key condition;
    anything else yields no candidates.
    """
    if isinstance(search_filter, AttrFilter):
        return [search_filter.name] if search_filter.op == AttrFilterOp.eq else []
    if isinstance(search_filter, And):
        return [
            sub.name
            for sub in search_filter.search_filters
            if isinstance(sub, AttrFilter) and sub.op == AttrFilterOp.eq
        ]
    return []
def _separate_index_filters(
    index: PartitionSortIndex, eq_filters: List[AttrFilter]
) -> Tuple[ConditionBase, SearchFilterABC, bool]:
    """Split equality filters into the condition on the index partition key
    and the remainder that must run as a regular filter.

    Returns (key condition, remaining filter or None, handled) where
    *handled* is False when more than one filter targeted the partition key
    and the extras still need local evaluation.
    """
    # NOTE(review): assumes at least one filter targets index.pk — an empty
    # match raises IndexError below; verify callers only pass indexes chosen
    # via _get_score_for_index.
    index_filters = [f for f in eq_filters if f.name == index.pk]
    value = index_filters[0].value
    if value.__class__ not in (str, int, float, bool):
        # Non-primitive key values must be serialized before boto3 sees them.
        value = marshy.dump(value)
    condition = Key(index.pk).eq(value)
    non_index_filters = tuple(f for f in eq_filters if f.name != index.pk)
    non_index_filter = None
    if non_index_filters:
        non_index_filter = And(non_index_filters)
    handled = len(index_filters) == 1
    return condition, non_index_filter, handled
def _get_score_for_index(
index: PartitionSortIndex, eq_attrs: Set[str], sort_attrs: Set[str]
):
if index.pk not in eq_attrs:
return 0
if index.sk in sort_attrs:
return 20
return 10
def _separate_index_from_filter(
index: Optional[PartitionSortIndex], search_filter: SearchFilterABC
) -> Tuple[Optional[AttrFilter], Optional[SearchFilterABC]]:
if not index:
return None, search_filter
if isinstance(search_filter, AttrFilter):
return search_filter, None
index_filter = None
filters = []
# noinspection PyUnresolvedReferences
for f in search_filter.search_filters:
if f.name == index.pk:
index_filter = f
else:
filters.append(f)
filter_expression = And(filters) if filters else None
return index_filter, filter_expression
def _get_scan_index_forward(
index: Optional[PartitionSortIndex], search_order: Optional[SearchOrder]
) -> Optional[bool]:
if search_order and _is_search_order_handled_natively(index, search_order):
return not search_order.orders[0].desc
def _is_search_order_handled_natively(
index: Optional[PartitionSortIndex], search_order: Optional[SearchOrder]
) -> bool:
if not search_order:
return True
if len(search_order.orders) > 1 or not index:
return False
return search_order.orders[0].attr == index.sk
def _get_search_response(table, index, query_args):
if index:
response = table.query(**query_args)
else:
response = table.scan(**query_args)
return response
| tofarr/persisty | persisty/impl/dynamodb/dynamodb_table_store.py | dynamodb_table_store.py | py | 22,182 | python | en | code | 1 | github-code | 13 |
'''
Created on Sep 27, 2017
@author: uhuhuh
'''
# Is this how "Class" work???
# Centers the program window
from Tkinter import *
class center:
    """Center a Tk window on the screen.

    Instantiate with the window to center, e.g. ``center(root)``.
    """

    def __init__(self, master=None):
        # Bug fix: the original called geometry methods on ``self``, but this
        # class does not inherit from any Tk widget, so every call raised
        # AttributeError. Operate on the window passed in instead.
        if master is None:
            raise ValueError("center() requires a Tk window to center")
        master.update_idletasks()
        screen_w = master.winfo_screenwidth()
        screen_h = master.winfo_screenheight()
        # "WxH+X+Y" -> (W, H)
        size = tuple(int(_) for _ in master.geometry().split('+')[0].split('x'))
        x = screen_w / 2 - size[0] / 2
        y = screen_h / 2 - size[1] / 2
        master.geometry("%dx%d+%d+%d" % (size + (x, y)))
self.geometry("%dx%d+%d+%d" % (size + (x,y))) | wfSeg/pythonlearning | GUItutorial/TkinterLearning/CenterClass.py | CenterClass.py | py | 551 | python | en | code | 0 | github-code | 13 |
#!/usr/local/bin/python
# SlidingWindow.py
import wx
class SlidingWindow(wx.Frame):
    """Animated demo of sliding-window ARQ protocols (Go-Back-N / Selective
    Repeat) drawn on a wx frame.

    The top row of circles is the sender's buffer, the bottom row the
    receiver's; data packets and acknowledgements travel between the rows on
    each wx.Timer tick.
    """

    def __init__(self, parent, title):
        wx.Frame.__init__(self, parent, title=title, size=(1000, 800))
        self.topPacketNum = 20          # number of sender-side packets
        self.BottomPacketNum = 20       # number of receiver-side packets
        self.ID_TIMER = 1
        self.windowSize = 5             # sliding-window size (UI adjustable)
        self.sendStrategy = "GoBackN"   # retransmission strategy on timeout
        self.timeOut = 5                # ticks before a retransmission
        self.timeCount = 0
        self.speed = 5                  # pixels moved per tick
        self.fly = []                   # packet indexes currently in the window
        self.fly_max = 0                # highest index ever placed in the window
        self.sendingPackets = []        # data packets animating downwards
        self.backingPackets = []        # acks animating upwards
        self.initFrame()

    def initFrame(self):
        """Build the status bar, timer, controls and event bindings."""
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetStatusText("Status: stop")
        self.timer = wx.Timer(self, self.ID_TIMER)
        self.initController()
        # bind events
        self.Bind(wx.EVT_PAINT, self.drawPacket)
        self.Bind(wx.EVT_TIMER, self.OnTimer, id=self.ID_TIMER)
        self.Bind(wx.EVT_BUTTON, self.OnClick, self.cbtn1)
        self.createPacket()
        # show panel
        self.Centre()
        self.Show(True)

    def OnClick(self, event):
        """Start button: read the UI parameters, seed the window, start the
        animation timer and freeze the controls."""
        self.windowSize = self.sc1.GetValue()
        self.speed = 11 - self.sc2.GetValue()
        self.timeOut = self.sc3.GetValue()
        self.timeCount = self.timeOut
        for i in range(self.windowSize):
            self.fly.append(i)
            self.fly_max = i
        self.timer.Start(100)
        self.sc1.Enable(False)
        self.sc2.Enable(False)
        self.sc3.Enable(False)
        self.rb1.Enable(False)
        self.rb2.Enable(False)
        self.statusbar.SetStatusText("Status: start")

    def OnTimer(self, event):
        """Route our animation timer to sendPacket; skip everything else."""
        if event.GetId() == self.ID_TIMER:
            self.sendPacket()
        else:
            event.Skip()

    def move(self):
        """Advance every in-flight packet/ack one step and retire the ones
        that arrived, sliding the window forward on acknowledgement."""
        for packet in self.sendingPackets:
            if packet.y <= 700:
                packet.y = packet.y + self.speed
            else:
                # Data packet reached the receiver row.
                self.sendingPackets.remove(packet)
                self.BottomPacket[packet.index].isSent = -1
                self.BottomPacket[packet.index].state = "pink"
        for packet in self.backingPackets:
            if packet.y >= 200:
                packet.y = packet.y - self.speed
            else:
                # Ack reached the sender row: confirm both sides and slide
                # the window forward by one.
                self.backingPackets.remove(packet)
                self.topPacket[packet.index].isSent = 2
                self.BottomPacket[packet.index].isSent = 2
                self.topPacket[packet.index].state = "yellow"
                if packet.index in self.fly:
                    self.fly_max = self.fly_max + 1
                    if (self.fly_max + 1) % self.windowSize == 0:
                        self.timeCount = self.timeOut
                    self.fly.append(self.fly_max)
                    self.fly.remove(packet.index)
        self.Refresh()

    def sendPacket(self):
        """One timer tick: handle retransmission timeouts, launch any packet
        in the window that is ready, then move everything in flight."""
        print(self.timeCount)
        self.timeCount = self.timeCount - 1
        if self.timeCount <= 0:
            self.timeCount = self.timeOut
            if self.sendStrategy == "Selective":
                # Bug fix: the original referenced an undefined name `i`
                # here, raising NameError. Selective Repeat resends only the
                # unacknowledged packets still inside the window.
                for i in self.fly:
                    if self.topPacket[i].isSent != 2:
                        self.topPacket[i].isSent = 0
            elif self.sendStrategy == "GoBackN":
                # Go-Back-N resends everything still inside the window.
                for i in self.fly:
                    self.topPacket[i].isSent = 0
        for i in range(self.topPacketNum):
            if (i in self.fly) and (self.topPacket[i].isSent == 0):
                print(self.fly)
                self.topPacket[i].isSent = 1
                self.sendingPackets.append(Packet("lightBlue", 20 + 50 * self.topPacket[i].index, 200, self.topPacket[i].index))
        for i in range(self.BottomPacketNum):
            if (i in self.fly) and (self.BottomPacket[i].isSent == -1):
                self.BottomPacket[i].isSent = 1
                self.backingPackets.append(Packet("green", 20 + 50 * self.BottomPacket[i].index, 700, self.BottomPacket[i].index))
        self.move()

    def createPacket(self):
        """Create the packet rows at the top and bottom of the panel."""
        self.topPacket = []
        for i in range(self.topPacketNum):
            self.topPacket.append(Packet("blue", 20 + 50 * i, 200, i))
        self.BottomPacket = []
        for i in range(self.BottomPacketNum):
            self.BottomPacket.append(Packet("white", 20 + 50 * i, 700, i))

    def drawPacket(self, event):
        """Paint handler: draw both packet rows and everything in flight."""
        dc = wx.PaintDC(self)
        dc.SetPen(wx.Pen("#d4d4d4"))
        for i in range(self.topPacketNum):
            if self.topPacket[i].state == "blue":
                dc.SetBrush(wx.Brush("#0000ff"))
                dc.DrawCircle(self.topPacket[i].x, self.topPacket[i].y, self.topPacket[i].width)
            elif self.topPacket[i].state == "yellow":
                dc.SetBrush(wx.Brush("#cccc66"))
                dc.DrawCircle(self.topPacket[i].x, self.topPacket[i].y, self.topPacket[i].width)
        for i in range(self.BottomPacketNum):
            if self.BottomPacket[i].state == "white":
                dc.SetBrush(wx.Brush("#a52a2a"))
            elif self.BottomPacket[i].state == "pink":
                dc.SetBrush(wx.Brush("#cc66cc"))
            dc.DrawCircle(self.BottomPacket[i].x, self.BottomPacket[i].y, self.BottomPacket[i].width)
        for i in range(len(self.sendingPackets)):
            dc.SetBrush(wx.Brush("#66cccc"))
            dc.DrawCircle(self.sendingPackets[i].x, self.sendingPackets[i].y, self.sendingPackets[i].width)
        for i in range(len(self.backingPackets)):
            dc.SetBrush(wx.Brush("#cccccc"))
            dc.DrawCircle(self.backingPackets[i].x, self.backingPackets[i].y, self.backingPackets[i].width)

    def initController(self):
        """Lay out the parameter controls (protocol, window size, end-to-end
        delay, timeout) and the start button."""
        self.hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.vbox1 = wx.BoxSizer(wx.VERTICAL)
        self.p1 = wx.StaticText(self, label="Protocol")
        self.vbox1.Add(self.p1, flag=wx.ALL, border=8)
        self.rb1 = wx.RadioButton(self, label="Go Back N", style=wx.RB_GROUP)
        self.rb1.SetValue(1)
        self.vbox1.Add(self.rb1, flag=wx.ALL, border=8)
        self.rb2 = wx.RadioButton(self, label="Selective Repeat")
        self.vbox1.Add(self.rb2, flag=wx.ALL, border=8)
        self.hbox.Add(self.vbox1, flag=wx.ALL, border=25)
        self.vbox2 = wx.BoxSizer(wx.VERTICAL)
        self.p2 = wx.StaticText(self, label="Window Size")
        self.vbox2.Add(self.p2, flag=wx.ALL, border=8)
        self.sc1 = wx.SpinCtrl(self, value='5')
        self.sc1.SetRange(1, 10)
        self.vbox2.Add(self.sc1, flag=wx.ALL, border=8)
        self.hbox.Add(self.vbox2, flag=wx.ALL, border=25)
        self.vbox3 = wx.BoxSizer(wx.VERTICAL)
        self.p3 = wx.StaticText(self, label="End to End Delay")
        self.vbox3.Add(self.p3, flag=wx.ALL, border=8)
        self.sc2 = wx.SpinCtrl(self, value='5')
        self.sc2.SetRange(1, 10)
        self.vbox3.Add(self.sc2, flag=wx.ALL, border=8)
        self.hbox.Add(self.vbox3, flag=wx.ALL, border=25)
        self.vbox4 = wx.BoxSizer(wx.VERTICAL)
        self.p4 = wx.StaticText(self, label="Timeout")
        self.vbox4.Add(self.p4, flag=wx.ALL, border=8)
        self.sc3 = wx.SpinCtrl(self, value='100')
        self.sc3.SetRange(40, 200)
        self.vbox4.Add(self.sc3, flag=wx.ALL, border=8)
        self.hbox.Add(self.vbox4, flag=wx.ALL, border=25)
        self.vbox5 = wx.BoxSizer(wx.VERTICAL)
        self.p5 = wx.StaticText(self, label="Start")
        self.vbox5.Add(self.p5, flag=wx.ALL, border=8)
        self.cbtn1 = wx.Button(self, label="Start")
        self.vbox5.Add(self.cbtn1, flag=wx.ALL, border=8)
        self.hbox.Add(self.vbox5, flag=wx.ALL, border=25)
        self.SetSizer(self.hbox)
# every packet has its properties
class Packet(object):
    """One packet circle on the canvas.

    Colour encodes state:
      white     - no data received yet
      blue      - data buffered (ready to send, delivered, or sent un-acked)
      green     - acknowledgement travelling back
      yellow    - transmission confirmed
      purple    - data delivered to the upper network layer
      lightblue - data packet in flight
    """

    def __init__(self, state, x, y, index):
        self.state = state  # current colour/state tag
        self.index = index  # slot of the packet within its row
        self.x = x          # canvas coordinates
        self.y = y
        self.width = 15     # drawing radius in pixels
        self.isSent = 0     # lifecycle flag managed by SlidingWindow
# program starts
if __name__ == "__main__":
    # Build the demo frame and hand control to the wx event loop. (The
    # original last line was fused with dataset metadata; also guard the
    # entry point so importing the module has no side effects.)
    app = wx.App()
    SlidingWindow(None, title="Sliding Window")
    app.MainLoop()
import sys
class Graph:
    """Directed weighted graph over 1-indexed vertices, stored as an
    adjacency matrix where float('-inf') marks a missing edge."""

    def __init__(self, vertices_count, adj_matrix):
        self.vertices_count = vertices_count
        self.graph = adj_matrix

    def dijkstra(self, start):
        """Run Dijkstra's algorithm from 1-indexed vertex *start*.

        Returns (distance, prev): distance[i] is the cost of the cheapest
        path to vertex i + 1 (sys.maxsize when unreachable) and prev[i] is
        the 1-indexed predecessor on that path (0 when there is none).
        """
        visited = [False] * self.vertices_count
        distance = [sys.maxsize] * self.vertices_count
        prev = [0] * self.vertices_count
        distance[start - 1] = 0
        for _ in range(self.vertices_count):
            current = self.get_min_vertex(visited, distance)
            if current == float('-inf'):
                # Every remaining vertex is unreachable.
                continue
            visited[current] = True
            for neighbour in range(self.vertices_count):
                weight = self.graph[current][neighbour]
                # float('-inf') (no edge) and negative weights are skipped.
                if weight < 0 or visited[neighbour]:
                    continue
                candidate = distance[current] + weight
                if candidate < distance[neighbour]:
                    distance[neighbour] = candidate
                    prev[neighbour] = current + 1
        return distance, prev

    def get_min_vertex(self, visited, distance):
        """Return the unvisited 0-indexed vertex with the smallest tentative
        distance, or float('-inf') when none remains below sys.maxsize."""
        best_distance = sys.maxsize
        best_vertex = float('-inf')
        for candidate in range(self.vertices_count):
            if not visited[candidate] and distance[candidate] < best_distance:
                best_distance = distance[candidate]
                best_vertex = candidate
        return best_vertex

    def get_route(self, distance, prev, start, end):
        """Format the shortest path from *start* to *end*.

        Returns 'N' when unreachable, otherwise
        'Y\\n<space separated path>\\n<cost>'.
        """
        cost = distance[end - 1]
        if cost == sys.maxsize:
            return 'N'
        path = [end]
        vertex = end
        for _ in range(self.vertices_count):
            vertex = prev[vertex - 1]
            path.append(vertex)
            if vertex == start:
                break
        return 'Y\n' + ' '.join(str(v) for v in reversed(path)) + f'\n{cost}'
class FileParser:
    """Reads the problem input file.

    Expected layout: line 0 is the vertex count, then one adjacency line per
    vertex ("to_vertex price" pairs for edges leaving into vertex i), and
    the last two lines are the start and end vertices.
    """

    @staticmethod
    def parse_file(name):
        with open(name) as file:
            content = file.read().split('\n')
        start = content[-2]
        end = content[-1]
        vertices_count = int(content[0])
        # float('-inf') marks "no edge" in the adjacency matrix.
        adj_matrix = [[float('-inf') for _ in range(vertices_count)]
                      for _ in range(vertices_count)]
        # NOTE(review): this range also visits the start-vertex line
        # (content[-2]); it is harmless only because that line holds a single
        # token and yields no pairs — confirm the intended file format.
        for i in range(1, len(content) - 1):
            info = content[i].split()
            for j in range(0, len(info) - 1, 2):
                v_from = int(info[j])
                v_in = i
                price = int(info[j + 1])
                adj_matrix[v_from - 1][v_in - 1] = price
        # [print(x) for x in adj_matrix]
        return vertices_count, adj_matrix, int(start), int(end)
def main():
    # Read the graph description plus start/end vertices from in.txt, solve
    # the shortest path, and write the formatted route (or 'N') to out.txt.
    count, adj_matrix, start, end = FileParser().parse_file('in.txt')
    graph = Graph(count, adj_matrix)
    distance, prev = graph.dijkstra(start)
    answer = graph.get_route(distance, prev, start, end)
    with open('out.txt', 'w') as file:
        file.write(answer)
if __name__ == '__main__':
    main()
| lapakota/combinatorial_algorithms | 3_dijkstra/python/dijkstra.py | dijkstra.py | py | 2,968 | python | en | code | 1 | github-code | 13 |
from django import urls
from django.urls import path
from .views import (
doctor_dash_view,
doctor_profile_view,
doctor_patient_view,
doctor_search_view,
doctor_appointment_view,
doctor_schedule_view,
doctor_schedule_week_view,
doctor_support_view,
doctor_support_success_view,
doctor_consultation_view,
doctor_confirm_view,
doctor_edit_view,
)
# Doctor-facing routes; each maps a URL (relative to the prefix this module
# is included under) to its view and a reversible route name.
urlpatterns = [
    path('doctor-dash/', doctor_dash_view, name = "doctor-dash"),
    path('doctor-patient/', doctor_patient_view, name = "doctor-patient"),
    path('doctor-search/', doctor_search_view, name = "doctor-search"),
    path('doctor-consultations/', doctor_consultation_view, name = "doctor-consultations"),
    path('doctor-appointment/', doctor_appointment_view, name = "doctor-appointment"),
    # Scheduling (daily and weekly) views.
    path('doctor-schedule/', doctor_schedule_view, name = 'doctor-schedule'),
    path('doctor-schedule-week/', doctor_schedule_week_view, name = "schedule-week"),
    # Support request flow.
    path('doctor-support/', doctor_support_view, name = "doctor-support"),
    path('doctor-support-success/', doctor_support_success_view, name = "doctor-support-success"),
    path('doctor-profile/', doctor_profile_view, name = "doctor-profile"),
    path('doctor-confirm/', doctor_confirm_view, name = "doctor-confirm"),
    path('doctor-edit/', doctor_edit_view, name = "doctor-edit"),
]
| reskillamericans/Medical-Aid-Group1-BE | doctor/urls.py | urls.py | py | 1,374 | python | en | code | 1 | github-code | 13 |
from tkinter import N, E, W, S, StringVar, Tk
from tkinter import ttk
def calculate():
global feet
value = float(feet.get())
meters.set(f"{int(0.3048 * value):.2f}")
# Build the feet-to-meters converter window.
root = Tk()
root.title("Feet to Meters")
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
# Let the frame stretch with the window.
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
# Input entry bound to the `feet` variable read by calculate().
feet = StringVar()
feet_entry = ttk.Entry(mainframe, width=7, textvariable=feet)
feet_entry.grid(column=2, row=1, sticky=(W, E))
# Output label bound to the `meters` variable written by calculate().
meters = StringVar()
ttk.Label(mainframe, textvariable=meters).grid(column=2, row=2, sticky=(W, E))
ttk.Button(mainframe, text="Calculate", command=calculate).grid(
    column=3, row=3, sticky=W)
ttk.Label(mainframe, text="feet").grid(column=3, row=1, sticky=W)
ttk.Label(mainframe, text="is equivalent to").grid(column=1, row=2, sticky=E)
ttk.Label(mainframe, text="meters").grid(column=3, row=2, sticky=W)
# Uniform padding around every child widget.
for child in mainframe.winfo_children():
    child.grid_configure(padx=5, pady=5)
root.mainloop()
| NaveenRaphael/NPTEL_CS108-2023 | Week 12/tkinter_feet2m.py | tkinter_feet2m.py | py | 1,032 | python | en | code | 0 | github-code | 13 |
import numpy as np
import pywt
from statsmodels.robust import mad
def window_smooth(x, window_len=11, window='hanning'):
    """Smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the
    signal. The signal is prepared by introducing reflected copies of the
    signal (with the window size) at both ends so that transient parts are
    minimized at the beginning and end of the output signal.

    Parameters
    ----------
    x : 1D numpy array, shape (n_samples,)
        The input signal.
    window_len : int
        The dimension of the smoothing window.
    window : str
        The type of window from 'flat', 'hanning', 'hamming', 'bartlett',
        'blackman'; a flat window produces a moving average smoothing.

    Returns
    -------
    y : 1D numpy array, shape (n_samples,)
        The smoothed signal, same length as the input.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Pad with mirrored edges so the convolution is well defined at the ends.
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # Look the window function up on the numpy module instead of eval().
        w = getattr(np, window)(window_len)
    y = np.convolve(w / w.sum(), s, mode='valid')  # length: len(x) + window_len - 1
    # Bug fix: the original slice dropped one sample too few for odd window
    # lengths, returning len(x) + 1 samples. Take the centered len(x) samples.
    start = (window_len - 1) // 2
    return y[start:start + x.size]
def wavelet_smooth(x, wavelet="db4", level=1):
    """Smooth *x* using a discrete wavelet transform.

    Detail coefficients are soft-thresholded at the universal threshold
    sigma * sqrt(2 * log(n)), with sigma estimated as the median absolute
    deviation of the selected detail level, and the signal is rebuilt.

    Parameters
    ----------
    x : 1D numpy array, shape (n_samples,)
        The input signal.
    wavelet : str
        Discrete wavelet transform name.
    level : int
        Level of smooth.

    Returns
    -------
    y : 1D numpy array, shape (n_samples,)
        The smoothed signal.
    """
    # Decompose the signal into wavelet coefficients.
    coefficients = pywt.wavedec(x, wavelet, mode="per")
    # Universal threshold based on a robust noise estimate.
    sigma = mad(coefficients[-level])
    threshold = sigma * np.sqrt(2 * np.log(len(x)))
    coefficients[1:] = [
        pywt.threshold(detail, value=threshold, mode="soft")
        for detail in coefficients[1:]
    ]
    # Reconstruct the signal using the thresholded coefficients.
    return pywt.waverec(coefficients, wavelet, mode="per")
def fourier_smooth(x, frequency):
    """Low-pass filter *x* by zeroing real-FFT coefficients above *frequency*.

    Parameters
    ----------
    x : 1D numpy array, shape (n_samples,)
        The input signal.
    frequency : int
        Index of the first rFFT coefficient to zero out.

    Returns
    -------
    y : 1D numpy array, shape (n_samples,)
        The smoothed signal, same length as the input.

    Raises
    ------
    ValueError
        If *frequency* is not smaller than the number of rFFT coefficients.
    """
    # Transform signal to frequency domain.
    rft = np.fft.rfft(x)
    if frequency >= rft.shape[0]:
        raise ValueError("Frequency is not bigger than rft size")
    rft[frequency:] = 0
    # Bug fix: pass n=len(x) — without it irfft assumes an even-length
    # original and returns len(x) - 1 samples for odd-length input.
    return np.fft.irfft(rft, n=len(x))
import ctypes
# Shorthand aliases for the fixed-width integer types of the wire format.
c_uint32 = ctypes.c_uint32
c_uint16 = ctypes.c_uint16
c_uint8 = ctypes.c_uint8
# Maximum number of image payload bytes carried by one data packet.
MAX_BYTES = 56
class HABPacketImageSeqStart(ctypes.LittleEndianStructure):
    """Header packet announcing the start of an image transfer
    (little-endian, packed to match the radio wire layout)."""
    name = "HABPacketImageSeqStart"
    _pack_ = 1
    _fields_ = [
        ("packetType", c_uint16),
        ("imageFileID", c_uint8),
        ("gwID", c_uint8),
        ("fileSize", c_uint32)
    ]
class HABPacketImageSeqData(ctypes.LittleEndianStructure):
    """One data packet of an image transfer (little-endian, packed to match
    the radio wire layout). The original last line carried fused dataset
    metadata, which has been removed."""
    name = "HABPacketImageSeqData"
    _pack_ = 1
    _fields_ = [
        ("packetType", c_uint16),            # message type discriminator
        ("imageFileID", c_uint8),            # image file this chunk belongs to
        ("imageSeqNum", c_uint16),           # sequence number of this chunk
        ("imageDataLen", c_uint8),           # valid bytes in imageData
        ("gwID", c_uint8),                   # gateway identifier
        ("imageData", c_uint8 * MAX_BYTES)   # payload (up to MAX_BYTES bytes)
    ]
# https://web-programmist.ru/news/2/2.jpg
# Sorry, but this parody of the reference interface is the best I could do.
from tkinter import *
from tkinter.ttk import Combobox
# Discount-parameters form (Practical work 12, task 1).
root = Tk()
root.title("Практическая работа 12 №1")
root.geometry("1000x800+450+100")
root.resizable(height=False, width=False)
# Heading label.
# NOTE(review): grid's option is 'sticky'; 'stick' works only via Tcl's
# unambiguous option abbreviation — confirm and prefer sticky=.
zagalovok = Label(root, text='Параметры скидки', font=('Arial', 20), padx=20, pady=10, bd=1)
zagalovok.grid(row=0, column=0, stick='nw')
# "Active" checkbox row.
aktivnost = Label(root, text='Активность: ', font=('Arial', 14))
aktivnost2 = Checkbutton(root)
aktivnost.grid(row=1, column=0, stick='e')
aktivnost2.grid(row=1, column=1, stick='w')
# "Name" entry row.
nazvanie = Label(root, text='Название: ', font=('Arial', 14))
nazvanie2 = Entry(root, width=50)
nazvanie.grid(row=2, column=0, stick='e')
nazvanie2.grid(row=2, column=1, stick='w')
# "Site" combobox row.
sait = Label(root, text='Сайт: ', font=('Arial', 14))
sait2 = Combobox(root, values=('(s1) Моя компания', 'Что-то ещё'))
sait2.current(0)
sait.grid(row=3, column=0, stick='e')
sait2.grid(row=3, column=1, stick='w')
# "Discount type" combobox row.
period = Label(root, text='Тип скидки: ', font=('Arial', 14))
period2 = Combobox(root, values=('В процентах', 'Что-то ещё'))
period2.current(0)
period.grid(row=4, column=0, stick='e')
period2.grid(row=4, column=1, stick='w')
# "Discount amount" entry row.
Velich_skidki = Label(root, text='Величина скидки: ', font=('Arial', 14))
Velich_skidki2 = Entry(root, width=20)
Velich_skidki.grid(row=5, column=0, stick='e')
Velich_skidki2.grid(row=5, column=1, stick='w')
# "Discount currency" combobox row.
valuta = Label(root, text='Валюта скидки: ', font=('Arial', 14))
valuta2 = Combobox(root, values=('RUB', 'Что-то ещё'), width= 5)
valuta2.current(0)
valuta.grid(row=6, column=0, stick='e')
valuta2.grid(row=6, column=1, stick='w')
# "Maximum discount amount" row (0 means unlimited), pre-filled with 0.
max_summa_skidki = Label(root, text='Максимальная сумма скидки (в валюте скидки; \n'
                                   '0 - скидка не ограничена) : ', font=('Arial', 12))
max_summa_skidki2 = Entry(root, width=20)
max_summa_skidki2.insert(0,'0')
max_summa_skidki.grid(row=7, column=0, stick='e')
max_summa_skidki2.grid(row=7, column=1, stick='w')
# "Short description" row.
opisanie = Label(root, text='Краткое описание (до 255 символов): ', font=('Arial', 14))
opisanie2 = Entry(root, width=50)
opisanie.grid(row=8, column=0, stick='ne')
opisanie2.grid(row=8, column=1, stick='wens')
# Column/row sizing so the form fills the fixed window.
root.grid_columnconfigure(0, minsize=400)
root.grid_columnconfigure(1, minsize=400)
root.grid_rowconfigure(0, minsize=100)
root.grid_rowconfigure(8, minsize=200)
root.mainloop()
| PavelFedkov/Proj_1sem_Fedkov | PZ_12_var26/PZ_12_1.py | PZ_12_1.py | py | 2,668 | python | ru | code | 0 | github-code | 13 |
# Author: Evan Wiederspan <evanw@alleninstitute.org>
import unittest
import numpy as np
from itertools import permutations
from aicsimageprocessing.alignMajor import (
align_major,
get_align_angles,
get_major_minor_axis,
angle_between,
)
class TestAlignMajor(unittest.TestCase):
    """Tests for alignMajor's axis alignment of 4D (CZYX) binary images."""

    def setUp(self):
        # binary CZYX image with major along x, minor along z
        self.testCube = np.zeros((3, 10, 10, 10))
        self.testCube[:, 5, 5, :] = 1
        self.testCube[:, 5, 0:5, 5] = 1
        self.testCube[:, 6, 5, 5] = 1

    def getRandAxes(self):
        """
        Helper function to get random arrangement of 'xyz'
        """
        return "".join(np.random.permutation(["x", "y", "z"]).tolist())

    def test_angleBetween(self):
        # Identical and orthogonal vector pairs, plus input validation.
        self.assertEqual(
            angle_between(np.array([0, 1]), np.array([0, 1])), 0, "0 degree check"
        )
        self.assertEqual(
            angle_between(np.array([0, 1]), np.array([1, 0])), 90, "90 degree check"
        )
        with self.assertRaises(ValueError, msg="Must take 1d numpy arrays as input"):
            angle_between(np.ones((2, 2)), np.ones((2, 2)))

    def test_getMajorMinorAxis(self):
        # binary CZYX image with major along x axis
        test = np.zeros((3, 10, 10, 10))
        test[:, 5, 5, :] = 1
        # major axis should be parallel to x axis
        major, minor = get_major_minor_axis(test)
        self.assertTrue(
            angle_between(major, np.array([1, 0, 0])) < 1, msg="Major Axis Pre-rotation"
        )

    def test_alignMajorInputs(self):
        # Rejects non-4D images and malformed axis strings.
        with self.assertRaises(ValueError, msg="img must be 4d numpy array"):
            align_major([[1]], "xyz")
        with self.assertRaises(ValueError, msg="axis must be arrangement of 'xyz'"):
            align_major(self.testCube, "aaa")

    def test_alignMajorAlignment(self):
        # After rotation the major axis must land on the first requested
        # axis and the minor axis on the last, for every 'xyz' permutation.
        a_map = {"x": 0, "y": 1, "z": 2}
        # try every alignment possibility
        for axes in list("".join(p) for p in permutations("xyz")):
            angles = get_align_angles(self.testCube, axes)
            res = align_major(self.testCube, angles)
            major, minor = get_major_minor_axis(res)
            self.assertTrue(
                np.argmax(np.abs(major)) == a_map[axes[0]],
                "Major aligned correctly rotating to " + axes,
            )
            self.assertTrue(
                np.argmax(np.abs(minor)) == a_map[axes[-1]],
                "Minor aligned correctly rotating to " + axes,
            )

    def test_alignMajorReshape(self):
        # With reshape disabled, the output keeps the input's shape.
        axes = self.getRandAxes()
        angles = get_align_angles(self.testCube, axes)
        res = align_major(self.testCube, angles, False)
        self.assertEqual(
            self.testCube.shape,
            res.shape,
            "Shape stays constant when not reshaping with axes " + axes,
        )

    def test_alignMajorMultiple(self):
        # Passing a list applies one shared set of angles to every image.
        axes = self.getRandAxes()
        angles = get_align_angles(self.testCube, axes)
        res = align_major([self.testCube, self.testCube], angles)
        self.assertTrue(len(res) == 2, "Output same number of images as passed in")
        self.assertTrue(
            np.array_equal(res[0], res[1]), "Multiple images rotated by same amount"
        )

    def test_getAnglesNDim(self):
        # Angle computation must be invariant to leading channel/time axes.
        axes = self.getRandAxes()
        test3d = np.mean(self.testCube, axis=0)
        test4d = self.testCube
        test5d = np.expand_dims(self.testCube, axis=0)
        self.assertEqual(
            get_align_angles(test3d, axes),
            get_align_angles(test4d, axes),
            "Angles for 3d image equal 4d",
        )
        self.assertEqual(
            get_align_angles(test4d, axes),
            get_align_angles(test5d, axes),
            "Angles for 4d image equal 5d",
        )

    def test_alignMajorNDim(self):
        axes = self.getRandAxes()
        # create a 3d, 4d, and 5d test image
        # rotate them all and compare the last 3 dimensions for each
        # They should all be the same
        test3d = np.mean(self.testCube, axis=0)
        test4d = self.testCube
        test5d = np.expand_dims(self.testCube, axis=0)
        angles = get_align_angles(test5d, axes)
        align_major([test3d, test4d, test5d], angles)
        self.assertTrue(
            np.array_equal(test3d, test4d[0]), "3d equals 4d image after rotate"
        )
        self.assertTrue(
            np.array_equal(test4d[0], test5d[0, 0]), "4d equals 5d image after rotate"
        )
| AllenCellModeling/aicsimageprocessing | aicsimageprocessing/tests/test_AlignMajor.py | test_AlignMajor.py | py | 4,491 | python | en | code | 2 | github-code | 13 |
7956280471 | # Dependencies
from flask import Flask, render_template, jsonify, redirect
import pymongo
from pymongo import MongoClient
import scrape_mars
# Flask setup
app = Flask(__name__)
conn = "mongodb://rc:C00k1eBaba@ds143245.mlab.com:43245/heroku_n5qzr3nx"
# client = MongoClient("mongodb://localhost:27017")
# conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
# db = client.mars_db
db = client.heroku_n5qzr3nx
collection = db.mars
@app.route("/")
def index():
    """Render the landing page with the most recent Mars scrape results.

    Reads the single document from the `mars` collection -- presumably
    populated by a prior visit to /scrape; if nothing has been scraped yet
    the template receives None (TODO confirm the template tolerates that).
    """
    mars = db.mars.find_one()
    return render_template("index.html", mars=mars)
@app.route('/scrape')
def scrape():
    """Run the scraper and upsert its output into the `mars` collection.

    NOTE(review): `Collection.update(..., upsert=True)` is the legacy pymongo
    API; on pymongo >= 4 this call no longer exists (use `replace_one`) --
    confirm the deployed pymongo version.  The redirect target is hard-coded
    to the local development server.
    """
    mars = db.mars
    data = scrape_mars.scrape()
    print(data)
    mars.update({}, data, upsert=True)
    return redirect("http://localhost:5000/", code=302)
if __name__ == "__main__":
    # BUGFIX: app.run() blocks until the server shuts down, so any statement
    # placed after it never takes effect while the app is serving.  Enable
    # Jinja template auto-reload BEFORE starting the server.
    app.jinja_env.auto_reload = True
    app.run(debug=True)
app.config['TEMPLATES_AUTO_RELOAD'] = True | ruchichandra/Mission-to-Mars | app.py | app.py | py | 905 | python | en | code | 0 | github-code | 13 |
import requests
import string

print ("Vul een wachtwoord in om te checken")
pwd = input()

# Online word list of known weak passwords.  The candidate password is
# compared against each entry as an EXACT line match: the old substring
# check (`text.find(pwd)`) wrongly flagged any password that merely occurred
# inside a longer list entry.
pwd_lijst = requests.get('https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Common-Credentials/worst-passwords-2017-top100-slashdata.txt')
gevonden = pwd in (regel.strip() for regel in pwd_lijst.text.splitlines())

# Individual strength checks.
letter_test = not pwd.islower() and not pwd.isupper()  # mixed upper/lower case
lengte_test = len(pwd)
# BUGFIX: the old code stringified a tuple of digits and iterated over the
# resulting "(0, 1, ...)" text, so '(', ')', ',' and ' ' also counted as
# digits.  Only real digit characters count now.
cijfer_test = any(teken in string.digits for teken in pwd)
leestekentest = set(string.punctuation)

# Report the first failed requirement (or success).
if gevonden:
    print("Het wachtwoord is gevonden in de lijst met onveilige wachtwoorden.")
elif not letter_test:
    print ("Het wachtwoord bevat geen hoofdletter en/of kleine letter.")
elif lengte_test < 10:
    print ("Het wachtwoord voldoet niet aan de minimale lengte.")
elif not cijfer_test:
    print("Het wachtwoord heeft geen cijfer(s).")
elif any(teken in leestekentest for teken in pwd):
    print("Het wachtwoord voldoet aan alle eisen.")
else:
    print ("Het wachtwoord heeft geen leestekens.")
| rouwens/S2-Applicatie | Oefen challanges/Password checker/password_checker.py | password_checker.py | py | 1,558 | python | nl | code | 0 | github-code | 13 |
30140438590 | from functools import wraps
from flask import request
import logging
from flask_restful import Resource, abort
from app.app import socketio, api
from app.models import *
logger = logging.getLogger(__name__)
#######################
# API Decorators
#######################
def authenticate_api(func):
    """Decorator: require a valid ``apikey`` query parameter.

    The key is looked up in the ApiKey table; requests with a missing or
    unknown key are rejected with HTTP 401 via Flask-RESTful's abort().
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            # Get apikey and check it against the database
            apikey = request.args['apikey']
            found_key = ApiKey.query.filter_by(key=apikey).scalar()
            if found_key is not None:
                # If valid, return
                return func(*args, **kwargs)
            # If invalid, abort
            logger.warning("authenticate_api: abort 401")
            abort(401)
        except KeyError:
            # KeyError from request.args: the apikey parameter was not passed at all
            logger.warning("authenticate_api KeyError: abort 401", exc_info=True)
            abort(401)
    return wrapper
def validate_api_scraper_key(func):
    """Decorator: verify that ``scraperKey`` belongs to the caller's organization.

    Requires both ``apikey`` and ``scraperKey`` query parameters.  The wrapped
    view only runs when the scraper's organization matches the API key's
    organization.  On any validation failure a JSON-serializable dict with
    ``success=False`` is returned (note: HTTP 200, not a 4xx abort).
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Default failure payload, returned for every validation problem.
        rdata = {'success': False,
                 'message': ""
                 }
        try:
            organization_id = None
            api_key = request.args['apikey']
            if 'scraperKey' in request.args:
                scraper_key = request.args['scraperKey']
                # Check that the apikey has acccess to the sensor
                organization_id = Scraper.query.filter_by(key=scraper_key).scalar().group.organization.id
            else:
                rdata['message'] = "Missing scraper key"
                return rdata
            key_organization_id = ApiKey.query.filter_by(key=api_key).scalar().organization_id
            if key_organization_id == organization_id:
                # The api key and sensor/group both belong to the same user
                return func(*args, **kwargs)
            else:
                logger.warning("Invalid scraper key")
                rdata['message'] = "Invalid scraper key"
        except AttributeError:
            # .scalar() returned None for an unknown key, so the attribute
            # chain (.group / .organization_id) blew up -- treated as invalid.
            logger.warning("Invalid scraper key", exc_info=True)
            rdata['message'] = "Invalid scraper key"
        except Exception:
            logger.exception("Oops, somthing went wrong when validating your scraper")
            rdata['message'] = "Oops, somthing went wrong when validating your scraper"
        return rdata
    return wrapper
#######################
# API Endpoints
#######################
class APIScraperLogging(Resource):
method_decorators = [validate_api_scraper_key, authenticate_api]
def post(self):
rdata = {'success': False,
'message': ""
}
client_data = request.args
log_data = request.form
log = ScraperLog()
log.run_uuid = client_data['scraperRun']
log.exc_info = log_data['exc_info']
log.exc_text = log_data['exc_text']
log.filename = log_data['filename']
log.func_name = log_data['funcName']
log.level_name = log_data['levelname']
log.level_no = log_data['levelno']
log.line_no = log_data['lineno']
log.message = log_data['message']
log.module = log_data['module']
log.name = log_data['name']
log.pathname = log_data['pathname']
log.process = log_data['process']
log.process_name = log_data['processName']
log.relative_created = log_data['relativeCreated']
log.stack_info = log_data['stack_info']
log.thread = log_data['thread']
log.thread_name = log_data['threadName']
db.session.add(log)
db.session.commit()
data = [{'rowId': client_data['scraperRun'],
'scraperKey': client_data['scraperKey'],
'groupId': log.scraper_run.scraper.group.id,
}]
if log.level_name == 'CRITICAL':
data[0]['criticalCount'] = 1
if log.level_name == 'ERROR':
data[0]['errorCount'] = 1
if log.level_name == 'WARNING':
data[0]['warningCount'] = 1
socketio.emit('data-scrapers',
{'data': data, 'action': 'increment'},
namespace='/data/scrapers/{env}/'.format(env=client_data['environment'].lower()),
room='organization-{org_id}'.format(org_id=log.scraper_run.scraper.group.organization_id)
)
rdata['success'] = True
rdata['message'] = ""
return rdata
class APIScraperDataStart(Resource):
method_decorators = [validate_api_scraper_key, authenticate_api]
def post(self):
rdata = {'success': False,
'message': ""
}
client_data = request.args
data = request.json
group_id = Scraper.query.filter_by(key=client_data['scraperKey']).scalar().group_id
run = ScraperRun()
run.scraper_key = client_data['scraperKey']
run.group_id = group_id
run.environment = client_data.get('environment')
run.uuid = client_data['scraperRun']
run.start_time = data['startTime']
db.session.add(run)
db.session.commit()
data = [run.serialize]
socketio.emit('data-scrapers',
{'data': data, 'action': 'start'},
namespace='/data/scrapers/{env}/'.format(env=client_data['environment'].lower()),
room='organization-{org_id}'.format(org_id=run.scraper.group.organization_id)
)
rdata['success'] = True
rdata['message'] = ""
return rdata
class APIScraperDataStop(Resource):
method_decorators = [validate_api_scraper_key, authenticate_api]
def post(self):
rdata = {'success': False,
'message': ""
}
client_data = request.args
data = request.json
run = ScraperRun.query.filter_by(uuid=client_data['scraperRun']).scalar()
run.total_urls_hit = data.get('totalUrls')
run.ref_data_count = data.get('refDataCount')
run.ref_data_success_count = data.get('refDataSuccessCount')
run.num_rows_added_to_db = data.get('rowsAddedToDb')
run.stop_time = datetime.datetime.strptime(data['stopTime'], "%Y-%m-%d %H:%M:%S.%f")
# Calc runtime and get counts
runtime = run.stop_time - run.start_time
run.runtime = runtime.total_seconds()
counts = ScraperLog.query.filter_by(run_uuid=client_data['scraperRun'])
run.critical_count = counts.filter_by(level_name='CRITICAL').count()
run.error_count = counts.filter_by(level_name='ERROR').count()
run.warning_count = counts.filter_by(level_name='WARNING').count()
run.url_error_count = ScraperUrlError.query.filter_by(run_uuid=client_data['scraperRun']).count()
db.session.commit()
data = [run.serialize]
socketio.emit('data-scrapers',
{'data': data, 'action': 'stop'},
namespace='/data/scrapers/{env}/'.format(env=client_data['environment'].lower()),
room='organization-{org_id}'.format(org_id=run.scraper.group.organization_id)
)
rdata['success'] = True
rdata['message'] = ""
return rdata
class APIScraperErrorUrl(Resource):
method_decorators = [validate_api_scraper_key, authenticate_api]
def post(self):
rdata = {'success': False,
'message': ""
}
client_data = request.args
data = request.json
url_error = ScraperUrlError()
url_error.run_uuid = client_data['scraperRun']
url_error.num_tries = data.get('numTries')
url_error.reason = data.get('reason')
url_error.ref_id = data.get('ref_id')
url_error.ref_table = data.get('ref_table')
url_error.status_code = data.get('statusCode')
url_error.thread_name = data.get('threadName')
url_error.url = data.get('url')
db.session.add(url_error)
db.session.commit()
data = [{'rowId': client_data['scraperRun'],
'urlErrorCount': 1
}]
socketio.emit('data-scrapers',
{'data': data, 'action': 'increment'},
namespace='/data/scrapers/{env}/'.format(env=client_data['environment'].lower()),
room='organization-{org_id}'.format(org_id=url_error.scraper_run.scraper.group.organization_id)
)
rdata['success'] = True
rdata['message'] = ""
return rdata
# Logs from the python logging HTTPHandler
api.add_resource(APIScraperLogging, '/logs')
# General data about the scraper
api.add_resource(APIScraperDataStart, '/data/start')
api.add_resource(APIScraperDataStop, '/data/stop')
# Scraper errors that are not logs
api.add_resource(APIScraperErrorUrl, '/error/url')
| xtream1101/scraper-monitor | app/api.py | api.py | py | 8,957 | python | en | code | 0 | github-code | 13 |
25736029151 | #!/usr/bin/env python3
#coding: utf-8
import openai
from openai import OpenAI
import json
import tiktoken
import random
import cgi
import sys
import random
from image_generation import get_image_for_line
from sound_generation import get_audio_for_line
prompts = list()
# OPENAI SETUP
# path to file with authentication key
with open('apikey.txt') as infile:
apikey = infile.read()
# The model shoould try to follow this sort-of meta-instruction
system_message = "You are an author of short stories."
# This is the limit of the model
model_max_tokens = 2048
# How many tokens to generate max
max_tokens = 500
# Model identifier
model = "gpt-3.5-turbo"
def generate_with_openai(messages):
    """Call the chat-completions endpoint and return the generated text.

    Uses the module-level ``client``, ``model`` and ``max_tokens``.  On a
    BadRequestError (assumed to be a context-length overflow) it mutates the
    caller's ``messages`` list in place -- dropping index 3 each time -- and
    retries.  Any other exception is printed and its string form is returned
    instead of generated text.
    """
    # Debug output
    # print('MESSAGES:', *messages ,sep='\n')
    # https://platform.openai.com/docs/guides/chat/introduction
    ok = False
    while not ok:
        try:
            response = client.chat.completions.create(
                model = model,
                messages = messages, # this one only for chat
                max_tokens = max_tokens,
                temperature = 1,
                top_p = 1,
                stop = [], # can be e.g. stop = ['\n']
                presence_penalty = 0,
                frequency_penalty = 0,
                logit_bias = {},
                user = "pribehy",
            )
            ok = True
            result = response.choices[0].message.content
        except openai.BadRequestError:
            # assume this is because max length is exceeded
            # keep the system message, the prompt and the story title
            # keep removing from the third message
            # TODO do this in a more clever way!
            # explicitly check number of tokens and cut it!
            # print(openai.InvalidRequestError)
            # NOTE(review): if messages ever shrinks to fewer than 4 entries,
            # pop(3) raises IndexError outside the try and escapes -- confirm.
            messages.pop(3)
        except Exception as e:
            # NOTE(review): `type`, `value` and `traceback` shadow builtins here.
            type, value, traceback = sys.exc_info()
            print("EXCEPTION", e, type, value, traceback, sep="\n")
            result = str(e)
            ok = True
    if result == '':
        # end of text
        print('Nothing was generated, maybe the model assumes that this is a good ending and there is nothing more to add.')
    return result
def append_message_user(messages, message):
    """Record *message* in the chat history as a turn spoken by the user.

    Mutates *messages* in place; returns None.
    """
    turn = {"role": "user", "content": message}
    messages.append(turn)
def append_message_assistant(messages, message):
    """Record *message* in the chat history as a turn spoken by the assistant.

    Mutates *messages* in place; returns None.
    """
    turn = {"role": "assistant", "content": message}
    messages.append(turn)
def get_image(title, text):
    """Generate an illustration for one passage of the story.

    Asks the chat model for an English scene description, feeds that
    description to the Stable Diffusion wrapper, and returns the relative
    path of the produced image file.  Side effects: appends every prompt used
    to the module-level ``prompts`` list and reads the module-level ``seed``
    so images are reproducible across requests with the same seed.
    """
    system_message = "You are a skilled artist drawing comics images."
    prompt = f'You are generating images for a story titled "{title}". Generate an English description of an image for an image generator. The image should depict a scene illustrating the following part of a story: {text}'
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": prompt},
    ]
    prompts.append(f"SYSTEM MESSAGE FOR {model}: {system_message}")
    prompts.append(f"PROMPT FOR {model}: {prompt}")
    image_description = generate_with_openai(messages)
    prompts.append(f"PROMPT FOR StableDiffusion v1-5: {image_description}")
    image_filename = get_image_for_line(image_description, seed)
    return f"genimgs/{image_filename}"
# MAIN
print("Content-type: text/html")
print()
# openai
try:
client = OpenAI(api_key=apikey)
except Exception as e:
print(e)
# next word choices
CHOICES=5
FIRST_CHOICES=15
nouns = list()
with open('nouns.txt') as infile:
for line in infile:
nouns.append(line.strip())
# read in params
form = cgi.FieldStorage()
seed = int(form.getvalue("seed", random.randint(0, 10000000)))
title = form.getvalue("title")
prompt = form.getvalue("prompt")
word = form.getvalue("word")
end = form.getvalue("end")
messages_initial = [
{"role": "system", "content": system_message},
]
messages_passed = form.getvalue("messages")
if messages_passed:
messages = json.loads(messages_passed)
else:
messages = messages_initial
base_title = "Nech si vygenerovat příběh na přání!"
first_sentence = "Vygeneruj první větu příběhu."
text_for_audio = ""
if not prompt:
# welcome screen
title = base_title
sentence = ""
hint = "O čem by měl být vygenerovaný příběh?"
prompt = "Vygeneruj název příběhu, ve kterém se vyskytne "
words = random.choices(nouns, k=FIRST_CHOICES)
text_for_audio = f"{base_title} {hint}"
else:
if end:
message = 'Vygeneruj konec příběhu.'
else:
message = prompt + word
append_message_user(messages, message)
prompts.append(f"SYSTEM MESSAGE FOR {model}: {system_message}")
prompts.append(f"PROMPT FOR {model}: {message}")
if title == base_title:
# first generate the title
title = generate_with_openai(messages)
title = title.replace('"', '')
title = title.replace("'", '')
append_message_assistant(messages, title)
append_message_user(messages, first_sentence)
text_for_audio = f"{title}. . . . "
prompts.append(f"PROMPT FOR {model}: {first_sentence}")
# generate a continuation
sentence = generate_with_openai(messages)
text_for_audio += sentence
append_message_assistant(messages, sentence)
# next
hint = "Co by se mělo nyní v příběhu objevit?"
prompt = "Vygeneruj další větu příběhu, ve které se vyskytne "
words = random.choices(nouns, k=CHOICES)
if sentence:
display_mode_classes = ""
image = f"<img id='story-img' src='{get_image(title, sentence)}'>"
konec = f'<input type="submit" class="button small autowidth highlighted" name="end" value="✔ konec">'
else:
display_mode_classes = "vertical centered-content"
image = ""
konec = ""
if end:
display_mode_classes = "vertical centered-content"
if text_for_audio:
sound = f"""
<audio autoplay controls>
<source src="{get_audio_for_line(text_for_audio)}" type="audio/mpeg">
</audio>"""
else:
sound=""
# Result
# TODO určitě tohle přesunout to extra souboru a použít na to HTML template s proměnnýma
print(f"""
<!DOCTYPE html>
<html lang="cs">
<head>
<title>{title}</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta http-equiv="X-UA-Compatible" content="IE=edge, chrome=1">
<link href='https://fonts.googleapis.com/css?family=Questrial|Open Sans' rel='stylesheet'>
<link rel="stylesheet" href="../common.css">
<link rel="stylesheet" href="../style_menu.css">
<link rel="stylesheet" href="./css/styles.css">
<script defer src="../!scripts/script_menu.js"></script>
</head>
<body>
<div class="menu-header">
<button id="menu-btn" onclick="showMenu()">
<svg width="60" height="60" viewBox="0 0 50 50" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M 13.185675,15.139108 H 37.115233" stroke="#ffffff" stroke-width="3.57143" stroke-linecap="round" id="path4" />
<path d="M 13.185675,24.906275 H 37.115233" stroke="#ffffff" stroke-width="3.57143" stroke-linecap="round" id="path6" />
<path d="M 13.185675,34.673441 H 37.115233" stroke="#ffffff" stroke-width="3.57143" stroke-linecap="round" id="path8" />
</svg>
</button>
</div>
<div id="menu-items" class="hide" >
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
<div class="menu-item">
<a class="menu-href" href="#"></a>
</div>
</div>
<div id='cover' onclick='unshow()'>
</div>
<div id='prev-button' class='nav-arrow' onclick='history.back()'>
<img src='../!assets/img/arrow_left_white.png'>
</div>
<div id="content">
<h1 class="nadpis">{title}</h2>
<div class="horizontal-cells offset-bottom {display_mode_classes}">
<div class="cell left">
{image}
</div>
<div class="cell right">
<p id="story-text" class="offset-bottom">
{sentence}
</p>
{sound}
</div>
</div>""")
# Next
if end:
# End
print(f"""
<h2 class="offset-bottom">KONEC</h2>
<a class="icon-text-link offset-bottom" href="./index.html">
<div id="home-button" class="nav-arrow static">
<img src="../!assets/img/home.webp">
</div>
<h3>
Zpět k sekci Příběhy
</h3>
</a>""")
else:
# Continue
print(f"""
<h3>{hint}</h3>
<div class="button-form-holder">
<form method="post">
<input type="hidden" name="seed" value="{seed}">
<input type="hidden" name="messages" value='{json.dumps(messages)}'>
<input type="hidden" name="title" value="{title}">
<input type="hidden" name="prompt" value="{prompt}">
{' '.join([ f'<input type="submit" class="button small" name="word" value="{word}">' for word in words])}
{konec}
</form>
</div>""")
# Prompts
'''
print(f"""
<hr>
<kbd>{'<br>'.join(prompts)}</kbd>
</div>
</body>
</html>
""")
'''
# TODO možná přidat i možnost dostat jiný pokračování nebo jiný slova...
# TODO možná nějak zobrazovat celou story nebo nějak umožnit se vrátit zpět
| ufal/didaktikon | exponat/sekce/pribehy/story.py | story.py | py | 10,366 | python | en | code | 0 | github-code | 13 |
class colorborder(object):
    """Solver for LeetCode 1034 "Coloring A Border".

    Flood-fills the connected component containing (r0, c0) and recolors
    only the component's border cells with the requested color.  During the
    DFS, border cells are temporarily marked by negating their value; a
    final pass rewrites every negative cell with the new color.
    """

    def __init__(self):
        # BUGFIX: the original `_init_` (single underscores) was never called
        # by Python and its body only *read* the attributes, which would have
        # raised AttributeError.  The per-call state is (re)initialized here
        # and overwritten by colorBorder().
        self.R = 0
        self.C = 0
        self.visited = []
        self.finalColor = 0

    def colorBorder(self, grid, r0, c0, color):
        """Return *grid* (mutated in place) with the border of the connected
        component at (r0, c0) recolored to *color*."""
        self.R = len(grid)
        self.C = len(grid[0])
        self.finalColor = color
        self.visited = [[False] * self.C for _ in range(self.R)]
        self.dfs(grid, r0, c0, grid[r0][c0])
        # Border cells were negated during the DFS; give them the new color.
        for r in range(self.R):
            for c in range(self.C):
                if grid[r][c] < 0:
                    grid[r][c] = self.finalColor
        return grid

    def dfs(self, grid, r0, c0, targetColor):
        """Depth-first flood fill; negates cells that lie on the border."""
        if r0 >= self.R or r0 < 0 or c0 >= self.C or c0 < 0:
            return
        if self.visited[r0][c0]:
            return
        # abs() so cells already negated (marked as border) still compare as
        # part of the component; visited cells are filtered above anyway.
        if abs(grid[r0][c0]) != targetColor:
            return
        self.visited[r0][c0] = True
        self.dfs(grid, r0, c0 - 1, targetColor)
        self.dfs(grid, r0, c0 + 1, targetColor)
        self.dfs(grid, r0 - 1, c0, targetColor)
        self.dfs(grid, r0 + 1, c0, targetColor)
        if self.isborder(grid, r0, c0, targetColor):
            grid[r0][c0] = -grid[r0][c0]

    def isborder(self, grid, r0, c0, targetColor):
        """A cell is on the border if it touches the grid edge or any
        4-neighbour outside the connected component."""
        if r0 == self.R - 1 or r0 == 0 or c0 == self.C - 1 or c0 == 0:
            return True
        # BUGFIX: compare absolute values -- neighbours already negated by the
        # DFS are still part of the component; the old code treated them as
        # foreign and wrongly recolored interior cells.
        left = abs(grid[r0][c0 - 1])
        top = abs(grid[r0 - 1][c0])
        right = abs(grid[r0][c0 + 1])
        # BUGFIX: the old code read grid[r0 - 1][c0] here (duplicating `top`),
        # so the cell below was never inspected at all.
        down = abs(grid[r0 + 1][c0])
        return (left != targetColor or top != targetColor
                or right != targetColor or down != targetColor)
ob = colorborder()
print(ob.colorBorder([[1,1],[1,2]], 0, 0, 3)) | soniaarora/Algorithms-Practice | Solved in Python/LeetCode/arrays/colorBorder.py | colorBorder.py | py | 1,623 | python | en | code | 0 | github-code | 13 |
15725729270 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Part 1-1: read sample.txt and collect the words longer than four letters.
sampleFile = open("sample.txt", "r")
rsampleFile = sampleFile.read()
sampleFile.close()
pun=[',','?','0','1','2','-','/','\n','"','.' ]
for a in pun:
    rsampleFile=rsampleFile.replace(a," ")# strip punctuation, digits and newlines
wordList = rsampleFile.split(" ")
sampleWordList = [ ]
for h in wordList:
    if len(h) > 4:
        sampleWordList.append(h)# keep only words of five or more letters
# Reminder: Python indents four spaces per level; never mix spaces and tabs.
print(sampleWordList)
# Part 1-2: interactive lookup loop (runs forever by design -- num never changes).
# NOTE(review): membership is checked against wordList (every word), not the
# filtered sampleWordList -- confirm that is intended.
num = 1
while num==1:
    quest = input("請輸入五個字母以上的英文字")
    if len(quest)<=4:
        print("字數不足")
    elif quest in wordList:
        print("清單裡有這個單字")
    else:
        print("清單裡沒有這個單字")
| PeterWolf-tw/ESOE-CS101-2016 | homework01_b05505052.py | homework01_b05505052.py | py | 805 | python | en | code | 15 | github-code | 13 |
16007323850 | import os
import code
import time
import torch
from collections import namedtuple
import nimblephysics as nimble
from solver.envs.rigidbody3d.sapien_viewer import SapienViewer
from sapien.core import Pose
import numpy as np
import cv2
import gym.spaces
import transforms3d.euler
import transforms3d.quaternions
from solver.envs.rigidbody3d.utils import exp_coord2angle_axis, write_text
Actor = namedtuple('Actor', ['urdf', 'nimble_actor', 'sapien_actor'])
class Rigid3dSim:
def __init__(self, dt=0.001, frame_skip=0,
resolution=(1366, 1024), distance=3, gravity=-9.81
) -> None:
# nimble simulation world
self.world = nimble.simulation.World()
self.world.setGravity([0, gravity, 0])
self.world.setTimeStep(dt)
# sapien viewer
self.viewer = SapienViewer(resolution, distance)
self.resolution = resolution
# constants
self.dt = dt
self.frame_skip = frame_skip
self.actors = []
self.state = None
# self.enforce_limit = lambda world, x: x
def load_urdf(self, urdf_path, restitution_coeff=1, friction_coeff=0, mass=None, inertia=None):
ASSETDIR = (os.path.join(os.path.split(os.path.realpath(__file__))[0], "assets"))
urdf_path = f"{ASSETDIR}/{urdf_path}"
# sapien load
sapien_actor = self.viewer.load_urdf(urdf_path)
# print("sapien load successful")
# nimble load
nimble_actor = self.world.loadSkeleton(urdf_path)
for bodyNode in nimble_actor.getBodyNodes():
bodyNode.setRestitutionCoeff(restitution_coeff)
bodyNode.setFrictionCoeff(friction_coeff)
if mass is not None:
# nimble mass is 1 by default
bodyNode.setMass(mass)
if inertia is not None:
bodyNode.setMomentOfInertia(*inertia)
# save
actor = Actor(urdf_path, nimble_actor, sapien_actor)
self.actors.append(actor)
return actor
def update_viewer(self):
# self.viewer.scene.step()
for actor in self.actors:
pos = actor.nimble_actor.getPositions()
if actor.sapien_actor.dof > 0:
# handle nimble load arm
actor.sapien_actor.set_qpos(pos)
elif len(pos) > 0:
# handle nimble load robot with no joint
initpose = Pose(
p=[0, 0, 0],
q=transforms3d.euler.euler2quat(
np.pi / 2, 0, -np.pi / 2
)
)
theta, omega = exp_coord2angle_axis(pos[:3])
inputpose = Pose(
p=pos[3:],
q=transforms3d.quaternions.axangle2quat(
vector=omega, theta=theta
)
)
actor.sapien_actor.set_pose(
initpose.transform(inputpose)
)
self.viewer.scene.update_render()
if self.viewer.window is not None:
self.viewer.window.render()
def nimble_loop(self, states):
gui = nimble.NimbleGUI(self.world)
gui.serve(8080) # host the GUI on localhost:8080
gui.loopStates(states) # tells the GUI to animate our list of states
gui.blockWhileServing() # block here so we don't exit the program
def sapien_loop(self):
""" hold sapien window open """
while not self.viewer.window.closed:
self.update_viewer()
def get_init_state(self):
return torch.zeros(self.world.getStateSize())
def set_init_state(self, state):
self.state = state
self.world.setState(self.state.detach().cpu().numpy())
# print("set_init_state", self.state)
def reset(self):
init_state = self.get_init_state()
self.set_init_state(init_state)
return self.state
def get_reward(self, state, action, next_state):
return 0
def get_assist(self):
return 0
# return torch.tensor(
# self.arm.nimble_actor.getInverseDynamics(
# torch.zeros(self.world.getActionSize())
# )
# )
def step(self, action):
assist = self.get_assist()
next_state = nimble.timestep(self.world, self.state, action + assist)
# next_state = self.enforce_limit(self.world, next_state)
for _ in range(self.frame_skip):
assist = self.get_assist()
next_state = nimble.timestep(self.world, next_state, action + assist)
# next_state = self.enforce_limit(self.world, next_state)
reward = self.get_reward(self.state, action, next_state)
self.state = next_state
return self.state, reward, False, None
def render(self, text=None):
self.update_viewer()
img = self.viewer.take_picture()[:, :, :3]
return write_text(img, text, fontsize=int(24/1024*self.resolution[1]))
@property
def action_space(self):
return gym.spaces.Box(-1, 1, (self.world.getActionSize(), ))
@property
def observation_space(self):
return gym.spaces.Box(-1, 1, (self.world.getStateSpace(), )) | haosulab/RPG | solver/envs/rigidbody3d/rigid3d_simulator.py | rigid3d_simulator.py | py | 5,235 | python | en | code | 18 | github-code | 13 |
986372030 | import re
from django.conf import settings
from django.core.management.base import BaseCommand
from openpyxl import load_workbook
from painter.models import Card
class Command(BaseCommand):
help = ('Clears the database of cards, then fills it with the contents of one or' +
' more specified XLSX files.')
def add_arguments(self, parser):
parser.add_argument(
'filenames',
nargs='*',
type=str,
help='One or more XLSX file names. The extension is optional.',
)
def ensure_extension(self, filename, extension):
"""Tag a filename with a given file format if it doesn't have one already."""
extension = '.' + extension
if filename.endswith(extension):
return filename
return filename + extension
def load_all_worksheets(self, filenames, verbosity=0):
"""
Open a given series of Excel files and return all their worksheets.
Ignore worksheets whose names start with an @ symbol - these are
used as metadata.
"""
all_sheets = []
for filename in filenames:
filename = self.ensure_extension(filename, 'xlsx')
if verbosity:
print("Loading {}".format(filename))
workbook = load_workbook(
filename=filename,
# read_only=True, # read_only mode causes sharing violations...
data_only=True, # Load the values computed by formulae, not the formulae
keep_vba=False, # Throw away any VBA scripting
)
valid_sheets = [w for w in workbook.worksheets if w.title[0] != '@']
all_sheets += valid_sheets
if verbosity:
titles = [w.title for w in all_sheets]
titles = ', '.join(titles)
print('Loading worksheets: {}'.format(titles))
return all_sheets
def make_safe_name(self, value):
"""
Return a form of `value` that's usable as a variable name in a Django template.
"""
# Replace all spaces with underscores.
value = value.lower().replace(' ', '_')
# Remove any non-alphanumeric characters from the name.
# https://stackoverflow.com/a/2779487
# https://docs.python.org/3/howto/regex.html#matching-characters
value = re.sub(r'\W+', '', value)
return value
def parse_header_row(self, worksheet_row, start_column=0, width=-1):
"""
Return a list of parsed header fields.
Each "field" is a pair of (field_name, is_list). The field_name is suitable for
use as a variable name; is_list is True if the field is expected to contain a
list of data, and False otherwise. Any header preceded by an asterisk denotes
a list field.
The parser will travel along the row from start_column until it reaches either
start_column + width or a blank cell, whichever comes first.
"""
header_row = worksheet_row[start_column:]
if (width > -1):
header_row = header_row[:width]
result = []
for header_cell in header_row:
header = header_cell.value
if header:
is_list = False
# Any header preceded by an asterisk denotes a list field.
if header[0] == '*':
header = header[1:]
is_list = True
# Make the header name into a suitable variable name,
# by forcing lowercase and replacing spaces with underscores.
header = self.make_safe_name(header)
result.append((header, is_list))
else:
# If we find a column with a blank header, stop processing new headers.
break
return result
def parse_data_row(self, worksheet_row, headers, start_column=0):
"""
Turn a row of data from the sheet into a dictionary.
The keys of the dictionary are given by the corresponding headers.
Starting at start_column in the worksheet_row, loop until we run out of headers,
and create a dictionary entry for each cell.
For headers that represent list fields, parse the cell value into a list
(separated by newlines).
Return None if the row is completely blank (every cell is empty/None).
"""
result = {}
is_empty = True
for i, header_data in enumerate(headers):
key = header_data[0]
is_list = header_data[1]
value = worksheet_row[start_column + i].value
# Convert to string to ensure zeros are displayed correctly,
# and that calling split() doesn't explode.
# However, if the value is None, skip the string conversion. This means
# the value shows up correctly as empty, instead of the string "None".
if value is not None:
value = str(value)
is_empty = False
# Parse the value as a list if the column was marked as a list type.
if is_list and value:
value = value.split('\n')
result[key] = value
if is_empty:
return None
return result
def parse_table(
self, worksheet_rows,
start_row=0, start_column=0, height=-1, width=-1
):
"""
Parse an entire table.
- The first row (start_row) is taken to be the header row;
the rest are data rows.
- First, generate the header row, from start_column to start_column + width
(or the end of the sheet if width=-1).
- Then, generate data rows. Iterate from start_row + 1 until start_row +
height (if specified) or the end of the sheet.
parse_data_row is called on each data row, and the results are accumulated
into a list.
Note that height includes the header row.
"""
table_rows = worksheet_rows[start_row:]
if height > -1:
table_rows = table_rows[:height]
header_row = table_rows[0]
headers = self.parse_header_row(header_row, start_column, width)
data_rows = table_rows[1:]
parsed_rows = [
self.parse_data_row(data, headers, start_column)
for data in data_rows
]
nonempty_rows = [r for r in parsed_rows if r is not None]
return nonempty_rows
def convert_to_python(self, worksheet):
"""
Turn an openpyxl worksheet into a list of dictionaries.
Each dictionary represents one card or group of cards that collectively
form a single game 'entity'. This could be one spell, one attack, a series
of stat cards for a single unit, and so on.
The base implementation treats the first row as the headers of a table,
and all the other rows as entries.
If the worksheet is empty - such as the extra default sheets in an Excel
file - or only contains a header row, return an empty list.
"""
all_rows = list(worksheet.rows)
if len(all_rows) < 2:
return []
return self.parse_table(all_rows)
def convert_to_cards(self, card_data):
    """
    Turn one entry dictionary into a list of Card objects.

    Pops 'name', 'template' and 'quantity' out of *card_data* (this
    mutates the argument on purpose, to avoid copying it); whatever
    remains is stored on each Card's ``data`` field.  Entries missing a
    name or template — blank/incomplete/ignored spreadsheet rows — are
    skipped.  A comma-separated template list yields one Card per
    template, all sharing the same data.
    """
    name = card_data.pop('name', None)
    templates = card_data.pop('template', None)
    quantity = card_data.pop('quantity', 1)

    if not (name and templates):
        return []

    cards = []
    for template in templates.split(','):
        cards.append(Card(
            name=name,
            template_name=template.strip(),
            quantity=quantity,
            data=card_data,
        ))
    return cards
def handle(self, *args, **options):
    """Management-command entry point: wipe and re-import all Card data.

    Reads the spreadsheet paths from ``settings.IP_DATA_FILES``, deletes
    every existing Card, parses each worksheet into entry dictionaries,
    and bulk-creates the resulting Card objects.
    """
    verbosity = options['verbosity']
    # The filenames are defined in a setting.
    filenames = settings.IP_DATA_FILES
    if not filenames:
        return
    # Clear all card data before we go any further.
    Card.objects.all().delete()
    # Import!
    worksheets = self.load_all_worksheets(filenames, verbosity)
    python_data = []
    for sheet in worksheets:
        python_data += self.convert_to_python(sheet)
    # Stop right here if we don't have any data.
    if not python_data:
        if verbosity:
            print('No cards were created.')
        return
    # Create the card objects.
    cards = []
    for card_data in python_data:
        cards += self.convert_to_cards(card_data)
    # Use bulk_create to store them for an easy performance bump.
    Card.objects.bulk_create(cards)
    # Chirp triumphantly to stdout.
    if verbosity:
        print('{} cards created!'.format(len(cards)))
        print(', '.join([c.name for c in cards]))
| adam-thomas/imperial-painter | painter/importers/import_cards.py | import_cards.py | py | 9,933 | python | en | code | 0 | github-code | 13 |
3072227515 | import datetime
import mohawk
import pytest
from django.conf import settings
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
from activitystream.authentication import NO_CREDENTIALS_MESSAGE
from wagtail.models import Page
# Hawk signatures embed the request URL, so these variants let the tests
# assert that signatures bound to the wrong host or path are rejected.
URL = 'http://testserver' + reverse('activitystream:cms-content')
URL_INCORRECT_DOMAIN = 'http://incorrect' + reverse('activitystream:cms-content')
URL_INCORRECT_PATH = 'http://testserver' + reverse('activitystream:cms-content') + 'incorrect/'
# Expected Activity Streams body when an endpoint returns no items.
EMPTY_COLLECTION = {
    '@context': 'https://www.w3.org/ns/activitystreams',
    'type': 'Collection',
    'orderedItems': [],
}
@pytest.fixture
def api_client():
    # Fresh DRF test client for each test.
    return APIClient()
def article_attribute(activity, attribute):
    """Shortcut for reading *attribute* off an activity's 'object' payload."""
    payload = activity['object']
    return payload[attribute]
def auth_sender(
    key_id=settings.ACTIVITY_STREAM_ACCESS_KEY_ID,
    secret_key=settings.ACTIVITY_STREAM_SECRET_ACCESS_KEY,
    url=URL,
    method='GET',
    content='',
    content_type='',
):
    """Build a mohawk.Sender that Hawk-signs *method* *url*.

    The defaults sign an empty-body GET against the valid endpoint using
    the same credentials the service itself is configured with, so the
    resulting header authenticates successfully.
    """
    return mohawk.Sender(
        {'id': key_id, 'key': secret_key, 'algorithm': 'sha256'},
        url,
        method,
        content=content,
        content_type=content_type,
    )
@pytest.mark.django_db
def test_site_pages_returned_with_authentication(api_client, en_locale):
    """If the Authorization and X-Forwarded-For headers are correct, then
    the correct, and authentic, data is returned.

    Also exercises mohawk's response verification: a valid response
    passes accept_response, and tampering with the header, body or
    content type raises the corresponding mohawk exception.
    """
    Page.objects.all().update(url_path='/great-international-home/')
    sender = auth_sender()
    response = api_client.get(
        URL,
        content_type='',
        HTTP_AUTHORIZATION=sender.request_header,
        HTTP_X_FORWARDED_FOR='1.2.3.4, 123.123.123.123',
    )
    assert response.status_code == status.HTTP_200_OK
    body = response.json()
    assert body['@context'] == 'https://www.w3.org/ns/activitystreams'
    assert body['type'] == 'Collection'
    assert len(body['orderedItems']) == 3
    # sender.accept_response will raise an error if the
    # inputs are not valid
    sender.accept_response(
        response_header=response['Server-Authorization'],
        content=response.content,
        content_type=response['Content-Type'],
    )
    # Corrupting the middle of the MAC must be detected.
    with pytest.raises(mohawk.exc.MacMismatch):
        sender.accept_response(
            response_header=(
                response['Server-Authorization'][:-12] + 'incorrect' + response['Server-Authorization'][-3:]
            ),
            content=response.content,
            content_type=response['Content-Type'],
        )
    # Appending junk makes the header unparseable.
    with pytest.raises(mohawk.exc.BadHeaderValue):
        sender.accept_response(
            response_header=response['Server-Authorization'] + 'incorrect',
            content=response.content,
            content_type=response['Content-Type'],
        )
    # Wrong body or wrong content type breaks the payload hash.
    with pytest.raises(mohawk.exc.MisComputedContentHash):
        sender.accept_response(
            response_header=response['Server-Authorization'],
            content='incorrect',
            content_type=response['Content-Type'],
        )
    with pytest.raises(mohawk.exc.MisComputedContentHash):
        sender.accept_response(
            response_header=response['Server-Authorization'],
            content=response.content,
            content_type='incorrect',
        )
@pytest.mark.django_db
def test_authentication_fails_if_url_mismatched(api_client, en_locale):
    """Hawk signatures bound to the wrong domain or path must be rejected.

    (The original had a second bare string literal mid-function acting as
    a no-op "docstring"; both cases are now described by real comments.)
    """
    # Hawk header signed for an incorrect domain.
    sender = auth_sender(url=URL_INCORRECT_DOMAIN)
    response = api_client.get(
        URL,
        content_type='',
        HTTP_AUTHORIZATION=sender.request_header,
        HTTP_X_FORWARDED_FOR='1.2.3.4, 123.123.123.123',
    )
    assert response.status_code == status.HTTP_401_UNAUTHORIZED

    # Hawk header signed for an incorrect path.
    sender = auth_sender(url=URL_INCORRECT_PATH)
    response = api_client.get(
        URL,
        content_type='',
        HTTP_AUTHORIZATION=sender.request_header,
        HTTP_X_FORWARDED_FOR='1.2.3.4, 123.123.123.123',
    )
    assert response.status_code == status.HTTP_401_UNAUTHORIZED
@pytest.mark.django_db
def test_if_61_seconds_in_past_401_returned(api_client, en_locale):
    """If the Authorization header is generated 61 seconds in the past, then a
    401 is returned (Hawk headers expire after 60 seconds).
    """
    past = timezone.now() - datetime.timedelta(seconds=61)
    # Freeze the clock while signing so the header timestamp is stale.
    with freeze_time(past):
        auth = auth_sender().request_header
    response = api_client.get(
        reverse('activitystream:cms-content'),
        content_type='',
        HTTP_AUTHORIZATION=auth,
        HTTP_X_FORWARDED_FOR='1.2.3.4, 123.123.123.123',
    )
    assert response.status_code == status.HTTP_401_UNAUTHORIZED
    error = {'detail': 'Incorrect authentication credentials.'}
    assert response.json() == error
@pytest.mark.django_db
def test_error_for_no_authorization_field_in_header(api_client):
    """A request with no Authorization header at all gets a 401 with the
    standard missing-credentials message."""
    response = api_client.get(
        URL,
        content_type='',
        HTTP_X_FORWARDED_FOR='1.2.3.4, 123.123.123.123',
    )
    assert response.status_code == 401
    assert response.json()['detail'] == NO_CREDENTIALS_MESSAGE
| uktrade/directory-cms | tests/activitystream/test_views.py | test_views.py | py | 5,226 | python | en | code | 5 | github-code | 13 |
7885920830 | #!/usr/bin/env python
"""
Run representative cases with varying number of representative weeks.
"""
import json
import logging
import os
import time
from collections import OrderedDict
import matplotlib.pyplot as plt
import pandas as pd
from pkg_resources import resource_filename
import progressbar
from misc.SDH_Conference_TestCases import CaseFuture
from pyomo.opt import SolverStatus, TerminationCondition
def get_json(filepath):
    """
    Load the solutions file at *filepath* and return, for every integer
    top-level key, the entry's 'repr_days' mapping with its keys
    converted to int as well.
    """
    with open(filepath) as handle:
        raw = json.loads(handle.read())
    solutions = json_str2int(raw)
    return {
        key: json_str2int(entry['repr_days'])
        for key, entry in solutions.items()
    }
def json_str2int(ordereddict):
    """
    Transform string keys into int keys in a JSON-derived mapping.

    Entries whose key cannot be parsed as an int are silently dropped.
    :param ordereddict: input mapping to be transformed
    :return: dict with int keys
    """
    converted = {}
    for key, value in ordereddict.items():
        try:
            converted[int(key)] = value
        except ValueError:
            # Non-numeric key (e.g. metadata) — skip it.
            continue
    return converted
if __name__ == '__main__':
    # Full-year reference results, used below for side-by-side comparison
    # with the representative-period runs.
    dffull = pd.read_csv('refresult.txt', sep=' ')
    logging.basicConfig(level=logging.WARNING,
                        format='%(asctime)s %(name)-36s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M')
    time_step = 3600
    # Representative-day selections per case label: 'dur' is the length of
    # one representative period (days), 'sel' maps the number of periods
    # to the selected days.
    input_data = {
        '1dnewsol': {
            'dur': 1,
            'sel': get_json(resource_filename('TimeSliceSelection',
                                              '../Scripts/MultiCorr/ordered_solutions1_20bins_weighted.txt'))
        }
    }
    for time_duration in input_data:  # ['time_duration', 'nocorr']:
        sels = input_data[time_duration]['sel']
        duration_repr = input_data[time_duration]['dur']
        for num in sels:  # sels:
            # One result table per number of representative periods.
            df = pd.DataFrame(
                columns=['A', 'VSTC', 'VWat', 'E_backup_full', 'E_backup_repr',
                         'E_loss_stor_full', 'E_loss_stor_repr',
                         'E_curt_full',
                         'E_curt_repr', 'E_sol_full', 'E_sol_repr', 't_repr', 'C_elec_full', 'C_elec_repr',
                         't_comp'])
            repr_days = sels[num]
            print(len(set(int(round(i)) for i in list(repr_days.values()))))
            print(sorted(set(int(round(i)) for i in list(repr_days.values()))))
            bar = progressbar.ProgressBar(maxval=4 * 3 * 4, \
                                          widgets=[
                                              progressbar.Bar('=', '[', ']'),
                                              ' ', progressbar.Percentage()])
            bar.start()
            repr_model = CaseFuture.setup_opt(repr=repr_days,
                                              time_step=time_step)
            # Sweep water tank volume (VWat), collector area (A) and solar
            # tank volume (VSTC); 4 * 4 * 3 design points per selection.
            for i, VWat in enumerate([50000, 75000, 100000, 125000]):
                for j, A in enumerate(
                        [25000, 50000, 75000, 100000]):  # , 60000, 80000]:
                    for k, VSTC in enumerate([50000, 100000, 150000]):  # , 3.85e6, 4.1e6, 4.35e6, 4.6e6]:
                        # print 'A:', str(A)
                        # print 'VWat:', str(VWat)
                        # print 'VSTC:', str(VSTC)
                        # print '========================='
                        # print ''
                        # Solve representative weeks
                        # NOTE(review): time.clock() was removed in Python 3.8;
                        # time.perf_counter() is the modern replacement.
                        start_full = time.clock()
                        repr_model.change_param(node='SolarArray', comp='solar',
                                                param='area', val=A)
                        repr_model.change_param(node='SolarArray', comp='tank',
                                                param='volume', val=VSTC)
                        repr_model.change_param(node='WaterscheiGarden',
                                                comp='tank', param='volume',
                                                val=VWat)
                        repr_model.change_param(node='Production',
                                                comp='backup', param='ramp',
                                                val=0)
                        repr_model.change_param(node='Production',
                                                comp='backup',
                                                param='ramp_cost', val=0)
                        repr_model.compile('20140101')
                        repr_model.set_objective('cost')
                        compilation_time = time.clock() - start_full
                        energy_sol_repr = None
                        energy_backup_repr = None
                        energy_stor_loss_repr = None
                        energy_curt_repr = None
                        energy_net_loss_repr = None
                        energy_net_pump_repr = None
                        cost_elec_repr = None
                        start = time.clock()
                        repr_model.solve(tee=False, solver='gurobi',
                                         warmstart=True)
                        repr_solution_and_comm = time.clock() - start
                        # Harvest results only when the solver finished cleanly.
                        if (
                                repr_model.results.solver.status == SolverStatus.ok) and not (
                                repr_model.results.solver.termination_condition == TerminationCondition.infeasible):
                            energy_backup_repr = CaseFuture.get_backup_energy(
                                repr_model)
                            energy_stor_loss_repr = CaseFuture.get_stor_loss(
                                repr_model)
                            energy_curt_repr = CaseFuture.get_curt_energy(
                                repr_model)
                            energy_sol_repr = CaseFuture.get_sol_energy(
                                repr_model)
                            energy_net_loss_repr = CaseFuture.get_network_loss(
                                repr_model)
                            energy_net_pump_repr = CaseFuture.get_network_pumping(
                                repr_model)
                            # NOTE(review): energy_demand_repr is only bound on
                            # this success path, unlike the *_repr variables
                            # pre-set to None above — an infeasible solve would
                            # make the df.append below raise NameError.
                            energy_demand_repr = CaseFuture.get_demand_energy(
                                repr_model)
                            cost_elec_repr = repr_model.get_objective()
                        result_full = dffull[
                            (dffull['A'] == A) & (dffull['VSTC'] == VSTC) & (
                                    dffull['VWat'] == VWat)]
                        # full_model = SolarPanelSingleNode.fullyear(storVol=V,
                        #                                            solArea=A,
                        #                                            backupPow=P)
                        # if SolarPanelSingleNode.solve_fullyear(full_model) == 0:
                        #     energy_backup_full = SolarPanelSingleNode.get_backup_energy(
                        #         full_model)
                        #     energy_stor_loss_full = SolarPanelSingleNode.get_stor_loss(
                        #         full_model)
                        #     energy_curt_full = SolarPanelSingleNode.get_curt_energy(
                        #         full_model)
                        #     energy_sol_full = \
                        #         SolarPanelSingleNode.get_sol_energy(full_model)
                        #     fig2 = SolarPanelSingleNode.plot_single_node(
                        #         full_model)
                        #     fig2.savefig(os.path.join('comparison', time_duration,
                        #                               '{}w_{}A_{}V_{}P_full.png'.format(
                        #                                   num, A, V, P)),
                        #                  dpi=100, figsize=(8, 6))
                        # print 'Full time:', str(repr_solution_and_comm + compilation_time)
                        df = df.append({'A': A, 'VSTC': VSTC, 'VWat': VWat,
                                        'E_backup_full': float(
                                            result_full['E_backup_full']),
                                        'E_backup_repr': energy_backup_repr,
                                        'E_loss_stor_full': float(
                                            result_full['E_loss_stor_full']),
                                        'E_loss_stor_repr': energy_stor_loss_repr,
                                        'E_curt_full': float(
                                            result_full['E_curt_full']),
                                        'E_curt_repr': energy_curt_repr,
                                        'E_sol_full': float(
                                            result_full['E_sol_full']),
                                        'E_sol_repr': energy_sol_repr,
                                        'E_net_loss_full': float(
                                            result_full['E_net_loss_full']),
                                        'E_net_loss_repr': energy_net_loss_repr,
                                        'E_net_pump_full': float(
                                            result_full['E_net_pump_full']),
                                        'E_net_pump_repr': energy_net_pump_repr,
                                        'E_demand_full': float(
                                            result_full['E_demand_full']),
                                        'E_demand_repr': energy_demand_repr,
                                        'C_elec_full': float(result_full['C_elec_full']),
                                        'C_elec_repr': cost_elec_repr,
                                        't_repr': repr_solution_and_comm + compilation_time,
                                        't_comp': compilation_time},
                                       ignore_index=True)
                        path = os.path.join('results_ordered', time_duration)
                        if not os.path.isdir(path):
                            os.makedirs(path)
                        # Progressively (re)write results after every design point.
                        df.to_csv(
                            os.path.join(path, 'result_ordered{}p.txt'.format(num)),
                            sep=' ')
                        bar.update(12 * i + 3 * j + k + 1)
            bar.finish()
            print(df)
    # df.to_csv('result6w.txt', sep=' ')
| energyville/modesto | misc/RepresentativePeriodsMILP/runOpt.py | runOpt.py | py | 10,353 | python | en | code | 13 | github-code | 13 |
# Prints the first n terms of the Fibonacci sequence (0, 1, 1, 2, ...)
# separated by " - ".
print('-'*20)
print('Sequencia de Fibonacci')
print('-'*20)
n = int(input('Quantos numeros deseja ver: '))
t1 = 0  # current term
t2 = 1  # next term
# The first two terms are printed unconditionally.
# NOTE(review): for n < 2 this still prints two terms — confirm intended.
print('{} - {} '.format(t1, t2), end='')
c = 3  # index of the next term to print (terms 3..n)
while c <= n:
    t3 = t1 + t2
    print(' - {}'.format(t3), end='')
    t1 = t2
    t2 = t3
    c += 1
print('FIM!')
| Pauloa90/Python | ex063.py | ex063.py | py | 288 | python | en | code | 0 | github-code | 13 |
42704016445 | import time
from usrf_grove import USRF
from machine import Pin
# Poll the ultrasonic range finder and drive the relay: close it while an
# object is within 10 cm, open it otherwise.
Sensor = USRF(pin = 5, echo_timeout_us = 1000000)  # sensor on GPIO5, 1 s echo timeout
Relay = Pin(15, Pin.OUT)
while (True):
    time.sleep_ms(5)  # brief settle delay between pings
    Dist = Sensor.distance_cm()
    if Dist < 10:
        Relay.on()
        time.sleep(2)  # hold the relay on for 2 s per detection
    else:
        Relay.off()
| ffich/Ganimede | 10_Python/020_Sensors/30_UltrasonicRF/us_rf.py | us_rf.py | py | 301 | python | en | code | 1 | github-code | 13 |
74176773139 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 09:15:18 2018
@author: gregz
"""
import numpy as np
from astropy.io import fits
# Read the input list: one FITS path per line (first whitespace-separated
# token of each line is the path).
filenames = [line.rstrip('\n').split() for line in open('/work/03730/gregz/maverick/test_2.dat', 'r')]
ext = 'extracted_spectrum'
fitslist = []
cnt = 0
for filename in filenames:
    F = fits.open(filename[0])
    # First spectrum becomes the primary HDU; all others are image HDUs.
    # Each extension is named after the observed object.
    if cnt == 0:
        f = fits.PrimaryHDU(F[ext].data)
        f.header['EXTNAME']=F[0].header['OBJECT']
    else:
        f = fits.ImageHDU(F[ext].data)
        f.header['EXTNAME']=F[0].header['OBJECT']
    fitslist.append(f)
    cnt += 1
# Collect every extracted spectrum into a single multi-extension FITS file.
fits.HDUList(fitslist).writeto('response_orange.fits', overwrite=True)
| grzeimann/Panacea | strip_ext_multi.py | strip_ext_multi.py | py | 660 | python | en | code | 8 | github-code | 13 |
# Print every three-digit number divisible by 17 and report how many exist.
counter = 0
for int1 in range(100, 1000):
    if int1 % 17 == 0:
        # Print the divisible number itself.  (The original incremented
        # int1 BEFORE printing, so it printed each multiple off by one.)
        print(int1)
        counter += 1
print("\n", counter, "3 digit numbers are divisable by 17")
# Count word frequencies on N "blue" cards (s) and M "red" cards (t);
# answer = the best score max(count_s(word) - count_t(word)) over blue
# words, never below 0 (choosing a word on no card scores 0).
N=int(input())
s={}
for _ in range(N):
    si=input()
    if si in s:
        s[si]+=1
    else:
        s[si]=1
M=int(input())
t={}
for _ in range(M):
    ti=input()
    if ti in t:
        t[ti]+=1
    else:
        t[ti]=1
ans=0
for word in s.keys():
    if word in t:
        ans = max(ans, s[word]-t[word])
    else:
        ans = max(ans, s[word])
print(ans)
9235305448 | import sys
import pandas as pd
def main():
    """Sample a negative-class CSV from rows of *all* absent from *pos*.

    Usage: generate-negative-csv.py ALL_CSV POS_CSV NEG_CSV

    Rows are matched on the ACCNO column; the output holds 7/3 as many
    rows as the positive set, so the final pos:neg ratio is 3:7.
    """
    # The original used `len(sys.argv) is not 4`, which tests object
    # identity rather than value and is unreliable for ints.
    if len(sys.argv) != 4:
        print("argv: all_csv_path pos_csv_path neg_csv_path")
        sys.exit(1)

    all_csv_path, pos_csv_path, neg_csv_path = sys.argv[1:4]

    df_all = pd.read_csv(all_csv_path)
    df_pos = pd.read_csv(pos_csv_path)

    output_size = len(df_pos) * 7 // 3
    # Shuffle the negative pool so the truncation below is a random sample.
    df_neg = df_all[~df_all.ACCNO.isin(df_pos.ACCNO)].sample(frac=1)
    df_neg[:output_size].to_csv(neg_csv_path, index=False)


if __name__ == "__main__":
    main()
| tsaiid/femh-dicom | generate-negative-csv.py | generate-negative-csv.py | py | 539 | python | en | code | 2 | github-code | 13 |
17056480534 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankCreditSceneprodDataUploadModel(object):
    """Request model for the mybank.credit.sceneprod.data.upload API.

    Holds five flat fields; to_alipay_dict()/from_alipay_dict() convert
    between model instances and plain dict payloads.
    """

    def __init__(self):
        self._app_seqno = None
        self._data_config = None
        self._data_content = None
        self._org_code = None
        self._product_code = None

    @property
    def app_seqno(self):
        return self._app_seqno

    @app_seqno.setter
    def app_seqno(self, value):
        self._app_seqno = value

    @property
    def data_config(self):
        return self._data_config

    @data_config.setter
    def data_config(self, value):
        self._data_config = value

    @property
    def data_content(self):
        return self._data_content

    @data_content.setter
    def data_content(self, value):
        self._data_content = value

    @property
    def org_code(self):
        return self._org_code

    @org_code.setter
    def org_code(self, value):
        self._org_code = value

    @property
    def product_code(self):
        return self._product_code

    @product_code.setter
    def product_code(self, value):
        self._product_code = value

    def to_alipay_dict(self):
        """Serialise every populated (truthy) field into a plain dict."""
        params = dict()
        for field in ('app_seqno', 'data_config', 'data_content',
                      'org_code', 'product_code'):
            value = getattr(self, field)
            if not value:
                continue
            # Nested API models serialise themselves recursively.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = MybankCreditSceneprodDataUploadModel()
        for field in ('app_seqno', 'data_config', 'data_content',
                      'org_code', 'product_code'):
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/MybankCreditSceneprodDataUploadModel.py | MybankCreditSceneprodDataUploadModel.py | py | 2,955 | python | en | code | 241 | github-code | 13 |
4791202068 | #!/usr/bin/python
# -*- coding: utf-8 -*-
class Solution(object):
    """Levenshtein edit distance between two words.

    Computes the minimum number of single-character operations
    (insert / delete / replace) that turn ``word1`` into ``word2``.
    """

    def __init__(self, word1: str, word2: str) -> None:
        self.word1 = word1
        self.word2 = word2
        self.minStep = 0
        # Kept for backward compatibility with the previous implementation,
        # which used this as scratch space; it is no longer consulted.
        self.data = [None for _ in range(len(word1))]

    def findSmallestModification(self):
        """Fill ``self.minStep`` with the edit distance.

        Standard DP over two rolling rows: ``previous[j]`` holds the
        distance between word1[:i-1] and word2[:j].  The old greedy
        substring-matching approach produced wrong answers for most
        inputs (it only counted characters of word1 never matched in
        order by word2).
        """
        source, target = self.word1, self.word2
        previous = list(range(len(target) + 1))
        for i, src_char in enumerate(source, start=1):
            current = [i] + [0] * len(target)
            for j, tgt_char in enumerate(target, start=1):
                if src_char == tgt_char:
                    current[j] = previous[j - 1]
                else:
                    current[j] = 1 + min(previous[j - 1],  # replace
                                         previous[j],      # delete
                                         current[j - 1])   # insert
            previous = current
        self.minStep = previous[-1]

    def isNum(self) -> int:
        """Return the edit distance (0 when the words are already equal)."""
        self.findSmallestModification()
        return self.minStep
if __name__ == "__main__":
word1 = "horse"
word2 = "ros"
demo = Solution(word1, word2)
res = demo.isNum()
print(res)
| LeroyK111/BasicAlgorithmSet | 代码实现算法/edit-distance.py | edit-distance.py | py | 1,246 | python | en | code | 1 | github-code | 13 |
14239030837 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import thumt.utils as utils
from thumt.modules.module import Module
from thumt.modules.dmb.affine import Affine
class FeedForward(Module):
    """Position-wise feed-forward layer: Affine -> ReLU -> dropout -> Affine.

    Both affine transforms are built with ``n`` branches; *partitions* is
    forwarded to them at call time (presumably selecting a branch per
    input — confirm against the Affine implementation).
    """

    def __init__(self, input_size, hidden_size, output_size=None, dropout=0.0,
                 n=1, shared_private=False, quantization=False,
                 input_threshold=6.0, weight_threshold=1.0,
                 name="feed_forward"):
        # output_size defaults to input_size so the layer is residual-friendly.
        super(FeedForward, self).__init__(name=name)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size or input_size
        self.dropout = dropout
        self.n = n
        with utils.scope(name):
            self.input_transform = Affine(input_size, hidden_size, n=n,
                                          shared_private=shared_private,
                                          quantization=quantization,
                                          input_threshold=input_threshold,
                                          weight_threshold=weight_threshold,
                                          name="input_transform")
            self.output_transform = Affine(hidden_size, self.output_size, n=n,
                                           shared_private=shared_private,
                                           quantization=quantization,
                                           input_threshold=input_threshold,
                                           weight_threshold=weight_threshold,
                                           name="output_transform")
        self.reset_parameters()

    def forward(self, x, partitions):
        """Apply the two-layer MLP; dropout is active only in training mode."""
        x = self.input_transform(x, partitions)
        h = nn.functional.relu(x)
        h = nn.functional.dropout(h, self.dropout, self.training)
        return self.output_transform(h, partitions)

    def reset_parameters(self):
        """Xavier-initialise weights and zero biases.

        With n == 1 the Affine modules expose .weight/.bias directly;
        otherwise each of the n branches is initialised separately.
        """
        if self.n == 1:
            nn.init.xavier_uniform_(self.input_transform.weight)
            nn.init.xavier_uniform_(self.output_transform.weight)
            nn.init.constant_(self.input_transform.bias, 0.0)
            nn.init.constant_(self.output_transform.bias, 0.0)
        else:
            for i in range(self.n):
                nn.init.xavier_uniform_(
                    self.input_transform.branches[i].weight)
                nn.init.xavier_uniform_(
                    self.output_transform.branches[i].weight)
                nn.init.constant_(
                    self.input_transform.branches[i].bias, 0.0)
                nn.init.constant_(
                    self.output_transform.branches[i].bias, 0.0)
| THUNLP-MT/Transformer-DMB | thumt/modules/dmb/feed_forward.py | feed_forward.py | py | 2,714 | python | en | code | 1 | github-code | 13 |
73913807696 | #!/usr/bin/python3
from __future__ import print_function
from dronekit import connect, VehicleMode
import numpy as np
import cv2
import cv2.aruco as aruco
import sys, time, math, _thread, argparse
connection_string = "/dev/ttyACM0"
baud_rate = 57600
#1678 -> 10 deg
#1507 -> Neutral
#1186 -> -30 deg
#-------------(deg) 3 0 -5 -10 -15 -20 -25 -30
# simulate_angle = [50.84, 48.48, 44.57, 40.43, 36.66, 32.11, 28.41, 26.18]
# radio_in_elevator = [1558, 1507, 1451, 1398, 1345, 1292, 1239, 1186]
# delta_angle = [3, 0, -5, -10, -15, -20, -25, -30]
#[1924,1500,900]
# simulate_angle = [50.84, 48.48, 44.57, 40.43, 36.66, 32.11, 28.41]
simulate_angle = [52.5, 49.82, 45.51, 40.33, 35.11, 28.86, 23.81]
radio_in_elevator = [1467, 1478, 1602, 1683, 1764, 1838, 1924] #ch2in radio calibration
delta_angle = [3, 0, -5, -10, -15, -20, -25]
timer_exit = 10
print('Connecting to Vehicle on: %s' %connection_string)
vehicle = connect(connection_string, baud=baud_rate, wait_ready=True)
vehicle.wait_ready('autopilot_version')
def deepstall(check_deepstall):
    """Worker thread: wait for the RC channel-7 switch, then perform the
    deep-stall manoeuvre by overriding the elevator channel (ch 2).

    Sequence: pitch down to roughly -15 deg, hold 3 s, then pitch up to
    roughly 60 deg.  Each phase polls pitch every loop and aborts after a
    5 s timeout.  The function returns after one manoeuvre.
    """
    print("Thread-2")
    time.sleep(1)
    #if check_deepstall:
    while True:
        # print("Checking Guided Mode...")
        # print(" Mode: %s" % vehicle.mode.name)
        # if vehicle.mode.name=='STABILIZE':
        print("ch7: ", vehicle.channels['7'])
        # Channel 7 high (> 1514 us) arms the deep-stall sequence.
        if int(vehicle.channels['7']) > 1514:
            print("Deep stall mode")
            # print("starting deepstall")
            # time.sleep(0.5)
            # print("3")
            # time.sleep(1)
            # print("2")
            # time.sleep(1)
            # print("1")
            # time.sleep(2.5)
            # Phase 1: elevator down until pitch <= -12 deg (5 s timeout).
            pitch_down_start_time = time.time()
            while True:
                pitch_angle = math.degrees(vehicle.attitude.pitch)
                pitch_down_now_time = time.time()
                print("pitch angle: ", pitch_angle)
                if pitch_down_now_time - pitch_down_start_time >= 5:
                    print("time out pitch down")
                    break
                if pitch_angle >= -12:
                    print("Elevator down")
                    vehicle.channels.overrides['2'] = 1312
                else:
                    vehicle.channels.overrides['2'] = 1500
                    print("Pitch angle has been adjusted to -15[deg]")
                    break
                time.sleep(0.5)
            time.sleep(3)
            # Phase 2: elevator up until pitch >= 27 deg (5 s timeout).
            pitch_up_start_time = time.time()
            while True:
                pitch_angle = math.degrees(vehicle.attitude.pitch)
                pitch_up_now_time = time.time()
                print("pitch angle: ", pitch_angle)
                if pitch_up_now_time - pitch_up_start_time >= 5:
                    print("time out pitch up")
                    break
                if pitch_angle <= 27:
                    print("Elevator up")
                    vehicle.channels.overrides['2'] = 1924
                else:
                    vehicle.channels.overrides['2'] = 1500
                    print("Pitch angle has been adjusted to 60[deg]")
                    break
            # time.sleep(0.5)
            # print("Deep stall")
            # vehicle.channels.overrides['2'] = radio_in_elevator[6] #deepstall
            # time.sleep(1)
            break
    #check_deepstall = False
def timer(cap, out):
    """Worker thread: record and display camera frames until either 10
    minutes elapse, or at least 1 minute has passed and the vehicle's
    relative altitude has dropped to <= 1 m.  Releases the capture and
    writer on exit.
    """
    print("Thread-1")
    print("start time")
    start = time.time()
    while True:
        ret, frame = cap.read()
        out.write(frame)
        cv2.imshow('frame', frame)
        now = time.time()
        key = cv2.waitKey(1) & 0xFF
        # if key == ord('q'):
        time_ago = now - start
        current_alttitude = vehicle.location.global_relative_frame.alt
        if (time_ago >= 60*10) or (time_ago >= 60 and current_alttitude <= 1):
            time.sleep(3)
            cap.release()
            out.release()
            cv2.destroyAllWindows()
            break
try:
    # Run number becomes part of the recorded video filename.
    parser = ArgumentParser()
    parser.add_argument("number_of_run")
    args = parser.parse_args()
    video_filename = "../../../Videos/ground/12-10-64_sikan_test_" + args.number_of_run + ".avi"
    check_deepstall = True
    cap = cv2.VideoCapture(0)
    if (cap.isOpened() == False):
        print("Error reading video file")
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    size = (frame_width, frame_height)
    out = cv2.VideoWriter(video_filename,
                          cv2.VideoWriter_fourcc(*'MJPG'),
                          10, size)
    # Recording and deep-stall logic each run on their own thread.
    _thread.start_new_thread( timer, (cap, out, ))
    _thread.start_new_thread( deepstall, (check_deepstall, ))
except:
    # NOTE(review): bare except hides the real failure (including argparse
    # SystemExit); consider catching specific exceptions and logging them.
    print ("Error: unable to start thread")
# Keep the main thread alive so the worker threads can run.
while 1:
    pass
| ekaratst/deep-stall-landing-using-image-processing | fixed-wing/experiment/test/run-deepstall-bytime.py | run-deepstall-bytime.py | py | 3,957 | python | en | code | 0 | github-code | 13 |
26246857271 | import json
import sqlite3
from flask import Flask, jsonify, request, Response
from flask_cors import CORS
app = Flask(__name__)
# Allow cross-origin requests (frontend served from another origin).
CORS(app)
# In-memory sample data; not persisted.
todos = [{
    "id": 1,
    "title": 'todo1',
    "completed": True
}, {
    "id": 2,
    "title": 'todo2',
    "completed": False
}]
# SQLite database holding the 2019 match data.
dbname = "data.db"
def to_response(data, message, code):
    """Wrap *data* in the API's standard success envelope and return it
    as an application/json Response with status *code*."""
    envelope = {"result": data, "error": None, "message": message}
    return Response(json.dumps(envelope), status=code, mimetype="application/json")
def to_error_response(error, message, code=500):
    """Wrap *error* in the API's standard failure envelope (result is
    None) and return it as an application/json Response."""
    envelope = {"result": None, "error": error, "message": message}
    return Response(json.dumps(envelope), status=code, mimetype="application/json")
@app.route('/')
def hello_world():
    # Simple liveness endpoint.
    return 'Hello World!'
@app.route('/api/search', methods=['GET'])
def get_match_list():
    """Search `2019_match_data`, treating each non-empty query-string
    parameter as an exact-match column filter; returns at most 100 rows.

    Values are bound as SQL parameters and column names are restricted
    to plain identifiers, closing the SQL injection hole of the previous
    version (which concatenated repr(value) — and raw keys — straight
    into the WHERE clause).
    """
    conditions = []
    params = []
    for key in request.args:
        value = request.args.get(key)
        if value == "":
            continue
        # Column names cannot be bound as parameters, so reject anything
        # that is not a plain identifier to block injection via keys.
        if not key.isidentifier():
            return to_error_response("invalid filter", "bad column name: " + key, 400)
        conditions.append(key + " = ?")
        params.append(value)
    where = " and ".join(conditions) if conditions else "1"
    query = "select * from `2019_match_data` where " + where + " limit 100"
    print(query)  # debug trace; values are bound separately, never inlined
    matches = []
    with sqlite3.connect(dbname) as conn:
        conn.row_factory = access_fields_by_name
        for row in conn.execute(query, params).fetchall():
            matches.append({
                "id": row['id'],
                "gameid": row['gameid'],
                "side": row['side'],
                "url": row['url'],
                "league": row['league'],
                'position': row['position'],
                'playername': row['playername'],
                'teamname': row['teamname'],
                'champion': row['champion'],
                'kda': str(row['kills']) + "/" + str(row['deaths']) + "/" + str(row["assists"]),
                'result': row['result']
            })
    # NOTE: message text kept from the original for compatibility.
    return to_response(matches, "todo list loaded", 200)
def quote(string):
    # NOTE(review): dead stub — always returns None and is never called
    # from this module; kept only for backward compatibility.
    return
def access_fields_by_name(cursor, row):
    """`row_factory` mapping each row to a dict keyed by the lower-cased
    column name from the cursor description."""
    names = (column[0].lower() for column in cursor.description)
    return dict(zip(names, row))
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| Faye6155/Final | app/app.py | app.py | py | 2,219 | python | en | code | 0 | github-code | 13 |
36580374013 | from argparse import ArgumentParser
from src.encoder.binary_arithmetic_encoder import BinaryArithmeticEncoder
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--file', type=str, help="Path to file to encode")
parser.add_argument('--probabilities', type=str, help='Path to file with symbols probabilities')
args = parser.parse_args()
calculate_probabilities = True
probabilities = {}
if args.probabilities:
with open(args.probabilities, 'r') as prob_file:
line = prob_file.readline()
while line:
line_parts = line.split()
probabilities[line_parts[0]] = float(line_parts[1])
line = prob_file.readline()
print(f"Read probabilities: {probabilities}")
calculate_probabilities = False
encoder = BinaryArithmeticEncoder(probabilities)
with open(args.file, 'r') as target_file:
text = target_file.read()
print(f"Read text: {text}")
code = encoder.encode(text, calculate_prob=calculate_probabilities)
print(f"Code: {code}")
| Dymasik/BinaryArithmeticEncoder | src/main.py | main.py | py | 1,111 | python | en | code | 0 | github-code | 13 |
17669049940 | # Idea is to use a monotonic queue with stores numbers in decreasing order. We can use this to our advantage
# as the max element for every sub list will the first element of the queue however we will nee to maintain this
# state of the queue. Whenever we add a num to the queue we need to check if that num is less than the num on
# the rightmost index of the queue. If not then we pop until we find a num greatre or queue is empty and then
# add the num. When the sub list size is k we need to add the leftmost num in queue to the result and increament
# left pointer. When the left pointer is not equal to the leftmost num of queue wepop from left.
# Use the same logic up store indexes in queue.
# TC = O(N) and SC = O(N)
class Solution:
def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
window_queue = deque()
left, right, res = 0, 0, []
while right < len(nums):
# Add nums to queue monotonically.
while window_queue and nums[right] > nums[window_queue[-1]]:
window_queue.pop()
window_queue.append(right)
# When l is shifted we need to rearrange queue if the l was the max ie at index 0.
if left > window_queue[0]:
window_queue.popleft()
if right + 1 >= k:
res.append(nums[window_queue[0]])
left += 1
right += 1
return res | SharmaManjul/DS-Algo | LeetCode/Blind75/SlidingWindow/slidingWindowMaximum.py | slidingWindowMaximum.py | py | 1,432 | python | en | code | 0 | github-code | 13 |
35540506180 | # This script uses Pandas and Bokeh to provide a line graph based off large amounts of data.
# Data is read in through a CSV file and plots X vs Y based off column titles.
#
# To install Pandas: $ pip3 install pandas
# To install Bokeh: $ pip3 install bokeh
# importing bokeh and pandas
from bokeh.plotting import figure
from bokeh.io import output_file, show
import pandas
# Define constants here
CSV_INPUT = "data.csv"
HTML_OUTPUT = "MyChart.html"
X_AXIS_DATA_COLUMN = "Year"
Y_AXIS_DATA_COLUMN = "Engineering"
# Prepare the data: read the CSV and pull out the two plotted columns.
df = pandas.read_csv(CSV_INPUT)
x = df[X_AXIS_DATA_COLUMN]
y = df[Y_AXIS_DATA_COLUMN]
# Each column behaves like a list of values, e.g.:
# x=[1,2,3,4,5]
# y=[6,7,8,9,10]
# prepare the output file
output_file(HTML_OUTPUT)
# create a figure object
f = figure()
# create line plot
f.line(x, y)
# Alternative glyphs (uncomment to use instead of/alongside the line):
# triangle glyphs
# f.triangle(x,y)
# circle glyphs
# f.circle(x,y)
# Write the HTML file and open it in a browser.
show(f)
| ejrach/my-python-utilities | SpreadsheetUtilities/DataVisualizationLine/app.py | app.py | py | 916 | python | en | code | 0 | github-code | 13 |
13955892385 | # export TF_CPP_MIN_LOG_LEVEL=2 (Ignore warnings)
# 25 Sep 2017
# Daniel Kho
# Seungmin Lee
# GDO number
# initial variable: w&b
# Learning rate
# Sample size
# ====> shape of regression
from __future__ import print_function
from decimal import *
from tensorflow.contrib.learn.python import SKCompat
import time
import io
import numpy as np
import random
import matplotlib
matplotlib.use('Agg') #Generate Image without window appear
import matplotlib.pyplot as plt
import tensorflow as tf
start = time.time()  # Record performance time
# TF1 queue-based input pipeline: enqueue the CSV file and read it line by line.
filename_queue = tf.train.string_input_producer(["RFID_bathdata_random10000.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
# ID BatchMainID UserID ProcCode ProcSeqnum Quantity Good Number Time Location newTime
record_defaults = [tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32),
                   tf.constant([1], dtype = tf.float32)]
# Convert CSV records to tensors. Each column maps to one tensor.
ID, BatchMainID, UserID, ProcCode, ProcSeqnum, Quantity, Good_Number, Time_Original, Location, Time_Integer = tf.decode_csv(value, record_defaults=record_defaults)
features = tf.stack([ID, BatchMainID, UserID, ProcCode, ProcSeqnum, Quantity, Good_Number, Time_Original, Location, Time_Integer])
def gen_plot_all(cluster, centroid_values, num_clusters):
    """Draw every cluster and its centroid on one figure.

    cluster         -- list with one entry per cluster; each entry is a list
                       of [x, y] point pairs (output of clustering())
    centroid_values -- per-cluster [x, y] centroid coordinates
    num_clusters    -- number of clusters to draw

    Returns (png_buffer, xaxis_all, yaxis_all): an in-memory PNG of the
    figure plus the per-cluster x and y coordinate lists produced by
    cluster_reshape().
    """
    plt.figure()
    plt.title('Number of Processes vs Time takes to finish Batches')
    plt.xlabel('Number of Processes')
    plt.ylabel('Time (days)')
    plt.legend()
    plt.show()
    # One style per cluster: 'o' markers for points, 'x' markers for centroids.
    colour=['ro','bo','go','ko','mo']
    colour_centroid=['rx','bx','gx','kx','mx']
    # Plot each centroid; the modulo guards against num_clusters > len(colour).
    for i in xrange(num_clusters):
        plt.plot(centroid_values[i][0],centroid_values[i][1], colour_centroid[ i%(len(colour)) ], markersize=8)
    xaxis_all=[]
    yaxis_all=[]
    # Scatter the member points of each cluster in that cluster's colour.
    for j in xrange(num_clusters):
        xaxis, yaxis = cluster_reshape(cluster,num_clusters,j)
        xaxis_all.append(xaxis)
        yaxis_all.append(yaxis)
        plt.plot(xaxis, yaxis, colour[j])
        print('Plot',j,'cluster of colour',colour[j])
    plt.show()
    # Serialise the current figure into an in-memory PNG (used for TensorBoard).
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    return buf, xaxis_all, yaxis_all
def gen_plot(xaxis, yaxis, clusterNo):
    """Plot one cluster, fit a per-cluster regression to it by gradient
    descent, overlay the fitted curve, and return the figure as a PNG buffer.

    xaxis, yaxis -- coordinates of the points in this cluster
    clusterNo    -- cluster index; selects both the plot colour and the
                    model shape (log curve for cluster 0, reciprocal 1/x
                    for clusters 1-4)

    Returns an io.BytesIO containing the rendered PNG.
    """
    plt.figure()
    plt.title('Number of Processes vs Time takes to finish Batches')
    plt.xlabel('Number of Processes')
    plt.ylabel('Time (days)')
    plt.legend()
    plt.show()
    colour=['ro','bo','go','ko','mo']
    colour_reg=['r-','b-','g-','k-','m-']
    # Sort the points by x so the fitted curve is drawn left-to-right.
    xaxis, yaxis = zip( *sorted(zip(xaxis, yaxis)) )
    plt.plot(xaxis, yaxis, colour[ clusterNo ])
    print("Each cluster length",len(xaxis),"on ",clusterNo)
    # Gradient Decent Optimization method
    w = tf.Variable(tf.random_uniform([1],0, 50), name = 'weight')
    b = tf.Variable(tf.random_normal([1],-10,10), name='bias1')
    xaxis = np.asarray(xaxis,dtype=np.float32)
    # Model selection per cluster: cluster 0 uses y = w*log(x) + b, the
    # rest use y = w/x + b.  Cluster 1 also gets a vertical guide at x = 5.
    if (clusterNo == 0):
        #y = w*(tf.reciprocal(xaxis)) + b
        y = w*(tf.log(xaxis)) + b
    if (clusterNo == 1):
        plt.plot([5,5],[0,100],colour_reg[ clusterNo ])
        y = w*(tf.reciprocal(xaxis)) + b
    if (clusterNo == 2):
        y = w*(tf.reciprocal(xaxis)) + b
    if (clusterNo == 3):
        y = w*(tf.reciprocal(xaxis)) + b
    if (clusterNo == 4):
        y = w*(tf.reciprocal(xaxis)) + b
    # Mean-squared-error loss between the model and the observed y values.
    loss = tf.reduce_mean(tf.square(y - yaxis), name='cost')
    optimizer = tf.train.GradientDescentOptimizer(0.015)
    train = optimizer.minimize(loss)
    # Initialisation
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)
    learningNumber = 101
    #if (clusterNo != 1):
    # Train for a fixed number of steps, logging progress every 10 steps.
    for step in xrange(learningNumber):
        sess.run(train)
        if ( (step % 10) == 0):
            print(step,"w = ",sess.run(w), "b = ", sess.run(b), "Loss = ", sess.run(loss))
    yOut = sess.run(y)
    #if (clusterNo != 1):
    plt.plot(xaxis, yOut, colour_reg[ clusterNo ])
    plt.axis([0,max(xaxis)+5,0,max(yaxis)+5])
    plt.show()
    # Serialise the figure into an in-memory PNG.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    return buf
def gen_plot_eval(xaxis, yaxis, clusterNo, w, b):
    """Plot one cluster and overlay a *pre-trained* regression curve.

    Unlike gen_plot(), no training happens here: the caller supplies the
    already-fitted weight *w* and bias *b* and the curve is just evaluated.

    xaxis, yaxis -- coordinates of the points in this cluster
    clusterNo    -- cluster index; selects colour and model shape
    w, b         -- fitted regression parameters for this cluster

    Returns an io.BytesIO containing the rendered PNG.
    NOTE(review): `sess` is not a parameter here, so `sess.run(y)` below
    uses the module-level TensorFlow session.
    """
    plt.figure()
    plt.title('Number of Processes vs Time takes to finish Batches')
    plt.xlabel('Number of Processes')
    plt.ylabel('Time (days)')
    plt.legend()
    plt.show()
    colour=['ro','bo','go','ko','mo']
    colour_reg=['r-','b-','g-','k-','m-']
    # Sort the points by x so the evaluated curve is drawn left-to-right.
    xaxis, yaxis = zip( *sorted(zip(xaxis, yaxis)) )
    plt.plot(xaxis, yaxis, colour[ clusterNo ])
    print("Each cluster length",len(xaxis),"on ",clusterNo)
    xaxis = np.asarray(xaxis,dtype=np.float32)
    # Same per-cluster model shapes as gen_plot(): log for cluster 0,
    # reciprocal for clusters 1-4; cluster 1 also gets a vertical guide.
    if (clusterNo == 0):
        #y = w*(tf.reciprocal(xaxis)) + b
        y = w*(tf.log(xaxis)) + b
    if (clusterNo == 1):
        plt.plot([5,5],[0,100],colour_reg[ clusterNo ])
        y = w*(tf.reciprocal(xaxis)) + b
    if (clusterNo == 2):
        y = w*(tf.reciprocal(xaxis)) + b
    if (clusterNo == 3):
        y = w*(tf.reciprocal(xaxis)) + b
    if (clusterNo == 4):
        y = w*(tf.reciprocal(xaxis)) + b
    # Evaluate (not train) the curve with the supplied w and b.
    yOut = sess.run(y)
    if (clusterNo != 1):
        plt.plot(xaxis, yOut, colour_reg[ clusterNo ])
    plt.axis([0,max(xaxis)+5,0,max(yaxis)+5])
    plt.show()
    # Serialise the figure into an in-memory PNG.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    return buf
def clustering(xaxis, yaxis, assignment_values, num_clusters):
    """Group 2-D points by their k-means cluster assignment.

    xaxis, yaxis      -- parallel sequences of point coordinates
    assignment_values -- assignment_values[j] is the cluster index of point j
    num_clusters      -- total number of clusters

    Returns a list of length num_clusters; entry i is the list of [x, y]
    pairs assigned to cluster i (an empty list when cluster i has no
    members).
    """
    cluster = []
    # `range` (not `xrange`) keeps this pure helper portable across Py2/Py3.
    for cluster_idx in range(num_clusters):
        members = [[x, y]
                   for x, y, assigned in zip(xaxis, yaxis, assignment_values)
                   if assigned == cluster_idx]
        cluster.append(members)
    return cluster
def cluster_reshape(cluster, num_clusters, selected_clusterNo):
    """Split one cluster's [x, y] pairs into separate coordinate lists.

    cluster            -- output of clustering(): one point list per cluster
    num_clusters       -- kept for interface compatibility (unused here)
    selected_clusterNo -- index of the cluster to unpack

    Returns (xaxis, yaxis) as two parallel lists.
    """
    points = cluster[selected_clusterNo]
    xaxis = [point[0] for point in points]
    yaxis = [point[1] for point in points]
    return xaxis, yaxis
sess = tf.InteractiveSession()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
BatchMainIDArray=[]
timeArray=[]
locationArray=[]
IDArray=[]
rowsArray=[]
loop=10000
# try 50 250 500 etc
# note computational cost for each sets
# mention data size too large. How to reduce comp. cost? Not fully utilized whole data size
for i in range(loop):
if ( (i % 500) == 0):
print(i,'read data over',loop)
rows, IDNumber, BatchMainIDNumber, Time, location = sess.run([features, ID, BatchMainID, Time_Integer, Location])
rowsArray = np.append(rowsArray, rows)
IDArray = np.append(IDArray, IDNumber)
BatchMainIDArray = np.append(BatchMainIDArray, BatchMainIDNumber)
timeArray = np.append(timeArray, Time)
locationArray = np.append(locationArray, location)
BatchMainIDArray = BatchMainIDArray.tolist()
timeArray = timeArray.tolist()
cmpBatchMainID = None
calledBatchMainID = []
noofProcArray=[]
diffTime=[]
for i,j in enumerate(BatchMainIDArray):
if ( (i % 500) == 0):
print(i,'classified data over',loop)
cmpBatchMainID = j
sameBatchMainID = []
corrspTime=[]
if (cmpBatchMainID not in calledBatchMainID):
calledBatchMainID.append(j)
for k,l in enumerate(BatchMainIDArray):
if(cmpBatchMainID == l):
sameBatchMainID.append(k)
corrspTime.append(timeArray[k])
corrspTime.sort()
noofProcArray.append(len(sameBatchMainID))
diffTime.append(corrspTime[len(corrspTime) - 1] - corrspTime[0])
print("Classified Data Length",len(noofProcArray))
# K-mean
num_clusters = 5
vector_values = []
for i in range(len(noofProcArray)):
vector_values.append([noofProcArray[i],diffTime[i]])
# K means for evaluation
centroid_user_values = [[20.46451569, 11.56920528],
[5, 44.69249344],
[8.88530445, 8.81974125],
[3.50498343, 0.81601208],
[6.15176725, 3.04871345]]
w_values = [13.24903679,23.40257263,42.70797348,1.66092837,12.11023426]
b_values = [-28.31702423,38.09459686,2.82725811,0.34326613,0.47114417]
vectors = tf.constant(vector_values)
#centroids = tf.Variable(tf.slice(tf.random_shuffle(vectors,seed=1),[0,0],[num_clusters,-1])) #10000,50000first seed=1, 10000,50000last seed=8, 10000random seed=8, 50000random seed=11 / first samples only
centroids = tf.constant(centroid_user_values)
expanded_vectors = tf.expand_dims(vectors, 0)
expanded_centroids = tf.expand_dims(centroids, 1)
distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2)
assignments = tf.argmin(distances, 0)
#means = tf.concat([ # First samples only
# tf.reduce_mean(
# tf.gather(vectors,
# tf.reshape(
# tf.where(
# tf.equal(assignments, c)
# ),[1,-1])
# ),reduction_indices=[1])
# for c in xrange(num_clusters)], 0)
#update_centroids = tf.assign(centroids, means) # First samples only
# Gradient Decent Optimization method was below here:
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# K mean
num_k_steps = 10
#for step in xrange(num_k_steps):
#_, centroid_values, assignment_values = sess.run([update_centroids,centroids,assignments]) # First samples only
centroid_values, assignment_values = sess.run([centroids,assignments])
print("Assignmnet_eval")
print(assignment_values)
print("Centroids")
print(centroid_values)
# coord
coord.request_stop()
coord.join(threads)
# Clustering
cluster = clustering(noofProcArray, diffTime, assignment_values, num_clusters)
# Prepare the plot with Gradient Decent optimizaion method
plot_buf, xaxis_all, yaxis_all = gen_plot_all(cluster, centroid_values, num_clusters)
# Prediction
predictNo = 100
predict_NoProc = np.random.randint( 1, max(max(xaxis_all)), size = predictNo )
print("Predict Input")
print(predict_NoProc)
predictCluster = []
predictPercentage = []
for i in xrange(predictNo):
predictCountArray = []
for j in xrange(num_clusters):
predictCount = 0
for k in xrange( len(xaxis_all[j]) ):
if (xaxis_all[j][k] == predict_NoProc[i]):
predictCount = predictCount + 1
predictCountArray.append(predictCount)
if ( max(predictCountArray) == 0):
predictCluster.append(-1)
predictPercentage.append(0.0)
else:
predictCluster.append( predictCountArray.index( max(predictCountArray) ) )
predictPercentage.append( round( 100.0*max(predictCountArray)/sum(predictCountArray), 2) )
for i in xrange(predictNo): # Find approx prediction near -1s
approx=2
if (predictCluster[i]==-1):
predictCountArray2 = [0]*num_clusters
for j in range(predict_NoProc[i]-approx, predict_NoProc[i]+approx+1):
for k in xrange(num_clusters):
predictCount = 0
for n in xrange( len(xaxis_all[k]) ):
if (xaxis_all[k][n] == j):
predictCount = predictCount + 1
predictCountArray2[k] = predictCountArray2[k] + predictCount
if ( max(predictCountArray2) != 0):
predictCluster[i] = predictCountArray2.index( max(predictCountArray2) )
predictPercentage[i] = round( 100.0*max(predictCountArray2)/sum(predictCountArray2), 2)
print("Predict Output")
print(predictCluster)
print("Predict Percentage")
print(predictPercentage)
# Continue ploting
subplot=[]
for i in xrange(num_clusters):
print(len(xaxis_all[i]))
if ( (len(xaxis_all[i])!=0) and (len(yaxis_all[i])!=0) ):
#subplot.append( gen_plot(xaxis_all[i], yaxis_all[i], i) ) # First samples only
subplot.append( gen_plot_eval(xaxis_all[i], yaxis_all[i], i, w_values[i], b_values[i]) )
else:
#subplot.append( gen_plot(0, 0, i) ) # First samples only
subplot.append( gen_plot_eval([0,0], [0.0,0.0], i, w_values[i], b_values[i]) )
# Convert PNG buffer to TF image
image = tf.image.decode_png(plot_buf.getvalue(), channels=4)
subImage_temp=[]
for i in xrange(num_clusters):
subImage_temp.append( tf.image.decode_png(subplot[i].getvalue(), channels=4) )
# Add the batch dimension
image = tf.expand_dims(image, 0)
subImage=[]
for i in xrange(num_clusters):
subImage.append( tf.expand_dims(subImage_temp[i], 0) )
# Add image summary
summary_op = tf.summary.image("Overall Plot", image, max_outputs=3)
summary_op_sub = []
for i in xrange(num_clusters):
title = "Each Cluster Plot"+str(i)
summary_op_sub.append( tf.summary.image(title, subImage[i], max_outputs=3) )
# Run
summary = tf.summary.merge_all()
summary = sess.run(summary_op)
summary_sub=[]
for i in xrange(num_clusters):
summary_sub.append( sess.run(summary_op_sub[i]) )
# Write summary
writer = tf.summary.FileWriter('/notebooks/logs', sess.graph)
writer.add_summary(summary)
for i in xrange(num_clusters):
writer.add_summary(summary_sub[i])
end = time.time()
print("Elapsed time ",end-start, "seconds")
# export TF_CPP_MIN_LOG_LEVEL=2 (Ignore warnings)
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import io
def gen_plot(xaxis, yaxis):
    """Plot force vs. time, fit a line y = w*x + b by gradient descent,
    overlay the fit, and return the figure as an in-memory PNG buffer.

    xaxis -- sample times in seconds
    yaxis -- measured clench force (kg) at each sample time

    Returns an io.BytesIO containing the rendered PNG.
    """
    plt.figure()
    plt.title('Clench Force vs Time')
    plt.xlabel('Time (s)')
    plt.ylabel('Clench Force (kg)')
    plt.legend()
    plt.show()
    plt.plot(xaxis, yaxis)
    # Gradient descent: fit a straight line to the supplied samples.
    w = tf.Variable(tf.random_uniform([1],0, 50), name = 'weight')
    b = tf.Variable(tf.random_normal([1],-10,10), name = 'constant')
    xaxis = np.asarray(xaxis,dtype=np.float32)
    print(xaxis)
    y = w * xaxis + b
    # Bug fix: the loss previously referenced the module-level `forceArray`
    # instead of this function's `yaxis` argument.  The only call site passes
    # forceArray, so behaviour is unchanged there, but the function now fits
    # whatever data it is actually given.
    loss = tf.reduce_mean(tf.square(y - yaxis), name='cost')
    optimizer = tf.train.GradientDescentOptimizer(0.015)
    train = optimizer.minimize(loss)
    # Initialisation
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)
    learningNumber = 101
    # Train for a fixed number of steps, logging progress every 10 steps.
    for step in xrange(learningNumber):
        sess.run(train)
        if ( (step % 10) == 0):
            print(step,"w = ",sess.run(w), "b = ", sess.run(b), "Loss = ", sess.run(loss))
    yOut = sess.run(y)
    plt.plot(xaxis, yOut,'ro')
    plt.xlabel('Time(s)')
    plt.ylabel('Clench Force (kg)')
    plt.legend()
    plt.show()
    # Serialise the figure into an in-memory PNG.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    return buf
filename_queue = tf.train.string_input_producer(["decreasingForce.txt"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
record_defaults = [tf.constant([1], dtype = tf.float32),
tf.constant([1], dtype = tf.float32),
tf.constant([1], dtype = tf.float32),
tf.constant([1], dtype = tf.float32)]
firstCol, secondCol, thirdCol, fourthCol = tf.decode_csv(value, record_defaults=record_defaults)
features = tf.stack([firstCol, secondCol, thirdCol, fourthCol])
EMGArray=[]
forceArray=[]
sess = tf.InteractiveSession()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
coord.request_stop()
coord.join(threads)
sampleRange = 1000#24206
for i in range(sampleRange):
rows, firstCol1, secondCol1, thirdCol1, fourthCol1 = sess.run([features, firstCol, secondCol, thirdCol, fourthCol])
EMGArray = np.append(EMGArray, secondCol1)
forceArray = np.append(forceArray, thirdCol1)
timeArray=[]
#timeArray.np.asarray(timeArray, dtype = np.float32)
timeRate = 0.002024 # time(s) per sample
# Place zeros in the time array with the same number of sample range
for i in range(sampleRange):
timeArray.append(0)
# Time at index 0 is placed to be 0.002024 like this to avoid multiplying by 0 in the for loop when the for loop range is between 0 and the sample range
timeArray[0] = timeRate
for i in range(1, sampleRange):
timeArray[i] = timeRate * i
plot_buf = gen_plot(timeArray, forceArray)
plot=[]
plot.append(plot_buf)
image = tf.image.decode_png(plot_buf.getvalue(), channels=4)
image = tf.expand_dims(image, 0)
# Add image summary
summary_op = tf.summary.image("Control Lab 1", image, max_outputs=3)
# Run
summary = tf.summary.merge_all()
summary = sess.run(summary_op)
# Write summary
writer = tf.summary.FileWriter('/notebooks/logs', sess.graph)
writer.add_summary(summary)
##########################################################################################################
# export TF_CPP_MIN_LOG_LEVEL=2 (Ignore warnings)
# 25 Sep 2017
# Daniel Kho
# Seungmin Lee
# GDO number
# initial variable: w&b
# Learning rate
# Sample size
# ====> shape of regression
from __future__ import print_function
from decimal import *
from tensorflow.contrib.learn.python import SKCompat
import time
import io
import numpy as np
import random
import matplotlib
matplotlib.use('Agg') #Generate Image without window appear
import matplotlib.pyplot as plt
import tensorflow as tf
start = time.time() # Record performance time
filename_queue = tf.train.string_input_producer(["BuzzData.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [tf.constant([1], dtype = tf.float32),
tf.constant([1], dtype = tf.float32),
tf.constant([1], dtype = tf.float32),
]
# Convert CSV records to tensors. Each column maps to one tensor.
thickness, R, conductivity = tf.decode_csv(value, record_defaults=record_defaults)
#features = tf.stack([L, R, k])
def gen_plot(xaxis, yaxis):
    """Plot conductivity vs. thickness, fit y = w/x + b by gradient descent,
    overlay the fitted curve, and return the figure as a PNG buffer.

    xaxis -- independent variable samples (thickness values)
    yaxis -- dependent variable samples (conductivity values)

    Returns an io.BytesIO containing the rendered PNG.
    NOTE(review): the axis labels below ("Number of Processes"/"Time") look
    copied from an earlier script and may not match this data set.
    """
    plt.figure()
    plt.title('Number of Processes vs Time takes to finish Batches')
    plt.xlabel('Number of Processes')
    plt.ylabel('Time (days)')
    plt.legend()
    plt.show()
    plt.plot(xaxis, yaxis)
    # Gradient Decent Optimization method
    w = tf.Variable(tf.random_uniform([1],0, 50), name = 'weight')
    b = tf.Variable(tf.random_normal([1],-10,10), name='bias1')
    xaxis = np.asarray(xaxis,dtype=np.float32)
    # Reciprocal model: y = w/x + b.
    y = w*(tf.reciprocal(xaxis)) + b
    loss = tf.reduce_mean(tf.square(y - yaxis), name='cost')
    optimizer = tf.train.GradientDescentOptimizer(0.00015)
    train = optimizer.minimize(loss)
    # Initialisation
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)
    learningNumber = 101
    # Train for a fixed number of steps, logging progress every 10 steps.
    for step in xrange(learningNumber):
        sess.run(train)
        if ( (step % 10) == 0):
            print(step,"w = ",sess.run(w), "b = ", sess.run(b), "Loss = ", sess.run(loss))
    yOut = sess.run(y)
    plt.plot(xaxis, yOut)
    plt.axis([0,max(xaxis)+0.01,0,max(yaxis)+0.01])
    plt.show()
    # Serialise the figure into an in-memory PNG.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    return buf
sess = tf.InteractiveSession()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
thicknessArray=[]
conductivityArray=[]
locationArray=[]
loop=19
for i in range(loop):
if ( (i % 1) == 0):
print(i,'read data over',loop)
L, k = sess.run([thickness, conductivity])
thicknessArray = np.append(thicknessArray, L)
conductivityArray = np.append(conductivityArray, k)
# coord
coord.request_stop()
coord.join(threads)
plot_buf = gen_plot(thicknessArray, conductivityArray)
plot=[]
plot.append(plot_buf)
# Convert PNG buffer to TF image
image = tf.image.decode_png(plot_buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
# Add image summary
summary_op = tf.summary.image("Overall Plot", image, max_outputs=3)
# Run
summary = tf.summary.merge_all()
summary = sess.run(summary_op)
# Write summary
writer = tf.summary.FileWriter('/notebooks/logs', sess.graph)
writer.add_summary(summary)
end = time.time()
print("Elapsed time ",end-start, "seconds")
| skho513/Big-Data-Analytics-for-IoT-enabled-Manufacturing | DataAnalysis.py | DataAnalysis.py | py | 20,816 | python | en | code | 0 | github-code | 13 |
# Draw a short multi-colour burst of strokes with turtle graphics and
# report how long the drawing took.
import turtle
import time
b=time.time()  # wall-clock start time
turtle.speed(30)
turtle.bgcolor("black")
turtle.hideturtle()
for i in range(1):  # single pass; loop kept so more passes are one edit away
    for c in ("red","green","pink","orange"):  # one arm per colour
        turtle.color(c)
        turtle.pensize(2)
        turtle.lt(12)  # rotate slightly before each coloured arm
        for i in range(10):  # NOTE: shadows the outer loop variable `i`
            turtle.fd(5)
            turtle.lt(150)  # lt(150) followed by rt(150) cancels out,
            turtle.rt(150)  # so each iteration advances in a straight line
turtle.Screen().bye()  # close the window as soon as drawing finishes
#turtle.done()
e=time.time()  # wall-clock end time
print(f"{e-b}")  # elapsed drawing time in seconds
| Arryn21/Codes | New1.py | New1.py | py | 428 | python | en | code | 0 | github-code | 13 |
31201673288 | import copy
from st2tests import DbTestCase
from st2common.models.db.datastore import KeyValuePairDB
from st2common.persistence.datastore import KeyValuePair
from st2reactor.rules import datatransform
PAYLOAD = {'k1': 'v1', 'k2': 'v2'}
PAYLOAD_WITH_KVP = copy.copy(PAYLOAD)
PAYLOAD_WITH_KVP.update({'k5': '{{system.k5}}'})
class DataTransformTest(DbTestCase):
    """Tests for st2reactor.rules.datatransform template rendering.

    get_transformer(payload) returns a callable that renders a mapping of
    Jinja-style templates, exposing the payload under the ``trigger``
    namespace and datastore key-value pairs under ``system``.
    """
    def test_payload_transform(self):
        # Payload keys are reachable as {{trigger.<key>}}.
        transformer = datatransform.get_transformer(PAYLOAD)
        mapping = {'ip1': '{{trigger.k1}}-static',
                   'ip2': '{{trigger.k2}} static'}
        result = transformer(mapping)
        self.assertEqual(result, {'ip1': 'v1-static', 'ip2': 'v2 static'})
    def test_hypenated_payload_transform(self):
        # Hyphenated keys cannot use dotted access; subscript syntax must work.
        payload = {'headers': {'hypenated-header': 'dont-care'}, 'k2': 'v2'}
        transformer = datatransform.get_transformer(payload)
        mapping = {'ip1': '{{trigger.headers[\'hypenated-header\']}}-static',
                   'ip2': '{{trigger.k2}} static'}
        result = transformer(mapping)
        self.assertEqual(result, {'ip1': 'dont-care-static', 'ip2': 'v2 static'})
    def test_system_transform(self):
        # Seed datastore entries so both {{system.*}} lookups and a payload
        # value that itself references {{system.k5}} can be resolved.
        k5 = KeyValuePair.add_or_update(KeyValuePairDB(name='k5', value='v5'))
        k6 = KeyValuePair.add_or_update(KeyValuePairDB(name='k6', value='v6'))
        k7 = KeyValuePair.add_or_update(KeyValuePairDB(name='k7', value='v7'))
        try:
            transformer = datatransform.get_transformer(PAYLOAD_WITH_KVP)
            mapping = {'ip5': '{{trigger.k5}}-static',
                       'ip6': '{{system.k6}}-static',
                       'ip7': '{{system.k7}}-static'}
            result = transformer(mapping)
            expected = {'ip5': 'v5-static',
                        'ip6': 'v6-static',
                        'ip7': 'v7-static'}
            self.assertEqual(result, expected)
        finally:
            # Always clean the datastore so other tests start from scratch.
            KeyValuePair.delete(k5)
            KeyValuePair.delete(k6)
            KeyValuePair.delete(k7)
| gtmanfred/st2 | st2reactor/tests/unit/test_data_transform.py | test_data_transform.py | py | 1,988 | python | en | code | null | github-code | 13 |
6312830354 | import cx_Oracle
import requests, json
import datetime
import time
import yaml
from config.config import *
DATE_FORMAT = '%Y-%m-%d'
API_KEY = api_key
MIN_DATE = '2015-01-01'
SORT_BY = 'primary_release_date.asc'
LANGUAGE = 'en'
DISCOVER_URL = 'https://api.themoviedb.org/3/discover/movie'
ID_URL = 'https://api.themoviedb.org/3/movie/{}'
DISCOVER_PAYLOAD = {'api_key': API_KEY,
'primary_release_date.gte': MIN_DATE,
'sort_by': SORT_BY,
'original_language': LANGUAGE,
'vote_count.gte': 1}
ID_PAYLOAD = {'api_key': API_KEY}
SLEEP_TIME_SECONDS = 11
ORACLE_CONN_STRING = sql_login
SELECT_STATEMENT = 'select ID from Movie where ID = {}'
INSERT_STATEMENT = "insert into Movie values ('{}', to_date('{}', \'YYYY-MM-DD\'), '{}', '{}', '{}', '{}', '{}', '{}', '{}')"
MAX_DATE_STATEMENT = "select max(release_date) from Movies"
con = cx_Oracle.connect(ORACLE_CONN_STRING)
#con.close()
cursor = con.cursor()
def request_movie_db(url, payload):
    """GET a TMDb endpoint and return the decoded JSON response.

    Successful TMDb responses carry no 'status_code' key; its presence marks
    an API error (typically the rate limit).  In that case back off for
    SLEEP_TIME_SECONDS and retry the request once.

    url     -- endpoint URL
    payload -- query parameters (must include the API key)
    """
    ret = json.loads(requests.get(url, params=payload).text)
    # Membership test instead of the old try/except KeyError: missing
    # 'status_code' is the *normal* path, so it should not drive control
    # flow through an exception handler (which also printed noise per call).
    if 'status_code' in ret:
        time.sleep(SLEEP_TIME_SECONDS)
        ret = json.loads(requests.get(url, params=payload).text)
    return ret
def get_max_date():
    """Advance the discover filter to the newest release date already stored.

    Queries the database for the latest release date inserted so far and, if
    one exists, sets it as DISCOVER_PAYLOAD['primary_release_date.gte'] so a
    re-run resumes where the previous run stopped.  Returns the raw
    fetchall() result for the caller to inspect.
    """
    cursor.execute(MAX_DATE_STATEMENT)
    res = cursor.fetchall()
    # MAX() over an empty table still yields one row containing None, so we
    # must guard on the value itself, not just on a non-empty result set —
    # otherwise None.strftime below would raise AttributeError.
    if res and res[0][0] is not None:
        date = res[0][0]
        DISCOVER_PAYLOAD['primary_release_date.gte'] = date.strftime(DATE_FORMAT)
    return res
#get_max_date()
# Seed the crawl: the first discover request reports how many pages exist.
r = request_movie_db(DISCOVER_URL, DISCOVER_PAYLOAD)
print(r)
bad_char_count = 0  # movies skipped because the DB charset rejected them
# Walk every discover page, then fetch each movie's details and insert it.
for i in range(1, r['total_pages']): #For each page
    print('######', i)
    DISCOVER_PAYLOAD['page'] = i
    r = request_movie_db(DISCOVER_URL, DISCOVER_PAYLOAD)
    for j in range(0, len(r['results'])): #for each movie in a page
        curr_id = r['results'][j]['id']
        print(curr_id)
        #todo:
        #1. check for release date
        curr_movie = request_movie_db(ID_URL.format(str(curr_id)), ID_PAYLOAD)
        # Replace single quotes so they do not terminate the SQL string below.
        curr_movie['title'] = curr_movie['title'].replace("'", "`")
        try:
            curr_movie['overview'] = curr_movie['overview'].replace("'", "`")
        except AttributeError:
            pass  # overview may be absent/None for some movies
        cursor.execute(SELECT_STATEMENT.format(curr_id))
        res = cursor.fetchall()
        #if len(res) == 0:
        # SECURITY NOTE(review): building SQL with str.format on API-supplied
        # text is injection-prone; bind variables (cursor.execute(sql, params))
        # would be the safe fix and would also remove the quote-mangling above.
        statement = INSERT_STATEMENT.format(curr_movie['title'], curr_movie['release_date'], str(curr_movie['budget']), str(curr_movie['revenue']), str(curr_movie['popularity']), curr_movie['overview'], curr_movie['poster_path'], str(curr_movie['runtime']), str(curr_id))
        try:
            cursor.execute(statement)
            print('inserting: ', statement)
            con.commit()
        except UnicodeEncodeError:
            # Count and skip rows the database encoding cannot represent.
            bad_char_count += 1
            print(bad_char_count)
        except cx_Oracle.IntegrityError:
            # Duplicate key or constraint violation: log it and keep going.
            print(statement)
| chasefarmer2808/ReposiMovie-API | jobs/populate.py | populate.py | py | 3,001 | python | en | code | 0 | github-code | 13 |
# BOJ 2667 (complex/housing clusters) — judged: memory 114328 KB, time 112 ms
import sys
input = sys.stdin.readline  # faster line reads than builtin input()
N = int(input())  # grid is N x N
arr = [list(input().strip()) for _ in range(N)]  # '1' = house, '0' = empty
visited = [[False for _ in range(N)] for _ in range(N)]
complex = []  # size of each housing complex; NOTE: shadows builtin complex()
def bfs(i, j):
    """Breadth-first flood fill of the housing complex containing (i, j).

    Marks every 4-connected '1' cell reachable from (i, j) as visited and
    appends the complex's cell count to the module-level `complex` list.
    """
    # deque gives O(1) popleft; list.pop(0) shifts the whole list (O(n)).
    from collections import deque
    queue = deque([(i, j)])
    visited[i][j] = True
    cnt = 0
    while queue:
        x, y = queue.popleft()
        cnt += 1
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            # Enqueue unvisited in-bounds house cells; mark on enqueue so a
            # cell can never be queued twice.
            if 0 <= nx < N and 0 <= ny < N and arr[nx][ny] == '1' and not visited[nx][ny]:
                visited[nx][ny] = True
                queue.append((nx, ny))
    complex.append(cnt)
# Start one BFS per still-unvisited house cell; each run records one complex.
for i in range(N):
    for j in range(N):
        if arr[i][j] == '1' and not visited[i][j]:
            bfs(i, j)
# Output: number of complexes, then their sizes in ascending order.
print(len(complex))
for c in sorted(complex):
    print(c)
| nuuuri/algorithm | 그래프/BOJ_2667.py | BOJ_2667.py | py | 802 | python | en | code | 0 | github-code | 13 |
41431624822 | from django.shortcuts import render, redirect, get_object_or_404, reverse
from gestion_de_mascotas.models import Perro_perdido, Perro_en_adopcion, Perro, Perro_encontrado, Entrada, Libreta_sanitaria, Registro_vacuna, Servicio_veterinario, Vacuna
from .forms import Perro_perdido_form, Perro_en_adopcion_form, Send_email_form, Send_email_logged_form, Perro_form, Perro_encontrado_form, Perro_encontrado_update_form, Perro_perdido_update_form, Perro_form_update, Entrada_form, Entrada_form_vacuna
from django.core.mail import EmailMessage
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
User = get_user_model()
from datetime import date
# Create your views here.
def perros_perdidos(request):
    """Render the listing page with every lost-dog announcement."""
    anuncios = Perro_perdido.objects.all()
    contexto = {"perros_perdidos": anuncios}
    return render(request, "gestion_de_mascotas/perros_perdidos.html", contexto)
def perros_encontrados(request):
    """Render the listing page with every found-dog announcement."""
    anuncios = Perro_encontrado.objects.all()
    contexto = {"perros_encontrados": anuncios}
    return render(request, "gestion_de_mascotas/perros_encontrados.html", contexto)
def anunciar_perro_perdido(request):
    """Show and process the "announce a lost dog" form.

    GET  -- renders an empty form (plus the logged-in user's dogs, so one of
            them can be pre-selected via cargar_datos_perro).
    POST -- validates the form, stamps the announcement with the current
            user and redirects to the lost-dogs listing on success.
    """
    perros = list()
    if (request.user.is_authenticated) :
        perros = Perro.objects.filter(owner = request.user)
    form = Perro_perdido_form()
    ok = True # Sent to the template; its value decides which button is shown
    data = {
        "form":form,
        "perros":perros,
        "ok":ok,
    }
    if request.method == 'POST':
        form = Perro_perdido_form(request.POST, request.FILES)
        perro_perdido = Perro_perdido()
        if form.is_valid() :
            perro_perdido = form.save(commit=False) # build the instance without inserting into the database yet
            perro_perdido.created_by = request.user
            perro_perdido.save()
            data["mensaje"] = "Se publicó el anuncio correctamente."
            return redirect(to="perros_perdidos")
    return render(request,"gestion_de_mascotas/anunciar_perro_perdido.html",data)
def id_valida(request, id):
    """Return True when *id* identifies a dog owned by the current user.

    Guards against a user setting another owner's dog data by tampering
    with the id embedded in the URL.
    """
    # One EXISTS query instead of fetching and looping over every owned dog.
    # int(id) mirrors the original coercion (URL captures arrive as strings).
    return Perro.objects.filter(owner=request.user, id=int(id)).exists()
def cargar_datos_perro(request, id):
    """Pre-fill the lost-dog form with the data of one of the user's dogs.

    id -- primary key of the dog to pre-load; only honoured when id_valida()
          confirms the dog belongs to the current user (otherwise an empty
          form is shown instead).
    POST submits the announcement exactly like anunciar_perro_perdido().
    """
    perros = Perro.objects.filter(owner = request.user)
    if id_valida(request, id ):
        ok = False
        perro = get_object_or_404(Perro, id=int(id))
        # Seed the form with the selected dog's stored attributes.
        datos_iniciales = {
            'nombre': perro.nombre,
            'size':perro.size,
            'sexo':perro.sexo,
            'raza':perro.raza,
        }
        form = Perro_perdido_form(initial=datos_iniciales)
        data = {
            "form":form,
            "perros":perros,
            "ok":ok,
        }
    else:
        # Invalid/foreign id: fall back to a blank form.
        form = Perro_perdido_form()
        data = {
            "form":form,
            "perros":perros,
        }
    if request.method == 'POST':
        form = Perro_perdido_form(request.POST, request.FILES)
        perro_perdido = Perro_perdido()
        if form.is_valid() :
            perro_perdido = form.save(commit=False) # build the instance without inserting into the database yet
            perro_perdido.created_by = request.user
            perro_perdido.save()
            data["mensaje"] = "Se publicó el anuncio correctamente."
            return redirect(to="perros_perdidos")
    return render(request,"gestion_de_mascotas/anunciar_perro_perdido.html",data)
def anunciar_perro_encontrado(request):
    """Show and process the "announce a found dog" form.

    GET  -- renders an empty form (plus the logged-in user's dogs).
    POST -- validates the form, stamps the announcement with the current
            user and redirects to the found-dogs listing on success.
    """
    perros = list()
    if request.user.is_authenticated :
        perros = Perro.objects.filter(owner = request.user)
    form = Perro_encontrado_form()
    data = {
        "form":form,
        "perros":perros,
    }
    if request.method == 'POST':
        form = Perro_encontrado_form(request.POST, request.FILES)
        perro_encontrado = Perro_encontrado()
        if form.is_valid() :
            perro_encontrado = form.save(commit=False)  # defer the DB insert
            perro_encontrado.created_by = request.user
            perro_encontrado.save()
            data["mensaje"] = "Se publicó el anuncio correctamente."
            return redirect(to="perros_encontrados")
    return render(request,"gestion_de_mascotas/anunciar_perro_encontrado.html",data)
def perros_en_adopcion(request):
    """Render the listing page with every adoption announcement."""
    anuncios = Perro_en_adopcion.objects.all()
    contexto = {"perros_en_adopcion": anuncios}
    return render(request, "gestion_de_mascotas/perros_en_adopcion.html", contexto)
def anunciar_perro_adopcion(request):
    """Show and process the "put a dog up for adoption" form.

    GET  -- renders an empty form (plus the logged-in user's dogs).
    POST -- validates the form, stamps the announcement with the current
            user and redirects to the adoption listing on success.
    """
    perros = list()
    if request.user.is_authenticated :
        perros = Perro.objects.filter(owner = request.user)
    perro_en_adopcion = Perro_en_adopcion()
    form = Perro_en_adopcion_form()
    data = {
        "form":form,
        "perros":perros,
    }
    if request.method == 'POST':
        form = Perro_en_adopcion_form(request.POST, request.FILES)
        if form.is_valid() :
            perro_en_adopcion = form.save(commit=False)  # defer the DB insert
            perro_en_adopcion.created_by = request.user
            perro_en_adopcion.save()
            data["mensaje"] = "Se publicó el anuncio correctamente."
            return redirect(to="perros_en_adopcion")
    return render(request,"gestion_de_mascotas/anunciar_perro_adopcion.html",data)
def mis_perros(request):
    """Show the dogs owned by the logged-in user."""
    contexto = {"mis_perros": Perro.objects.filter(owner=request.user)}
    return render(request, "gestion_de_mascotas/mis_perros.html", contexto)
def mis_perros_en_adopcion(request):
    """Show the adoption announcements created by the logged-in user."""
    anuncios = Perro_en_adopcion.objects.filter(created_by=request.user)
    contexto = {"mis_perros_en_adopcion": anuncios}
    return render(request, "gestion_de_mascotas/mis_perros_en_adopcion.html", contexto)
def eliminar_anuncio_adopcion(request, id):
    """Delete an adoption announcement and bounce back to the referring page.

    Only authenticated users may delete.  The redirect now happens outside
    the auth check: the original returned None for anonymous visitors, which
    makes Django raise a 500 ("view didn't return an HttpResponse").
    """
    if request.user.is_authenticated:
        # 404 instead of an unhandled DoesNotExist when the id is stale.
        anuncio = get_object_or_404(Perro_en_adopcion, id=id)
        anuncio.delete()
    return redirect(to=request.META.get('HTTP_REFERER'))
def contacto_adopcion(request, id) :
    """Contact the author of an adoption announcement by e-mail.

    Three cases:
      * logged-in user who is not the author -> contact form; the sender
        address is taken from the account;
      * anonymous visitor -> contact form that also asks for an e-mail;
      * the author opening their own announcement -> back to the adoption
        listing with a "you can't message yourself" notice.
    """
    mensajes = "No puedes enviarte un mensaje a tí mismo."
    publicacion = get_object_or_404(Perro_en_adopcion, id=id)
    autor = get_object_or_404(User, dni=publicacion.created_by.dni)
    if request.user.is_authenticated and request.user.email != autor.email :
        # Logged-in sender: reuse the account e-mail as the reply address.
        form = Send_email_logged_form()
        if request.method == "POST" :
            form = Send_email_logged_form(data=request.POST)
            if form.is_valid() :
                mail = EmailMessage("¡Oh my dog!",
                "Se han contactado contigo por tu publicación de perro en adopción:\n"+
                request.POST.get('mensaje')+f"\nDe: {request.user.email}",
                request.user.email,
                [autor.email]
                )
                mail.send()
                return redirect("home")
        return render(request, "gestion_de_mascotas/contacto_adopcion.html",{'form':form})
    else :
        form = Send_email_form()
        if not request.user.is_authenticated :
            # Anonymous sender: the form supplies the reply address.
            if request.method == "POST" :
                form = Send_email_form(data=request.POST)
                if form.is_valid() :
                    mail = EmailMessage("¡Oh my dog!",
                    "Se han contactado contigo por tu publicación de perro en adopción:\n"+
                    request.POST.get('mensaje')+f"\nDe: {request.POST.get('email')}",
                    request.POST.get('email'),
                    [autor.email]
                    )
                    mail.send()
                    return redirect("home")
            return render(request, "gestion_de_mascotas/contacto_adopcion.html",{'form':form})
        else:
            # Author opened their own announcement: show the listing + notice.
            perros_en_adopcion=Perro_en_adopcion.objects.all()
            return render(request,"gestion_de_mascotas/perros_en_adopcion.html",{"perros_en_adopcion":perros_en_adopcion, "mensajes":mensajes})
def editar_anuncio(request, id, type) :
    """Edit an adoption announcement.

    id   -- primary key of the announcement to edit
    type -- which listing to return to after saving: 'all' for the public
            listing, anything else for the user's own announcements.
            NOTE(review): the parameter shadows the builtin ``type`` and
            there is no ownership check — any authenticated user may edit.
    """
    if request.user.is_authenticated:
        publicacion = get_object_or_404(Perro_en_adopcion, id=id) # fetch the announcement being edited
        # current_user = request.user
        form = Perro_en_adopcion_form(request.POST or None, instance = publicacion)
        data = {
            'form': form,
        }
        if form.is_valid():
            # Copy the submitted fields onto the instance and persist it.
            publicacion.titulo = request.POST['titulo']
            publicacion.edad = request.POST['edad']
            publicacion.tamanio = request.POST['tamanio']
            publicacion.detalles_de_salud = request.POST['detalles_de_salud']
            publicacion.zona = request.POST['zona']
            publicacion.historia = request.POST['historia']
            publicacion.save()
            if type == 'all' :
                return redirect(to = "perros_en_adopcion")
            else :
                return redirect(to = "mis_perros_en_adopcion")
        else:
            return render(request,"gestion_de_mascotas/editar_anuncio.html",data)
def validar_perro(request, perros):
    """Return True when the submitted dog name is not already taken.

    request -- current HttpRequest; the candidate name is read from
               request.POST['nombre']
    perros  -- iterable of the owner's existing Perro objects
    """
    nombre = request.POST['nombre']
    # all() short-circuits on the first clash, like the original loop did.
    return all(perro.nombre != nombre for perro in perros)
def cargar_perro(request, id):
    """Register a new dog for the client identified by *id* (admin flow).

    GET  -- renders an empty dog form.
    POST -- if the form is valid and the name is not already used by one of
            the owner's dogs, saves the dog, creates its (empty) health
            booklet and redirects to the owner's dog list; otherwise the
            form is re-shown with an error message.
    """
    owner = get_object_or_404(User, id=id)
    perros = Perro.objects.filter(owner = owner)
    form = Perro_form()
    data = {
        "form":form
    }
    if request.method == 'POST':
        form = Perro_form(request.POST)
        if form.is_valid() and validar_perro(request, perros):
            perro = form.save(commit=False)  # defer the DB insert
            perro.owner = owner
            perro.save()
            # Every registered dog gets a fresh health booklet.
            libreta_sanitaria = Libreta_sanitaria()
            libreta_sanitaria.perro = perro
            libreta_sanitaria.save()
            data["mensaje"] = "Se agregó al perro correctamente."
            params = {'id': id}
            return redirect(reverse(f'ver_perros_cliente', kwargs=params))
        else :
            data['mensaje_error'] = 'Ya existe ese perro.'
    return render(request,"gestion_de_mascotas/cargar_perro.html",data)
def editar_perro(request, id) :
    """Edit a dog's name and size (admin-only flow).

    id -- primary key of the dog to edit.  Only superusers may edit; on a
          valid POST the dog is saved and the view redirects to the owner's
          dog list.
    """
    if request.user.is_authenticated and request.user.is_superuser :
        perro = get_object_or_404(Perro, id=id)
        form = Perro_form_update(request.POST or None, instance = perro)
        data = {
            'form': form,
        }
        if form.is_valid():
            perro.nombre = request.POST['nombre']
            perro.size = request.POST['size']
            perro.save()
            # Back to the listing of this dog's owner.
            params = {'id': perro.owner.id}
            return redirect(reverse(f'ver_perros_cliente', kwargs=params))
        else:
            return render(request,"gestion_de_mascotas/editar_perro.html",data)
def eliminar_perro(request, id2, id):
    """Delete a dog (superuser only) and return to the referring page.

    id2 -- unused, kept only for URL-pattern compatibility.
    id  -- primary key of the dog to delete.
    """
    if request.user.is_authenticated and request.user.is_superuser:
        # 404 instead of an unhandled DoesNotExist when the id is stale.
        perro = get_object_or_404(Perro, id=id)
        perro.delete()
    return redirect(to=request.META.get('HTTP_REFERER'))
def perros(request):
    """Render the full list of registered dogs."""
    contexto = {"perros": Perro.objects.all()}
    return render(request, "gestion_de_mascotas/perros.html", contexto)
def eliminar_anuncio_encontrado(request, id):
    """Delete a found-dog announcement and return to the referring page.

    Only authenticated users may delete; anonymous requests are simply
    redirected back without touching the database.
    """
    if request.user.is_authenticated:
        # 404 instead of an unhandled DoesNotExist when the id is stale.
        perro = get_object_or_404(Perro_encontrado, id=id)
        perro.delete()
    return redirect(to=request.META.get('HTTP_REFERER'))
def eliminar_anuncio_perdido(request, id) :
if request.user.is_authenticated :
perro = Perro_perdido.objects.get(id=id)
perro.delete()
return redirect(to = request.META.get('HTTP_REFERER'))
def editar_anuncio_encontrado_2(request, id) :
    """Edit size/zone/description of a "found dog" post (logged-in users).

    On a valid POST the post is updated and the user is redirected to the
    full listing; otherwise the edit form is re-rendered.
    """
    if request.user.is_authenticated:
        publicacion = get_object_or_404(Perro_encontrado, id=id)
        form = Perro_encontrado_update_form(request.POST or None, instance = publicacion)
        data = {
            'form': form,
        }
        if form.is_valid():
            publicacion.size = request.POST['size']
            publicacion.zona = request.POST['zona']
            publicacion.descripcion = request.POST['descripcion']
            publicacion.save()
            return redirect(to = "perros_encontrados")
        else:
            return render(request,"gestion_de_mascotas/editar_anuncio_encontrado.html",data)
def editar_anuncio_perdido_2(request, id) :
    """Edit a "lost dog" post, optionally replacing its image (logged-in users)."""
    if request.user.is_authenticated:
        publicacion = get_object_or_404(Perro_perdido, id=id)
        form = Perro_perdido_update_form(request.POST or None, instance = publicacion)
        data = {
            'form': form,
        }
        if form.is_valid():
            # Only replace the image when a new one was actually uploaded.
            if 'nueva_imagen' in request.FILES:
                publicacion.imagen = request.FILES['nueva_imagen']
            publicacion.size = request.POST['size']
            publicacion.zona = request.POST['zona']
            publicacion.descripcion = request.POST['descripcion']
            publicacion.nombre = request.POST['nombre']
            publicacion.edad = request.POST['edad']
            publicacion.save()
            return redirect(to = "perros_perdidos")
        else:
            return render(request,"gestion_de_mascotas/editar_anuncio_perdido.html",data)
def contacto_encontrado(request, id) :
    """Contact the author of a "found dog" post by e-mail.

    Authenticated users (other than the author) send from their account
    address; anonymous visitors must supply an e-mail address in the
    form.  The author viewing their own post is sent back to the listing
    with a warning message instead.
    """
    mensajes = "No puedes enviarte un mensaje a tí mismo."
    publicacion = get_object_or_404(Perro_encontrado, id=id)
    autor = get_object_or_404(User, dni=publicacion.created_by.dni)
    if request.user.is_authenticated and request.user.email != autor.email :
        form = Send_email_logged_form()
        if request.method == "POST" :
            form = Send_email_logged_form(data=request.POST)
            if form.is_valid() :
                mail = EmailMessage("¡Oh my dog!",
                                    "Se han contactado contigo por tu publicación de un perro encontrado:\n"+
                                    request.POST.get('mensaje')+f"\nDe: {request.user.email}",
                                    request.user.email,
                                    [autor.email]
                                    )
                mail.send()
                return redirect("home")
        return render(request, "gestion_de_mascotas/contacto_adopcion.html",{'form':form})
    else :
        form = Send_email_form()
        if not request.user.is_authenticated :
            if request.method == "POST" :
                form = Send_email_form(data=request.POST)
                if form.is_valid() :
                    mail = EmailMessage("¡Oh my dog!",
                                        "Se han contactado contigo por tu publicación de un perro encontrado:\n"+
                                        request.POST.get('mensaje')+f"\nDe: {request.POST.get('email')}",
                                        request.POST.get('email'),
                                        [autor.email]
                                        )
                    mail.send()
                    return redirect("home")
            return render(request, "gestion_de_mascotas/contacto_adopcion.html",{'form':form})
        else:
            # The author tried to contact themselves: show the listing with a warning.
            perros_encontrados=Perro_encontrado.objects.all()
            return render(request,"gestion_de_mascotas/perros_adopcion.html",{"perros_encontrados":perros_encontrados, "mensajes":mensajes})
def contacto_perdido(request, id) :
    """Contact the author of a "lost dog" post by e-mail.

    Mirrors ``contacto_encontrado``: logged-in non-authors send from
    their account address, anonymous visitors supply an address, and the
    author themselves is redirected back to the listing with a warning.
    """
    mensajes = "No puedes enviarte un mensaje a tí mismo."
    publicacion = get_object_or_404(Perro_perdido, id=id)
    autor = get_object_or_404(User, dni=publicacion.created_by.dni)
    if request.user.is_authenticated and request.user.email != autor.email :
        form = Send_email_logged_form()
        if request.method == "POST" :
            form = Send_email_logged_form(data=request.POST)
            if form.is_valid() :
                mail = EmailMessage("¡Oh my dog!",
                                    "Se han contactado contigo por tu publicación de un perro perdido:\n"+
                                    request.POST.get('mensaje')+f"\nDe: {request.user.email}",
                                    request.user.email,
                                    [autor.email]
                                    )
                mail.send()
                return redirect("home")
        return render(request, "gestion_de_mascotas/contacto_adopcion.html",{'form':form})
    else :
        form = Send_email_form()
        if not request.user.is_authenticated :
            if request.method == "POST" :
                form = Send_email_form(data=request.POST)
                if form.is_valid() :
                    mail = EmailMessage("¡Oh my dog!",
                                        "Se han contactado contigo por tu publicación de un perro perdido:\n"+
                                        request.POST.get('mensaje')+f"\nDe: {request.POST.get('email')}",
                                        request.POST.get('email'),
                                        [autor.email]
                                        )
                    mail.send()
                    return redirect("home")
            return render(request, "gestion_de_mascotas/contacto_adopcion.html",{'form':form})
        else:
            # The author tried to contact themselves: show the listing with a warning.
            perros_perdidos=Perro_perdido.objects.all()
            return render(request,"gestion_de_mascotas/perros_perdidos.html",{"perros_perdidos":perros_perdidos, "mensajes":mensajes})
def editar_anuncio_perdido(request, id, type):
    """Edit a "lost dog" post via a model form, including uploaded files.

    ``type`` selects the post-save redirect: 'all' returns to the public
    listing, anything else to the user's own posts.
    """
    publicacion = get_object_or_404(Perro_perdido, pk=id)
    if request.method == 'POST':
        form = Perro_perdido_update_form(request.POST, request.FILES, instance=publicacion)
        if form.is_valid():
            publicacion = form.save(commit=False)
            publicacion.save()
            if type == 'all' :
                return redirect(to = "perros_perdidos")
            else :
                return redirect(to = "mis_perros_perdidos")
    else:
        form = Perro_perdido_update_form(instance=publicacion)
    return render(request, 'gestion_de_mascotas/editar_anuncio_perdido.html', {'form': form})
def editar_anuncio_encontrado(request, id, type):
    """Edit a "found dog" post via a model form; redirect per ``type`` as above."""
    publicacion = get_object_or_404(Perro_encontrado, pk=id)
    if request.method == 'POST':
        form = Perro_encontrado_update_form(request.POST, request.FILES, instance=publicacion)
        if form.is_valid():
            form.save()
            if type == 'all' :
                return redirect(to = "perros_encontrados")
            else :
                return redirect(to = "mis_perros_encontrados")
    else:
        form = Perro_encontrado_update_form(instance=publicacion)
    return render(request, 'gestion_de_mascotas/editar_anuncio_perdido.html', {'form': form})
def adopcion_realizada(request, id):
    """Mark the adoption post with pk ``id`` as adopted, then return to the referrer."""
    if request.user.is_authenticated:
        perro = Perro_en_adopcion.objects.get(id=id)
        perro.adoptado = True
        perro.save()
        # request.META.get('HTTP_REFERER') is the previously visited view.
        return redirect(to = request.META.get('HTTP_REFERER'))
def perro_encontrado(request, id):
    """Mark the lost-dog post with pk ``id`` as found, then return to the referrer."""
    if request.user.is_authenticated:
        perro = Perro_perdido.objects.get(id=id)
        perro.encontrado = True
        perro.save()
        # request.META.get('HTTP_REFERER') is the previously visited view.
        return redirect(to = request.META.get('HTTP_REFERER'))
def owner_encontrado(request, id):
    """Mark the found-dog post with pk ``id`` as returned to its owner."""
    if request.user.is_authenticated:
        perro = Perro_encontrado.objects.get(id=id)
        perro.recuperado = True
        perro.save()
        # request.META.get('HTTP_REFERER') is the previously visited view.
        return redirect(to = request.META.get('HTTP_REFERER'))
def mis_perros_perdidos(request):
    """List the current user's own lost-dog posts."""
    mis_perros_perdidos = Perro_perdido.objects.filter(created_by = request.user)
    # NOTE(review): reverse() on an unordered queryset has no defined effect
    # unless the model declares a Meta ordering — confirm.
    mis_perros_perdidos = mis_perros_perdidos.reverse()
    return render(request,"gestion_de_mascotas/mis_perros_perdidos.html",{'mis_perros_perdidos':mis_perros_perdidos})
def mis_perros_encontrados(request):
    """List the current user's own found-dog posts."""
    mis_perros_encontrados = Perro_encontrado.objects.filter(created_by = request.user)
    return render(request,"gestion_de_mascotas/mis_perros_encontrados.html",{'mis_perros_encontrados':mis_perros_encontrados})
def ver_perros_cliente(request, id):
    """List all dogs registered to the client with primary key ``id``."""
    perro_owner = User.objects.get(id=id)
    perros_cliete = Perro.objects.filter(owner = perro_owner)
    return render(request,"gestion_de_mascotas/perros_cliente.html",{'perros':perros_cliete, "cliente":perro_owner})
def ver_historial_medico(request, id):
    """
    Show the medical history of the dog with primary key ``id``:
    every Entrada (veterinary record) registered for that dog.
    """
    perro = Perro.objects.get(id=id)
    entradas = Entrada.objects.filter(perro = perro)  # only this dog's entries
    return render(request,"gestion_de_mascotas/historial_medico.html",{'entradas':entradas, 'id_perro':id, 'id_user':perro.owner.id})
def crear_entrada(request, id):
    """Create a veterinary-record entry for the dog with pk ``id``.

    Side effects depend on the chosen service: castration flags the
    health booklet, deworming updates its last-deworming date, and
    vaccination additionally creates a Registro_vacuna row.
    """
    perro = Perro.objects.get(id=id)
    libreta = Libreta_sanitaria.objects.get(perro = perro)
    # The form is built from the castration flag so an already-castrated
    # dog is not offered castration again.
    form = Entrada_form(libreta.castrado)
    data = {
        "form":form,
        "id":id,
    }
    if request.method == 'POST':
        form = Entrada_form(libreta.castrado,request.POST, request.FILES)
        if form.is_valid() :
            servicio = Servicio_veterinario.objects.get(id = request.POST.get('motivo'))
            if servicio.servicio == "Castración" :
                libreta.castrado = True
                libreta.save()
            elif servicio.servicio == "Desparasitación" :
                libreta.ultima_desparasitacion = date.today()
                libreta.save()
            elif servicio.servicio == "Vacunación" :
                registro_vacuna = Registro_vacuna()
                registro_vacuna.perro = Perro.objects.get(id = id)
                from gestion_de_mascotas.models import Vacuna
                registro_vacuna.vacuna = Vacuna.objects.get(id = int(request.POST.get('vacuna')))
                registro_vacuna.numero_dosis = int(request.POST.get('numero_dosis'))
                registro_vacuna.save()
            entrada = form.save(commit=False)
            entrada.perro = perro
            entrada.save()
            params = {'id': id}
            return redirect(reverse(f'ver_historial_medico', kwargs=params))
        else:
            # NOTE(review): int() raises ValueError on non-numeric input — the
            # form is presumably expected to catch that first; confirm.
            if int(request.POST.get('peso')) < 0 :
                data['mensaje_error'] = "El peso debe ser un valor positivo."
            elif int(request.POST.get('numero_dosis')) < 0 :
                data['mensaje_error'] = "El número de dosis no puede ser negativo."
            else:
                data['mensaje_error'] = "Los campos dosis y vacuna son obligatorios para vacunación."
    return render(request,"gestion_de_mascotas/crear_entrada.html",data)
def eliminar_entrada(request,id2, id) :
    """Delete the veterinary record with pk ``id`` of the dog with pk ``id2``.

    Superusers only.  Deleting an entry also rolls back its side effects:
    vaccination removes the linked Registro_vacuna, castration clears the
    booklet flag, deworming restores the previous deworming date.
    """
    if request.user.is_superuser :
        entrada = Entrada.objects.get(id=id)
        if entrada.motivo.servicio == "Vacunación" :
            registro_vacuna = Registro_vacuna.objects.get(entrada = entrada)
            registro_vacuna.delete()
            entrada.delete()
        elif entrada.motivo.servicio == "Castración" :
            libreta_sanitaria = Libreta_sanitaria.objects.get(perro = Perro.objects.get(id = id2))
            libreta_sanitaria.castrado = False
            libreta_sanitaria.save()
            entrada.delete()
        elif entrada.motivo.servicio == "Desparasitación" :
            libreta_sanitaria = Libreta_sanitaria.objects.get(perro = Perro.objects.get(id = id2))
            # Restore the previous deworming date recorded in the booklet.
            libreta_sanitaria.ultima_desparasitacion = libreta_sanitaria.anteultima_desparasitacion
            libreta_sanitaria.save()
            entrada.delete()
    return redirect(to = request.META.get('HTTP_REFERER'))
def editar_entrada(request, id) :
    """Edit a veterinary record, choosing the form by the entry's service type."""
    # Local import, presumably to avoid a circular import — confirm.
    from .forms import Entrada_update_form, Entrada_update_form_vacunacion
    entrada = Entrada.objects.get(id=id)
    if entrada.motivo.servicio == "Vacunación":
        form = Entrada_update_form_vacunacion(request.POST or None, instance = entrada)
    else:
        form = Entrada_update_form(request.POST or None, instance = entrada)
    if form.is_valid():
        form.save()
        params = {'id': entrada.perro.id}
        return redirect(reverse(f'ver_historial_medico', kwargs=params))
    else:
        print(form.errors)
        data = {
            'form': form,
        }
        return render(request,"gestion_de_mascotas/editar_entrada.html",data)
def ver_libreta_sanitaria(request, id) :
    """Show the health booklet and registered vaccines of the dog with pk ``id``."""
    perro = Perro.objects.get(id = id)
    libreta_sanitaria = Libreta_sanitaria.objects.get(perro = perro)
    vacunas_registradas = Registro_vacuna.objects.filter(perro = perro)
    data = {
        "perro":perro,
        "libreta_sanitaria":libreta_sanitaria,
        "vacunas":vacunas_registradas,
    }
    return render(request,"gestion_de_mascotas/libreta_sanitaria.html",data)
def elegir_motivo(request, id) :
    """Choose the service type for a new veterinary record of dog ``id``.

    Castration is hidden for already-castrated dogs.  The selected
    service determines which entry-creation view the user is sent to:
    vaccination uses a dedicated form.
    """
    perro = Perro.objects.get(id = id)
    libreta_sanitaria = Libreta_sanitaria.objects.get(perro = perro)
    if libreta_sanitaria.castrado :
        motivos = Servicio_veterinario.objects.exclude(servicio = "Castración")
    else :
        motivos = Servicio_veterinario.objects.all()
    data = {
        "motivos":motivos,
    }
    if request.method == 'POST' :
        if request.POST.get('motivo') != "" :
            # A service was selected: dispatch on the chosen service type.
            print(request.POST.get('motivo'))
            motivo = Servicio_veterinario.objects.get(id = request.POST.get('motivo'))
            if motivo.servicio != "Vacunación" :
                params = {
                    'id': id,
                    'motivo':request.POST.get('motivo')
                }
                return redirect(reverse(f'agregar_entrada', kwargs=params))
            else:
                params = {
                    'id': id,
                    'motivo':request.POST.get('motivo')
                }
                return redirect(reverse(f'agregar_entrada_vacuna', kwargs=params))
        else:
            data['mensaje_error'] = "Debes elegir un motivo."
    return render(request, "gestion_de_mascotas/elegir_motivo.html", data)
def agregar_entrada(request, id, motivo):
    """Create a non-vaccination veterinary record for dog ``id``.

    ``motivo`` is the pk of the chosen Servicio_veterinario.  Castration
    and deworming also update the dog's health booklet accordingly.
    """
    perro = Perro.objects.get(id=id)
    libreta_sanitaria = Libreta_sanitaria.objects.get(perro = perro)
    form = Entrada_form()
    data = {
        "form":form,
        "id":id,
    }
    if request.method == 'POST':
        form = Entrada_form(request.POST, request.FILES)
        if form.is_valid() :
            servicio = Servicio_veterinario.objects.get(id = int(motivo))
            entrada = form.save(commit=False)
            entrada.motivo = servicio
            if servicio.servicio == "Castración" :
                libreta_sanitaria.castrado = True
                libreta_sanitaria.save()
            elif servicio.servicio == "Desparasitación" :
                # Keep the previous date so a deletion can roll it back.
                libreta_sanitaria.anteultima_desparasitacion = libreta_sanitaria.ultima_desparasitacion
                libreta_sanitaria.ultima_desparasitacion = date.today()
                libreta_sanitaria.save()
            entrada.perro = perro
            entrada.save()
            data['mensaje'] = "Salió bien."
            params = {'id': id}
            return redirect(reverse(f'ver_historial_medico', kwargs=params))
        else:
            return render(request,"gestion_de_mascotas/crear_entrada.html",{"form":form, "id":id})
    return render(request,"gestion_de_mascotas/crear_entrada.html",data)
def agregar_entrada_vacuna(request, id, motivo) :
    """Create a vaccination veterinary record for dog ``id``.

    ``motivo`` is the pk of the vaccination Servicio_veterinario.  On
    success a Registro_vacuna row is created and linked to the entry.
    """
    perro = Perro.objects.get(id=id)
    form = Entrada_form_vacuna()
    data = {
        "form":form,
        "id":id,
    }
    if request.method == 'POST':
        form = Entrada_form_vacuna(request.POST, request.FILES)
        if form.is_valid() :
            servicio = Servicio_veterinario.objects.get(id = int(motivo))
            entrada = form.save(commit=False)
            entrada.motivo = servicio
            entrada.perro = perro
            entrada.save()
            # Record which vaccine and which dose number were administered.
            registro_vacuna = Registro_vacuna()
            registro_vacuna.perro = perro
            registro_vacuna.vacuna = Vacuna.objects.get(id = int(request.POST.get('vacuna')))
            registro_vacuna.numero_dosis = int(request.POST.get('numero_dosis'))
            registro_vacuna.entrada = entrada
            registro_vacuna.save()
            params = {'id': id}
            return redirect(reverse(f'ver_historial_medico', kwargs=params))
        else:
            return render(request,"gestion_de_mascotas/crear_entrada.html",{"form":form, "id":id})
    return render(request,"gestion_de_mascotas/crear_entrada.html",data)
#Entrada_form_vacuna | FacuLede/oh_my_dog | oh_my_dog/gestion_de_mascotas/views.py | views.py | py | 30,336 | python | es | code | 0 | github-code | 13 |
436864211 | import csv
from core_data_modules.cleaners import Codes
from core_data_modules.data_models.code_scheme import CodeTypes
class AnalysisConfiguration(object):
    """Settings that describe how one coded dataset is analysed.

    :param dataset_name: Human-readable name of the dataset.
    :param raw_field: TracedData key holding the raw response text.
    :param coded_field: TracedData key holding the assigned label(s).
    :param code_scheme: Code scheme used to interpret those labels.
    """
    def __init__(self, dataset_name, raw_field, coded_field, code_scheme):
        self.code_scheme = code_scheme
        self.coded_field = coded_field
        self.raw_field = raw_field
        self.dataset_name = dataset_name
def get_codes_from_td(td, analysis_configuration):
    """Return the Code objects labelled on ``td`` under this configuration.

    Looks up ``analysis_configuration.coded_field`` in the TracedData and
    resolves each label's CodeID through the configuration's code scheme.

    :param td: TracedData to read labels from.
    :type td: core_data_modules.traced_data.TracedData
    :param analysis_configuration: Supplies the field to read and the
                                   code scheme used to resolve CodeIDs.
    :type analysis_configuration: AnalysisConfiguration
    :return: Codes for the labels on this TracedData, or [] if the field
             is absent.
    :rtype: list of core_data_modules.data_models.Code
    """
    field = analysis_configuration.coded_field
    if field not in td:
        return []
    raw_labels = td[field]
    # The field may hold a single label or a list of labels; normalise to a list.
    # (Exact `type == list` check kept from the original implementation.)
    if type(raw_labels) != list:
        raw_labels = [raw_labels]
    scheme = analysis_configuration.code_scheme
    return [scheme.get_code_with_code_id(label["CodeID"]) for label in raw_labels]
def normal_codes(codes):
    """Return only the codes whose type is CodeTypes.NORMAL.

    :param codes: Codes to filter.
    :type codes: list of core_data_modules.data_models.Code
    :return: The subset of ``codes`` with code type CodeTypes.NORMAL.
    :rtype: list of core_data_modules.data_models.Code
    """
    return [c for c in codes if c.code_type == CodeTypes.NORMAL]
def responded(td, consent_withdrawn_key, analysis_configuration):
    """
    Returns whether the given TracedData object contains a response under the given analysis_configuration.

    The TracedData is considered to contain a response if its analysis_configuration.coded_field has been labelled
    with anything other than codes with control code Codes.TRUE_MISSING or Codes.SKIPPED.

    :param td: TracedData to check.
    :type td: core_data_modules.traced_data.TracedData
    :param analysis_configuration: Analysis configuration to use to check if the TracedData contains a response.
                                   This determines the coded_field to check and the code_scheme to use to interpret it.
    :type analysis_configuration: AnalysisConfiguration
    :return: Whether `td` contains a response under the `analysis_configuration`.
    :rtype: bool
    """
    if withdrew_consent(td, consent_withdrawn_key):
        return False
    codes = get_codes_from_td(td, analysis_configuration)
    # An unlabelled field is considered a data error here, not "no response".
    assert len(codes) >= 1

    if len(codes) > 1:
        # If there is an NA or NS code, there shouldn't be any other codes present.
        for code in codes:
            assert code.control_code != Codes.TRUE_MISSING and code.control_code != Codes.SKIPPED
        return True

    return codes[0].control_code != Codes.TRUE_MISSING and codes[0].control_code != Codes.SKIPPED
def withdrew_consent(td, consent_withdrawn_key):
    """Return True when this TracedData is marked as consent-withdrawn.

    :param td: TracedData to check.
    :type td: TracedData
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :return: Whether consent was withdrawn.
    :rtype: bool
    """
    withdrawn_flag = td[consent_withdrawn_key]
    return withdrawn_flag == Codes.TRUE
def opt_in(td, consent_withdrawn_key):
    """Return True when the participant did not withdraw consent.

    :param td: TracedData to check.
    :type td: core_data_modules.traced_data.TracedData
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :return: Whether this TracedData is opted-in (consent not withdrawn).
    :rtype: bool
    """
    consent_was_withdrawn = withdrew_consent(td, consent_withdrawn_key)
    return not consent_was_withdrawn
def labelled(td, consent_withdrawn_key, analysis_configuration):
    """
    Returns whether the given TracedData object has been labelled under the given analysis_configuration.

    An object is considered labelled if all of the following hold:
     - Consent was not withdrawn.
     - A response was received (see `AnalysisUtils.responded` for the definition of this).
     - The response has been assigned at least one label.
     - None of the assigned labels have the control_code Codes.NOT_REVIEWED.

    :param td: TracedData to check.
    :type td: TracedData
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :param analysis_configuration: Analysis configuration to use to check if the TracedData has been labelled.
                                   This determines the coded_field to check and the code_scheme to use to interpret it.
    :type analysis_configuration: AnalysisConfiguration
    :return: Whether `td` contains a labelled response to `coding_plan` and did not withdraw consent.
    :rtype: bool
    """
    if not responded(td, consent_withdrawn_key, analysis_configuration):
        return False

    codes = get_codes_from_td(td, analysis_configuration)
    if len(codes) == 0:
        return False

    # A single not-yet-reviewed label means the labelling isn't final.
    for code in codes:
        if code.control_code == Codes.NOT_REVIEWED:
            return False

    return True
def relevant(td, consent_withdrawn_key, analysis_configuration):
    """Return True when ``td`` holds a relevant response under this configuration.

    A response is relevant if it is labelled (see `labelled`) and at least
    one of its labels is a normal code.

    :param td: TracedData to check.
    :type td: TracedData
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :param analysis_configuration: Determines the coded_field to check and the
                                   code_scheme used to interpret it.
    :type analysis_configuration: AnalysisConfiguration
    :return: Whether `td` contains a relevant response.
    :rtype: bool
    """
    if not labelled(td, consent_withdrawn_key, analysis_configuration):
        return False

    codes = get_codes_from_td(td, analysis_configuration)
    return any(code.code_type == CodeTypes.NORMAL for code in codes)
def filter_opt_ins(data, consent_withdrawn_key):
    """Return the subset of ``data`` whose participants opted in.

    For the definition of "opt-in", see `opt_in`.

    :param data: Message or participant data to filter.
    :type data: iterable of core_data_modules.traced_data.TracedData
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :return: The objects in ``data`` that opted in.
    :rtype: list of core_data_modules.traced_data.TracedData
    """
    return [td for td in data if opt_in(td, consent_withdrawn_key)]
def filter_responded(data, consent_withdrawn_key, analysis_configurations):
    """Return the subset of ``data`` that opted in and responded under at least
    one of the given analysis configurations.

    For the definition of a response, see `responded`.

    :param data: Message or participant data to filter.
    :type data: iterable of core_data_modules.traced_data.TracedData
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :param analysis_configurations: Configurations to test each object against.
    :type analysis_configurations: iterable of AnalysisConfiguration
    :return: The objects that responded under at least one configuration.
    :rtype: list of core_data_modules.traced_data.TracedData
    """
    return [
        td for td in data
        if any(responded(td, consent_withdrawn_key, config)
               for config in analysis_configurations)
    ]
def filter_partially_labelled(data, consent_withdrawn_key, analysis_configurations):
    """Return the subset of ``data`` that opted in and is labelled under at
    least one of the given analysis configurations.

    For the definition of "labelled", see `labelled`.

    :param data: Message or participant data to filter.
    :type data: TracedData iterable
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :param analysis_configurations: Configurations to test each object against.
    :type analysis_configurations: iterable of AnalysisConfiguration
    :return: The objects labelled under at least one configuration.
    :rtype: list of TracedData
    """
    return [
        td for td in data
        if any(labelled(td, consent_withdrawn_key, config)
               for config in analysis_configurations)
    ]
def filter_fully_labelled(data, consent_withdrawn_key, analysis_configurations):
    """
    Filters a list of message or participant data for objects that opted-in and are labelled under all of
    the given analysis_configurations.

    For the definition of "labelled", see `labelled`.

    :param data: Message or participant data to filter.
    :type data: TracedData iterable
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :param analysis_configurations: Analysis configurations to use to check if each TracedData has been fully
                                    labelled.
    :type analysis_configurations: iterable of AnalysisConfiguration
    :return: data, filtered for the objects that are labelled under all of the analysis_configurations.
    :rtype: list of TracedData
    """
    fully_labelled = []
    for td in data:
        # all() short-circuits at the first unlabelled configuration; the
        # previous version kept calling labelled() for every remaining config
        # even after one had already failed.
        if all(labelled(td, consent_withdrawn_key, config) for config in analysis_configurations):
            fully_labelled.append(td)
    return fully_labelled
def filter_relevant(data, consent_withdrawn_key, analysis_configurations):
    """Return the subset of ``data`` relevant to at least one of the given
    analysis configurations.

    For the definition of "relevant", see `relevant`.

    :param data: Message or participant data to filter.
    :type data: TracedData iterable
    :param consent_withdrawn_key: Key in the TracedData of the consent withdrawn field.
    :type consent_withdrawn_key: str
    :param analysis_configurations: Configurations to test each object against.
    :type analysis_configurations: iterable of AnalysisConfiguration
    :return: The objects relevant to at least one configuration.
    :rtype: list of TracedData
    """
    return [
        td for td in data
        if any(relevant(td, consent_withdrawn_key, config)
               for config in analysis_configurations)
    ]
def compute_percentage_str(x, y):
    """Format ``x`` as a percentage of ``y``, rounded to 1 decimal place.

    :param x: Dividend.
    :type x: number
    :param y: Divisor.
    :type y: number
    :return: "-" when ``y`` is 0 (percentage undefined), otherwise
             ``x / y * 100`` rounded to 1 decimal place, as a string.
    :rtype: str
    """
    if y == 0:
        return "-"
    percentage = round(x / y * 100, 1)
    return str(percentage)
def write_csv(data, headers, f):
    """Write rows of dicts to ``f`` as a fully-quoted CSV.

    :param data: Rows to write; each dict is keyed by the header names.
    :type data: iterable of dict
    :param headers: Column names, in output order.
    :type headers: list of str
    :param f: File to write the CSV to.
    :type f: file-like
    """
    writer = csv.DictWriter(f, fieldnames=headers, lineterminator="\n", quoting=csv.QUOTE_ALL)
    writer.writeheader()
    writer.writerows(data)
| AfricasVoices/CoreDataModules | core_data_modules/analysis/analysis_utils.py | analysis_utils.py | py | 13,451 | python | en | code | 0 | github-code | 13 |
43175535789 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from reporting.utils import extract_date
from rapidsms.contrib.apps.handlers import KeywordHandler
from rwanda.models import PregnantPerson, PreBirthReport
class PreBirthReportHandler(KeywordHandler):
    """
    Handles "mrep" SMS messages: a pre-birth report for an already
    registered pregnancy.  The message text is the pregnancy code; any
    tags attached by the tagging app during parsing are stored on the
    created report.  (Python 2 codebase: note the `unicode` call below.)
    """

    keyword = "mrep"

    def must_register(self):
        # Reply sent when the sender has not identified themselves yet.
        self.respond("You must register before reporting.")

    def handle(self, text):
        # abort if the user hasn't identified yet
        if self.msg.reporter is None:
            self.must_register()
            return True

        try:
            person = PregnantPerson.objects.get(
                code=text.strip())

        except PregnantPerson.DoesNotExist:
            self.respond("You must register the pregnancy before reporting.")
            return True

        resp = "Thank you for reporting"

        report = PreBirthReport.objects.create(
            person=person)

        # save any tags extracted during
        # parse phase by the tagging app
        if hasattr(self.msg, "tags"):
            if len(self.msg.tags) > 0:
                for tag in self.msg.tags:
                    report.tags.add(tag)

                resp += " with indicators: %s" %\
                    (", ".join(map(unicode, self.msg.tags)))

        self.respond("%s." % resp)
| oluka/mapping_rapidsms | apps/rwanda/handlers/pre_birth_report.py | pre_birth_report.py | py | 1,312 | python | en | code | 3 | github-code | 13 |
27556852490 | from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
# The maximum amount of time that we allow an ExpectedCondition to wait
# before timing out.
MAX_WAIT = 60
class SeleniumTestBaseGeneric(StaticLiveServerTestCase):
    """Base class to be used for all selenium-based tests.

    Starts a virtual X display and a Firefox webdriver before each test
    and tears both down afterwards, so tests can run headlessly.
    """

    # Don't delete data migration data after test runs:
    # https://docs.djangoproject.com/en/1.7/topics/testing/tools/#transactiontestcase
    serialized_rollback = True

    def setUp(self):
        # Headless virtual display sized for a typical laptop viewport.
        self.display = Display(visible=0, size=(1366, 768))
        self.display.start()
        self.browser = webdriver.Firefox()

    def tearDown(self):
        # NOTE: quit() destroys ANY currently running webdriver instances.
        # This could become an issue if tests are ever run in parallel.
        self.browser.quit()
        self.display.stop()
def wait_until_class_visible(selenium, search_class, wait_duration):
    """
    Wait for a DOM element to be visible and return it, or fail the test.

    :param selenium: selenium webdriver Instance
    :param search_class: DOM element class to search for
    :param wait_duration: time limit to be used in WebDriverWait()
    :return: the first visible WebElement with the given class
    :raises AssertionError: if no such element becomes visible in time
    """
    try:
        return WebDriverWait(selenium, wait_duration).until(
            ec.visibility_of_element_located((By.CLASS_NAME,
                                              search_class))
        )
    except TimeoutException:
        # Re-raise as an AssertionError so the failure reads as a test failure.
        raise AssertionError(
            "Element with class: '{}' was not visible within the {} second "
            "wait period.".format(search_class, wait_duration))
| atadych/pentacon-refinery-platform | refinery/selenium_testing/utils.py | utils.py | py | 1,849 | python | en | code | 0 | github-code | 13 |
32371634987 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 11:08:27 2018
@author: Basha
"""
import os
import pandas as pd
from sklearn import tree
from sklearn import model_selection
from sklearn import ensemble #This is what we introduced here.
#returns current working directory
os.getcwd()
#changes working directory
os.chdir("C:/data/monster")

monster_train = pd.read_csv("train.csv")

#EDA: quick look at shape and column types
monster_train.shape
monster_train.info()

# One-hot encode the categorical 'color' column for the tree-based model.
monster_train1 = pd.get_dummies(monster_train, columns=['color'])
monster_train1.shape
monster_train1.info()
monster_train1.head(6)

X_train = monster_train1.drop(['type'], 1)
y_train = monster_train1['type']

# AdaBoost over shallow decision trees, tuned with a 10-fold grid search.
# NOTE(review): the "oob score" comment below refers to bagging models;
# AdaBoost does not compute one — confirm intent.
#oob scrore is computed as part of model construction process
dt_estimator = tree.DecisionTreeClassifier()
ada_estimator = ensemble.AdaBoostClassifier(base_estimator = dt_estimator, random_state = 2017)
ada_grid = {'n_estimators':[10,15,20], 'learning_rate':[0.01,0.02,0.8], 'base_estimator__max_depth':[3]}
grid_ada_estimator = model_selection.GridSearchCV(ada_estimator, ada_grid, cv=10, n_jobs=2)
#print(scores)
#print(scores.mean())
grid_ada_estimator.fit(X_train, y_train)
#.score gives the score on full train data
print(grid_ada_estimator.score(X_train, y_train))

# Apply the same dummy-encoding to the test set and predict.
monster_csv_tst = pd.read_csv("C:/data/monster/test.csv")
monster_test1 = pd.get_dummies(monster_csv_tst, columns=['color'])
monster_test1.info
X_test = monster_test1
X_test.shape
X_test.info()

X_test['type'] = grid_ada_estimator.predict(X_test)
os.getcwd()
X_test.to_csv("Submission_monster4.csv", columns=['id', 'type'], index=False)
71804424018 | # Author : codechamp27
# Code Licensed under the Apache License, Version 2.0
# takes a number and checks if it is unique or not
def is_unique(num):
    """Return True if no decimal digit occurs more than once in ``num``.

    The sign is ignored, so -121 is treated like 121 (not unique).  This
    fixes the previous version, which never examined the digits of a
    negative number (its `while copy > 0` loop never ran) and so always
    reported negatives as unique; it also replaces the O(10 * digits)
    per-digit counting loops with a single set comparison.
    """
    digits = str(abs(num))
    return len(set(digits)) == len(digits)


if __name__ == "__main__":
    # Script behaviour unchanged: prompt, then print the verdict.
    num = int(input("Enter a number : "))
    if is_unique(num):
        print("Unique number")
    else:
        print("Not a unique number")
| codechamp2006/Python-11 | uniquenumber.py | uniquenumber.py | py | 489 | python | en | code | 0 | github-code | 13 |
def SIMPLE_XOR(l, r):
    """Search [l, r] for four distinct integers whose XOR is zero.

    Tries every triple i < j < k in the range; the fourth value is forced
    to be i ^ j ^ k, since a zero total XOR requires it.  Returns the
    first valid quadruple as a set, or an empty set when none exists.
    """
    for i in range(l, r + 1):
        for j in range(i + 1, r + 1):
            for k in range(j + 1, r + 1):
                fourth = i ^ j ^ k
                if l <= fourth <= r:
                    quad = {i, j, k, fourth}
                    # The forced value may collide with i, j or k; require
                    # four distinct members.
                    if len(quad) == 4:
                        return quad
    return set()
# Read the number of test cases, then one "l r" range per line; print the
# four numbers (space-separated) or -1 when no zero-XOR quadruple exists.
for _ in range(int(input())):
    l, r = map(int, input().split())
    s = SIMPLE_XOR(l, r)
    if len(s) == 0:
        print(-1)
    else:
        print(*s)
| ZicsX/CP-Solutions | Simple_XOR.py | Simple_XOR.py | py | 500 | python | en | code | 0 | github-code | 13 |
8295180121 | from django.shortcuts import render
#from .models import HashFunction
from .forms import HashInputForm
from .utils import calculate_hashes
def calculator_page_view(request, *args, **kwargs):
    """Render the hash-calculator page.

    Reads the optional ``text`` GET parameter, computes all supported
    hashes for it, and renders the input form alongside the results.
    """
    # None when the parameter is absent; an empty string when the form is
    # submitted empty -- calculate_hashes is expected to handle both
    # (presumably returning an empty result; TODO confirm in utils).
    text_to_hash = request.GET.get(key='text')
    hashes_dict = calculate_hashes(string=text_to_hash)
    # Bind the form to the submitted GET data, or render it unbound.
    form = HashInputForm(request.GET or None)
    # (fix: removed dead `context = {}` assignment and the `text_var_name`
    # indirection that obscured the parameter name)
    context = {
        'input_form': form,
        'hashes_dict': hashes_dict,
    }
    return render(request, 'hash_calculators/hash_body.html', context)
| NotSirius-A/Hash-calculator-website | hash_calculators/views.py | views.py | py | 607 | python | en | code | 0 | github-code | 13 |
18348845110 | from django.urls import path
from . import views
# URL routes for the blog application.
urlpatterns = [
    path("", views.index, name='home'),        # site root
    path("main", views.index),                 # unnamed alias for the home page
    path("blogs", views.blogs, name='blogs'),  # blog list
    path("blogs/<int:id>", views.blog_details, name='blog_details'),  # single post by numeric id
    # path("tarifler",views.tarifler,name="tarifler"),
    # path("tarifler/<int:id>", views.tarifler_details, name='tarifler_details'),
    path("hakkimda",views.hakkimda,name="hakkimda"),  # "about me" page (Turkish)
]
| hermannKonyar/Blog_Django | blog/urls.py | urls.py | py | 460 | python | en | code | 2 | github-code | 13 |
6791989210 | # CENG 487 Assignment1 by
# DoğukanÇiftçi
# StudentId: 230201071
# October 2021
from mat3d import *
class oobject :
    def __init__(self, position,vertices,matrix_stack):
        """Store the object's position, its vertex list and a matrix stack."""
        self.position = position
        # vertex vectors, transformed in place by the apply*/rotate methods
        self.vertices = vertices
        # matrices applied in order by applyMatrixStack()
        self.matrix_stack=matrix_stack
def applyMatrixToVertices(self, mat3d):
for i, vertex in enumerate(self.vertices):
self.vertices[i] = mat3d.multiply(vertex)
def applyMatrixStack(self):
for matrix in self.matrix_stack:
self.applyMatrixToVertices(matrix)
def rotate(self,x,y,z):
newvertices=[]
for v in self.vertices:
v=mat3d().rotate_x(x).multiply(v)
v=mat3d().rotate_y(y).multiply(v)
v=mat3d().rotate_z(z).multiply(v)
newvertices.append(v)
self.vertices=newvertices | dogukanjackson/ComputerGraphics | DoğukanÇiftçi_assignment3/DoğukanÇiftçi_assignment3/oobject.py | oobject.py | py | 859 | python | en | code | 0 | github-code | 13 |
39312087746 | from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
# Cython extension: built with aggressive native optimisation and OpenMP so
# the compiled subroutine can use parallel loops (prange).
ext_modules=[
    Extension("subroutine_cython",
              ["subroutine_cython.pyx"],
              libraries=["m"],  # link libm for C math functions
              extra_compile_args = ["-O3", "-ffast-math","-march=native", "-fopenmp" ],
              extra_link_args=['-fopenmp'],  # OpenMP flag must be repeated at link time
              include_dirs = [np.get_include()]  # NumPy C API headers
              )
]
setup(
name = "subroutine_cython",
cmdclass = {"build_ext": build_ext},
ext_modules = ext_modules
) | mgalcode/CLEAN-Capon-3C | subroutine_cython_setup.py | subroutine_cython_setup.py | py | 596 | python | en | code | 7 | github-code | 13 |
71946784338 | #!/usr/bin/env python
# coding: utf-8
# # Basic Calulator
# ***Importing required liabraries***
# In[1]:
from tkinter import *
# ***Defining finctions***
# In[2]:
def btnClick(numbers):
    """Append the pressed key (digit or operator) to the pending expression."""
    global operator
    operator += str(numbers)
    text_input.set(operator)
# In[3]:
def btnClearDisplay():
    """Reset the pending expression and blank the calculator display."""
    global operator
    operator = ""
    text_input.set("")
# In[4]:
def btnEqualsInput():
    """Evaluate the pending expression and show the result (or 'Error').

    Fix: malformed expressions ('5*', '') and division by zero used to raise
    and leave the app in a broken state; they now display 'Error' instead.
    """
    global operator
    try:
        # NOTE: eval() on a string built only from the keypad buttons, so the
        # input alphabet is limited to digits and + - * / .
        sumup = str(eval(operator))
    except (SyntaxError, ZeroDivisionError, ArithmeticError, TypeError, NameError):
        sumup = "Error"
    text_input.set(sumup)
    operator = ""
# ***Creating Display & Title***
# In[5]:
# Root window and shared state used by the button callbacks.
cal = Tk()
cal.title("Calcuator")  # NOTE(review): typo, probably meant "Calculator" (user-visible title)
operator=""  # expression string accumulated by btnClick
text_input=StringVar()  # backing variable of the display Entry
# In[6]:
txtDispay = Entry(cal, font = ('arial', 20, 'bold'), textvariable = text_input, bd = 30,
                  insertwidth = 4, bg = "powder blue", justify = 'right').grid(columnspan=4)  # grid() returns None
# ***Creating Buttons & Assigning Commands***
# In[7]:
# ------------------------------------------------------------------
# Keypad: all sixteen buttons share the same look, so build them from a
# declarative (label, row, column) table instead of sixteen copy-pasted
# Button(...) constructions.  Commands match the original wiring:
#   digits  -> btnClick(<int>)   (ints, as the originals passed)
#   + - * / -> btnClick(<str>)
#   C       -> btnClearDisplay
#   =       -> btnEqualsInput
# ------------------------------------------------------------------
BTN_STYLE = dict(padx=16, pady=16, bd=8, fg="black",
                 font=('arial', 20, 'bold'), bg="powder blue")

BTN_LAYOUT = [
    ("7", 1, 0), ("8", 1, 1), ("9", 1, 2), ("+", 1, 3),
    ("4", 2, 0), ("5", 2, 1), ("6", 2, 2), ("-", 2, 3),
    ("1", 3, 0), ("2", 3, 1), ("3", 3, 2), ("*", 3, 3),
    ("0", 4, 0), ("C", 4, 1), ("=", 4, 2), ("/", 4, 3),
]

for label, row, col in BTN_LAYOUT:
    if label == "C":
        command = btnClearDisplay
    elif label == "=":
        command = btnEqualsInput
    elif label.isdigit():
        # bind the value now (default argument) to avoid the late-binding closure bug
        command = lambda n=int(label): btnClick(n)
    else:
        command = lambda s=label: btnClick(s)
    Button(cal, text=label, command=command, **BTN_STYLE).grid(row=row, column=col)
# In[11]:
cal.mainloop()
| aniketkulye/Python_tkinter_Calculators | Basic Calculator.py | Basic Calculator.py | py | 4,154 | python | en | code | 0 | github-code | 13 |
41857484210 | # -*- coding: utf-8 -*-
"""
@author: Valerie Desnoux
with improvements by Andrew Smith
contributors: Jean-Francois Pittet, Jean-Baptiste Butet, Pascal Berteau, Matt Considine
Version 8 September 2021
------------------------------------------------------------------------
reconstruction on an image from the deviations between the minimum of the line and a reference line
calcul sur une image des ecarts simples entre min de la raie et une ligne de reference
-------------------------------------------------------------------------
"""
from solex_util import *
from ser_read_video import *
from ellipse_to_circle import ellipse_to_circle, correct_image
from ExploreFit_util_EN2 import rebin, rebin2, congrid, thebigroutine #MattC
from scipy.optimize import curve_fit #MattC
# read video and return constructed image of sun using fit and LineRecal
def read_video_improved(serfile, fit, LineRecal, options, theflattener):
    """Rebuild one solar disk image per requested pixel shift from the video.

    For every frame, the spectral-line position in each image row is given by
    the polynomial *fit* (offset by LineRecal + shift); the intensity at that
    sub-pixel column is linearly interpolated from its two neighbouring
    columns and written into one column of the corresponding output image.
    Uses the module globals m1..m6 (set by thebigroutine) for the optional
    bandwidth-summed extra image.  *theflattener* is currently unused -- the
    divides that used it are commented out.

    Returns (disk_list, ih, iw, FrameCount); the images are binned when
    options['binsize'] > 1.
    """
    rdr = ser_reader(serfile)
    ih, iw = rdr.ih, rdr.iw
    FrameMax = rdr.FrameCount
    # one output image (height x frame-count) per requested pixel shift
    disk_list = [np.zeros((ih, FrameMax), dtype=rdr.infiledatatype) # MattC avi
                 for _ in options['shift']]
    #disk_list_d1 = np.copy(disk_list) # MattC for doppler
    #disk_list_d2 = np.copy(disk_list) # MattC for doppler
    #disk_list_c = np.copy(disk_list) # MattC for doppler
    disk_list_bw = np.zeros((ih, FrameMax), dtype=rdr.infiledatatype) # MattC for bandwidth
    if options['flag_display']:
        cv2.namedWindow('disk', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('disk', FrameMax // 3, ih // 3)
        cv2.moveWindow('disk', 200, 0)
        cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        cv2.moveWindow('image', 0, 0)
        cv2.resizeWindow('image', int(iw), int(ih))
    # per-shift pairs of integer column indices bracketing the line position
    col_indeces = []
    for shift in options['shift']:
        ind_l = (np.asarray(fit)[:, 0] + np.ones(ih)
                 * (LineRecal + shift)).astype(int)
        # CLEAN if fitting goes too far
        ind_l[ind_l < 0] = 0
        ind_l[ind_l > iw - 2] = iw - 2
        ind_r = (ind_l + np.ones(ih)).astype(int)
        col_indeces.append((ind_l, ind_r))
    # sub-pixel interpolation weights from the fractional part of the fit
    left_weights = np.ones(ih) - np.asarray(fit)[:, 1]
    right_weights = np.ones(ih) - left_weights
    # start of mods for doppler, bandwidth # MattC
    if m1 > 0: # MattC
        # m1..m6 come from thebigroutine(); m2/m4 bound the summed band and m6
        # scales it -- TODO confirm exact semantics against ExploreFit_util
        the_weights = (np.ones((ih, (m3 - m1 + 1))).astype(float)) * m6 # +1
        test_coords = []
        for i, shift in enumerate(options['shift']):
            #ind_bw = (np.asarray(fit)[:, 0] + np.ones(ih) * (LineRecal + 0 * shift)).astype(int) # MattC
            #ind_bw[ind_bw < 0] = 0
            #ind_bw[ind_bw > iw - 2] = iw - 2
            ind_bw, _ = col_indeces[i]
            startcol = np.add(ind_bw, m2)
            endcol = np.add(ind_bw, m4)
            # img_suff = (['.fits', '_d1_' + str(shift_dop) + '.fits', '_d2_' + str(shift_dop) + '.fits',
            #             '_cont_' + str(shift_cont) + '.fits', '_bw_' + str(np.abs(m2) + np.abs(m4) + 1) + '.fits'])
            testcoords = np.linspace(startcol, endcol, np.abs(m2) + np.abs(m4) + 1).astype(int).T # +1
            testcoords[testcoords < 0] = 0
            testcoords[testcoords > iw - 2] = iw - 2
            test_coords.append((testcoords))
    # end of mods for doppler, bandwidth # MattC
    # launch the reconstruction of the disk from the frames
    print('reader num frames:', rdr.FrameCount)
    while rdr.has_frames():
        img = rdr.next_frame()
        if options['flag_display'] and rdr.FrameIndex % 10 == 0:
            cv2.imshow('image', img)
            if cv2.waitKey(1) == 27:
                cv2.destroyAllWindows()
                sys.exit()
        for i in range(len(options['shift'])):
            ind_l, ind_r = col_indeces[i]
            left_col = img[np.arange(ih), ind_l]
            right_col = img[np.arange(ih), ind_r]
            # sub-pixel line intensity for this frame's column of the output
            IntensiteRaie = left_col * left_weights + right_col * right_weights
            #disk_list[i][:, rdr.FrameIndex] = np.divide(IntensiteRaie, theflattener).astype(rdr.infiledatatype) #MattC
            disk_list[i][:, rdr.FrameIndex] = IntensiteRaie.astype(rdr.infiledatatype) #MattC avi
            # start of bandwidth mods MattC
            if m1 > 0: # MattC
                imgsubset = np.take_along_axis(img, test_coords[i], axis=1)
                newvalues = imgsubset * the_weights
                newvalues = np.sum(newvalues, axis=1)
                IntensiteRaie_bw = np.round(newvalues, 0)
                #disk_list_bw[:, rdr.FrameIndex] = np.divide(IntensiteRaie_bw, theflattener).astype(rdr.infiledatatype) #MattC
                disk_list_bw[:, rdr.FrameIndex] = IntensiteRaie_bw.astype(rdr.infiledatatype) # MattC
            # end of bandwidth mods MattC
        if options['flag_display'] and rdr.FrameIndex % 10 == 0:
            # disk_list[1] is always shift = 0
            cv2.imshow('disk', disk_list[1])
            if cv2.waitKey(
                    1) == 27:  # exit if Escape is hit
                cv2.destroyAllWindows()
                sys.exit()
    if m1 > 0: # MattC
        # append the bandwidth-summed image and label its shift entry
        options['shift'] = options['shift'] + [str(int(m4))+'FWHM'] #MattC adding bw 2*m4+1
        disk_list.append(disk_list_bw)
    print("rdr infiletype : ",rdr.infiledatatype)
    for i in range(len(options['shift'])): # MattC this should just change 8bit to 16bit
        imgmax = np.max(disk_list[i])
        imgmin = np.min(disk_list[i])
        print('img min max dtype : ',i, imgmin, imgmax, disk_list[i].dtype)
        ##disk_list[i] = ((disk_list[i]-imgmin)*64000/(imgmax-imgmin)).astype('uint16')
        #disk_list[i] = (disk_list[i]*(64000/255)).astype('uint16')
    if options['binsize'] > 1 : # MattC
        # optional software binning; note this rebinds the ih/iw return values
        binsize=options['binsize']
        print('old ih and iw : ',ih, iw)
        print('length disk_list : ',len(disk_list))
        print('length options shift : ',len(options['shift']))
        print('options binsize : ',options['binsize'])
        print('binsize : ',binsize)
        disk_list2 = [np.zeros((ih//binsize,FrameMax), dtype='uint16') for _ in options['shift']]
        print("here is range len options shift ",range(len(options['shift'])))
        print("here is length disklist ", len(disk_list))
        for i in range(len(options['shift'])):
            img = rebin2(disk_list[i], (binsize, binsize))
            #origmax = np.max(disk_list[i])
            #img = congrid(disk_list[i],(ih // binsize, FrameMax // binsize), method='spline', centre=True)
            #img = ((img-0)*origmax/(np.max(img)-0)).astype('uint16') #spline can give value > 65535 so reset
            disk_list2[i] = img
        ih,iw = disk_list2[1].shape
        print('new ih and iw : ',ih, iw)
        return disk_list2, ih, iw, rdr.FrameCount
    else:
        return disk_list, ih, iw, rdr.FrameCount
def make_header(rdr):
    """Build a minimal FITS header sized to the video frames.

    (Historically used when saving individual frames.)  Card order is kept
    exactly as before since it is written to output files.
    """
    header = fits.Header()
    header['SIMPLE'] = 'T'
    header['BITPIX'] = 32
    header['NAXIS'] = 2
    header['NAXIS1'] = rdr.iw  # frame width in pixels
    header['NAXIS2'] = rdr.ih  # frame height in pixels
    header['BZERO'] = 0
    header['BSCALE'] = 1
    header['BIN1'] = 1
    header['BIN2'] = 1
    header['EXPTIME'] = 0
    return header
def compute_flattener(animg, imgh, afit, ay1, ay2): # MattC
    """Derive a 1-D intensity-flattening profile along the slit (rows).

    Samples the image at the fitted line-minimum column of each row, divides
    by a low-order polynomial fit of that profile, smooths with a moving
    average and clamps the profile tails outside [ay1, ay2].
    NOTE(review): mixes the *imgh* argument with the module-global *ih*
    (assumed equal) and uses np.int, which is removed in NumPy >= 1.24.
    """
    corrintprofile = np.zeros((imgh,1))
    MinRayfit = animg[np.arange(0, imgh), (np.asarray(afit)[0:imgh, 0]).astype(np.int)]
    popt, __ = curve_fit(func2,np.asarray(afit)[0:imgh, 2],MinRayfit,p0=np.ones(3,)) # np.ones sets poly
    v2 = func2(np.asarray(afit)[0:imgh, 2],*popt) #.astype('int')
    mawindow = 15 #5
    corrintprofile = np.divide(MinRayfit, v2)
    corrintprofile = moving_average(corrintprofile, mawindow)
    profbuffer = 100 #18
    #corrintprofile[np.abs(corrintprofile) > 1.5 ] = 0
    #now fix the tails ...
    # pad to full length (moving_average shortened the array by mawindow-1)
    a=[corrintprofile[min(ay2+1+profbuffer,len(corrintprofile)-1)]]*(mawindow-1)
    corrintprofile=np.concatenate((corrintprofile,a))
    corrintprofile[0:max(ay1-1-profbuffer,0)] = corrintprofile[max(ay1-1-profbuffer,0)] #18 was 25
    print(min(ay2+1+profbuffer,ih))
    print(ih)
    print(imgh)
    print("length of array", len(corrintprofile))
    corrintprofile[min(ay2+1+profbuffer,ih-1):ih-1] = corrintprofile[min(ay2+1+profbuffer,ih-1)]
    # plt.plot(corrintprofile)
    # plt.show()
    return corrintprofile
# compute mean image of video
def compute_mean(serfile):
    """Compute the mean frame of the video.

    IN : serfile path
    OUT: (mean frame as numpy array in the input dtype,
          list of all individual frames)
    """
    rdr = ser_reader(serfile)
    logme('Width, Height : ' + str(rdr.Width) + ' ' + str(rdr.Height))
    logme('Number of frames : ' + str(rdr.FrameCount))
    # uint64 accumulator avoids overflow when summing many 8/16-bit frames
    my_data = np.zeros((rdr.ih, rdr.iw), dtype='uint64')
    my_datacube = [np.zeros((rdr.ih, rdr.iw), dtype=rdr.infiledatatype) # MattC
                   for _ in range(rdr.FrameCount)]
    i = 0 # MattC
    while rdr.has_frames():
        img = rdr.next_frame()
        my_data += img
        my_datacube[i] = img # MattC  # keep every frame for later per-frame use
        i += 1 # MattC
    return (my_data / rdr.FrameCount).astype(rdr.infiledatatype), my_datacube # MattC avi
def func2(x, *p):
    """Polynomial of arbitrary degree; *p* holds coefficients from the
    constant term upward (p[0] + p[1]*x + p[2]*x**2 + ...)."""
    total = 0.
    for power, coeff in enumerate(p):
        total += coeff * x ** power
    return total
def moving_average(x, w):
    """Simple moving average with window *w* ('valid' mode, so the result
    is len(x) - w + 1 samples long)."""
    window = np.ones(w)
    return np.convolve(x, window, 'valid') / w
def compute_mean_return_fit(serfile, options, LineRecal=1):
    """Compute the mean frame, fit the spectral-line minimum with a degree-2
    polynomial and derive the intensity flattener.

    Returns (fit, a, b, c, corrintprofile, disk_flat): *fit* is the per-row
    [int_offset, fractional_part, y] list, (a, b, c) the polynomial
    coefficients, *corrintprofile* the 1-D flat profile and *disk_flat* that
    profile replicated once per frame.  Also sets the module globals
    m1..m8 via thebigroutine().
    """
    global hdr, ih, iw
    global m1,m2,m3,m4,m5,m6,m7,m8
    """
    ----------------------------------------------------------------------------
    Reconstuit l'image du disque a partir de l'image moyenne des trames et
    des trames extraite du fichier ser avec un fit polynomial
    Corrige de mauvaises lignes et transversallium
    basefich: nom du fichier de base de la video sans extension, sans repertoire
    shift: ecart en pixel par rapport au centre de la raie pour explorer longueur d'onde decalée
    ----------------------------------------------------------------------------
    """
    flag_display = options['flag_display']
    # first compute mean image
    # rdr is the ser_reader object
    mean_img, thedatacube = compute_mean(serfile) #MattC
    """
    ----------------------------------------------------------------------------
    Calcul polynome ecart sur une image au centre de la sequence
    ----------------------------------------------------------------------------
    """
    #savefich=basefich+'_mean'
    if options['save_fit']:
        DiskHDU = fits.PrimaryHDU(mean_img, header=hdr)
        DiskHDU.writeto(basefich0 + '_mean.fits', overwrite='True')
    # display the mean image
    if flag_display:
        cv2.namedWindow('Video mean', cv2.WINDOW_NORMAL) # MattC avi
        cv2.resizeWindow('Video mean', iw, ih) # MattC avi
        cv2.moveWindow('Video mean', 100, 0) # MattC avi
        cv2.imshow('Video mean', mean_img) # MattC avi
        if cv2.waitKey(2000) == 27:  # exit if Escape is hit
            cv2.destroyAllWindows()
            sys.exit()
        cv2.destroyAllWindows()
    y1, y2 = detect_bord(mean_img, axis=1, offset=0) # MattC 5
    logme('Vertical limits y1, y2 : ' + str(y1) + ' ' + str(y2))
    # fall back to near-full-height limits when edge detection looks wrong
    if (y1>0.2*ih): #MattC
        y1 = 5
    if (y2<0.8*ih): #MattC
        y2 = ih-5
    PosRaieHaut = y1
    PosRaieBas = y2
    """
    -----------------------------------------------------------
    Trouve les min intensité de la raie
    -----------------------------------------------------------
    """
    # build the table of line minima from top to bottom of the slit
    MinOfRaie = []
    for i in range(PosRaieHaut, PosRaieBas):
        line_h = mean_img[i, :]
        MinX = line_h.argmin()
        MinOfRaie.append([MinX, i])
        #print('MinOfRaie x,y', MinX,i)
    # degree-2 polynomial best fit; image rows (y) act as the abscissa and
    # columns (x) as the ordinate
    np_m = np.asarray(MinOfRaie)
    xm, ym = np_m.T
    # LineRecal=xm.min()
    p = np.polyfit(ym, xm, 2)
    # evaluate the column x for every row y of the polynomial
    a = p[0]
    b = p[1]
    c = p[2]
    fit = []
    # ecart=[]
    for y in range(0, ih):
        x = a * y**2 + b*y + c
        deci = x - int(x)
        fit.append([int(x) - LineRecal, deci, y])
    adjustflag=False #MattC
    # interactive/automatic fit adjustment; also yields the m1..m8 globals
    thenewfit, adjustflag, m1, m2, m3, m4, m5, m6, m7, m8 = thebigroutine(mean_img, fit, LineRecal, basefich0) #MattC
    print(adjustflag,"m1 : ",m1,m2,m3,m4,m5,m6,m7,m8,"y1 : ",y1,y2,"Haut : ",PosRaieHaut,PosRaieBas,"ih :",ih)
    if m7>0:
        print("m7 ",m7)
        y1 = m7
        PosRaieHaut = y1
    if m8>0:
        print("m8 ",m8)
        y2 = m8
        PosRaieBas = y2
    if adjustflag : #MattC
        print("Origl fit : ",np.asarray(fit)[1:5,]) #MattC
        print("Adjusted fit : ", np.asarray(thenewfit)[1:5,]) #MattC
        fit=thenewfit #MattC
    corrintprofile = compute_flattener(mean_img, ih, fit, y1, y2)
    # plt.plot(corrintprofile)
    # plt.show()
    framenums = len(thedatacube)
    # one identical flat column per frame
    # NOTE(review): np.float is removed in NumPy >= 1.24; should be float
    disk_flat = np.zeros((ih, framenums), dtype=np.float) #, dtype=np.uint16)
    for i in range(0,framenums):
        # disk_flat[:, i] = compute_flattener(thedatacube[i], ih, fit, y1, y2)
        disk_flat[:, i] = corrintprofile
    #    if i == (framenums // 2):
    #        print("disk f ranges ",np.min(disk_flat[:,i]), np.max(disk_flat[:,i]),i)
    #        plt.plot(disk_flat[:, i])
    #        plt.show()
    # print("disk f ranges ",np.min(disk_flat), np.max(disk_flat))
    # disk_flat[disk_flat>3]=1
    # disk_flat[disk_flat < 0] = 1
    # diskf = (disk_flat-np.min(disk_flat))*64000/(np.max(disk_flat)-np.min(disk_flat))
    cv2.imwrite(basefich0 + '_flatdisk_MattC.png', disk_flat.astype(np.uint16))
    if 2==1:
        # dead branch kept for reference: inline version of compute_flattener
        MinRayfit = mean_img[np.arange(0, ih), (np.asarray(fit)[0:ih, 0]).astype(np.int)] #
        popt, __ = curve_fit(func2,np.asarray(fit)[0:ih, 2],MinRayfit,p0=np.ones(3,)) # np.ones sets poly
        v2 = func2(np.asarray(fit)[0:ih, 2],*popt) #.astype('int')
        mawindow = 15 #5
        corrintprofile = np.divide(MinRayfit, v2)
        corrintprofile = moving_average(corrintprofile, mawindow)
        profbuffer = 100 #18
        #corrintprofile[np.abs(corrintprofile) > 1.5 ] = 0
        corrintprofile[0:(y1-1-profbuffer)] = corrintprofile[(y1-1-profbuffer)] #18 was 25
        corrintprofile[(y2+1+profbuffer):ih] = corrintprofile[(y2+1+profbuffer)]
        a=[corrintprofile[(y2+1+profbuffer)]]*(mawindow-1)
        corrintprofile=np.concatenate((corrintprofile,a))
        plt.plot(corrintprofile)
        plt.show()
    return fit, a, b, c, corrintprofile, disk_flat
def correct_bad_lines_and_geom(Disk, options, not_fake):
    """Detect and repair bad (anomalously bright/dark) rows of the disk image.

    Rows whose mean deviates more than 3% from a Savitzky-Golay-smoothed
    row-mean profile are replaced by the median of their 13-row
    neighbourhood.  Returns a corrected copy of *Disk*.
    """
    global hdr, basefich
    iw = Disk.shape[1]
    ih = Disk.shape[0]
    img = Disk
    # vertical disk limits: prefer the user/auto values m7/m8 when set
    if (m7>0) and (m8>0):
        y1 = m7
        y2 = m8
    else:
        y1, y2 = detect_bord(img, axis=1,offset=0) # MattC  # vertical edges
    # bad-line detection
    # rows projected (averaged) onto the Y axis
    ysum = np.mean(img, 1)
    # only consider rows of the disk, with a margin
    ymargin = 0 # MattC was 15
    ysum = ysum[y1 + ymargin:y2 - ymargin]
    # smoothing over a 31-pixel window, order-3 polynomial (was 101 before)
    yc = savgol_filter(ysum, 31, 3)
    # divide the summed profile by its smoothed version to keep only the
    # high frequencies
    hcol = np.divide(ysum, yc)
    # zero out rows whose deviation is below 3%
    hcol[abs(hcol - 1) <= 0.03] = 0
    # pad with zeros at start and end to cover the full image height
    a = [0] * (y1 + ymargin)
    b = [0] * (ih - y2 + ymargin)
    hcol = np.concatenate((a, hcol, b))
    # indices of the rows to correct
    l_col = np.where(hcol != 0)
    listcol = l_col[0]
    # repair by a 13-row median filter (empirical)
    img_copy = np.copy(img)
    for c in listcol:
        m = img[c - 7:c + 6, ]
        s = np.median(m, 0)
        img_copy[c - 1:c, ] = s
    '''
    if options['save_fit']:
        DiskHDU=fits.PrimaryHDU(img_copy,header=hdr)
        DiskHDU.writeto(basefich+'_corr.fits', overwrite='True')
    '''
    return img_copy
def correct_transversalium(img, flag_nobords, options, not_fake, theflattener, theflatdisk): # MattC
    """Remove transversalium stripes by dividing the image by a row flat.

    Builds a per-row intensity profile of the disk (median of pixels above a
    brightness threshold), smooths it, and divides the image by the ratio
    profile/smoothed-profile.  *flag_nobords* is forcibly reset to False;
    *theflattener*/*theflatdisk* are only used by code that is currently
    commented out.  Returns the flattened image as uint16.
    """
    global hdr, ih, basefich
    frame = img
    newiw=img.shape[1]
    ih=img.shape[0]
    flag_nobords = False
    # find the projection of the sun's maximum extent along Y
    if (m7>0) and (m8>0): #MattC
        y1 = m7
        y2 = m8
    else:
        y1, y2 = detect_bord(frame, axis=1,offset=0) # MattC
    #y1,y2=detect_bord(frame, axis=1,offset=0) #MattC comment out if using above lines
    #print ('flat ',y1,y2)
    # if edge detection in x failed we must use the whole image
    if flag_nobords:
        ydisk=np.median(img,1)
    else:
        #plt.hist(frame.ravel(),bins=1000,)
        #plt.show()
        #plt.hist(frame.ravel(),bins=1000,cumulative=True)
        #plt.show()
        ##seuil_bas=np.percentile(frame,25)
        seuil_haut=np.percentile(frame,97)
        #print ('Seuils de flat: ',seuil_bas, seuil_haut)
        #print ('Seuils bas x: ',seuil_bas*4)
        #print ('Seuils haut x: ',seuil_haut*0.25)
        #myseuil=seuil_haut*0.2
        myseuil=seuil_haut*0.5
        # per-row profile taking only on-disk pixels (above threshold)
        ydisk=np.empty(ih+1)
        for j in range(0,ih):
            temp=np.copy(frame[j,:])
            temp=temp[temp>myseuil]
            if len(temp)!=0:
                ydisk[j]=np.median(temp)
            else:
                ydisk[j]=1
    ##y1=y1
    ##y2=y2
    ToSpline= ydisk[y1:y2]
    Smoothed2=savgol_filter(ToSpline,301, 3) # window size, polynomial order
    ''' #origly commented out
    #best fit d'un polynome degre 4
    np_m=np.asarray(ToSpline)
    ym=np_m.T
    xm=np.arange(y2-y1)
    p=np.polyfit(xm,ym,4)
    #calcul des x colonnes pour les y lignes du polynome
    a=p[0]
    b=p[1]
    c=p[2]
    d=p[3]
    e=p[4]
    x = np.arange(y2-y1)
    Smoothed=a*x**4+b*x**3+c*x**2+d*x+e
    '''
    # divide the real profile by its smoothed version: this is the flat
    hf=np.divide(ToSpline,Smoothed2)
    # remove possible edge artefacts
    hf=hf[5:-5]
    # rebuild the full-height profile (ones outside the disk)
    a=[1]*(y1+5)
    b=[1]*(ih-y2+5)
    hf=np.concatenate((a,hf,b))
    ##Smoothed=np.concatenate((a,Smoothed,b))
    ToSpline=np.concatenate((a,ToSpline,b))
    Smoothed2=np.concatenate((a,Smoothed2,b))
    '''
    # MattC test
    corrintprofile = np.zeros((y2-y1,1))
    popt, __ = curve_fit(func2,range(y1,y2),ydisk,p0=np.ones(3,)) # np.ones sets poly
    v2 = func2(range(y1,y2),*popt) #.astype('int')
    mawindow = 15 #5
    corrintprofile = np.divide(ydisk, v2)
    corrintprofile = moving_average(corrintprofile, mawindow)
    profbuffer = 0 # 100 18
    #corrintprofile[np.abs(corrintprofile) > 1.5 ] = 0
    corrintprofile[0:(y1-1-profbuffer)] = corrintprofile[(y1-1-profbuffer)] #18 was 25
    corrintprofile[(y2+1+profbuffer):ih] = corrintprofile[(y2+1+profbuffer)]
    a=[corrintprofile[(y2+1+profbuffer)]]*(mawindow-1)
    Smoothed2=np.concatenate((corrintprofile,a))
    # plt.plot(Smoothed2)
    # plt.show()
    # MattC end test
    '''
    # build the flat image (the profile replicated across all columns)
    flat=[]
    hf = np.array(hf) / max(0.9, min(hf)) # don't make things bigger
    hf[hf==0] = 1
    for i in range(0,newiw):
        flat.append(hf)
    '''
    BelleImage=np.divide(frame,theflatdisk) # MattC
    framef=np.array(BelleImage, dtype='uint16')
    DiskHDU = fits.PrimaryHDU(framef, header=hdr)
    DiskHDU.writeto(basefich + '_flat_MattC.fits', overwrite='True')
    '''
    np_flat=np.asarray(flat)
    flat = np_flat.T
    #print(hf, sum(hf)/len(hf), max(hf), min(hf))
    # divide the image by the flat
    BelleImage=np.divide(frame,flat)
    frame=np.array(BelleImage, dtype='uint16')
    # save the de-flattened image
    if options['save_fit'] and not_fake:
        DiskHDU = fits.PrimaryHDU(frame, header=hdr)
        DiskHDU.writeto(basefich + '_flat.fits', overwrite='True')
    return frame
def solex_proc(serfile, options):
    """Top-level pipeline: reconstruct, flatten and circularize the sun images.

    Prepends two internal shifts ([10, 0]) to options['shift'], reconstructs
    one disk image per shift, applies bad-line/transversalium corrections and
    the ellipse-to-circle geometric correction, saves FITS files and a log.
    Returns (frames for the user-requested shifts only, FITS header, circle).
    """
    global hdr, ih, iw, basefich0, basefich
    clearlog()
    # plt.gray()              #palette de gris si utilise matplotlib pour visu
    # debug
    logme('Using pixel shift : ' + str(options['shift']))
    options['shift'] = [10, 0] + options['shift']  # 10, 0 are "fake"
    WorkDir = os.path.dirname(serfile) + "/"
    os.chdir(WorkDir)
    base = os.path.basename(serfile)
    basefich0 = os.path.splitext(base)[0]
    LineRecal = 1
    rdr = ser_reader(serfile)
    hdr = make_header(rdr)
    ih = rdr.ih
    iw = rdr.iw
    fit, a, b, c, flattener, flatdisk = compute_mean_return_fit(serfile, options, LineRecal)
    # Modification Jean-Francois: correct the variable names: A0, A1, A2
    logme('Coeff A0, A1, A2 : ' + str(a) + ' ' + str(b) + ' ' + str(c))
    disk_list, ih, iw, FrameCount = read_video_improved(serfile, fit, LineRecal, options, flattener)
    hdr['NAXIS1'] = iw  # note: slightly dodgy, new width
    #if m1 > 0:
    #    options['shift'] = options['shift'] + [str(int(m4))+'FWHM'] #MattC adding bw 2*m4+1
    # save the reconstructed disk file
    if options['flag_display']:
        cv2.destroyAllWindows()
    cercle = (-1, -1, -1)
    frames_circularized = []
    for i in range(len(disk_list)):
        basefich = basefich0 + '_shift=' + str(options['shift'][i])
        # first two entries (shift 10 and 0) are internal; only save user shifts
        if options['save_fit'] and i >= 2:
            DiskHDU = fits.PrimaryHDU(disk_list[i], header=hdr)
            DiskHDU.writeto(basefich + '_img.fits', overwrite='True')
        """
        --------------------------------------------------------------------
        --------------------------------------------------------------------
        Badlines and geometry
        --------------------------------------------------------------------
        --------------------------------------------------------------------
        """
        try:
            img = correct_bad_lines_and_geom(disk_list[i], options, i >= 2)
            """
            --------------------------------------------------------------
            transversallium correction
            --------------------------------------------------------------
            """
            flag_nobords = False
            frame_flatted = correct_transversalium(img, flag_nobords, options, i >= 2, flattener, flatdisk)
            # frame_flatted = img # MattC
        except Exception:
            # best effort: fall back to the uncorrected image on any failure
            logme('WARNING: correct_bad_lines / correct_transversalium FAILED')
            frame_flatted = disk_list[i]
        """
        We now apply ellipse_fit to apply the geometric correction
        """
        # disk_list[0] is always shift = 10, for more contrast for ellipse fit
        if options['ratio_fixe'] is None and options['slant_fix'] is None:
            frame_circularized, cercle, options['ratio_fixe'], phi = ellipse_to_circle(
                frame_flatted, options, basefich)
            # in options angles are stored as degrees for some reason
            options['slant_fix'] = math.degrees(phi)
            frames_circularized.append(frame_circularized)
        else:
            ratio = options['ratio_fixe'] if not options['ratio_fixe'] is None else 1.0
            phi = math.radians(
                options['slant_fix']) if not options['slant_fix'] is None else 0.0
            frames_circularized.append(correct_image(frame_flatted / 65536, phi, ratio, np.array(
                [-1.0, -1.0]), -1.0, print_log=i == 0)[0])  # Note that we assume 16-bit
        # save the final image as FITS
        if options['save_fit'] and i >= 2:  # first two shifts are not user specified
            DiskHDU = fits.PrimaryHDU(frames_circularized[-1], header=hdr)
            DiskHDU.writeto(basefich + '_recon.fits', overwrite='True')
    with open(basefich0 + '_log.txt', "w") as logfile:
        logfile.writelines(mylog)
    return frames_circularized[2:], hdr, cercle
| mconsidine/Digital_SHG | Solex_recon.py | Solex_recon.py | py | 24,677 | python | en | code | 1 | github-code | 13 |
42952575036 | from MindSphere import MindSphere
mindsphere = MindSphere(app_Name=None,
app_Version=None,
tenant=None,
gateway_URL=None,
client_ID=None,
client_Secret=None
)
assetId,aspectName = None,None
mindsphere.putTimeSeriesData(assetId,aspectName,{"_time":None,"Temperature":90.50})
| unifgabsantos/MindSphere | main.py | main.py | py | 432 | python | en | code | 1 | github-code | 13 |
41163065986 | jogador = dict()
partidas = list()
sum = 0
jogador['nome'] = str(input('Digite o nome do Jogador: '))
n = int(input(f'Quantas partidas {jogador["nome"]} jogou: '))
for x in range(0, n):
partidas.append(int(input(f'Quantos gols na partida {x+1}? ')))
jogador['gols'] = partidas
for x in jogador['gols']:
sum += x
print(jogador)
print(sum)
for k, v in jogador.items():
print(f'O campo {k} tem o valor {v}') | Pauloa90/Python | ex093.py | ex093.py | py | 419 | python | pt | code | 0 | github-code | 13 |
19064178176 | from dataclasses import dataclass
@dataclass
class CustomQueue:
items: list
MAXSIZE: int
x_values = set()
def xSet(self) -> set:
if not self.x_values:
for items in self.items:
self.x_values.add(items.pos[0])
return self.x_values
def removeX(self, x):
if x in self.x_values:
self.x_values.remove(x)
def addX(self, x):
self.x_values.add(x)
def isEmpty(self) -> bool:
return len(self.items) == 0
def enqueue(self, item):
if self.MAXSIZE == len(self.items):
return
if not self.isEmpty():
self.items.append(item)
self.Sort()
else:
self.items.append(item)
def dequeue(self):
if self.isEmpty():
return
item = self.Top()
self.items.pop(0)
return item
def Top(self):
return self.items[0]
def Sort(self):
for i in range(len(self.items)):
idx = i
for j in range(len(self.items)):
if self.items[idx].pos[1] < self.items[j].pos[1]:
idx = j
self.items[i], self.items[idx] = self.items[idx], self.items[i]
def Pop(self, node, add):
for i in range(len(self.items)):
if self.items[i].pos == node.pos:
new = add()
self.items[i] = new
return new
def check(self, height):
if self.Top().pos[1] >= height and self.Top().maxed:
return self.dequeue()
if self.Top().pos[1] >= height and self.Top().next:
item = self.Top()
item = item.next
return None
def UpdateNodes(self, appendFunc):
for item in self.items:
node = item
while node:
node.pos = (node.pos[0], node.pos[1] + 5)
node = node.next
for item in self.items:
node = item
appendFunc(node)
def Trails(self) -> list:
return self.items
| TajTelesford/ColorMatrixApplication | CustomQueue.py | CustomQueue.py | py | 2,102 | python | en | code | 0 | github-code | 13 |
24704010564 | #! /usr/local/python_anaconda/bin/python3.4
from Bio import SeqIO
from Bio import AlignIO
from Bio import Alphabet
from Bio.Alphabet import IUPAC
import collections
from file_utilities import check_filename
import pandas as pd
import textwrap
from collections import Counter
from itertools import product
from phyVirus.get_baltimore import get_baltimore_classifiaction
def count_gaps_and_characters(aln_file, file_format = "fasta"):
    """
    count how many gaps and how many characters there are in an alignment
    :param aln_file: input alignment file
    :param file_format: input file format (default: fasta)
    :return: number of sequences in the alignment, number of gap chars, number of non-gap chars
    """
    aln_file = check_filename(aln_file)
    aln = AlignIO.read(aln_file, file_format, alphabet=Alphabet.Gapped(IUPAC.unambiguous_dna))
    total_gaps = sum(record.seq.count("-") for record in aln)
    total_not_gaps = sum(len(record.seq) for record in aln) - total_gaps
    return len(aln), total_gaps, total_not_gaps
def base_frequencies(filename, in_format="fasta"):
    """
    calculates base frequencies in sequence file
    :param filename: input nucleotide sequence filename
    :param in_format: input format (default: fasta)
    :return: freqs dictionary (printed as a side effect)
    """
    filename = check_filename(filename)
    records = list(SeqIO.parse(filename, in_format))
    freqs = {"A": 0, "G": 0, "C": 0, "T": 0}
    total_len = 0
    # accumulate raw counts and the total sequence length
    for record in records:
        total_len += len(record.seq)
        for base in freqs:
            freqs[base] += record.seq.count(base)
    # normalise counts to frequencies
    for base in freqs:
        freqs[base] = freqs[base] / float(total_len)
    print(freqs)
    return freqs
def get_consensus_from_alignment(aln_file, in_format="fasta"):
    """
    constructs a consensus sequence from alignment file
    (majority character per column; columns whose majority is a gap are
    dropped, so the consensus can be shorter than the alignment)
    :param aln_file: alignment file
    :param in_format: file format (default: fasta)
    :return: consensus sequence
    """
    aln_file = check_filename(aln_file)
    aln = AlignIO.read(aln_file, in_format, alphabet=Alphabet.Gapped(IUPAC.unambiguous_dna))
    len_aln = len(aln[0])
    consensus = ""
    for i in range(len_aln):
        count = 0
        max_char = ""
        counter = collections.Counter(aln[:, i])
        # strict > keeps the first-seen character on ties (order of Counter)
        for j in counter:
            if counter[j] > count:
                count = counter[j]
                max_char = j
        if max_char == "-":
            continue
        consensus += max_char
    return consensus
def stop_mutation_potential_in_coding_sequence(filename, in_format="fasta"):
    """
    Count single-nucleotide substitutions that would create a stop codon
    in each coding sequence of a file.

    Assumes the reading frame starts at the first nucleotide of every record.
    A partial trailing codon (sequence length not a multiple of 3) is skipped;
    previously it raised IndexError when building the 3rd-position mutant.

    :param filename: input file name
    :param in_format: input format (default: fasta)
    :return: pandas DataFrame with columns seq_id, seq_len, stop_mutation_count
    """
    stop_codons = {"TGA", "TAA", "TAG"}
    filename = check_filename(filename)
    dataset = list(SeqIO.parse(filename, in_format))
    rows = []
    for seq in dataset:
        stop_mutation_count = 0
        seq_len = len(seq.seq)
        for i in range(0, seq_len, 3):
            codon = str(seq.seq[i:i + 3])
            # skip partial trailing codons and codons that already are stops
            if len(codon) < 3 or codon in stop_codons:
                continue
            for nuc in "TCAG":
                # substitute nuc at each of the three codon positions
                mutants = (codon[:2] + nuc,
                           nuc + codon[1:],
                           codon[0] + nuc + codon[2])
                stop_mutation_count += sum(1 for m in mutants if m in stop_codons)
        rows.append({"seq_id": seq.id, "seq_len": seq_len,
                     "stop_mutation_count": stop_mutation_count})
    # DataFrame.append was removed in pandas 2.0 - build all rows first
    return pd.DataFrame(rows, columns=["seq_id", "seq_len", "stop_mutation_count"])
def get_major_and_minor_consensus(aln_file, in_format="fasta"):
    """
    calculates major and minor consensus and each position's probability
    - major consensus - the most prominent base (including "-")
    - minor consensus - the most prominent base (not including "-");
      non-ACGT winners are reported as "N"

    :param aln_file: alignment file path
    :param in_format: input alignment format (default: fasta)
    :return: major_consensus, major_freqs, minor_consensus, minor_freqs
    """
    aln_file = check_filename(aln_file)
    aln = AlignIO.read(aln_file, in_format, alphabet=Alphabet.Gapped(IUPAC.unambiguous_dna))
    len_aln = len(aln[0])
    num_of_seq = len(aln)
    major_consensus = ""
    major_freqs = []
    minor_consensus = ""
    minor_freqs = []
    for i in range(len_aln):
        counter = collections.Counter(aln[:, i])
        major_count = 0
        minor_count = 0
        major_char = ""
        minor_char = ""
        # NOTE(review): the minor pick below depends on counter iteration
        # order and is only updated when a char beats the current minor_count;
        # confirm ties behave as intended.
        for j in counter:
            if counter[j] > major_count:
                major_count = counter[j]
                major_char = j
                if j != "-":
                    minor_count = counter[j]
                    minor_char = j
            if counter[j] > minor_count and j != "-":
                # ambiguous (non-ACGT) characters are collapsed to "N"
                if j not in ["A", "C", "G", "T"]:
                    minor_count = counter[j]
                    minor_char = "N"
                else:
                    minor_count = counter[j]
                    minor_char = j
        gap_count = counter["-"]
        major_consensus += major_char
        # NOTE(review): an all-gap column makes num_of_seq - gap_count == 0
        # and raises ZeroDivisionError - confirm inputs never contain one.
        major_freqs.append(round(major_count / (num_of_seq - gap_count), 2))
        minor_consensus += minor_char
        minor_freqs.append(round(minor_count / (num_of_seq - gap_count), 2))
    return major_consensus, major_freqs, minor_consensus, minor_freqs
def get_consensus_percentage(aln_file, in_format="fasta"):
    """
    gets alignment file and returns the consensus and
    the percentage of each position in the alignment
    the percentage calculation ignores gaps

    :param aln_file: input alignment file path
    :param in_format: input file format (defualt: fasta)
    :return: consensus sequance and a histogram dict mapping a rounded
             majority share (1, 0.9, ..., 0.2) to the fraction of columns
             with that share
    """
    aln_file = check_filename(aln_file)
    aln = AlignIO.read(aln_file, in_format, alphabet=Alphabet.Gapped(IUPAC.unambiguous_dna))
    len_aln = len(aln[0])
    num_of_seq = len(aln)
    # NOTE(review): a column whose majority share rounds below 0.2 raises
    # KeyError here - confirm such diverse columns cannot occur in practice.
    consensus_percentage= {1:0, 0.9:0, 0.8:0, 0.7:0, 0.6:0, 0.5:0, 0.4:0, 0.3:0, 0.2:0}
    consensus = ""
    for i in range(len_aln):
        counter = collections.Counter(aln[:, i])
        count = 0
        max_char = ""
        # majority vote over non-gap characters only
        for j in counter:
            if j == "-":
                continue
            elif counter[j] > count:
                count = counter[j]
                max_char = j
        if "-" not in counter:
            gap_count = 0
        else:
            gap_count = counter["-"]
        # share of the winning char among non-gap characters, rounded to 0.1
        percentage = round(count/(num_of_seq-gap_count), 1)
        consensus_percentage[percentage] += 1
        consensus += max_char
    # normalize histogram counts to fractions of the alignment length
    for n in consensus_percentage:
        consensus_percentage[n] = round(consensus_percentage[n] / len_aln, 3)
    return consensus, consensus_percentage
def get_codon_freqs_from_consensus(filename, in_format="fasta"):
    """
    Calculate codon frequencies of the consensus sequence of a sequence file.

    :param filename: input sequence filename
    :param in_format: input format (default: fasta)
    :return: codon frequency dict and a formatted text block for simulation input
    """
    filename = check_filename(filename)
    # NOTE: parsed records are not used directly; the consensus is rebuilt below
    dataset = list(SeqIO.parse(filename, in_format))
    consensus = get_consensus_from_alignment(filename)
    bases = "TCAG"
    # standard codon-table layout: T, C, A, G order at every position
    codons = {first + second + third: 0
              for first in bases for second in bases for third in bases}
    all_codons = 0
    for i in range(0, len(consensus), 3):
        codon = consensus[i:i + 3]
        # skip partial trailing codons and codons containing ambiguous bases
        if len(codon) < 3 or "N" in codon:
            continue
        codons[codon] += 1
        all_codons += 1
    for codon in codons.keys():
        codons[codon] = float(codons[codon]) / all_codons
        # unseen codons get a tiny pseudo-frequency instead of zero
        if codons[codon] == 0:
            codons[codon] = 0.000001
    # render the 16-row table (4 groups of 4 rows, blank line between groups)
    groups = []
    for first in bases:
        rows = []
        for second in bases:
            row_codons = [first + second + third for third in bases]
            values = tuple(codons[c] for c in row_codons)
            rows.append("%f %f %f %f // %s\n" % (values + (" ".join(row_codons),)))
        groups.append("".join(rows))
    to_print = "\n".join(groups)
    print(to_print)
    return codons, to_print
def get_amino_acid_freqs(filename, no_strange_aas=True):
    """
    gets a fasta file of protein seqs and returns a dataframe of amino acid
    frequencies for each fasta entry.
    id - ncbi id (4th "|"-separated field of the fasta header)

    :param filename: file path of amino acid fasta file
    :param no_strange_aas: if True, ignore ambiguous aa letters (X, J, Z, B)
    :return: dataframe with aa frequencies, one row per sequence
    """
    filename = check_filename(filename)
    # context manager fixes the file-handle leak of the previous open().read()
    with open(filename, "r") as handle:
        entries = handle.read().split(">")[1:]
    rows = []
    for entry in entries:
        ncbi_id = entry.split("|")[3]
        seq = "".join(entry.split("\n")[1:])
        # NOTE(review): frequencies are normalized by the length *before*
        # ambiguous letters are removed (original behavior) - confirm intended.
        length = len(seq)
        if no_strange_aas:
            # drop ambiguous amino-acid codes
            for strange in "XJZB":
                seq = seq.replace(strange, "")
        counter = dict(Counter(seq))
        for aa in counter:
            counter[aa] = counter[aa] / float(length)
        counter["ncbi_id"] = ncbi_id
        rows.append(counter)
    # DataFrame.append was removed in pandas 2.0 - build all rows first
    return pd.DataFrame(rows)
def get_codon_freqs(filename, no_strange_nucs=True):
    """
    gets a fasta file of nucleotide seqs and returns a dataframe of codon
    frequencies for each fasta entry.
    id - ncbi id (4th "|"-separated field of the fasta header)

    Sequences whose length is not a multiple of 3 are skipped.
    Codons containing characters outside lowercase a/c/t/g are ignored.

    :param filename: file path of nucleotide fasta file
    :param no_strange_nucs: NOTE(review): currently unused - filtering always
        happens via the lowercase codon alphabet; confirm intended.
    :return: dataframe with codon frequencies, one row per sequence
    """
    alphabet = ["a", "c", "t", "g"]
    # set membership instead of list for O(1) codon validation
    codons_possibilities = {"".join(p) for p in product(alphabet, repeat=3)}
    filename = check_filename(filename)
    # context manager fixes the file-handle leak of the previous open().read()
    with open(filename, "r") as handle:
        items = handle.read().split(">")[1:]
    rows = []
    for item in items:
        ncbi_id = item.split("|")[3]
        seq = "".join(item.split("\n")[1:])
        if len(seq) % 3 != 0:
            # cannot split into whole codons - skip this record
            continue
        counter = Counter(c for c in textwrap.wrap(seq, 3)
                          if c in codons_possibilities)
        total = sum(counter.values())
        row = {codon: count / float(total) for codon, count in counter.items()}
        row["ncbi_id"] = ncbi_id
        rows.append(row)
    # DataFrame.append was removed in pandas 2.0 - build all rows first
    return pd.DataFrame(rows)
def get_dinucleotide_odds_ratio(fasta_file, in_format="fasta", output_dir=None):
    """
    Compute per-sequence dinucleotide odds ratios f(XY) / (f(X) * f(Y)) for
    every sequence in a fasta/alignment file and write them, plus their
    per-file average, to two CSV files.

    For double-stranded ("ds") Baltimore groups a symmetrized form that also
    counts the reverse-complement dinucleotide is used.
    NOTE(review): presumably the Karlin relative-abundance measure - confirm
    the ds formula against the original reference.

    :param fasta_file: input fasta/alignment file path
    :param in_format: input file format (default: fasta)
    :param output_dir: output directory (default: next to the input file)
    """
    fasta_file = check_filename(fasta_file)
    dinucs_or = pd.DataFrame()
    # all 16 dinucleotide keys, plus single-nucleotide counters and complements
    dinucs = {}
    for p in product("ACTG", repeat=2):
        dinucs[p[0]+p[1]] = 0
    nucs = {"A":0, "C":0, "G":0, "T":0}
    comp = {"A":"T", "C":"G", "G":"C", "T":"A"}
    # strip known alignment/fasta suffixes to get the dataset base name
    basename = fasta_file.split("/")[-1].split(".fasta")[0].split(".aln")[0].split(".aln.best.fas")[0].split(".codon_aln.best.fas")[0]
    family = basename.split("_")[0]
    baltimore = get_baltimore_classifiaction(family)
    if output_dir == None:
        output_base = fasta_file.split(".fasta")[0].split(".aln")[0].split(".aln.best.fas")[0].split(".codon_aln.best.fas")[0]
    else:
        output_base = "%s/%s" % (output_dir, basename)
    output_dinuc = output_base + ".dinuc_odds_ratio"
    output_dinuc_averaged = output_base + ".dinuc_averaged_odds_ratio"
    seqs = list(SeqIO.parse(fasta_file, format=in_format))
    for s in seqs:
        # reset counters for each sequence
        for i in dinucs:
            dinucs[i] = 0
        for i in nucs:
            nucs[i] = 0
        # remove alignment gaps before counting
        s.seq = str(s.seq).replace("-", "").upper()
        #count and calculate nucleotide freqs
        for i in nucs:
            nucs[i] = s.seq.count(i)
        count = len(s.seq)
        for i in nucs:
            nucs[i] = nucs[i] / count
        #count and calculate dinucleotide freqs
        # NOTE(review): str.count counts non-overlapping occurrences - confirm
        # overlapping dinucleotides are meant to be excluded.
        for i in dinucs:
            dinucs[i] = s.seq.count(i)
        count_dinucs = sum(dinucs.values())
        for i in dinucs:
            dinucs[i] = dinucs[i] / count_dinucs
        #calculate odds ratio
        for i in dinucs:
            if "ds" in baltimore:
                # double-stranded genomes: symmetrize with reverse complement
                comp_dinuc = comp[i[1]] + comp[i[0]]
                dinucs[i] = (2*(dinucs[i] + dinucs[comp_dinuc]) /
                             ((nucs[i[0]] + nucs[i[1]]) * (nucs[comp_dinuc[0]] + nucs[comp_dinuc[1]])))
            else:
                dinucs[i] = dinucs[i] / (nucs[i[0]] * nucs[i[1]])
        # NOTE(review): DataFrame.append was removed in pandas 2.0 - this
        # function requires pandas < 2.0 as written.
        dinucs_or = dinucs_or.append(
            {"baltimore":baltimore, "family": family, "basename": basename, "seq_name": s.id,
             **dinucs},
            ignore_index=True)
    # per-file average over all sequences, with file metadata attached
    dinucs_average = dinucs_or.mean(axis=0).to_frame().transpose()
    dinucs_average = pd.concat([dinucs_average,
                                pd.DataFrame([{"baltimore":baltimore, "family": family, "basename": basename,}])], axis=1)
    dinucs_or.to_csv(output_dinuc, index=False)
    dinucs_average.to_csv(output_dinuc_averaged, index=False)
def analyze_nuc_frequencies_and_wobble_freqs(fasta_file, in_format="fasta", output=None):
    """
    Compute overall, wobble-position (3rd codon position) and non-wobble
    (1st + 2nd codon positions) nucleotide frequencies over all in-frame
    sequences of a fasta file and write them as a single-row CSV.

    Sequences whose length is not a multiple of 3 are skipped.

    :param fasta_file: input fasta file path
    :param in_format: input file format (default: fasta)
    :param output: output CSV path (default: <fasta_file>.base_freqs.csv)
    """
    fasta_file = check_filename(fasta_file)
    if output is None:
        output = fasta_file.split(".fasta")[0] + ".base_freqs.csv"
    else:
        output = check_filename(output, Truefile=False)
    columns = ["filename", "dir", "base_file", "A", "C", "T", "G",
               "wob_A", "wob_C", "wob_T", "wob_G",
               "non_wob_A", "non_wob_G", "non_wob_C", "non_wob_T"]
    seqs = list(SeqIO.parse(fasta_file, format=in_format))
    base_freqs = {"A": 0, "G": 0, "C": 0, "T": 0}
    wobble_freqs = {"wob_A": 0, "wob_G": 0, "wob_C": 0, "wob_T": 0}
    non_wobble_freqs = {"non_wob_A": 0, "non_wob_G": 0, "non_wob_C": 0, "non_wob_T": 0}
    count = 0
    wobble_count = 0
    non_wobble_count = 0
    for s in seqs:
        if len(s.seq) % 3 != 0:
            # not a whole number of codons - wobble positions undefined
            continue
        s.seq = s.seq.upper()
        # overall base counts (previous version used misleadingly-named
        # locals: c held the G count, t the C count, g the T count)
        count += len(s.seq)
        for base in "ACGT":
            base_freqs[base] += s.seq.count(base)
        # wobble = 3rd position of each codon
        wobble_s = s.seq[2::3]
        wobble_count += len(wobble_s)
        for base in "ACGT":
            wobble_freqs["wob_" + base] += wobble_s.count(base)
        # non-wobble = 1st and 2nd codon positions
        non_wobble_s = s.seq[0::3] + s.seq[1::3]
        non_wobble_count += len(non_wobble_s)
        for base in "ACGT":
            non_wobble_freqs["non_wob_" + base] += non_wobble_s.count(base)
    for k in base_freqs:
        base_freqs[k] = base_freqs[k] / float(count)
    for k in wobble_freqs:
        wobble_freqs[k] = wobble_freqs[k] / float(wobble_count)
    for k in non_wobble_freqs:
        non_wobble_freqs[k] = non_wobble_freqs[k] / float(non_wobble_count)
    # DataFrame.append was removed in pandas 2.0 - build the row directly
    df = pd.DataFrame([{"filename": fasta_file, "dir": fasta_file.split("/")[-2],
                        "base_file": fasta_file.split("/")[-1],
                        **base_freqs, **wobble_freqs, **non_wobble_freqs}],
                      columns=columns)
    df.to_csv(output)
def analyze_nuc_frequencies_and_wobble_freqs_from_aln(aln_file, in_format="fasta", output_dir=None):
    """
    Compute per-sequence nucleotide counts and frequencies (overall, wobble
    position and non-wobble positions) from an alignment file, plus their
    per-file averages, and write four CSV files.

    :param aln_file: input alignment file path
    :param in_format: input file format (default: fasta)
    :param output_dir: output directory prefix (default: next to the input
        file); note it is concatenated directly, so include a trailing "/"
    """
    aln_file = check_filename(aln_file)
    base = aln_file.split(".fasta")[0].split(".aln")[0].split(".aln.best.fas")[0].split(".codon_aln.best.fas")[0]
    basename = base.split("/")[-1]
    out_base = base if output_dir is None else output_dir + basename
    output_freqs = out_base + ".base_freqs_info.csv"
    output_counts = out_base + ".base_counts_info.csv"
    output_averaged_freqs = out_base + ".base_freqs_averaged_freqs.csv"
    output_averaged_counts = out_base + ".base_freqs_averaged_counts.csv"
    aln = AlignIO.read(aln_file, format=in_format)
    family = basename.split("_")[0]
    baltimore = get_baltimore_classifiaction(family)
    count_rows = []
    freq_rows = []
    for a in aln:
        a.seq = a.seq.upper()
        # wobble = 3rd codon position
        wobble = a[2::3]
        # BUGFIX: non-wobble is every 1st and 2nd codon position
        # (a[0::3] + a[1::3]); the previous code took only the first 3 bases.
        non_wobble = a[0::3] + a[1::3]
        nuc_counts = {base: a.seq.count(base) for base in "ACGT"}
        wob_counts = {"wob_" + base: wobble.seq.count(base) for base in "ACGT"}
        non_wob_counts = {"non_wob_" + base: non_wobble.seq.count(base) for base in "ACGT"}
        all_count = sum(nuc_counts.values())
        wobble_count = sum(wob_counts.values())
        non_wobble_count = sum(non_wob_counts.values())
        meta = {"baltimore": baltimore, "family": family,
                "basename": basename, "seq_name": a.id}
        # BUGFIX: counts previously wrote the "wob_C" key twice and omitted
        # wob_G / wob_T entirely.
        count_rows.append({**meta, **nuc_counts, **wob_counts, **non_wob_counts})
        freq_rows.append({**meta,
                          **{k: v / all_count for k, v in nuc_counts.items()},
                          **{k: v / wobble_count for k, v in wob_counts.items()},
                          **{k: v / non_wobble_count for k, v in non_wob_counts.items()}})
    # DataFrame.append was removed in pandas 2.0 - build all rows first
    counts = pd.DataFrame(count_rows)
    freqs = pd.DataFrame(freq_rows)
    meta_df = pd.DataFrame([{"baltimore": baltimore, "family": family, "basename": basename}])
    averaged_freqs = pd.concat(
        [freqs.mean(axis=0, numeric_only=True).to_frame().transpose(), meta_df], axis=1)
    averaged_counts = pd.concat(
        [counts.mean(axis=0, numeric_only=True).to_frame().transpose(), meta_df], axis=1)
    counts.to_csv(output_counts, index=False)
    freqs.to_csv(output_freqs, index=False)
    averaged_freqs.to_csv(output_averaged_freqs, index=False)
    averaged_counts.to_csv(output_averaged_counts, index=False)
| SternLabTAU/SternLab | seqFileAnalyzer.py | seqFileAnalyzer.py | py | 22,174 | python | en | code | 1 | github-code | 13 |
41420692659 | """
TP3
réalisé le 28/11/2022
Par Saglibene Lilian et Glemet Augustin
Objectif : Créer une fonction si la lettre proposé par l'utilisateur est dans le mot recherché
"""
def verif_lettre(lettre_prop, solution):
    """Return, for each position of *solution*, the index when the letter
    matches *lettre_prop* and -1 otherwise."""
    return [i if lettre == lettre_prop else -1
            for i, lettre in enumerate(solution)]
| Lilian2588/Python | TP3/verification_lettre.py | verification_lettre.py | py | 469 | python | fr | code | 0 | github-code | 13 |
74675240656 | import pytest
import numpy as np
from mstk.topology.geometry import *
def test_grow_particle():
    """Smoke-test grow_particle from a unit bond along +y."""
    # NOTE(review): no assertion - this only prints the grown position.
    # Consider asserting the resulting bond length/angle once expected
    # values are established.
    xyz1 = np.array([0, 0, 0])
    xyz2 = np.array([0, 1, 0])
    print(grow_particle(xyz1, xyz2, 2, np.pi * 0.1))
def test_cluster():
    """Check clustering of 10 elements connected by a small bond list."""
    elements = list(range(10))
    bonds = [(7, 1), (1, 0), (3, 4), (5, 6), (4, 7)]
    # symmetric adjacency matrix; matrix[i][j] == 1 iff i and j are bonded
    matrix = np.zeros((10, 10))
    for i, j in bonds:
        matrix[i][j] = 1
        matrix[j][i] = 1
    # connected components: {0,1,7,4,3} plus singletons and the 5-6 pair
    assert find_clusters(elements, lambda x, y: matrix[x][y]) == [[0, 1, 3, 4, 7], [2], [5, 6], [8], [9]]
    # consecutive variant merges clusters that interleave in index order
    assert find_clusters_consecutive(elements, lambda x, y: matrix[x][y]) == [[0, 1, 2, 3, 4, 5, 6, 7], [8], [9]]
| z-gong/mstk | tests/topology/test_geometry.py | test_geometry.py | py | 645 | python | en | code | 7 | github-code | 13 |
30361575522 | #! /usr/bin/env python3.3
import urllib.request,urllib.parse
from package.game_time import GameDateGenerator, TimeGenerator
from package.resp_parser import ResponseParser
from package.request_api import RequestParamBuilder, RequestParameters,\
RequestTokenExtractor
'''
The class VenueChecker checks for available game slots for the
next saturdays and sundays from 9am - 7pm for the given location
code
'''
class VenueChecker:
    """Checks for available game slots on the upcoming weekend dates
    (9am - 7pm) for the given venue location code by posting the
    icanbook.com.sg availability form for every date/time-frame pair."""

    def __init__(self, location_code, description):
        # venue identifier expected by the booking site's availability form
        self.location_code = location_code
        # human-readable venue name, passed through to the response parser
        self.description = description

    def find_available_time(self):
        """Query every generated game-date / time-frame combination and
        return the list of available slots parsed from the responses."""
        date_gen = GameDateGenerator();
        game_dates = date_gen.get_game_date();
        response_parser = ResponseParser()
        time_gen = TimeGenerator();
        # fetch the ASP.NET __VIEWSTATE / __EVENTVALIDATION tokens once,
        # then reuse them for every POST below
        request_token = RequestTokenExtractor()
        request_token.get_request_tokens()
        slots = list()
        for game_date in game_dates:
            for time_range in time_gen.get_game_time_frame():
                print('Checking ', self.location_code, game_date.strftime('%d/%m/%y'), '; Start Time: ', \
                       str(time_range.start_time), '; End Time: ', str(time_range.end_time))
                # bundle the form fields for this date/time/venue query
                params = RequestParameters()
                params.game_date = game_date
                params.start_time = time_range.start_time
                params.end_time = time_range.end_time
                params.location_code = self.location_code
                params.view_state = request_token.view_state
                params.event_validation = request_token.event_validation
                request_builder = RequestParamBuilder(params)
                req_params = request_builder.build_http_param();
                #Build the http req
                request = urllib.request.Request("http://www.icanbook.com.sg/icbnew/Facility/Public/UI/AvailabilityCheck.aspx", data=req_params, method='POST')
                request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
                request.add_header("Origin","http://www.icanbook.com.sg")
                # NOTE(review): hard-coded session cookie will expire and the
                # request will then fail silently - confirm how it is refreshed.
                request.add_header("Cookie","BIGipServerweb_pool_http=JiiuKjZyI/MyXXUtsjus9dXCDEp3VECVjVNDuJP+RVWxYegxCVsdA92TaFnqkBHA6LIK/++0W6ioW04=; ASP.NET_SessionId=3x24ih45ozjftxeg5py0mb45; __utma=24707618.545845258.1373292469.1373292469.1373292469.1; __utmc=24707618; __utmz=24707618.1373292469.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)")
                request.add_header("Referer","http://www.icanbook.com.sg/icbnew/Facility/Public/UI/AvailabilityCheck.aspx")
                request.add_header("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36")
                request.add_header("X-MicrosoftAjax","Delta=true")
                f = urllib.request.urlopen(request)
                # parse_response extracts the available slots for this query
                avail_slots = response_parser.parse_response(game_date, f, self.description)
                slots.append(avail_slots)
        return slots
| cjjavellana/bcourt-poller | package/venue_checker.py | venue_checker.py | py | 3,077 | python | en | code | 0 | github-code | 13 |
23027312132 | import logging
import random
import numpy as np
import kaldi_io_py
def make_batchset(data, batch_size, max_length_in, max_length_out,
                  num_batches=0, batch_sort_key='shuffle', min_batch_size=1):
    """Make batch set from json dictionary

    :param dict data: dictionary loaded from data.json
    :param int batch_size: batch size
    :param int max_length_in: maximum length of input to decide adaptive batch size
    :param int max_length_out: maximum length of output to decide adaptive batch size
    :param int num_batches: # number of batches to use (for debug)
    :param str batch_sort_key: 'shuffle' or 'input' or 'output'
    :param int min_batch_size: mininum batch size (for multi-gpu)
    :return: list of batches
    :raises ValueError: if batch_sort_key is unknown or #utts < min_batch_size
    """
    # sort data with batch_sort_key
    if batch_sort_key == 'shuffle':
        logging.info('use shuffled batch.')
        sorted_data = random.sample(data.items(), len(data.items()))
    elif batch_sort_key == 'input':
        logging.info('use batch sorted by input length and adaptive batch size.')
        # sort it by input lengths (long to short)
        # NOTE: input and output are reversed due to the use of same json as asr
        sorted_data = sorted(data.items(), key=lambda data: int(
            data[1]['output'][0]['shape'][0]), reverse=True)
    elif batch_sort_key == 'output':
        logging.info('use batch sorted by output length and adaptive batch size.')
        # sort it by output lengths (long to short)
        # NOTE: input and output are reversed due to the use of same json as asr
        sorted_data = sorted(data.items(), key=lambda data: int(
            data[1]['input'][0]['shape'][0]), reverse=True)
    else:
        raise ValueError('batch_sort_key should be selected from None, input, and output.')
    logging.info('# utts: ' + str(len(sorted_data)))

    # check #utts is more than min_batch_size
    if len(sorted_data) < min_batch_size:
        raise ValueError("#utts is less than min_batch_size.")

    # make list of minibatches: greedily slice the sorted utterance list
    minibatches = []
    start = 0
    while True:
        if batch_sort_key == 'shuffle':
            end = min(len(sorted_data), start + batch_size)
        else:
            # NOTE: input and output are reversed due to the use of same json as asr
            ilen = int(sorted_data[start][1]['output'][0]['shape'][0])
            olen = int(sorted_data[start][1]['input'][0]['shape'][0])
            factor = max(int(ilen / max_length_in), int(olen / max_length_out))
            # change batchsize depending on the input and output length
            # if ilen = 1000 and max_length_in = 800
            # then b = batchsize / 2
            # and max(1, .) avoids batchsize = 0
            bs = max(1, int(batch_size / (1 + factor)))
            end = min(len(sorted_data), start + bs)

        # check each batch is more than minimum batchsize;
        # pad a too-small batch by re-sampling earlier utterances
        # NOTE(review): np.random.randint(0, start, mod) raises ValueError
        # when the very first minibatch is smaller than min_batch_size
        # (start == 0) - confirm callers never hit this.
        minibatch = sorted_data[start:end]
        if len(minibatch) < min_batch_size:
            mod = min_batch_size - len(minibatch) % min_batch_size
            additional_minibatch = [sorted_data[i] for i in np.random.randint(0, start, mod)]
            minibatch.extend(additional_minibatch)
        minibatches.append(minibatch)
        if end == len(sorted_data):
            break
        start = end

    # for debugging
    if num_batches > 0:
        minibatches = minibatches[:num_batches]
    logging.info('# minibatches: ' + str(len(minibatches)))

    return minibatches
def load_inputs_and_targets(batch, use_speaker_embedding=False, use_second_target=False):
    """Load inputs and targets from list of dicts (json)

    :param list batch: list of dict which is subset of loaded data.json
    :param bool use_speaker_embedding: whether to load speaker embedding vector
    :param bool use_second_target: whether to load second target vector
    :return: list of input token id sequences [(L_1), (L_2), ..., (L_B)]
    :rtype: list of int ndarray
    :return: list of target feature sequences [(T_1, D), (T_2, D), ..., (T_B, D)]
    :rtype: list of float ndarray
    :return: list of speaker embedding vectors
    :rtype: list of float adarray
    :return: list of second target feature sequences [(T_1, V), (T_2, V), ..., (T_B, V)],
    :rtype: list of float ndarray
    """
    # load acoustic features and target sequence of token ids
    # NOTE: 'output' holds the token ids and 'input' the features because the
    # same json layout as ASR is reused (see make_batchset)
    xs = [b[1]['output'][0]['tokenid'].split() for b in batch]
    ys = [kaldi_io_py.read_mat(b[1]['input'][0]['feat']) for b in batch]

    # get index of non-zero length samples
    nonzero_idx = list(filter(lambda i: len(xs[i]) > 0, range(len(xs))))
    if len(nonzero_idx) != len(xs):
        logging.warning('Input sequences include empty tokenid (batch %d -> %d).' % (
            len(xs), len(nonzero_idx)))

    # sort in input length (long to short)
    nonzero_sorted_idx = sorted(nonzero_idx, key=lambda i: -len(xs[i]))

    # remove zero-length samples
    xs = [np.fromiter(map(int, xs[i]), dtype=np.int64) for i in nonzero_sorted_idx]
    ys = [ys[i] for i in nonzero_sorted_idx]

    # load second target for CHBG (kept in the same order as xs/ys)
    if use_second_target:
        spcs = [kaldi_io_py.read_mat(b[1]['input'][1]['feat']) for b in batch]
        spcs = [spcs[i] for i in nonzero_sorted_idx]
    else:
        spcs = None

    # load speaker embedding (kept in the same order as xs/ys)
    if use_speaker_embedding:
        spembs = [kaldi_io_py.read_vec_flt(b[1]['input'][1]['feat']) for b in batch]
        spembs = [spembs[i] for i in nonzero_sorted_idx]
    else:
        spembs = None

    return xs, ys, spembs, spcs
| Gastron/espnet-old-speaker-aware | espnet/tts/tts_utils.py | tts_utils.py | py | 5,511 | python | en | code | 0 | github-code | 13 |
71292812178 | from rest_framework.test import APITestCase
from rest_framework import status
from course_service.models import Course
from user_service.models import *
from django.urls import reverse
from rest_framework_simplejwt.tokens import RefreshToken
from django.utils.http import urlencode
from password_generator import PasswordGenerator
# Create your tests here.
class ModelSetup:
    """
    Common setup cases for each test case to avoid repetition of code.
    """
    
    def common_model_setup(self):
        """Create two verified instructor users, one course each, and
        authenticate the test client as the first instructor."""
        # random passwords of at least 10 characters
        pwg = PasswordGenerator()
        pwg.minlen = 10
        
        self.user1 = User.objects.create_user(
            first_name="John",
            last_name="Doe" ,
            email="example1@gmail.com",
            password=pwg.generate(),
            is_instructor=True,
            is_verified=True,
            enable_two_factor_authentication=False
        )
        
        self.user2 = User.objects.create_user(
            first_name="Michael",
            last_name="Kruse" ,
            email="example2@gmail.com",
            password=pwg.generate(),
            is_instructor=True,
            is_verified=True,
            enable_two_factor_authentication=False
        )
        
        # instructor profiles are created implicitly for is_instructor users
        instructors = InstructorProfile.objects.all()
        self.instructor1 = instructors[0]
        self.instructor2 = instructors[1]
        
        self.course1 = Course.objects.create(
            name="Fullstack Python/Django course for beginners - 2023",
            instructor=self.instructor1,
            difficulty="Intermediate",
            is_certified=True,
            is_available=True
        )
        
        self.course2 = Course.objects.create(
            name="Golang course for beginners - 2023",
            instructor=self.instructor2,
            difficulty="Intermediate",
            is_certified=False,
            is_available=True
        )
        
        # issue a JWT for user1 so subsequent client requests are authenticated
        refresh = RefreshToken.for_user(self.user1)
        access_token = refresh.access_token
        
        self.client.credentials(HTTP_AUTHORIZATION= f"Bearer {access_token}")
# Course Test Cases
class CourseTestCase(ModelSetup, APITestCase):
    """Integration tests for the Course API (list / create / retrieve / update)."""
    
    def setUp(self):
        # reuse the shared users / instructors / courses fixture
        return super().common_model_setup()
        
    def test_create_course(self):
        """An authenticated instructor can create a course."""
        data = {
            "name": "Full Stack MERN course for beginners - 2023", 
            "difficulty": "Advanced",
            "is_certified": True,
            "is_available": True,
            "description": "None",
            "duration": "23hrs 40mins"  
        }
        
        url = reverse("course-list")
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # the course is attributed to the authenticated instructor
        self.assertEqual(response.json()["data"]["instructor"], self.instructor1.id)
        self.assertEqual(Course.objects.count(), 3)
        
    def test_create_course_with_is_instructor_false(self):
        """Creating a course as a non-instructor user is rejected."""
        self.user2.is_instructor = False 
        self.user2.save()
        
        # re-authenticate as the (now non-instructor) second user
        refresh = RefreshToken.for_user(self.user2)
        access_token = refresh.access_token
        
        self.client.credentials(HTTP_AUTHORIZATION= f"Bearer {access_token}")
        
        data = {
            "name": "Full Stack MERN course for beginners - 2023", 
            "difficulty": "Beginner",
            "is_certified": True,
            "is_available": True,
            "description": "None",
            "duration": "23hrs 40mins"  
        }
        
        url = reverse("course-list")
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.json()["data"]["detail"], "User must have is_instructor = True to create a course")
       
    def test_list_courses(self):
        """The course list endpoint returns 200."""
        url = reverse("course-list")
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    
    def test_list_courses_filter(self):
        """Filtering by is_certified=False returns only the Golang course."""
        url = f"{reverse('course-list')}?{urlencode({'is_certified': 'False'})}"
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["data"]["results"][0]["name"], "Golang course for beginners - 2023")
        
    def test_retrieve_course(self):
        """A single course can be retrieved by id."""
        url = reverse("course-detail", args=[self.course1.id])
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        
    def test_partial_update_course(self):
        """PATCH updates a single field of a course."""
        data = {
            "is_available": False
        }
        url = reverse("course-detail", args=[self.course1.id])
        response = self.client.patch(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["data"]["is_available"], False)
        
    def test_update_course(self):
        """PUT replaces the writable fields of a course."""
        data = {
            "is_available": False,
            "description": f"Django Course by {self.course1.get_instructor_fullname}",
        }
        url = reverse("course-detail", args=[self.course1.id])
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["data"]["description"], "Django Course by John Doe")
70368843859 | from flask import Flask, render_template, request
from difflib import SequenceMatcher
app = Flask(__name__)
@app.get("/")
def form_get():
    """Serve the text-comparison input form."""
    return render_template('input.html')
@app.post("/output.html")
def form_post():
    """Handle the submitted form: compute the similarity between the two
    texts and render the output page with a percentage string."""
    # default to "" so a missing form field cannot crash SequenceMatcher
    left_text = request.form.get('left_box', '')
    right_text = request.form.get('right_box', '')
    print(left_text, right_text)

    # SequenceMatcher ratio is in [0, 1]
    similarity = SequenceMatcher(None, left_text, right_text).ratio()
    # round *after* scaling to percent (2 decimal places) to avoid float
    # artifacts such as "83.33000000000001%"
    similarity_percent = str(round(similarity * 100, 2)) + "%"

    return render_template('output.html', left_output=left_text,
                           right_output=right_text,
                           similarity_output=similarity_percent)
import requests
import json

# Load the default periodic-table payload shipped next to this script.
with open("./default_table.json", "r") as f:
    body = json.load(f)

# Post it to the local savetable endpoint; a timeout makes the script fail
# fast instead of hanging forever when the dev server is not running.
response = requests.post(
    url="http://localhost:3000/api/savetable",
    json=body,
    timeout=30,
)

print("status code", response.status_code)
print(response.content)
| erietz/periodic-table | scripts/default_table/default_table.py | default_table.py | py | 274 | python | en | code | 0 | github-code | 13 |
37263088031 | import maya.api.OpenMaya as om
def getDependNode(name):
    """Return the dependency node (MObject) matching *name*, or None.

    Displays a Maya error when the name resolves to zero or several nodes.
    """
    try:
        selection_list = om.MSelectionList()
        selection_list.add(name)
        return selection_list.getDependNode(0)
    # except Exception (not bare except) so SystemExit/KeyboardInterrupt
    # are not swallowed; MSelectionList.add raises on no/ambiguous match
    except Exception:
        om.MGlobal.displayError('No object matches or more than one object matches name: {0}'.format(name))
def getDagPath(name):
    """Return the MDagPath matching *name*, or None.

    Displays a Maya error when the name resolves to zero or several nodes.
    """
    try:
        selection_list = om.MSelectionList()
        selection_list.add(name)
        return selection_list.getDagPath(0)
    # except Exception (not bare except) so SystemExit/KeyboardInterrupt
    # are not swallowed; MSelectionList.add raises on no/ambiguous match
    except Exception:
        om.MGlobal.displayError('No object matches or more than one object matches name: {0}'.format(name))
def getDagPathNode(name):
    """Return the (MDagPath, component MObject) pair for *name*, or None.

    Displays a Maya error when the name resolves to zero or several nodes.
    """
    try:
        selection_list = om.MSelectionList()
        selection_list.add(name)
        dag, mObject = selection_list.getComponent(0)
        return dag, mObject
    # except Exception (not bare except) so SystemExit/KeyboardInterrupt
    # are not swallowed; MSelectionList.add raises on no/ambiguous match
    except Exception:
        om.MGlobal.displayError('No object matches or more than one object matches name: {0}'.format(name))
def getPlug(node, attribute):
    """Return the MPlug for *attribute* on transform *node*.

    NOTE(review): implicitly returns None when *node* is not a transform
    (or when getDagPath failed) - confirm callers handle that.
    """
    obj = getDagPath(node)
    if obj.hasFn(om.MFn.kTransform):
        transform_fn = om.MFnTransform(obj)
        plug = transform_fn.findPlug(attribute, False)
        return plug
def getAttr(node, attribute):
    """Read a numeric attribute from a transform node.

    Returns a list of floats for compound plugs (e.g. translate) and a
    single float otherwise. Assumes the attribute is double-valued.
    """
    plug = getPlug(node, attribute)
    if plug.isCompound:
        # collect each child plug's value (e.g. translateX/Y/Z)
        numChildren = plug.numChildren()
        value = []
        for i in range(numChildren):
            child_plug = plug.child(i)
            value.append(child_plug.asDouble())
        return value
    else:
        return plug.asDouble()
def setAttr(node, attribute, value):
    """Write *value* to *attribute* on *node*.

    For a compound plug *value* must be a sequence whose elements are
    written to the corresponding child plugs; otherwise a single double.
    """
    plug = getPlug(node, attribute)
    if not plug.isCompound:
        plug.setDouble(value)
        return
    # Compound plug: write each component to the matching child plug.
    for index, component in enumerate(value):
        plug.child(index).setDouble(component)
def transformGetTranslation(node):
    """Return the world-space translation of *node* as an MPoint."""
    dag_path, _component = getDagPathNode(node)
    transform_fn = om.MFnTransform(dag_path)
    return om.MPoint(transform_fn.translation(om.MSpace.kWorld))
def transformSetTranslation(node, value):
    """Set the local-space translation components of *node* from *value*.

    Only the first len(value) components are overwritten; does nothing
    when *node* is not a transform.
    """
    obj = getDagPath(node)
    if not obj.hasFn(om.MFn.kTransform):
        return
    transform_fn = om.MFnTransform(obj)
    translation = transform_fn.translation(om.MSpace.kTransform)
    for axis, component in enumerate(value):
        translation[axis] = component
    transform_fn.setTranslation(translation, om.MSpace.kTransform)
def getClosestVertex(mesh, pos, select=True):
    """Find the vertex of *mesh* closest to point *pos*.

    Returns (MFnSingleIndexedComponent, vertex id). When *select* is
    True the found vertex is also made the active selection.

    NOTE(review): getClosestPoint is queried in world space, but
    mesh_fn.getPoint() is called without a space argument (object space
    by default) before measuring distanceTo(pos) -- this looks correct
    only for untransformed meshes; confirm against transformed meshes.
    """
    mesh_dag, mesh_obj = getDagPathNode(mesh)
    mesh_fn = om.MFnMesh(mesh_dag)
    # Closest point on the surface and the id of the face it lies on.
    closest_point, id = mesh_fn.getClosestPoint(pos, om.MSpace.kWorld)
    # Candidate vertices are the corners of that face.
    mesh_vtx = mesh_fn.getPolygonVertices(id)
    closest_id = mesh_vtx[0]
    closest_dist = mesh_fn.getPoint(mesh_vtx[0]).distanceTo(pos)
    # Linear scan over the remaining face corners for the nearest one.
    for vtx_id in mesh_vtx[1:]:
        vtx_pos = mesh_fn.getPoint(vtx_id)
        dist = vtx_pos.distanceTo(pos)
        if dist < closest_dist:
            closest_dist = dist
            closest_id = vtx_id
    # Wrap the winning vertex id in a component object.
    mfn_components = om.MFnSingleIndexedComponent(mesh_obj)
    mfn_object = mfn_components.create(om.MFn.kMeshVertComponent)
    mfn_components.addElements([closest_id])
    if select:
        selection_list = om.MSelectionList()
        selection_list.add((mesh_dag, mfn_object))
        om.MGlobal.setActiveSelectionList(selection_list)
    return mfn_components, mfn_components.element(0)
| asheiwa/ah_maya_api | ah-maya-api/learn/mayaAPI_basics.py | mayaAPI_basics.py | py | 3,306 | python | en | code | 0 | github-code | 13 |
32617702735 | import os,json,shutil
class Colors: fail = '\033[91m' ; good = '\033[92m' ; end = '\033[0m'
# TODO: work on Tables()
class Tables:
    """Directory-backed table store.

    Each table is a folder under ``tables/`` next to this module; the
    table's row structure (row name -> python type name) is persisted in
    the folder's ``__init__.json`` file.
    """
    def __init__(self,tableName:str):
        self.__thisDir = os.path.dirname(__file__)+'/tables/'
        self.__tableName = tableName.strip()
        self.__tableDir = self.__thisDir+self.__tableName+'/'
        self.__tableStructurePath = self.__thisDir+self.__tableName+'/__init__.json'
        # True while the table directory exists on disk
        self.__table = True
        # create tables folder if not exists
        if not os.path.isdir(self.__thisDir): os.mkdir(self.__thisDir)
        # check if table exists
        if not os.path.isdir(self.__tableDir): self.__table = False
    def createTable(self,tableStructure:dict):
        """Create the table directory and persist *tableStructure*.

        Returns a {'status': bool, 'msg': str} result dict.
        """
        # BUGFIX: the existence check was inverted (`if not self.__table`),
        # which reported "already exists" precisely when the table did NOT
        # exist, so no table could ever be created.
        if self.__table: return {'status':False,'msg':f'Table {self.__tableName} already exists'}
        # validate every declared row type before touching the disk
        acceptedRowTypes = ['str','list','dict','int','float']
        for rowName,rowType in tableStructure.items():
            if rowType not in acceptedRowTypes:
                print(f'{Colors.fail}Wrong table row type {rowType} use python data types {acceptedRowTypes}{Colors.end}')
                return {'status':False,'msg':f'Wrong table row type {rowType}'}
        #create table directory
        os.mkdir(self.__tableDir)
        # create table structure (json init file)
        open(self.__tableStructurePath,'w').write(json.dumps(tableStructure))
        # the table now exists on disk; keep the cached flag in sync
        self.__table = True
        return {'status':True,'msg':f'Table {self.__tableName} was created'}
    def tableStructure(self,updateStructure=False,addStructure=False):
        """Return the table's structure dict; optionally edit it interactively.

        With addStructure=True, prompts for new rows to append; with
        updateStructure=True, prompts to rename/retype each existing row.
        Changes are saved back to the table's ``__init__.json``.
        """
        # check if tables exists, if not return false status
        if not self.__table: return {'status':False,'msg':f'Table {self.__tableName} do not exists please use .createTable() to create a new one'}
        #----
        removeAtTheEnd,addAtTheEnd = [],[]
        acceptedRowTypes = ['str','list','dict','int','float']
        tableStructure = json.loads(open(self.__tableStructurePath,'r').read())
        # if add structure is true
        if addStructure:
            addingStructure = True
            while addingStructure:
                newStructureInput = input('Add new table structure use this format using double quotes {"rowName":"rowDataType"} or keep empty to skip: ').strip()
                if not newStructureInput: addingStructure = False ; continue
                try:
                    newStructure = json.loads(newStructureInput)
                    _name,_type = list(newStructure)[0],newStructure[list(newStructure)[0]]
                    # if row type not accepted
                    if _type not in acceptedRowTypes:
                        return {'status':False,'msg':f'Wrong data type >>{_type}<< use python data types: {acceptedRowTypes}'}
                    # if this row name already exists
                    if _name in tableStructure:
                        print(f'{Colors.fail}Structure {_name} already in this table structure{Colors.end}') ; continue
                    # if everything passed add to table structure
                    tableStructure[_name]=_type
                except: return {'status':False,'msg':'Error adding structure make sure to use double quotes example: {"rowName":"rowDataType"}'}
        # if update structure is true
        if updateStructure:
            for rowName,rowType in tableStructure.items():
                updateInput = input('Update {'+'"'+rowName+'":"'+rowType+'"'+'} use this format using double quotes {"rowName":"rowDataType"} or keep empty to skip: ').strip()
                if updateInput == '': continue
                try:
                    thisRowChanges = json.loads(updateInput)
                    _name,_type = list(thisRowChanges)[0],thisRowChanges[list(thisRowChanges)[0]]
                    # if row type not accepted
                    if _type not in acceptedRowTypes:
                        return {'status':False,'msg':f'Wrong data type >>{_type}<< use python data types: {acceptedRowTypes}'}
                    # renames are deferred because a dict cannot change size
                    # while being iterated; same-name entries just change type
                    if _name != rowName:
                        removeAtTheEnd.append(rowName)
                        addAtTheEnd.append({'name':_name,'type':_type})
                    elif _name == rowName: tableStructure[rowName] = _type
                except: return {'status':False,'msg':'Error make sure to use double quotes example: {"rowName":"rowDataType"}'}
        # apply the deferred renames collected above
        for name in removeAtTheEnd: del tableStructure[name]
        for _dict in addAtTheEnd: tableStructure[_dict['name']] = _dict['type']
        # save changes
        open(self.__tableStructurePath,'w').write(json.dumps(tableStructure))
        return tableStructure
    def reset(self):
        """Delete every table in the tables directory after confirmation."""
        # check if tables exists, if not return false status
        if not self.__table: return {'status':False,'msg':f'Table {self.__tableName} do not exists please use .createTable() to create a new one'}
        #----
        answer = input('This will delete all table in the tables directory ok/no? ').strip().lower()
        if answer == 'ok' or answer == 'yes':
            for table in os.listdir(self.__thisDir):
                shutil.rmtree(self.__thisDir+table)
            return {'status':True,'msg':'All tables were deleted'}
# TODO: work on Collections()
class Collections:
    """JSON-file backed collection store kept under ``collections/``.

    Each collection is a single json file; most methods treat its
    contents as a dict keyed by item name.
    """
    def __init__(self, collectionName: str, defaultData='list'):
        self.__thisDir = os.path.dirname(__file__) + '/collections/'
        self.collectionName = collectionName
        self.collectionPath = self.__thisDir + self.collectionName + '.json'
        # make sure the collections folder exists
        if not os.path.isdir(self.__thisDir):
            os.mkdir(self.__thisDir)
        # initialise this collection's json file when it is missing
        if not os.path.exists(self.collectionPath):
            initial = '[]' if defaultData == 'list' else '{}'
            open(self.collectionPath, 'w').write(initial)

    def __load(self):
        # read and parse the backing json file
        return json.loads(open(self.collectionPath, 'r').read())

    def __dump(self, data):
        # pretty-print the data back to the backing json file
        open(self.collectionPath, 'w').write(json.dumps(data, indent=4))

    def add(self, data: dict):
        """Merge the key/value pairs of *data* into the collection."""
        stored = self.__load()
        try:
            for key in data:
                stored[key] = data[key]
        except:
            return {'status': False, 'msg': f'Error inserting data into collection data'}
        self.__dump(stored)
        return {'status': True, 'msg': f'Data inserted into collection data'}

    def delete(self, keyName: str):
        """Remove *keyName* from the collection if present."""
        stored = self.__load()
        if keyName not in stored:
            return {'status': False, 'msg': f'{keyName} not in collection data'}
        del stored[keyName]
        self.__dump(stored)
        return {'status': True, 'msg': f'{keyName} deleted'}

    def find(self, keyName: str):
        """Return the value stored under *keyName*, or an error dict."""
        stored = self.__load()
        if keyName in stored:
            return stored[keyName]
        return {'status': False, 'msg': f'{keyName} not in collection data'}

    def read(self):
        """Return the whole parsed collection."""
        return self.__load()

    def update(self, newData):
        """Replace the entire collection with *newData*; True on success."""
        try:
            self.__dump(newData)
            return True
        except:
            return False

    def drop(self):
        """Delete this collection's backing file if it exists."""
        if os.path.exists(self.collectionPath):
            os.remove(self.collectionPath)

    def reset(self):
        """Delete every collection file after interactive confirmation."""
        # this will delete all collections from collections directory
        answer = input('All collections will be deleted ok/no?').strip().lower()
        if answer in ('ok', 'yes'):
            for collection in os.listdir(self.__thisDir):
                os.remove(self.__thisDir + collection)
            return {'status': True, 'msg': f'All collections were deleted'}
class JsonObjects:
    """Single-json-file object store kept under ``json/`` next to this module."""
    def __init__(self,fileName:str,defaultData='list'):
        self.thisDir = os.path.dirname(__file__)+'/json/'
        self.fileName = fileName
        self.jsonPath = f'{self.thisDir+fileName}.json'
        # False until .read() loads the file; then holds the parsed data
        self.jsonData = False
        # create json folder if not exists
        if not os.path.isdir(self.thisDir): os.mkdir(self.thisDir)
        # create this json file if not exists
        if not os.path.exists(self.jsonPath):
            if defaultData == 'list': open(self.jsonPath,'w').write('[]')
            else: open(self.jsonPath,'w').write('{}')
    def read(self):
        """Load and return the stored data, caching it on self.jsonData."""
        if os.path.exists(self.jsonPath):
            jsonData = json.loads(open(self.jsonPath,'r').read())
            self.jsonData = jsonData ; return jsonData
        else: print(f'{Colors.fail} The json object {self.fileName} do not exists{Colors.end}') ; return False
    def drop(self):
        """Delete the backing json file. Returns True on success."""
        if os.path.exists(self.jsonPath): os.remove(self.jsonPath) ; return True
        else: print(f'{Colors.fail} The json object {self.fileName} do not exists{Colors.end}') ; return False
    def update(self):
        """Write self.jsonData back to disk. Requires a prior .read()."""
        # BUGFIX: the old truthiness test (`if self.jsonData:`) rejected
        # legitimately-empty data ([] or {}) loaded from disk; compare
        # against the False sentinel instead so empty objects can be saved.
        if self.jsonData is not False: open(self.jsonPath,'w').write(json.dumps(self.jsonData,indent=4)) ; return True
        else: print(f'{Colors.fail} Use .read() to read the json data before you can update it.{Colors.end}') ; return False
    def reset(self):
        """Delete every stored json object after interactive confirmation."""
        # this will delete all json objects
        answer = input('All the json objects will be deleted ok/no?').strip().lower()
        if answer == 'ok' or answer == 'yes':
            for obj in os.listdir(self.thisDir): os.remove(self.thisDir+obj)
            return True
# table = Tables('test')
# collection = Collections('test')
# jsonObject = JsonObjects('stockSymbols',defaultData='list')
# symbols = jsonObject.read()
# print(symbols)
# TODO: work on tables()
# TODO: work on collection() | anthony16t/mystorage | mystorage/__init__.py | __init__.py | py | 9,956 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.