blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
29034e773ab4307de24f93b88bf6d934913a6114 | Python | markieboy/hacker.org | /challenge/ExecutionStyle.py | UTF-8 | 1,537 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python3
# Q: http://www.hacker.org/challenge/chal.php?id=194
# A: http://www.hacker.org/challenge/chal.php?answer=teddy+bear&id=194&go=Submit
import os.path
import subprocess
import tempfile
import urllib.request
from PIL import Image
def main():
    """Solve hacker.org challenge 194 ("Execution Style").

    Downloads a PNG whose pixel values are the raw bytes of a Windows
    executable, reconstructs that executable, then runs the resulting
    chain of generated programs (exe -> perl -> python2 -> C -> hvm)
    to produce the challenge answer.
    """
    # urlretrieve returns (local_filename, headers); we only need the path.
    local_filename = urllib.request.urlretrieve('http://www.hacker.org/challenge/img/Doll2.png')[0]
    image = Image.open(local_filename)
    # BUG FIX: tempfile.tempdir is None unless explicitly set, which made
    # the os.path.join() calls below fail; gettempdir() returns the real
    # platform temp directory.
    folder = tempfile.gettempdir()
    # Rebuild the executable: each pixel value is one byte of the binary.
    # Using the NamedTemporaryFile handle directly avoids leaking the
    # handle the original code left open before re-opening by name.
    with tempfile.NamedTemporaryFile(dir=folder, delete=False) as exe_file:
        for y in range(image.size[1]):
            for x in range(image.size[0]):
                pixel = image.getpixel((x, y))
                exe_file.write(bytes([pixel]))
        exe_name = exe_file.name
    # Run the reconstructed Windows binary; it emits Doll2.pl into `folder`.
    subprocess.run(['wine', exe_name])
    # Each stage's stdout becomes the next stage's source file.
    python_file_name = os.path.join(folder, 'temp.py')
    with open(python_file_name, 'w') as python_file:
        subprocess.run(['perl', os.path.join(folder, 'Doll2.pl')], stdout=python_file)
    c_file_name = os.path.join(folder, 'temp.c')
    with open(c_file_name, 'w') as c_file:
        subprocess.run(['python2.7', python_file_name], stdout=c_file)
    out_file_name = os.path.join(folder, 'temp.out')
    subprocess.run(['gcc', c_file_name, '-o', out_file_name])
    hvm_file_name = os.path.join(folder, 'temp.hvm')
    with open(hvm_file_name, 'w') as hvm_file:
        subprocess.run([out_file_name], stdout=hvm_file)
    # Final stage: interpret the generated HVM program.
    subprocess.run(['../hackvm.py', hvm_file_name])
# Run the full challenge pipeline when this file is executed as a script.
if __name__ == '__main__':
    main()
| true |
cc23502a1c812ebed3a43ae94da0e369943da7be | Python | youoldmiyoung/rainer-bot-rilke | /delete.py | UTF-8 | 70 | 3.125 | 3 | [] | no_license | for x in range (131, 150):
z = str(x)
print('rilke' + z + ',') | true |
118774f99642dd6451b2cd92743a6fe28da0217c | Python | lfam/khal | /tests/aux.py | UTF-8 | 1,371 | 2.734375 | 3 | [
"MIT"
] | permissive | import icalendar
import os
def normalize_component(x):
    """Parse an iCalendar string and reduce it to a canonical,
    order-independent form, so two components can be compared for
    semantic equality."""
    component = icalendar.cal.Component.from_ical(x)

    def canonical(node):
        # Serialize this node's own properties in sorted order.
        lines = icalendar.cal.Contentlines()
        for prop_name, prop_value in node.property_items(sorted=True, recursive=False):
            lines.append(node.content_line(prop_name, prop_value, sorted=True))
        lines.append('')
        # Sub-components are wrapped in a frozenset so their order is irrelevant.
        children = frozenset(canonical(child) for child in node.subcomponents)
        return (node.name, lines.to_ical(), children)

    return canonical(component)
def _get_text(event_name):
    """Return the decoded UTF-8 text of the fixture tests/ics/<event_name>.ics."""
    directory = '/'.join(__file__.split('/')[:-1]) + '/ics/'
    if directory == '/ics/':
        # BUG FIX: the original wrote `directory == './ics/'` — a no-op
        # comparison instead of an assignment — so the fallback path for a
        # bare __file__ was never applied.
        directory = './ics/'
    # `with` closes the handle instead of leaking it (original used a bare open()).
    with open(os.path.join(directory, event_name + '.ics'), 'rb') as f:
        return f.read().decode('utf-8')
def _get_vevent_file(event_path):
    """Load tests/ics/<event_path>.ics and return its first VEVENT component.

    Returns None when the calendar contains no VEVENT.
    """
    directory = '/'.join(__file__.split('/')[:-1]) + '/ics/'
    # `with` closes the fixture file instead of leaking the handle.
    with open(os.path.join(directory, event_path + '.ics'), 'rb') as f:
        ical = icalendar.Calendar.from_ical(f.read())
    for component in ical.walk():
        if component.name == 'VEVENT':
            return component
def _get_all_vevents_file(event_path):
    """Yield every VEVENT component found in tests/ics/<event_path>.ics."""
    directory = '/'.join(__file__.split('/')[:-1]) + '/ics/'
    # `with` closes the fixture file instead of leaking the handle.
    with open(os.path.join(directory, event_path + '.ics'), 'rb') as f:
        ical = icalendar.Calendar.from_ical(f.read())
    for component in ical.walk():
        if component.name == 'VEVENT':
            yield component
| true |
aa87b0d816dbe876e32e47b9aa9d1b6f72b6bc5e | Python | justonemoresideproject/python-exercises | /27_titleize/titleize.py | UTF-8 | 557 | 4.46875 | 4 | [] | no_license | def titleize(phrase):
    # `count` tracks pending word-starts: 1 means "the next letter begins
    # a word and must be capitalized", 0 means we are inside a word.
    count = 1
    newPhrase = []
    for letter in phrase:
        if count == 1:
            # First letter of a word: emit upper-case.
            newPhrase.append(letter.upper())
            count-=1
        elif letter == ' ':
            # A space ends the word; the next letter starts a new one.
            count+=1
            newPhrase.append(' ')
        else:
            # Interior letter: force lower-case.
            newPhrase.append(letter.lower())
    return "".join(newPhrase)
    # NOTE(review): the string below sits AFTER the return statement, so it
    # is dead code rather than this function's docstring; it was presumably
    # meant to be the first statement of the function.
    """Return phrase in title case (each word capitalized).
        >>> titleize('this is awesome')
        'This Is Awesome'
        >>> titleize('oNLy cAPITALIZe fIRSt')
        'Only Capitalize First'
    """
| true |
700f60450df5014008a9452281ba6217d7bce2f0 | Python | leonlopezanto/Atril-Inteligente | /Server/RedNeuronal.py | UTF-8 | 1,965 | 3.203125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 17:41:50 2019
@author: Antonio
"""
import Normalizador as norm
from keras.models import load_model
import os
import numpy as np
# Silence all warnings by monkey-patching warnings.warn with a no-op.
def warn(*args, **kwargs):
    """No-op replacement for warnings.warn; swallows every warning."""
    pass
import warnings
warnings.warn = warn
def Normalizador(modelName, cqt):
    """Load the stored normalisation values and normalise a CQT signal.

    :param modelName: model name (kept for interface compatibility; not
        used by the current implementation)
    :param cqt: raw CQT data
    :return: normalised CQT data
    """
    scaler = norm.Normalizador()
    # Load the normalisation constants saved next to the network files.
    scaler.loadValues('./NeuralNetwork/')
    # Apply them to the incoming signal.
    return scaler.normalize(cqt)
def cargarModelo(name):
    """Load a Keras model and its weights from ./NeuralNetwork/.

    :param name: base name of the model files (<name>.h5 and
        <name>_weights.h5)
    :return: the loaded model, or -1 if no matching file is found
    """
    dirModelos = './NeuralNetwork/'
    for _root, _dirs, filenames in os.walk(dirModelos):
        for filename in filenames:
            # The first file whose name contains `name` triggers the load.
            if name in filename:
                print('\nCargando modelo...')
                model = load_model(dirModelos + name + '.h5')
                model.load_weights(dirModelos + name + '_weights.h5')
                return model
    # No candidate file matched anywhere under the models directory.
    print('Modelo no encontrado')
    return -1
def predecir(model, cqt, umbral=0.9):
    """Run the model on a normalised CQT and binarise the output.

    :param model: object exposing ``predict`` (e.g. a Keras model)
    :param cqt: normalised CQT input
    :param umbral: threshold above which an activation counts as 1.0
    :return: transposed array of 0.0/1.0 values
    """
    raw = model.predict(cqt)
    # Squeeze singleton dimensions, then threshold into hard 0/1 floats.
    binarised = (np.squeeze(raw) > umbral) * 1.0
    return binarised.T
| true |
bd9fd72fb331d9f71dca91a46a7ccf5e979bb45e | Python | manish59/Bootstrapping | /totaldll.py | UTF-8 | 1,369 | 2.84375 | 3 | [] | no_license | import re
def remove_white_space(string):
    """Return `string` with every space and newline character removed."""
    # str.replace runs in C and replaces the original character-by-character
    # string-concatenation loop (which was quadratic).
    return string.replace(" ", "").replace("\n", "")
class _dlls:
    """Parse a Volatility "dlllist" dump and single out DLLs loaded from
    outside C:\\WINDOWS as suspects.

    NOTE(review): `dict_of_locations` and `d_of_suspects` are CLASS
    attributes, so every _dlls instance shares — and keeps appending to —
    the same two dictionaries.
    """
    # pid -> [process name, dll location], for every process in the dump
    dict_of_locations={}
    # pid -> [process name, dll location], restricted to suspect locations
    d_of_suspects={}# list of suspected pids stored in this dictionary
    def __init__(self,file_name):
        """Read `file_name` and populate both dictionaries.

        Assumes lines of the form "<name> pid <pid>" followed by a line
        whose text from column 16 onward is the dll location — TODO
        confirm against the actual dump format.
        """
        self.file_name=file_name
        buffer=open(self.file_name)
        temp_buffer=buffer.readlines()
        for i in range(len(temp_buffer)):
            name=""
            pid=""
            location=""
            a=temp_buffer[i].find("pid")
            if a>=0:
                # Text before "pid" is the name; after "pid " is the pid;
                # the location is read from the NEXT line.
                name=temp_buffer[i][:a]
                pid=temp_buffer[i][a+4:]
                location=temp_buffer[i+1][15:]
                #print name,pid,location
                self.dict_of_locations.setdefault(remove_white_space(pid),[]).append(remove_white_space(name))
                self.dict_of_locations.setdefault(remove_white_space(pid), []).append(remove_white_space(location))
        for i in self.dict_of_locations:
            # Anything not loaded from under C:\WINDOWS is treated as suspect.
            aaa=self.dict_of_locations[i][1].find("C:\WINDOWS")
            if aaa<0:
                name=self.dict_of_locations[i][0]
                location=self.dict_of_locations[i][1]
                self.d_of_suspects.setdefault(remove_white_space(i),[]).append(remove_white_space(name))
                self.d_of_suspects.setdefault(remove_white_space(i), []).append(remove_white_space(location))
# Smoke test: parse the "dlllist" dump in the current directory and print
# every pid with its dll location.
# NOTE(review): `print i, ...` is Python 2 statement syntax — this module
# cannot run under Python 3 as written.
if __name__=="__main__":
    a=_dlls("dlllist")
    for i in a.dict_of_locations:
        print i,a.dict_of_locations[i][1]
| true |
1c9b218a5bbd11b5f00fa99bb0fb9493d8179f0a | Python | Diptojyoti/Project | /ocr.py | UTF-8 | 2,306 | 2.640625 | 3 | [] | no_license | #This part of the program helps read the cash amount given
#and converts the amount given from string to numerical int
# import the necessary packages
from enchant import DictWithPWL
import numpy as np
import enchant
from PIL import Image
import pytesseract
from skimage.segmentation import clear_border
from imutils import contours
import imutils
import argparse
import cv2
import os
import word2number
#finds the read line in the check which contains the amount paid
def matches(lines):
    """Return the line from `lines` that contains the most number-words.

    The vocabulary of (upper-case) number-words is read from num.txt; the
    line whose upper-cased space-separated tokens hit that vocabulary most
    often is assumed to be the cheque's written-amount line.

    :param lines: non-empty list of OCR'd text lines
    :return: the best-scoring line (ties resolved by first occurrence)
    """
    # `with` closes the vocabulary file (the original leaked the handle),
    # and a set gives O(1) membership tests instead of list scans.
    with open("num.txt", "r") as f:
        vocabulary = set(line.rstrip() for line in f)
    score = {}
    for line in lines:
        score[line] = 0
        for word in line.upper().split(' '):
            if word in vocabulary:
                score[line] += 1
    return max(score, key=score.get)
def readAmount(imgPath, preprocess):
    """OCR the written-amount line of a cheque image.

    :param imgPath: path of the cheque image
    :param preprocess: "thresh" for Otsu binarisation, "blur" for median
        blurring, anything else for no extra preprocessing
    :return: the spell-checked amount line ('' if nothing was extracted)
    """
    #print(os.path.join(root, name))
    image = cv2.imread(imgPath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Remove some noise with a 1x1 dilate/erode pass.
    kernel = np.ones((1, 1), np.uint8)
    image = cv2.dilate(image, kernel, iterations=1)
    image = cv2.erode(image, kernel, iterations=1)
    if preprocess == "thresh":
        gray = cv2.threshold(gray, 0, 255,
            cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # Median blurring as an alternative noise-removal strategy.
    elif preprocess == "blur":
        gray = cv2.medianBlur(gray, 3)
    # Write the grayscale image to disk as a temporary file so we can
    # apply OCR to it; the pid keeps concurrent runs from colliding.
    filename = "{}.png".format(os.getpid())
    cv2.imwrite(filename, gray)
    # Load the image, apply OCR, then pick the line that looks most like
    # a written amount.
    Spellchecked=''
    result = pytesseract.image_to_string(Image.open(filename))
    lines=result.split('\n')
    probableLines= matches(lines)
    # Spell check and auto-correct the extracted line against a dictionary
    # extended with the number-words in num.txt.
    if len(probableLines) > 0:
        from enchant.checker import SpellChecker
        chkr = SpellChecker(DictWithPWL("en_US", "num.txt"))
        chkr.set_text(probableLines)
        for err in chkr:
            sug = err.suggest()
            if len(sug)>0:
                err.replace(sug[0])
        Spellchecked = chkr.get_text()
    words=Spellchecked.split(' ')
    # Remove the first word containing an unreadable character ('*').
    star='*'
    for word in words:
        if star in word:
            Spellchecked=Spellchecked.replace(word, ' ')
            break
    # Delete the temporary OCR image before returning.
    os.remove(filename)
    return(Spellchecked)
| true |
b70b9386eb46c6e975b638a79089548098a5dbf2 | Python | lemurey/advent_of_code | /2017/day19.py | UTF-8 | 2,532 | 3.203125 | 3 | [] | no_license | from aoc_utilities import get_instructions
import os
from collections import deque
class Network:
    """Routing-diagram walker (Advent of Code 2017, day 19).

    Grid positions are addressed with complex numbers: the real part is
    the row index and the imaginary part the column index, so turning
    left/right is a multiplication by -1j/+1j.
    """
    def __init__(self, grid):
        # grid: list of rows; empty cells are falsy ('' as built by make_grid).
        self.grid = grid
    def _get(self, postion):
        """Return the cell at `postion` (complex), or None when out of bounds."""
        x, y = int(postion.real), int(postion.imag)
        if not (0 <= x < len(self.grid)):
            return None
        if not (0 <= y < len(self.grid[0])):
            return None
        return self.grid[x][y]
    def __getitem__(self, key):
        """Index the grid with either a complex number or an (x, y) tuple."""
        if isinstance(key, complex):
            return self._get(key)
        elif isinstance(key, tuple):
            return self._get(complex(*key))
        else:
            msg = 'only keys that are complex or tuple allowed'
            raise NotImplementedError(msg)
    def _neighbors(self, node, direction):
        """Yield (next_node, next_direction) candidates in priority order:
        straight ahead first, then the left/right turns; a final
        (node, None) pair signals the end of the path."""
        if self[node + direction]:
            yield node + direction, direction
        for turn in [1j, -1j]:
            if self[node + turn * direction]:
                yield node + turn * direction, turn * direction
        yield node, None
    def _find_start(self):
        """Return the entry point: the only non-empty cell in the top row."""
        for i, char in enumerate(self.grid[0]):
            if char:
                return complex(0, i)
    def follow_path(self):
        """Walk the diagram from the start until a dead end is reached.

        Returns (letters, steps): the letters collected along the way and
        the total number of steps taken (the start cell counts as step 1).
        """
        node = self._find_start()
        self.visited = set()
        seen = []
        direction = 1  # start moving down (increasing row)
        steps = 1
        self.end = None
        iterations = 0
        while True:
            self.visited.add(node)
            # Only the first viable candidate is taken (note the break).
            for node, direction in self._neighbors(node, direction):
                if direction is None:
                    self.end = node
                    return ''.join(seen), steps
                if self[node].isalpha():
                    seen.append(self[node])
                steps += 1
                break
def make_grid(data):
    """Convert the raw puzzle text into a 2-D list of characters.

    Every row is padded to the width of the longest input line; spaces
    become '' so that empty cells are falsy.
    """
    rows = data.split('\n')
    width = max(len(row) for row in rows)
    grid = []
    for row in rows:
        cells = [ch if ch != ' ' else '' for ch in row]
        # Pad short rows so every row has the same length.
        cells.extend([''] * (width - len(cells)))
        grid.append(cells)
    return grid
def get_answer(data, part2=False):
    """Walk the routing diagram in `data`.

    Returns the letters seen along the path (part 1) or, when `part2` is
    True, the total number of steps taken.
    """
    network = Network(make_grid(data))
    letters, steps = network.follow_path()
    return steps if part2 else letters
if __name__ == '__main__':
    # Derive the puzzle day from this file's name (e.g. "day19.py" -> 19).
    day = int(os.path.basename(__file__).split('.')[0].split('y')[1])
    inputs = get_instructions(day)
    print(get_answer(inputs, part2=False))
    print(get_answer(inputs, part2=True))
| true |
20494050fcf56f8f64f0c21299e385abedc5637f | Python | JBProf/diu-eil-project123-lycee_hessel_aubrac | /grilles.py | UTF-8 | 8,832 | 3.359375 | 3 | [] | no_license | import numpy as np
def grille_zero(grille):
    """Return a fresh 9x9 integer numpy grid filled with zeros.

    The `grille` argument is ignored (kept for interface compatibility):
    the parameter is immediately rebound, so the caller's grid is untouched.
    """
    return np.array([0] * 81).reshape(9, 9)
def case_vers_numero(i,j):
    """Map the cell at row `i`, column `j` to its 0-80 linear index."""
    return 9 * i + j
def numero_vers_case(k):
    """Map a 0-80 linear index back to its (row, column) pair."""
    return divmod(k, 9)
def liste_vers_grille(liste):
    """Build a 9x9 grid (list of lists) from a flat list of cell values.

    Values are laid out row by row; if fewer than 81 values are supplied,
    the remaining cells stay 0.
    """
    grille = [[0] * 9 for _ in range(9)]
    for index, valeur in enumerate(liste):
        ligne, colonne = divmod(index, 9)
        grille[ligne][colonne] = valeur
    return grille
# A grid is entered as a flat list of 81 values.
# The next two lists are complete and correct grids.
liste_pleine_1=[5,3,4, 6,7,8, 9,1,2,
                6,7,2, 1,9,5, 3,4,8,
                1,9,8, 3,4,2, 5,6,7,
                8,5,9, 7,6,1, 4,2,3,
                4,2,6, 8,5,3, 7,9,1,
                7,1,3, 9,2,4, 8,5,6,
                9,6,1, 5,3,7, 2,8,4,
                2,8,7, 4,1,9, 6,3,5,
                3,4,5, 2,8,6, 1,7,9]
liste_pleine_2=[4,1,5, 6,3,8, 9,7,2,
                3,6,2, 4,7,9, 1,8,5,
                7,8,9, 2,1,5, 3,6,4,
                9,2,6, 3,4,1, 7,5,8,
                1,3,8, 7,5,6, 4,2,9,
                5,7,4, 9,8,2, 6,3,1,
                2,5,7, 1,6,4, 8,9,3,
                8,4,3, 5,9,7, 2,1,6,
                6,9,1, 8,2,3, 5,4,7]
# This grid is incorrect (its last-but-one row repeats the digit 3).
liste_fausse=[4,8,3, 9,5,7, 6,1,2,
              7,5,6, 1,2,8, 4,9,3,
              1,9,2, 4,3,6, 5,7,8,
              2,3,1, 5,6,4, 7,8,9,
              5,7,4, 8,1,9, 2,3,6,
              8,6,9, 2,7,3, 1,4,5,
              6,4,7, 3,8,2, 9,5,1,
              9,1,8, 6,4,5, 3,2,3,
              3,2,5, 7,9,1, 8,6,4]
# Example starting grid, kept for reference:
#liste_depart= [3,0,4, 0,8,0, 0,5,0,
#               7,0,0, 0,1,0, 0,0,3,
#               8,0,0, 0,0,2, 6,0,0,
#               0,0,9, 1,0,0, 3,0,5,
#               4,0,5, 3,0,7, 9,0,2,
#               6,0,8, 0,0,9, 7,0,0,
#               0,0,7, 4,0,0, 0,0,6,
#               5,0,0, 0,9,0, 0,0,8,
#               0,4,0, 0,7,0, 5,0,9,]
# Convert the lists into grids:
grille_2=liste_vers_grille(liste_pleine_2)
grille_1=liste_vers_grille(liste_pleine_1)
grille_fausse=liste_vers_grille(liste_fausse)
#grille_incomplète=liste_vers_grille(liste_incomplète)
def afficher_une_grille(grille):
    """Print the grid, one row per line, rendering empty (0) cells as '-'."""
    for ligne in range(9):
        for colonne in range(9):
            valeur = grille[ligne][colonne]
            # Zeros stand for missing digits, so show them as dashes.
            print(valeur if valeur != 0 else "-", end=" ")
        print()
def verification_ligne_grille(grille):
    """Return True when every row of the grid contains each digit 1-9
    exactly once."""
    attendu = list(range(1, 10))
    for ligne in range(9):
        # A row is valid iff its sorted digits are exactly 1..9.
        if sorted(grille[ligne][colonne] for colonne in range(9)) != attendu:
            return False
    return True
def verification_colonne_grille(grille):
    """Return True when every column of the grid contains each digit 1-9
    exactly once."""
    attendu = list(range(1, 10))
    for colonne in range(9):
        # A column is valid iff its sorted digits are exactly 1..9.
        if sorted(grille[ligne][colonne] for ligne in range(9)) != attendu:
            return False
    return True
def verification_carre_grille(grille):
    """Return True when every 3x3 box of the grid contains each digit 1-9
    exactly once."""
    attendu = list(range(1, 10))
    for bloc_ligne in range(0, 9, 3):
        for bloc_colonne in range(0, 9, 3):
            bloc = [grille[bloc_ligne + r][bloc_colonne + c]
                    for r in range(3) for c in range(3)]
            # A box is valid iff its sorted digits are exactly 1..9.
            if sorted(bloc) != attendu:
                return False
    return True
def grille_is_correct(grille):
    """Return True when boxes, columns and rows are all valid."""
    return (verification_carre_grille(grille)
            and verification_colonne_grille(grille)
            and verification_ligne_grille(grille))
#print(grille_is_correct(grille_1))
# on considère que les chiffres manquants ou enlevés seront représentés par des zéros
def chiffres_lignes(i,grille):
    """Return the non-zero digits already present in row `i`."""
    return [grille[i][k] for k in range(9) if grille[i][k] != 0]
def chiffres_colonnes(j,grille):
    """Return the non-zero digits already present in column `j`."""
    return [grille[k][j] for k in range(9) if grille[k][j] != 0]
def chiffres_carré(i,j,grille):
    """Return the non-zero digits already present in the 3x3 box that
    contains cell (i, j)."""
    # (base_ligne, base_colonne) is the top-left corner of the box.
    base_ligne = 3 * (i // 3)
    base_colonne = 3 * (j // 3)
    return [grille[base_ligne + r][base_colonne + c]
            for r in range(3) for c in range(3)
            if grille[base_ligne + r][base_colonne + c] != 0]
def possibilites_de_la_case(k,grille):
    """Return the digits that can legally go in cell number `k` (0-80) of
    `grille`; the list may be empty when the cell is blocked."""
    ligne, colonne = numero_vers_case(k)
    # A cell that already holds a digit keeps it as its only possibility.
    if grille[ligne][colonne] != 0:
        return [grille[ligne][colonne]]
    # Digits already used in the same row, column or 3x3 box are forbidden.
    interdits = (chiffres_lignes(ligne, grille)
                 + chiffres_colonnes(colonne, grille)
                 + chiffres_carré(ligne, colonne, grille))
    return [chiffre for chiffre in range(1, 10) if chiffre not in interdits]
# The functions below enumerate, by backtracking, the grids compatible
# with the starting digits: for each cell 0 <= k <= 80 we keep the
# candidate digits given the choices already made in earlier cells.
possibilites = []  # one list of remaining candidate digits per processed cell
def derniere_valeur():
    """Return the currently selected digit (the last remaining candidate)
    of every cell processed so far."""
    return [choix[-1] for choix in possibilites]
def possibilites_case(k):
    """Return the candidate digits for cell `k`, taking into account both
    the fixed starting grid (`grille_depart`) and the digits already
    chosen for earlier cells during the backtracking search."""
    i,j = numero_vers_case(k)
    if grille_depart[i][j] != 0: # a cell with a starting digit keeps it
        return [grille_depart[i][j]]
    valeur=derniere_valeur() # the digits currently chosen so far
    grille=liste_vers_grille(valeur) # grid holding one candidate combination
    for s in range(9):
        for t in range(9):
            if grille_depart[s][t]!=0: # overlay the fixed starting digits
                grille[s][t]= grille_depart[s][t]
    # chiffres_presents lists every digit sharing a row, column or box with cell k.
    chiffres_presents = chiffres_lignes(i,grille) + chiffres_colonnes(j,grille) + chiffres_carré(i,j,grille)
    # NOTE(review): the comprehension variable k shadows the parameter k.
    chiffres_possibles=[k for k in range(1,10) if k not in chiffres_presents]
    return chiffres_possibles # may be an empty list when nothing fits
def retour():# backtrack: called whenever the current choice hits a dead end
    """Undo choices: drop every trailing cell that has a single candidate
    left, then discard the last candidate of the first cell that still
    has alternatives. Empties `possibilites` when nothing is left."""
    global possibilites
    r =len(possibilites)-1 # index of the last processed cell
    while r>=0 and len(possibilites[r])==1: # cells with one candidate have no alternative: drop them
        possibilites=possibilites[0:r]
        r = r - 1
    if r >= 0:
        # Remove the candidate we just tried from the deepest flexible cell.
        u = len(possibilites[r])
        possibilites[r]=possibilites[r][0:u-1]
    return
def combinaisons_correctes():
    """Depth-first backtracking search over cells 0..80.

    `possibilites` grows by one candidate list per cell; when a cell has
    no candidate we backtrack via retour(). Returns the first complete
    solution found, as a flat list of 81 digits.
    """
    global possibilites
    possibilites=[]
    possibilites=[possibilites_case(0)]
    termine = False
    while not termine:
        r = len(possibilites)
        print(possibilites)
        print()
        if r ==0: # no candidates anywhere: the search space is exhausted
            termine = True
        if 0<r<81:
            autre_combinaison=possibilites_case(r)
            if len(autre_combinaison)!=0:
                possibilites=possibilites+[autre_combinaison]
            else:
                retour()
        if r ==81: # all 81 cells are filled: this is a solution
            print("solution:",derniere_valeur())
            #retour() # would try to find another solution grid, but it does not work!!!!
            termine=True
    return derniere_valeur()
# Sample puzzle to solve: 0 marks an empty cell.
liste_depart=[0,2,0, 0,0,0, 0,6,0,
              0,0,8, 3,0,0, 0,0,4,
              5,4,0, 0,9,0, 0,2,1,
              0,0,0, 0,2,0, 0,3,0,
              3,0,0, 0,0,0, 0,0,9,
              0,7,0, 0,5,0, 0,0,0,
              9,6,0, 0,3,0, 0,7,8,
              2,0,0, 0,0,6, 4,0,0,
              0,5,0, 0,0,0, 0,1,0]
# Show the puzzle, solve it by backtracking, then show the solution.
grille_depart=liste_vers_grille(liste_depart)
afficher_une_grille(grille_depart)
liste_solution=combinaisons_correctes()
grille_solution=liste_vers_grille(liste_solution)
afficher_une_grille(grille_solution)
| true |
0f1f41a0bc9d9d67e97c19d1c52907ce6a8db5a9 | Python | ejmcreates/itc255-foodkiosk-coding | /foodkiosk/test.py | UTF-8 | 1,493 | 3.546875 | 4 | [] | no_license | import unittest
from item import Item
from orderitem import OrderItem
from order import Order
class ItemTest(unittest.TestCase):
    """Unit tests for the Item class."""

    def setUp(self):
        # One menu item shared by every test in this class.
        self.item = Item(1, 'chips', 3.75, 'med')

    def test_getItemNumber(self):
        self.assertEqual(str(self.item.getItemNumber()), '1')

    def test_getPrice(self):
        self.assertEqual(str(self.item.getItemPrice()), '3.75')

    def test_itemString(self):
        # str(item) should render as the item's name.
        self.assertEqual(str(self.item), self.item.itemname)
class OrderItemTest(unittest.TestCase):
    """Unit tests for the OrderItem class."""

    def setUp(self):
        # One order line: two medium chips with no special request.
        self.quantity = 2
        self.special = 'none'
        self.item = Item(1, 'chips', 3.75, 'med')
        self.orderitem = OrderItem(self.item, self.quantity, self.special)

    def test_getQuantity(self):
        self.assertEqual(self.orderitem.getQuantity(), self.quantity)
class OrderTest(unittest.TestCase):
    """Unit tests for the Order class."""

    def setUp(self):
        # Build an order of three lines: 2 chips, 1 pizza, 3 fries
        # (2*4.00 + 1*13.00 + 3*2.00 = 27.0).
        self.o = Order()
        self.item1 = Item(1, 'chips', 4.00, 'med')
        self.item2 = Item(2, 'pizza', 13.00, 'small')
        self.item3 = Item(3, 'fries', 2.00, 'small')
        self.orderitem1 = OrderItem(self.item1, 2, 'none')
        self.orderitem2 = OrderItem(self.item2, 1, 'none')
        self.orderitem3 = OrderItem(self.item3, 3, 'none')
        for line in (self.orderitem1, self.orderitem2, self.orderitem3):
            self.o.addOrderItems(line)

    def test_CalculateTotal(self):
        payment = self.o.calcTotal()
        self.assertEqual(str(payment), 'Your payment today will be 27.0')
| true |
5750450aee3003f2974da2bdc59e04bd55a6e8a8 | Python | falcon1996/Simple-python-programs | /profanity_check/check_profanity.py | UTF-8 | 749 | 3.671875 | 4 | [] | no_license | #module urllib helps to get info from internet having function urlopen.
#open helps to read file from computer and returns object of type File.
#wydl is a google based website to check profanity.
import urllib
def read_text():
    """Read the document at the hard-coded path, print it and hand its
    contents to the profanity checker.

    NOTE(review): the triple-quoted string below is a placeholder — a real
    file path must be substituted before this can run.
    """
    quotes = open("""Enter file location""")
    contents_of_file = quotes.read()
    print(contents_of_file)
    quotes.close()
    check_profanity(contents_of_file)
def check_profanity(text_to_check):
    """Send `text_to_check` to the wdyl web service and report whether it
    contains curse words.

    NOTE(review): Python 2 only — `urllib.urlopen` and the `print`
    statements below do not exist in Python 3.
    """
    connection = urllib.urlopen("http://www.wdylike.appspot.com/?q="+text_to_check)
    output = connection.read()
    connection.close()
    # The service answers literally "true" when profanity is detected.
    if "true" in output:
        print "This document has curse words!!"
    else:
        print "No curse words are present in this document"
read_text()
| true |
425768b7f939e7735128be1d67611f409b726c2e | Python | JakeIsCoding/algorithms_preparation | /leetcode/count_and_say.py | UTF-8 | 1,821 | 4.09375 | 4 | [] | no_license | from typing import List
class Solution:
    """LeetCode 38 — Count and Say.

    countAndSay(1) == "1"; countAndSay(n) is obtained by reading the digit
    string countAndSay(n-1) aloud: each maximal run of identical digits is
    spoken as "<run length><digit>", and the sayings are concatenated.
    """
    def countAndSay(self, n: int) -> str:
        """Return the nth term of the count-and-say sequence.

        Recursion depth is n; each level does one linear scan of the
        previous term.
        """
        if n == 1:
            return "1"
        previous = self.countAndSay(n - 1)
        spoken = []
        idx = 0
        while idx < len(previous):
            digit = previous[idx]
            run_length = 1
            # Extend the run while the same digit keeps repeating.
            while idx + run_length < len(previous) and previous[idx + run_length] == digit:
                run_length += 1
            spoken.append(str(run_length) + digit)
            idx += run_length
        return "".join(spoken)
# Quick self-check of the first terms of the sequence.
if __name__=='__main__':
    sol = Solution()
    assert sol.countAndSay(1) == "1"
    assert sol.countAndSay(4) == "1211"
| true |
e48d8f535a3639c011b8530a4aa3e0bd8cdac1de | Python | cy565025164/DeepFM | /cpt.py | UTF-8 | 1,049 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
# Reads a TSV file "tes" of (pin, money, ..., score) rows, keeps the
# best-scoring row per pin, then assigns each pin a coupon code and a
# "dx" flag, writing pin/score/coupon/flag to stdout as TSV.
# NOTE(review): `sys` is imported but never used.
import sys,random
# Coupon pools: "99-x" coupons for general users, "59-x" for the
# mid-spend bracket.
cps_99 = ["99-10", "99-20", "99-15"]
cps_59 = ["59-6", "59-10"]
n = 0
dct = dict()
for line in open("tes", 'r'):
    line = line.strip().split('\t')
    n += 1
    # Skip the header row; abort on a malformed row.
    if n == 1:
        continue
    if len(line) != 4:
        print("error n:", n)
        break
    # Keep only the highest-scoring row for each pin.
    pin, money, score = line[0], float(line[1]), float(line[-1])
    if pin not in dct or dct[pin][1] < score:
        dct[pin] = [money, score]
    del pin, money, score
# Sort pins by score, best first.
dct = sorted(dct.items(), key=lambda x:x[1][1], reverse=True)
num_99_15 = 0
m = 0
for k in dct:
    pin, money, score = k[0], k[1][0], k[1][1]
    cps = ""
    is_dx = "0"
    if money>31.3 and money<76.7:
        # Mid spenders draw randomly from the 59-coupon pool.
        i = random.randint(0,1)
        cps = cps_59[i]
    else:
        # Everyone else draws from the 99-coupon pool; the "99-15"
        # coupon (index 2) is re-drawn once its issue cap is exceeded.
        i = random.randint(0,2)
        if i == 2:
            num_99_15 += 1
            if num_99_15 > 93000:
                i = random.randint(0, 1)
        cps = cps_99[i]
    m += 1
    # Pins ranked 50001..150000 by score get the "dx" flag.
    if m > 50000 and m < 150001:
        is_dx = "1"
    print ("\t".join([pin, str(score), cps, is_dx]))
| true |
5b5a656bc58881648da8be6ce4927919185b76ee | Python | dyfloveslife/SolveRealProblemsWithPython | /BasicKnowledge/test_20180814-3.py | UTF-8 | 1,550 | 3.28125 | 3 | [] | no_license | # path = 'f:\\Python_test\\BasicKnowledge\\google_stock_data.csv'
"""Read Google stock data from a CSV file and write daily returns to a
new CSV file.

The input is expected in reverse-chronological order (newest row first)
with columns: Date, Open, High, Low, Close, Volume, Adj Close.
"""
import csv
from datetime import datetime

path = 'f:\\Python_test\\BasicKnowledge\\google_stock_data.csv'

# Parse the CSV into typed rows. `with` guarantees the handle is closed
# (the original script never closed any of the files it opened).
with open(path, newline='') as file:
    reader = csv.reader(file)
    header = next(reader)  # The first line is the header
    data = []
    for row in reader:
        # row = [Date, Open, High, Low, Close, Volume, Adj.Close]
        date = datetime.strptime(row[0], '%m/%d/%Y')
        open_price = float(row[1])
        high = float(row[2])
        low = float(row[3])
        close = float(row[4])
        volume = int(row[5])
        adj_close = float(row[6])
        # NOTE(review): as in the original, `close` is parsed but not stored;
        # later code uses the last element (adj_close) as the price.
        data.append([date, open_price, high, low, volume, adj_close])

# Compute and store daily stock returns based on the adjusted close.
return_path = 'f:\\Python_test\\BasicKnowledge\\google_returns.csv'
# newline='' is required by the csv module to avoid blank lines on Windows,
# and the `with` block guarantees the output is flushed and closed.
with open(return_path, 'w', newline='') as out_file:
    writer = csv.writer(out_file)
    writer.writerow(['Date', 'Returns'])
    for i in range(len(data) - 1):
        today_date = data[i][0]
        today_price = data[i][-1]
        # Rows are newest-first, so the previous trading day is row i + 1.
        yesterdays_price = data[i + 1][-1]
        daily_return = (today_price - yesterdays_price) / yesterdays_price
        writer.writerow([today_date.strftime('%m/%d/%Y'), daily_return])
| true |
353d869bf22ab118f3713481af1c3583b6ed5840 | Python | reumongshop/python_study_ | /20200423/cctv_in_seoul_jar.py | UTF-8 | 7,666 | 3.453125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 10:10:33 2020
@author: USER
"""
'''
서울시 구별 CCTV 현황 분석하기
서울시 각 구별 CCTV 수를 파악하고,
인구대비 CCTV 비율을 파악해서 순위 비교
인구대비 CCTV의 평균치를 확인하고
그로부터 CCTV가 과하게 부족한 구를 확인
Python 기본 문법 / Pandas 와 Matplotlib의 기본적 사용법을 이용한 시각화
단순한 그래프 표현에서
한 단계 더 나아가 경향을 확인하고 시각화하는 기초 확인
'''
import pandas as pd
import numpy as np
# CCTV 데이터와 인구 데이터 합치고 분석하기
# CCTV 데이터 읽
CCTV_Seoul = pd.read_csv('C:/python_data/20200423/01. CCTV_in_Seoul.csv', encoding='utf-8')
CCTV_Seoul.head()
CCTV_Seoul.columns
CCTV_Seoul.columns[0]
# 컬럼명 변경 : 기관명을 구별로 변경
# rename 은 DataFrame 꺼! < == > replace 랑 사용 용도 알아둬야함!
CCTV_Seoul.rename(columns={CCTV_Seoul.columns[0] : '구별'}, inplace=True)
print(CCTV_Seoul.head()) # head : 상위 몇개만 읽어오기 디폴트값은 5개!
# 인구 데이터 읽기 1
pop_Seoul = pd.read_excel('01. population_in_Seoul.xls', encoding='utf-8')
pop_Seoul.head()
print(pop_Seoul.head())
# 인구 데이터 읽기 2 - 필요한 데이터만 선별하여 읽기
pop_Seoul = pd.read_excel('01. population_in_Seoul.xls',
header = 2,
usecols = 'B, D, G, J, N',
encoding = 'utf-8')
pop_Seoul.head()
print(pop_Seoul.head())
# 알기 쉬운 컬럼명으로 변경
pop_Seoul.rename(columns={pop_Seoul.columns[0] : '구별',
pop_Seoul.columns[1] : '인구수',
pop_Seoul.columns[2] : '한국인',
pop_Seoul.columns[3] : '외국인',
pop_Seoul.columns[4] : '고령자'}, inplace=True)
pop_Seoul.head()
print(pop_Seoul.head())
# CCTV 데이터 파악하기
# sort_values() : 변수 정렬 / 원본 데이터 수정되지 않음, 보여줄 때만 바뀌어 보
# ascending = True : 오름차순
# ascending = False : 내림차순
print(CCTV_Seoul.sort_values(by='소계', ascending=True).head(5))
print(CCTV_Seoul.sort_values(by='소계', ascending=False).head(5))
# 최근증가율 = (2016년 + 2015년 + 2014년) / 2013년도 이전 * 100
CCTV_Seoul['최근증가율'] = (CCTV_Seoul['2016년'] + CCTV_Seoul['2015년'] + \
CCTV_Seoul['2014년']) / CCTV_Seoul['2013년도 이전'] * 100
cv = CCTV_Seoul.sort_values(by='최근증가율', ascending=False).head(5)
print(cv)
# 서울시 인구 데이터 파악
print(pop_Seoul.head())
# 첫번째 합계 행 삭제
pop_Seoul.drop([0], inplace=True)
print(pop_Seoul.head())
# '구별' 컬럼의 중복값 제거
print(pop_Seoul['구별'].unique())
# '구별' 컬럼의 NULL 값 확인
print(pop_Seoul[pop_Seoul['구별'].isnull()])
# '구별' 컬럼의 NULL 값 있는 행 제거
pop_Seoul.drop([26], inplace=True)
print(pop_Seoul.head())
# 외국인비율과 고령자비율 추가
pop_Seoul['외국인비율'] = pop_Seoul['외국인'] / pop_Seoul['인구수'] * 100
pop_Seoul['고령자비율'] = pop_Seoul['고령자'] / pop_Seoul['인구수'] * 100
print(pop_Seoul.head())
# 각 칼럼 확인
pop_Seoul.sort_values(by='인구수', ascending=False).head(5)
pop_Seoul.sort_values(by='외국인', ascending=False).head(5)
pop_Seoul.sort_values(by='외국인비율', ascending=False).head(5)
pop_Seoul.sort_values(by='고령자', ascending=False).head(5)
pop_Seoul.sort_values(by='고령자비율', ascending=False).head(5)
# CCTV 데이터와 인구 데이터 합치고 분석하기
# 두 개의 데이터프레임을 합할 경우
# 동일 컬럼명은 하나('구별')로 통일된다
# merge() : 두 데이터 프레임을 공통된 값을 기준으로 묶는 함
# 데이터베이스에서 join과 같은 역할을 한다
data_result = pd.merge(CCTV_Seoul, pop_Seoul, on='구별')
print(data_result.head())
# CCTV에 대한 '소계' 컬럼을 제외한 나머지 CCTV 데이터 삭제
del data_result['2013년도 이전']
del data_result['2014년']
del data_result['2015년']
del data_result['2016년']
print(data_result.head())
# 시각화 작업을 위한 구이름('구별')을 index화
data_result.set_index('구별', inplace = True)
print(data_result.head())
# CCTV와 각 컬럼에 대한 상관관계 분석
# 상관관계 함수 : np.corrcoef()
print(np.corrcoef(data_result['고령자비율'], data_result['소계']))
print(np.corrcoef(data_result['외국인비율'], data_result['소계']))
print(np.corrcoef(data_result['인구수'], data_result['소계']))
print(data_result.sort_values(by='소계', ascending=False).head(5))
# 파일 저장
data_result.to_csv('data_result.csv')
# CCTV와 인구현황 그래프로 분석하기
import platform
# 폰트설정 (특히 한글 부분)
from matplotlib import font_manager, rc
from matplotlib import pyplot as plt
plt.rcParams['axes.unicode_minus'] = False
if platform.system() == 'Darwin':
rc('font', family='AppleGothic')
elif platform.system() == 'Windows':
path = "c:/Windows/Fonts/malgun.ttf"
font_name = font_manager.FontProperties(fname=path).get_name()
rc('font', family=font_name)
else:
print('Unknown system... SORRY ~_~')
# CCTV 비율을 구하고 그에 따른 시각화 작업
data_result['CCTV비율'] = data_result['소계'] / data_result['인구수'] * 100
data_result['CCTV비율'].sort_values().plot(kind='barh', grid=True, figsize=(10,10))
plt.show()
# 산점도(인구수와 소계)
plt.figure(figsize=(6,6))
plt.scatter(data_result['인구수'], data_result['소계'], s=50)
plt.xlabel('인구수')
plt.ylabel('CCTV')
plt.grid()
plt.show()
# 인구수와 CCTV는 상관계수가 양의 값이므로 산점도와 직선
# 직선구하기(Polyfit을 이용한 회귀선)
# polyfit 함수를 이용해서 예측 모델 z의 계수 생성
fp1 = np.polyfit(data_result['인구수'], data_result['소계'],1)
fp1
# 만들어진 예측 모델을 이용한 그래프 그리기
f1 = np.poly1d(fp1) # y축 데이터
fx = np.linspace(100000, 700000, 100) # x축 데이터
plt.figure(figsize = (10, 10))
plt.scatter(data_result['인구수'], data_result['소계'], s=50)
plt.plot(fx, f1(fx), ls='dashed', lw=3, color = 'g')
plt.xlabel('인구수')
plt.ylabel('CCTV')
plt.grid()
plt.show()
# 조금 더 설득력 있는 자료 만들기
'''
직선이 전체 데이터의 대표값 역할을 한다면
인구수가 300,000 일 경우 CCTV는 1100 정도여야 한단 결론
가독성 향상을 위해 오차를 계산할 수 있는 코드 작성 후,
오차가 큰 순으로 데이터 정렬
'''
fp1 = np.polyfit(data_result['인구수'], data_result['소계'], 1)
f1 = np.poly1d(fp1)
fx = np.linspace(100000, 700000, 100)
data_result['오차'] = np.abs(data_result['소계'] - f1(data_result['인구수']))
df_sort = data_result.sort_values(by='오차', ascending = False)
print(df_sort.head())
# 시각화 작업
# plot 크기 설정
plt.figure(figsize=(14, 10))
# 산점도
plt.scatter(data_result['인구수'], data_result['소계'], c=data_result['오차'], s=50)
# 회귀선
plt.plot(fx, f1(fx), ls='dashed', lw=3, color='g')
for n in range(10):
plt.text(df_sort['인구수'][n] * 1.02, df_sort['소계'][n] * 0.98,
df_sort.index[n], fontsize=15)
plt.xlabel('인구수') # x축라벨
plt.ylabel('인구당비율') # y축라벨
plt.colorbar() # 오른쪽에 색상 바
plt.grid() # 가이드 라
plt.show()
| true |
001165f242fdb986fecec06d0a830c95b6935da8 | Python | chasecolford/Leetcode | /problems/61.py | UTF-8 | 2,090 | 3.921875 | 4 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """LeetCode 61 — rotate a singly linked list to the right by k places."""
    def rotateRight(self, head: ListNode, k: int) -> ListNode:
        """ Main idea:
            First, calculate the mod of k % len(list) if k >= len(list),
            since k can be much larger than the length of the list. This
            saves us a lot of repeated work in the worst case.
            Then, there are only 3 steps:
            1. Find the (k+1)th node from the back and make it point to None
            2. Make the last node point to the head
            3. Return the kth node from the back, since this is the new head
        """
        # An empty list rotates to None (bare return).
        if not head: return
        # Step [0]: Get the length of the list, which makes future calculations easy.
        n, dummy = 1, head
        while dummy.next: n, dummy = n+1, dummy.next
        # Store the last node for later.
        last = dummy
        # Step [1]: Check base cases and adjust k as needed.
        if n == 1: return head # If the list is length 1, any rotation is the same.
        if k >= n: k = k % n # Adjust k if >= len(list).
        if k == 0: return head # If we rotate by 0, just return the list.
        # Step [2]: Find the critical nodes.
        # Critical nodes are the (k+1)th and kth node(s) from the back of the list.
        # The (k+1)th node from the back (n-k-1) should point to None, as it will be the new end node.
        # The kth node from the back (n-k) will be the node we return.
        i, dummy, knode, k1node = 0, head, None, None
        while i < n:
            if i == n - k - 1: k1node = dummy
            elif i == n - k: knode = dummy
            i, dummy = i + 1, dummy.next
        # Step [3]: Adjust all the relevant nodes:
        # a. The (k+1)th node should point to None (it becomes the new tail).
        # b. The last node should now point to the head.
        # c. Return the kth node from the back.
        k1node.next = None
        last.next = head
        return knode
| true |
fdcd56f7aa9e0f650dd2617c3147821c318e74d4 | Python | RustyDotson/gpc | /main.py | UTF-8 | 4,838 | 2.953125 | 3 | [] | no_license | from bs4 import BeautifulSoup as bs
import requests
def get_names():
    """Prompt the user for a game title and its console; return both lower-cased."""
    title_prompt = ("Please select the name of a game you are searching for.\n"
                    "We will try our best to give you the average pricing online. "
                    "\nKeep in mind that this application only works for games \n"
                    "under NTSC-U/C for better accuracy:")
    system_prompt = "What system is this game played on? (ex. Xbox 360, NES, Commodore 64)"
    title = input(title_prompt)
    system = input(system_prompt)
    return title.lower(), system.lower()
def sub_space(label):
    """Replace spaces in *label* with '+' so it can be embedded in a URL query.

    Parameters
    ----------
    label : str
        Search term, e.g. a game or console name.

    Returns
    -------
    str
        The label with every space replaced by '+'.
    """
    # str.replace performs the whole substitution in one pass instead of the
    # original character-by-character string rebuild.
    return label.replace(" ", "+")
def average_price(prices):
    """Return the arithmetic mean of a list of floats.

    Parameters
    ----------
    prices : list of float

    Returns
    -------
    float
        The mean, or 0.0 for an empty list (the original raised
        ZeroDivisionError when no listings matched).
    """
    if not prices:
        # Guard: callers pass an empty list when no priced listings were found.
        return 0.0
    return sum(prices) / len(prices)
def get(link):
    """Download *link* and return the page parsed as a BeautifulSoup tree.

    help from https://www.youtube.com/watch?v=ng2o98k983k
    """
    html = requests.get(link).text
    return bs(html, 'lxml')
def check_format(price):
    """Normalise an eBay price string.

    Returns 0.00 for "See price" listings, the midpoint (float) of a
    "low to $high" price range, or the comma-free string otherwise.
    """
    if "See price" in price:
        return 0.00
    if "," in price:
        # Thousands separators would break float() later on.
        price = price.replace(",", "")
    if " " in price:
        # Range such as "10.00 to $20.00": average the two endpoints,
        # stripping the '$' from the upper bound.
        parts = price.split()
        price = (float(parts[0]) + float(parts[-1][1:])) / 2
    return price
def check_shipping(ship_html):
    """Return True when *ship_html* holds a priced (non-free) shipping label.

    Parameters
    ----------
    ship_html : bs4 element or None
        The listing's shipping-cost element; None when the listing shows no
        shipping line.

    Returns
    -------
    bool

    Bug fix: the original returned True only when the label contained EVERY
    digit 0-9, so a real cost such as "+$4.99 shipping" was always rejected
    and shipping was never added to any price.  A label represents a
    chargeable cost when it contains ANY digit ("Free shipping" and
    "Freight" contain none).
    """
    if ship_html is None:
        return False
    label = ship_html.get_text()
    return any(ch.isdigit() for ch in label)
def add_shipping(price, ship_check):
    """Return *price* plus the shipping cost parsed from *ship_check*.

    ship_check is the bs4 shipping element whose text starts with a token
    like "+$4.99"; price may be a str or float.
    """
    label = ship_check.get_text()
    cost_token = label.split()[0]      # e.g. "+$4.99"
    cost = float(cost_token[2:])       # strip the leading "+$"
    return float(price) + cost
def filter_title(title, page):
    """Return False when the listing title suggests a non-standard sale.

    Keywords such as "cib", "sealed" or "bundle" usually mark listings with
    significantly higher prices; a listing is rejected when a keyword
    appears in its title but NOT in the user's search term.

    Parameters
    ----------
    title : str
        The (lower-case) game title the user searched for.
    page : bs4 element
        The listing whose title element carries class "s-item__title".

    Returns
    -------
    bool
        True when the listing should be included in the average.

    Note: keywords are matched against the lower-cased listing title, so
    every entry must be lower-case; the original duplicate "Steelbook"
    (capital S) could never match and was removed.
    """
    keywords = ["1", "2", "3", "4", "cib", "collector's", "collectors", "collector", "legendary", "special", "factory", "sealed",
                "complete", "in box", "lot", "games", "graded", "mint", "disc only", "disk", "rare",
                "repro", "reproduction", "manual only", "case only", "set", "bundle", "steelbook"]
    listing_name = page.find(class_="s-item__title").get_text()
    # Lower-case once instead of on every keyword comparison.
    lowered = listing_name.lower()
    for word in keywords:
        if word in lowered and word not in title:
            return False
    return True
def get_prices(page_data, title):
    """Average the sold prices of up to 199 listings on an eBay results page.

    Parameters
    ----------
    page_data : bs4 document
        Parsed search-results page.
    title : str
        The user's (lower-case) search title, used to filter out
        collector's/sealed/etc. listings.

    Returns
    -------
    float
        Average price (shipping included where present) of the accepted
        listings.

    Bug fix: the final return was indented inside the loop, so only the
    FIRST listing was ever examined; it now runs after the loop.
    """
    price_list = []
    for i in range(199):
        print("games checked: " + str(i))
        # Listings are tagged data-view "mi:1686|iid:<n>" with n starting at 1.
        listing = page_data.find("li", {"data-view": "mi:1686|iid:" + str(i+1)})
        if listing is None:
            # No more listings on this page.
            return average_price(price_list)
        games = listing.find(class_="s-item__price")  # price of the item
        price = games.get_text()[1:]                  # drop the currency symbol
        price = check_format(price)
        ship_check = listing.find(
            class_="s-item__shipping s-item__logisticsCost")  # shipping element
        if check_shipping(ship_check) is True:
            price = add_shipping(price, ship_check)
        if filter_title(title, listing):
            price_list.append(float(price))
    return average_price(price_list)
def main():
    """Prompt for a game/console, query eBay sold listings and print the
    average price."""
    game, console = get_names()
    # Search URL: Buy-It-Now, NTSC-U/C region, 200 items per page, sold &
    # completed listings only.
    url = "https://www.ebay.com/sch/i.html?_from=R40&_nkw=" + sub_space(game) + "+" + sub_space(console) + \
          "&_sacat=0&LH_BIN=1&Region%2520Code=NTSC%252DU%252FC%2520%2528US%252FCanada%2529&rt=nc&_oaa=1&_dcat=139973" \
          "&_ipg=200&LH_Sold=1&LH_Complete=1"
    print(url)
    print("please wait a moment")
    page = get(url)
    average = get_prices(page, game)
    print("\n" + game + " on the " + console + " is approximately $"
          + str("{0:.2f}".format(average))) # Used to remove large floating decimal numbers in the average
    print("\nKeep in mind that the average may vary depending on pricing based on quality and edition of copies")
    print("Also, games with similar names may accidentally be thrown into the average.")
    # Keep the console window open until the user presses Enter.
    input()
# TODO(review): consider guarding with `if __name__ == '__main__':` so the
# script does not run on import.
main()
| true |
36e886feb71b4686298666b475fe5da53d8e4121 | Python | mfaria724/CI2691-lab-algoritmos-1 | /Laboratorio 05/Soluciones/Laboratorio/Lab05Ejercicio3c.py | UTF-8 | 2,906 | 3.6875 | 4 | [] | no_license | #
# Lab05Ejercicio3c.py
#
# DESCRIPCION: programa que dada una secuencia de enteros terminada en 0 provista por el teclado,
# donde solo aparecen los valores del conjunto {1,2,3,4}, cuenta para cada valor del conjunto,
# cuantas veces aparece dentro de la secuencia. Version por contrato
#
# Autor: Rosseline Rodriguez
#
# Ultima modificacion: 24/02/2018
import sys
# CONSTANTS
MAX = 1000 # int // Maximum number of attempts
# VARIABLES
# e : int // Input: the element currently read
# n1 : int // Output: how many times 1 appears
# n2 : int // Output: how many times 2 appears
# n3 : int // Output: how many times 3 appears
# n4 : int // Output: how many times 4 appears
# k : int // Iteration variable
# cota : int // Loop bound (variant)
# INITIAL VALUES
print("Introduzca una secuencia de valores en el conjunto {1,2,3,4} ")
k,n1,n2,n3,n4 = 0,0,0,0,0
cota = MAX-k
# Inv: 0<=k<=MAX /\
# n1 == (%sigma i: 0<=i<k : Sec(i) == 1) /\
# n2 == (%sigma i: 0<=i<k : Sec(i) == 2) /\
# n3 == (%sigma i: 0<=i<k : Sec(i) == 3) /\
# n4 == (%sigma i: 0<=i<k : Sec(i) == 4)
# where Sec is the sequence entered and k is its current length
# Check that the bound (variant) is non-negative before the loop starts
try:
    assert(cota >= 0)
except:
    print("Error: cota negativa. El programa terminara ")
    print("cota="+str(cota))
    sys.exit()
while k < MAX:
    e = int(input("Introduzca un valor del conjunto (para finalizar introduzca 0): "))
    try:
        # Precondition: the value read is in the range 0..4
        assert(e >= 0 and e <= 4)
    except:
        print("El valor no esta en el conjunto. El programa terminara...")
        sys.exit()
    # Computations
    if e == 0: # the sequence has ended
        break
    if e == 1:
        n1 = n1+1
    elif e == 2:
        n2 = n2+1
    elif e == 3:
        n3 = n3+1
    else:
        n4 = n4+1
    k = k+1
    # Check that the bound strictly decreases on every iteration
    try:
        assert(cota > MAX-k)
    except:
        print("Error: cota no decreciente. El programa terminara ")
        print("cota anterior ="+str(cota)+" nueva cota ="+str(MAX-k))
        sys.exit()
    cota = MAX - k
    # Check that the bound stays non-negative on every iteration
    try:
        assert(cota >= 0)
    except:
        print("Error: cota negativa. El programa terminara ")
        print("cota="+str(cota))
        sys.exit()
# Postcondition:
# n1 == (%sigma i: 0<=i<k : Sec(i) == 1) /\
# n2 == (%sigma i: 0<=i<k : Sec(i) == 2) /\
# n3 == (%sigma i: 0<=i<k : Sec(i) == 3) /\
# n4 == (%sigma i: 0<=i<k : Sec(i) == 4)
# where Sec is the sequence entered and k is its current length
# Output
print("Numero de veces que aparece el 1 : ",n1)
print("Numero de veces que aparece el 2 : ",n2)
print("Numero de veces que aparece el 3 : ",n3)
print("Numero de veces que aparece el 4 : ",n4)
| true |
9f8cb6a764468dfc7edfe2084da3fb11fae49462 | Python | srikanthpragada/PYTHON_11_JUNE_2018_WEBDEMO | /demo/models.py | UTF-8 | 880 | 2.546875 | 3 | [] | no_license | from django.db import models
# Create your models here.
class Course:
    """Plain value object describing a course offering."""

    def __init__(self, title, duration, fee, topics=None):
        # Store the constructor arguments verbatim; *topics* stays None
        # unless the caller supplies a collection.
        self.title, self.duration, self.fee = title, duration, fee
        self.topics = topics
class Department(models.Model):
    """Django model for a department; rows live in the ``departments`` table."""

    name = models.CharField(max_length=30)
    location = models.CharField(max_length=30)

    def __str__(self):
        # Human-readable form shown in the admin and the shell.
        return "{} {}".format(self.name, self.location)

    class Meta:
        db_table = 'departments'
class Employee(models.Model):
    """Django model for an employee; rows live in the ``Employees`` table."""

    name = models.CharField(max_length=30)
    job = models.CharField(max_length=50)
    salary = models.IntegerField()
    # Bug fix: on_delete must be one of the callables from django.db.models;
    # the original passed the string 'cascade', which Django cannot call when
    # cascading a delete (and which newer Django rejects outright).
    department = models.ForeignKey(Department, on_delete=models.CASCADE)

    def __str__(self):
        return "%s,%s,%d" % (self.name, self.job, self.salary)

    class Meta:
        db_table = 'Employees' # Name to be used in database
| true |
418cbbcd65238c56d9cf4603feed86dbdc445df0 | Python | ericc661/graph_reader | /graph_reader.py | UTF-8 | 7,801 | 3.515625 | 4 | [
"MIT"
] | permissive | '''
Eric Chen
5/9/20
GraphReader class: uses CV techniques to identify nodes and edges in an image
of a graph.
TODO: rethink organization, create Graph class and put some stuff in main into
functions
TODO: figure out how to get circles fully surround the node
TODO: morph after node removal, then work on identifying edges - but don't morph
before identifying node labels
TODO: enforce thresholding for every image - maybe after morph operators
TODO: input validation for if nodes are labeled the same thing
idea: way to detect self-loops: use less strict circle detection, if we have two
intersecting circles then we probably have that the smaller circle is a
self-loop? if one circle is completely inside another, then it might just
be a number
TODO: automate the min size for a circle - want to exclude numbers/labels but detect
self-loops as well as states
TODO: try on hand-drawn graphs
notes:
circles are more centered on thresholded images!
process:
-read in grayscale image
-perform inversion if needed so we can have a black backround
-threshold the image
-detect states/nodes on thresholded image
- this involves the labeling part
-find the contours within each node that represent the label
- take these labels and use MNIST to process them
- store all the nodes and associate state in some data structure
-remove nodes and perform morph operators to leave just edges on the graph
-with purely edges: detect self-loops as well as regular transitions - straight
as well as curved arrows
-with all this information, construct/store full graph info: nodes and edges
'''
import numpy as np
import sys
import cv2
class GraphReader(object):
    """Computer-vision helpers for locating graph nodes (circles) in an image."""

    def find_circles(self, image_gray):
        """Return an int array of [x, y, r] rows, one per detected circle.

        *image_gray* must be a single-channel (grayscale) image.
        """
        assert len(image_gray.shape) == 2
        # dp=1 (accumulator at image resolution), 50 px minimum distance
        # between centres, then the Canny / accumulator thresholds.
        detected = cv2.HoughCircles(image_gray, cv2.HOUGH_GRADIENT,
                                    1, 50, param1=80, param2=40)
        return np.round(detected[0, :]).astype('int')

    def draw_circles(self, orig):
        """Return a BGR copy of *orig* with every detected circle drawn in red."""
        out = cv2.cvtColor(orig, cv2.COLOR_GRAY2RGB)
        for cx, cy, radius in self.find_circles(orig):
            cv2.circle(out, (cx, cy), radius, (0, 0, 255), 2)
        return out

    def erase_nodes(self, orig):
        """Return a single-channel copy of *orig* with each node blacked out."""
        out = orig.copy()
        for cx, cy, radius in self.find_circles(orig):
            # radius + 10 so the node's outline (and touching edge stubs)
            # are fully erased as well.
            cv2.circle(out, (cx, cy), radius + 10, 0, -1)
        return out

    def validate_input(self, img):
        """Return *img*, inverted when it is majority-white.

        Call this before the other helpers so they can assume a black
        background.
        """
        out = img.copy()
        if np.average(img) > 128:
            print('White background detected. Inverting image...')
            out = 255 - out
        return out

    def show(self, image, title):
        """Display *image* in a window until a key is pressed."""
        cv2.imshow(title, image)
        while cv2.waitKey(15) < 0:
            pass
# represents information about a node: its centroid & area in the image
# as well as info about its label
class Node(object):
    """A detected graph node: centre and radius in pixels, plus its label contour."""

    def __init__(self, state_array):
        # state_array is one [x, y, r] row produced by circle detection.
        assert len(state_array.shape) == 1
        assert state_array.shape[0] == 3
        self.x, self.y, self.r = state_array
        # cv2 contour representing the node's label; filled in later.
        self.label = None

    def area(self):
        """Area of the node's circle, in pixels."""
        return np.pi * self.r ** 2

    def cxy(self):
        """(x, y) tuple of the node's centroid."""
        return (self.x, self.y)
class ContourUtility(object):
    """Static helpers over cv2 contours."""

    @staticmethod
    def get_area(contour):
        """Area enclosed by *contour*."""
        return cv2.contourArea(contour)

    @staticmethod
    def get_cxy(contour):
        """(x, y) centroid of *contour*, i.e. (col, row).

        Returns (-1, -1) for a degenerate contour with zero area; that point
        can never lie inside a state, which callers rely on.
        """
        moments = cv2.moments(contour)
        if moments['m00'] == 0:
            return (-1, -1) # to signal m00 was 0
        cx = int(moments['m10'] / moments['m00'])
        cy = int(moments['m01'] / moments['m00'])
        return (cx, cy)
def main():
    """Load the image given on the command line, detect nodes, pick a label
    contour for each node, then show the graph with nodes erased."""
    if len(sys.argv) != 2:
        print('usage: ' + sys.argv[0] + ' input_image')
        exit(1)
    gr = GraphReader()
    img = cv2.imread(sys.argv[1], cv2.IMREAD_GRAYSCALE)
    img = gr.validate_input(img)
    # find circles on orig image
    gr.show(img, 'grayscale image')
    gr.show(gr.draw_circles(img), 'grayscale image with circles')
    # try with thresholding
    _, img_thresholded = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY)
    gr.show(img_thresholded, 'thresholded image')
    gr.show(gr.draw_circles(img_thresholded), 'thresholded image w circles')
    # after nodes identified, try identifying the node labels with findContours
    # inside the location
    node_info = gr.find_circles(img_thresholded)
    nodes = []
    for node in node_info:
        nodes.append(Node(node)) # add Node object for each node found
    contours, hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    # check if each contour is smaller than size of circle and contained in each circle:
    for i in range(len(contours)):
        # get contour's area and centroid
        cnt_area = ContourUtility.get_area(contours[i])
        cnt_cxy = np.array(ContourUtility.get_cxy(contours[i]))
        # if contour centroid is within the node/circle and is small enough to
        # be a label (0.5)
        for node in nodes:
            node_area = node.area()
            if (np.linalg.norm(node.cxy() - cnt_cxy) < node.r) and \
                cnt_area < 0.5*node.area():
                print(cv2.boundingRect(contours[i]))
                if node.label is None:
                    node.label = contours[i]
                elif ContourUtility.get_area(node.label) < cnt_area:
                    # make the largest contour inside the node the label
                    node.label = contours[i]
    # NOTE(review): a node with no qualifying contour keeps label=None, which
    # would make drawContours/boundingRect below raise — confirm inputs.
    for node in nodes:
        bg = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        cv2.drawContours(bg, [node.label], 0, (0, 0, 255), thickness=2)
        (x, y, w, h) = cv2.boundingRect(node.label)
        cv2.rectangle(bg, (x, y), (x+w, y+h), (255, 0, 0), thickness=1)
        gr.show(bg, "selected label for each node with bounding rect drawn")
    # now try removing nodes on thresholded img
    gr.show(gr.erase_nodes(img_thresholded), 'thresholded img w nodes erased')
    '''
    # try with thresholding AND morph operators:
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    img_morphed = cv2.morphologyEx(img_thresholded, cv2.MORPH_OPEN, kernel)
    '''
if __name__ == '__main__':
    main()
3d142048b68c276ebf6f6ad50bb92fa1363f6673 | Python | jason12360/AID1803 | /pbase/day20/with.py | UTF-8 | 770 | 4.21875 | 4 | [] | no_license | #本示例示意with语句的使用方法
#打开文件读取文件数据
#以前
# try:
# f = open('abcd.txt')
# try:
# while True:
# s = f.readline()
# if not s:
# break
# int(input('请输入任意数字打印下一行:'))
# print(s[:-1])
# finally:
# print('文件已经关闭')
# f.close()
# except IOError:
# print('出现异常已经捕获')
# except ValueError:
# print('程序已转为正常状态')
# print('程序结束')
#with语句来实现
# Read the file using a with-statement: the handle is closed automatically
# even when an exception escapes the block.
try:
    with open('abcd.txt') as f:
        s_list = f.readlines()
        for s in s_list:
            # Blocks until the user types a number; non-numeric input raises
            # ValueError and ends the walk-through.
            int(input('请输入任意数字打印下一行:'))
            print(s)
except IOError:
    print('出现异常已经捕获')
except ValueError:
    print('程序已转为正常状态')
print('程序结束')
| true |
83e4f838756a01bb9045040a486686e9452c4c0a | Python | MartinBCN/mnist-gan | /mnist_gan/gan.py | UTF-8 | 10,330 | 3.15625 | 3 | [
"MIT"
] | permissive | import os
from pathlib import Path
from typing import Union, Tuple
import torch
from sklearn import metrics
from torch import optim, nn, Tensor
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from mnist_gan.discriminator import Discriminator
from mnist_gan.generator import Generator
plt.style.use('ggplot')
# Use the GPU only when CUDA is available AND the USE_GPU env var is set.
DEVICE = torch.device("cuda" if (torch.cuda.is_available() and os.environ.get('USE_GPU')) else "cpu")
class GAN:
    """
    GAN
    Define and train both the Generator and Discriminator networks simultaneously
    Parameters
    ----------
    latent_dimension: int, default = 100
        Size of the latent dimension
    learning_rate: float, default = 0.0002
        Learning rate, for simplicity we use the same LR for both optimizer. A more elaborate example than MNIST
        in all likelihood requires a more sophisticated choice here
    """
    def __init__(self, latent_dimension: int = 100, learning_rate: float = 0.0002):
        self.latent_dimension = latent_dimension
        self.generator = Generator(latent_dimension).to(DEVICE)
        self.discriminator = Discriminator().to(DEVICE)
        # This is fixed, we want to see how this improves
        self.visualisation_noise = self.create_noise(5)
        # We use Adam with a given learning rate in both cases
        self.optimiser_generator = optim.Adam(self.generator.parameters(), lr=learning_rate)
        self.optimiser_discriminator = optim.Adam(self.discriminator.parameters(), lr=learning_rate)
        # Binary Cross Entropy as Loss Function
        self.criterion = nn.BCELoss()
        # Track losses and accuracies (one entry per epoch)
        self.losses = {'discriminator': [], 'generator': []}
        self.accuracies = {'discriminator': [], 'generator': []}
    def create_noise(self, sample_size: int) -> Tensor:
        """
        Function to create the noise vector
        Parameters
        ----------
        sample_size: int
            Number of fake images we want to create
        Returns
        -------
        Tensor
            Noise vector from embedded dimension, shape [sample_size, embedded_dimension]
        """
        return torch.randn(sample_size, self.latent_dimension).to(DEVICE)
    @staticmethod
    def label_real(batch_size: int) -> Tensor:
        """
        Helper function to create real labels (ones)
        Parameters
        ----------
        batch_size: int
        Returns
        -------
        Tensor
            Fixed labels for the case of real images -> all ones
            Shape [batch_size, 1]
        """
        data = torch.ones(batch_size, 1)
        return data.to(DEVICE)
    @staticmethod
    def label_fake(batch_size: int) -> Tensor:
        """
        Helper function to create fake labels (zeros)
        Parameters
        ----------
        batch_size: int
        Returns
        -------
        Tensor
            Fixed labels for the case of fake images -> all zeros
            Shape [batch_size, 1]
        """
        data = torch.zeros(batch_size, 1)
        return data.to(DEVICE)
    def visualise(self, epoch: int) -> None:
        """
        Create two plots:
        1) Sample of what the generator creates from the fixed noise sample. In time this should look more and more
        like the familiar MNIST numbers
        2) Loss and accuracies vs. epoch. Note that this will not like like a regular training because ideally
        both the discriminator and the generator become better at what they do
        Parameters
        ----------
        epoch: int
        Returns
        -------
        None
        """
        fig_dir = os.environ.get('FIG_DIR', 'figures')
        # NOTE(review): savefig assumes fig_dir already exists — it does not
        # create directories.
        # Create sample images from fixed noise batch
        with torch.no_grad():
            self.generator.eval()
            images = self.generator(self.visualisation_noise)
        # Rescale images 0 - 1
        images = 0.5 * images + 0.5
        images = images.detach().cpu().numpy()
        cols = images.shape[0]
        fig, axs = plt.subplots(1, cols)
        for i in range(cols):
            axs[i].imshow(images[i].reshape(28, 28), cmap='gray')
            axs[i].axis('off')
        fig.savefig(f"{fig_dir}/gan_images_{epoch}.png")
        plt.close()
        # --- Loss/Accuracy ---
        fig, axs = plt.subplots(2, figsize=(12, 8))
        axs[0].plot(self.losses['discriminator'], label='Discriminator')
        axs[0].plot(self.losses['generator'], label='Generator')
        axs[0].legend(title='Loss')
        axs[1].plot(self.accuracies['discriminator'], label='Discriminator')
        axs[1].plot(self.accuracies['generator'], label='Generator')
        axs[1].legend(title='Accuracy')
        fig.savefig(f"{fig_dir}/losses.png")
        plt.close()
    def train_discriminator(self, data_real: Tensor, data_fake: Tensor) -> Tuple[float, float]:
        """
        Training the Discriminator. Here we feed both a batch of real and a batch of fake images with fixed targets
        (ones for real, zeros for fake, respectively). The loss is calculated as binary cross entropy in both cases.
        Parameters
        ----------
        data_real: Tensor
            Real images, shape [batch_size, 1, 28, 28]
        data_fake
            Fake images, shape [batch_size, 1, 28, 28]
        Returns
        -------
        loss: float
            Sum of losses for fake and real image detection
        accuracy: float
            Mean of accuracy for real/fake image detection
        """
        # Create one set of fake and one set of real labels
        batch_size = data_real.shape[0]
        real_label = self.label_real(batch_size)
        fake_label = self.label_fake(batch_size)
        # Training Step Discriminator
        self.optimiser_discriminator.zero_grad()
        # 1) Detect real images
        output_real = self.discriminator(data_real)
        loss_real = self.criterion(output_real, real_label)
        # NOTE(review): accuracy_score receives torch tensors; this works for
        # CPU tensors but would need .detach().cpu() first when DEVICE is a
        # CUDA device — confirm.
        accuracy_real = metrics.accuracy_score(real_label, output_real > 0.5)
        loss_real.backward()
        # 2) Detect fake images
        output_fake = self.discriminator(data_fake)
        loss_fake = self.criterion(output_fake, fake_label)
        accuracy_fake = metrics.accuracy_score(fake_label, output_fake > 0.5)
        loss_fake.backward()
        self.optimiser_discriminator.step()
        # Book-keeping
        loss = loss_real.detach().cpu() + loss_fake.detach().cpu()
        accuracy = (accuracy_real + accuracy_fake) / 2
        return loss, accuracy
    def train_generator(self, data_fake: Tensor) -> Tuple[float, float]:
        """
        Function to train the Generator part of the GAN
        Parameters
        ----------
        data_fake: Tensor
            Fake image data, shape [batch_size, 1, 28, 28]
        Returns
        -------
        loss: float
        accuracy: float
        """
        # We use FAKE data and REAL as label as we want the generator to produce fake images that appear real
        b_size = data_fake.shape[0]
        real_label = self.label_real(b_size)
        # Training step for Generator
        self.optimiser_generator.zero_grad()
        output = self.discriminator(data_fake)
        loss = self.criterion(output, real_label)
        loss.backward()
        self.optimiser_generator.step()
        # Book-keeping
        loss = loss.detach().cpu()
        accuracy = metrics.accuracy_score(real_label, output > 0.5)
        return loss, accuracy
    def train(self, train_loader: DataLoader, epochs: int = 10) -> None:
        """
        Training function for GAN
        Notice that contrary to regular NN training we do not define an early exit strategy here. Since both adversary
        networks are supposed to keep improving there is no obvious convergence in the classical sense.
        Parameters
        ----------
        train_loader: DataLoader
            PyTorch DataLoader with training data
        epochs: int
            Number of epochs
        Returns
        -------
        None
        """
        for epoch in range(epochs):
            # Visualisation at the end of the epoch is done in eval -> back to train()
            self.generator.train()
            self.discriminator.train()
            loss_g = 0.0
            loss_d = 0.0
            accuracy_generator = 0
            accuracy_discriminator = 0
            for data in train_loader:
                # Data batches
                image, _ = data
                image = image.to(DEVICE)
                b_size = len(image)
                # detach() so discriminator gradients do not flow into the generator
                data_fake = self.generator(self.create_noise(b_size)).detach()
                data_real = image
                # train the discriminator network
                loss_batch, acc_batch = self.train_discriminator(data_real, data_fake)
                loss_d += loss_batch
                accuracy_discriminator += acc_batch
                # train the generator network
                data_fake = self.generator(self.create_noise(b_size))
                loss_batch, acc_batch = self.train_generator(data_fake)
                loss_g += loss_batch
                accuracy_generator += acc_batch
            # --- Book-keeping ---
            epoch_loss_g = loss_g / len(train_loader) # total generator loss for the epoch
            epoch_loss_d = loss_d / len(train_loader) # total discriminator loss for the epoch
            self.losses['generator'].append(epoch_loss_g)
            self.losses['discriminator'].append(epoch_loss_d)
            self.accuracies['generator'].append(accuracy_generator / len(train_loader))
            self.accuracies['discriminator'].append(accuracy_discriminator / len(train_loader))
            print(f"Epoch {epoch + 1} of {epochs}")
            print(f"Generator loss: {epoch_loss_g:.8f}, Discriminator loss: {epoch_loss_d:.8f}")
            # Visualise the state after each epoch to track the progress
            self.visualise(epoch)
    def save_generator(self, fn: Union[str, Path]) -> None:
        """
        Save the Generator for future purposes
        Parameters
        ----------
        fn: Union[str, Path]
        Returns
        -------
        None
        """
        fn = Path(fn)
        # Create the target directory if it does not exist yet.
        fn.parents[0].mkdir(parents=True, exist_ok=True)
        torch.save(self.generator.state_dict(), fn)
| true |
da4932ea7583389ac11208369534cf08a3e58ef7 | Python | jacwye/FYP23-CME-Code-2021 | /FFTfunctionCombined.py | UTF-8 | 3,483 | 2.765625 | 3 | [] | no_license | # Separate python script to implement FFT calculations on sensor data
# Import python modules:
# Array manipulation utility
import numpy as np
# Plotting library
import matplotlib.pyplot as plt
# FFT function from SciPy
from scipy.fftpack import fft
# Used to serialise data
import pickle
# Import functions from other python scripts
# Import ingestor functions
from ingestor import update_sensor_latest_threshold_breach
# Import firebase storage functions
from firebaseStorageHelper import uploadFFTFiles
import time
# FFT function
def fftFunction(clientNumber, machine_id, sensor_id, timestamp):
    """Compute and publish the FFT of the latest received signal for a client.

    Reads "recievedSignal<clientNumber>.txt" (one sample per line), runs an
    FFT with client-specific sample counts / sampling frequencies, pickles
    the down-sampled spectrum for the web server, saves a PNG plus a
    bracket-free text spectrum (for Matlab), uploads the files and records
    the threshold-breach timestamp.

    Parameters
    ----------
    clientNumber : str
        Client identifier; selects sample count, sampling frequency and
        the down-sampling factor.
    machine_id, sensor_id
        Identifiers forwarded to the storage/DB helpers; sensor_id also
        names the generated files.
    timestamp
        Timestamp forwarded to the upload/DB helpers.
    """
    print("~~~~Performing FFT~~~~")
    signalArray = []
    # Bug fix: the file handle was opened without ever being closed; the
    # with-block guarantees it is released.
    with open("recievedSignal" + clientNumber + ".txt", "r") as signalFile:
        for line in signalFile:
            signalArray.append(line)
    # NOTE(review): lines are kept as raw strings; fft() relies on implicit
    # numeric conversion — confirm the files contain one plain number per line.
    # Per-client FFT parameters
    if clientNumber == "Test Rig":
        # number of samples in one file
        print(len(signalArray))
        length = len(signalArray)
        # sampling frequency
        Fs = 1592
        fLength = np.arange(0.0,length/2)
        xtFFT = fft(signalArray)
        print(len(xtFFT))
        down_sample = 1
    elif clientNumber != "PI_3":
        # number of samples in one file
        length = 97656*6
        # sampling frequency
        Fs = 102500
        fLength = np.arange(0.0,length/2)
        xtFFT = fft(signalArray)
        down_sample = 5
    else:
        # number of samples
        length = 839680
        # sampling frequency
        Fs = 20480
        fLength = np.arange(0.0,length/2)
        xtFFT = fft(signalArray)
        down_sample = 8
    # Single-sided amplitude spectrum: scale by sample count, keep the first
    # half and double it to account for the mirrored half.
    P2 = [abs(x/length) for x in xtFFT]
    endIndex = int(length/2+1)
    P1 = P2[1:endIndex]
    P3 = [x*2 for x in P1]
    f = (Fs*fLength/length)
    fig, ax = plt.subplots()
    print("~~~~FFT Completed~~~~")
    # Make sure x and y axis arrays are equal sizes
    if len(f) != len(P3):
        f = f[:-1]
    P3_array = np.array(P3)
    # downsample data for the web-server plot
    ds_f = f[0:f.size:down_sample]
    ds_P3 = P3[0:P3_array.size:down_sample]
    ax.plot(ds_f,ds_P3)
    plt.ylabel('Amplitude')
    plt.xlabel('Frequency(Hz)')
    plt.title('Spectrum in Python')
    # Combine amplitude and frequency arrays into single array & transpose into separate columns
    FFTdata = np.array([P3,f])
    FFTdata = FFTdata.T
    # Store figure // this chunk of code takes the most of time in FFT
    with open(clientNumber+'_X.pickle', 'wb') as handle:
        pickle.dump(ds_f, handle)
    with open(clientNumber+'_Y.pickle', 'wb') as handle:
        pickle.dump(ds_P3, handle)
    plt.savefig("generated\\" + sensor_id + ".png" , bbox_inches='tight')
    with open("generated\\" + sensor_id + ".txt", "w") as txt_file:
        for line in FFTdata:
            txt_file.write("%s\n" % line)
    # The generated file contains numpy's square brackets; strip them so the
    # data can be analysed in Matlab.
    with open("generated\\" + sensor_id + ".txt", 'r') as my_file:
        text = my_file.read()
    text = text.replace("[", "")
    text = text.replace("]", "")
    # Bug fix: the cleaned text was never written back to disk, so the
    # uploaded file still contained the brackets.
    with open("generated\\" + sensor_id + ".txt", 'w') as my_file:
        my_file.write(text)
    uploadFFTFiles(sensor_id, timestamp)
update_sensor_latest_threshold_breach(machine_id, sensor_id, timestamp) | true |
f53eb28c382626b1ea4e71d7cb14b96843eaca02 | Python | daniilstv/Flask-ML-API | /app3.py | UTF-8 | 2,548 | 2.546875 | 3 | [] | no_license | from flask import Flask, request, jsonify
import pickle
# from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
print('xgboost version ',xgb.__version__)
from process_data2 import process_input
# For logging
import logging
import traceback
from logging.handlers import RotatingFileHandler
from time import strftime, time
# Load the pre-trained xgboost model once at import time.
file_name = "model_.pkl"
# NOTE(review): the handle returned by open() is never closed — consider a with-block.
xgb_model_loaded = pickle.load(open(file_name, "rb"))
print(xgb_model_loaded)
app = Flask(__name__)
# Logging: rotate app.log at ~100 kB, keeping 5 backups.
handler = RotatingFileHandler('app.log', maxBytes=100000, backupCount=5)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
@app.route("/")
def index():
    """Landing endpoint: identifies the service."""
    return "Prediction API on xgboost model"
@app.route("/predict", methods=['GET','POST'])
def predict():
    """Score one claim: parse the JSON payload, run the xgboost model and
    return the predicted claim indicator as JSON.

    Requests and prediction timings are appended to the rotating log file.
    """
    json_input = request.get_json(force=True)
    print('json_input: ',json_input)
    # Request logging
    current_datatime = strftime('[%Y-%b-%d %H:%M:%S]')
    ip_address = request.headers.get("X-Forwarded-For", request.remote_addr)
    logger.info(f'{current_datatime} request from {ip_address}: {request.json}')
    start_prediction = time()
    # id = json_input['ID']
    user_data = process_input(json_input)
    print('user_data:', user_data)
    # prediction_Claims = xgb_model_loaded.predict(user_data)
    user_data_matrix = xgb.DMatrix(user_data)
    prediction_Claims = xgb_model_loaded.predict(user_data_matrix) # Compute the predicted value
    ClaimInd = int(prediction_Claims[0])
    print('prediction:', ClaimInd)
    #
    # id = json_input['id']
    #
    # result = {
    #     'ID': id,
    #     'ClaimInd': 'ClaimInd'
    # }
    # Response logging
    end_prediction = time()
    duration = round(end_prediction - start_prediction, 6)
    current_datatime = strftime('[%Y-%b-%d %H:%M:%S]')
    # NOTE(review): *duration* is measured in seconds although the log
    # message below says "msec".
    logger.info(f'{current_datatime} predicted for {duration} msec: {ClaimInd}\n')
    return jsonify(ClaimInd)
# @app.errorhandler(Exception)
# def exceptions(e):
# current_datatime = strftime('[%Y-%b-%d %H:%M:%S]')
# error_message = traceback.format_exc()
# logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\n%s',
# current_datatime,
# request.remote_addr,
# request.method,
# request.scheme,
# request.full_path,
# error_message)
# return jsonify({'error': 'Internal Server Error'}), 500
# Run the development server only when executed directly (not under a WSGI host).
if __name__ == '__main__':
app.run(debug=True) | true |
2c7653b6b56ffed8d7a68c45fbcd827a3b239d31 | Python | DiracSea/LC | /Math/factorialTrailingZeroes.py | UTF-8 | 651 | 3.625 | 4 | [] | no_license | class Solution:
def trailingZeroes(self, n: int) -> int:
# trailing zero so not include middle zero
# trailing zero must be 10 = 2*5
# 2 is enough so calculate num of 5
# we encounter a multiple of 5 every 5 numbers
# 10 = 2*5
# 25 = 5*5 -> 5 10 15 20 25 = 1+1+1+1+2 = 5's 5 1's 25
# count = n/5 + n/25 + n/125 + ... + 0
# f(n)=n/5+f(n/5)
return 0 if not n else n//5 + self.trailingZeroes(n//5)
    def trailingZeroes1(self, n: int) -> int:
        # Iterative form of f(n) = n//5 + f(n//5):
        # accumulate n//5 + n//25 + n//125 + ... until the power of 5 exceeds n.
        c, i = 0, 5
        while n >= i:
            c += n//i
            i *= 5
return c | true |
d04b51f27fb9d5111fb92da11a41c10bf4ffc31a | Python | SatyamJindal/Competitive-Programming | /CodeChef/Ada and crayons.py | UTF-8 | 587 | 3.40625 | 3 | [] | no_license | t=int(input())
# For each of the t test cases (t read on the fused first line above):
# count the maximal runs of 'U' and of 'D' in the string and print the
# smaller of the two counts.
for i in range(t):
    s=input().rstrip('\r')
    count1=0 # completed runs of 'U'
    count2=0 # completed runs of 'D'
    flag1=0 # currently inside a run of 'U'
    flag2=0 # currently inside a run of 'D'
    for j in range(len(s)):
        if(s[j]=='U'):
            flag1=1
            if(flag2==1):
                # a run of 'D' just ended
                count2+=1
                flag2=0
        elif(s[j]=='D'):
            flag2=1
            if(flag1==1):
                # a run of 'U' just ended
                count1+=1
                flag1=0
    # Close the trailing run, if any.
    if(flag1==1):
        count1+=1
    elif(flag2==1):
        count2+=1
    # Answer: the smaller run count.
    if(count1<=count2):
        print(count1)
    else:
        print(count2)
| true |
1d2b854851b657aa6ac55135fa2c0bf96b28742a | Python | isolde18/demo | /demo_list.py | UTF-8 | 1,408 | 4.25 | 4 | [] | no_license | #lists can be thought of as a series of boxes;
#each box having different value assigned;
#append is used to add a new item to the end of the list;
#len returns how many items are in a list;
#the valid indexes (as in numbers that can be used inside of the [])of a list
#range from 0 to len-1);the index function tells where the first location of
#an item is located in a list;
#to get help on all the functions, type help(list) in the interactive Python interpreter;
demolist=["life",42,"the universe",6,"and",9]
print ("demolist=",demolist)
demolist.append ("everything")
print ("after 'everything' was appended demolist is now:")
print (demolist)
print ("len (demolist)=",len(demolist))
print ("demolist.index(42)=",demolist.index (42))
print("demolist [1]=",demolist[1])
#Next loop through the list
for c in range (len(demolist)):
print ("demolist[",c,"]=",demolist[c])#creates a variable c,which starts at 0
del demolist[2]
print ("After the 'universe' was removed demolist is now:")
print(demolist)
if "life"in demolist:
print("'life' was found in demolist")
else:
print("'life'was not found in demolist")
if "amoeba" in demolist:
print ("'amoeba' was found in demolist")
if "amoeba" not in demolist:
print("'amoeba' was not found in demolist")
another_list=[42,7,0,123]
another_list.sort()
print ("The sorted another_list is", another_list)
| true |
efc7177f30d5168f1bf764fb4f1f65cef48c5165 | Python | mymentech/PHP-Decoder-Encoding-by-TeleAgent.IR---ResellerCenter.IR- | /decoder.py | UTF-8 | 1,744 | 2.578125 | 3 | [] | no_license | '''
File name: PHP Decoder "Encoding by TeleAgent.IR - ResellerCenter.IR".py
Author: Ehsan Nezami
Date created: 19/11/2018
Web: http://nezami.me/
Python Version: 2.7
'''
import os
import re
import base64
import zlib
def listFiles(path, extension):
return [f for f in os.listdir(path) if f.endswith(extension)]
path_name = raw_input("What is your path of php files? \n Example : C:\\files\\ \n ")
for files in listFiles(path_name, '.php'):
print files
start = '$_QXXCZD("'
end = '"));'
f=open(files,'r')
for input in f.readlines():
data= re.findall(re.escape(start)+"(.*)"+re.escape(end),input)
for x in data:
x=base64.b64decode(x)
start1 = '.$_ZUI("'
end1 = '"));'
data1= re.findall(re.escape(start1)+"(.*)"+re.escape(end1),x)
for x1 in data1:
x1=base64.b64decode(x1)
start2 = '$_IRRGRHMF("'
end2 = '"));'
data2= re.findall(re.escape(start2)+"(.*)"+re.escape(end2),x1)
for x2 in data2:
x2=base64.b64decode(x2)
start3 = '$_EFTYPYA("'
end3 = '"));'
data3= re.findall(re.escape(start3)+"(.*)"+re.escape(end3),x2)
for x3 in data3:
x3=base64.b64decode(x3)
start4 = '$_AOKDOJCRH("'
end4 = '"));'
data4= re.findall(re.escape(start4)+"(.*)"+re.escape(end4),x3)
for x4 in data4:
x4=base64.b64decode(x4)
start5 = '$_NZHLDCOUMASYWHUKYETFVEDDJELK("'
end5 = '")));'
data5= re.findall(re.escape(start5)+"(.*)"+re.escape(end5),x4)
for x5 in data5:
compressed = base64.b64decode(x5)
decoded=zlib.decompress(compressed, -15)
print decoded
output=file('dec-'+files,'a')
output.write(decoded) | true |
a4f92b33789bbf1fcaf58ce4d0194f61a14a1166 | Python | ghesio/AortaSegmentator | /data_preprocessing/data_locator.py | UTF-8 | 4,890 | 2.78125 | 3 | [
"MIT"
] | permissive | """
Generates a JSON file containing info from sliced DICOMs
JSON format:
"patient_id": {
"roi_dir": "Root directory containing the ROI slices",
"axial": {
"min_slice": min slices index containing info (not all background),
"max_slice": max slices index containing info (not all background),
},
"coronal": { as above },
"sagittal": { as above },
"coordinates" : { contains minimum and maximum not blank informative pixel coordinate }
"scan_dir": "Root directory containing the scan slices",
"partition": if the patient belongs to train, validation or test set
}
"""
import json
# LOGGING
import logging
from utils import custom_logger
import os
import re
import imageio
import numpy as np
from utils.misc import remove_everything_after_last
# define validation and test size
validation_size = 10
test_size = 10
# global
separator = '/' # \\ windows, / unix
data_out_dir = 'data/out'
info_json = 'data/info.json'
def read_image_information_in_directory(directory):
__files = [x for x in os.listdir(directory) if '.png' in x]
__files.sort()
# used to get the first and last informative slice
bound = [None, None]
# used to get the non background pixel coordinate in both direction, min and max
min_info = [9999, 9999]
max_info = [-1, -1]
for i in range(len(__files) - 1):
current_image_path = directory + '/' + __files[i]
next_image_path = directory + '/' + __files[i + 1]
# read the image into a numpy array
current_image = np.array(imageio.imread(uri=current_image_path), dtype='uint8')
next_image = np.array(imageio.imread(uri=next_image_path), dtype='uint8')
background_color = current_image[0, 0]
# a slice is informative if it's not only background
if not np.all(current_image == background_color):
if bound[0] is None:
bound[0] = i + 1
if not np.all(current_image == background_color) and np.all(next_image == background_color):
bound[1] = i + 1
rows, cols = np.where(current_image != background_color)
if min(rows) < min_info[0]:
min_info[0] = min(rows)
if min(cols) < min_info[1]:
min_info[1] = min(cols)
if max(rows) > max_info[0]:
max_info[0] = max(rows)
if min(cols) > max_info[1]:
max_info[1] = max(cols)
if bound[1] is None:
bound[1] = len(__files) + 1
return bound, min_info, max_info
if __name__ == "__main__":
# read all directory in '..data/out'
dir_names = []
for root, dirs, files in os.walk(data_out_dir):
if not dirs:
dir_names += [os.path.abspath(root)]
dir_names.sort()
patient_map = {}
for _dir in dir_names:
patient_id = re.sub(r'^.*?data' + re.escape(separator) + 'out' + re.escape(separator), '', _dir).split(separator, 1)[0]
if patient_id not in patient_map:
patient_map[patient_id] = {}
patient_map[patient_id]['coordinates'] = {}
# ignore if cut directory
if 'roi' in _dir:
# get information about informative images in 'roi' dir
patient_map[patient_id]['roi_dir'] = remove_everything_after_last(_dir, separator)
logging.info('Opening directory ' + _dir)
info = read_image_information_in_directory(_dir)
if 'axial' in _dir:
# Y-X plane
patient_map[patient_id]['axial'] = {}
patient_map[patient_id]['axial']['min_slice'] = int(info[0][0])
patient_map[patient_id]['axial']['max_slice'] = int(info[0][1])
patient_map[patient_id]['coordinates']['min_y'] = int(info[1][0])
patient_map[patient_id]['coordinates']['min_x'] = int(info[1][1])
patient_map[patient_id]['coordinates']['max_y'] = int(info[2][0])
patient_map[patient_id]['coordinates']['max_x'] = int(info[2][1])
elif 'coronal' in _dir:
# Z-X plane
patient_map[patient_id]['coronal'] = {}
patient_map[patient_id]['coronal']['min_slice'] = int(info[0][0])
patient_map[patient_id]['coronal']['max_slice'] = int(info[0][1])
patient_map[patient_id]['coordinates']['min_z'] = int(info[1][0])
patient_map[patient_id]['coordinates']['max_z'] = int(info[2][0])
elif 'sagittal' in _dir:
patient_map[patient_id]['sagittal'] = {}
patient_map[patient_id]['sagittal']['min_slice'] = int(info[0][0])
patient_map[patient_id]['sagittal']['max_slice'] = int(info[0][1])
else:
patient_map[patient_id]['scan_dir'] = remove_everything_after_last(_dir, separator)
# define to which set of data the patients belongs to (train, validation, test)
total_patients = len(patient_map.keys())
counter = 0
for patient in patient_map:
if counter < total_patients - test_size - validation_size:
patient_map[patient]['partition'] = 'train'
else:
if total_patients - test_size - validation_size <= counter < total_patients - test_size:
patient_map[patient]['partition'] = 'validation'
else:
patient_map[patient]['partition'] = 'test'
counter = counter + 1
logging.info("Writing JSON info file")
with open(info_json, 'w') as outfile:
json.dump(patient_map, outfile, indent=4)
exit(0)
| true |
d8e93419c23329100038e9807ba1f63464769864 | Python | xuyang06/noveldeal | /dealdeliver/dealreceiver/receiver/receiver.py | UTF-8 | 1,786 | 2.515625 | 3 | [] | no_license | '''
Created on Sep 7, 2015
@author: xuyan_000
'''
import pika
import sys
import cPickle
import redis
#kafka_server=['localhost:9092']
#topic_list = ['prada']
def topic_fill(top_str):
new_str = ''
for c in top_str:
if (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z'):
new_str += c.lower()
return new_str
class Receiver(object):
def __init__(self, receiver_id, binding_keys):
self.receiver_id = receiver_id
self._redis = redis.StrictRedis(host='localhost', port=6379, db=0)
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange='topic_news', type='topic')
result = self.channel.queue_declare(exclusive=True)
self.queue_name = result.method.queue
for binding_key in binding_keys:
binding_key = '#.' + topic_fill(binding_key) + '.#'
self.channel.queue_bind(exchange='topic_news', queue=self.queue_name, routing_key=binding_key)
def callback(self, ch, method, properties, body):
print " [x] %r:%r" % (method.routing_key, body,)
self._redis.sadd(self.receiver_id, body)
#self.msgs.append(cPickle.loads(body))
#print " [x] %r:%r" % (method.routing_key, body,)
def process(self):
self.channel.basic_consume(self.callback, queue=self.queue_name, no_ack=True)
try:
self.channel.start_consuming()
except KeyboardInterrupt:
self.channel.stop_consuming()
def stop(self):
self.channel.stop_consuming()
self.connection.close()
if __name__ == '__main__':
receiver1 = Receiver('tinglu', ['prada'])
receiver1.process()
| true |
8d7fe4ccf4cc25161bfad28b25b27051cc31f96e | Python | williamccondori/unsa_sistema_academico | /control_horas_lectivas/servicios/usuario_service.py | UTF-8 | 684 | 2.65625 | 3 | [] | no_license | from control_horas_lectivas.models import UserSystem
from control_horas_lectivas.dtos.usuario_dto import UsuarioDto
class UsuarioService():
def login(self, username, password):
usuario = UserSystem.objects.filter(username=username)
if len(usuario) is 0:
raise ValueError('Usuario incorrecto!')
usuario = usuario[0]
"""
print(usuario.user.password)
if not usuario.user.password == password:
raise ValueError('Contraseña incorrecto!')
"""
usuario_dto = UsuarioDto(
usuario.username,
usuario.departament.id
)
return usuario_dto
| true |
a3eb93b1869d9eccf3320eb44119b6ab1d4a69f2 | Python | NostraJames/Python_Expirements | /pri.py | UTF-8 | 206 | 2.984375 | 3 | [] | no_license | def prime(s=int,e=int):
for p in range(s, e+1):
if p > 1:
for n in range(2, p):
if (p % n) == 0:
break
else:
print(p)
| true |
d82fd426fecb0e553380ea016354acab34d2a010 | Python | GabrielGM01/Exercicios_Logica_Python | /Sort_Simples.py | UTF-8 | 399 | 3.875 | 4 | [] | no_license | """Leia 3 valores inteiros e ordene-os em ordem crescente.
No final, mostre os valores em ordem crescente, uma linha em branco e em seguida, os valores na sequência como foram lidos."""
a,b,c = input().split()
a = int(a)
b = int(b)
c = int(c)
n1 = [a,b,c]
n2 = [a,b,c]
n1.sort(key=int)
print("{}\n{}\n{}\n".format(n1[0],n1[1],n1[2]))
print("{}\n{}\n{}".format(n2[0],n2[1],n2[2]))
| true |
23271a13c3d41dab0bacea7bdab8567324a0e27d | Python | Sana-mohd/functionsQuestions | /length.py | UTF-8 | 277 | 3.203125 | 3 | [] | no_license | def length_fun(my_list):
count=0
for x in my_list:
count=count+1
return count
print(length_fun([2,3,6,2,4,9]))
def length_fun(my_list):
count=0
i=-1
while True:
count+=1
i=i-1
return count
print(length_fun([6,7,13,8])) | true |
6efc8787291c1db7a38caccda700edfa31b98bf2 | Python | vinaykumar-yadav/MyWork-vinay | /Class.py | UTF-8 | 222 | 3.671875 | 4 | [] | no_license | class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myFunc(self):
print("Hello my name is :" + self.name)
objPerson = Person('vinay', 28)
objPerson.myFunc()
| true |
cafd7e1514cc22977480c588317278a000e0d78c | Python | justengel/mp_event_loop | /mp_event_loop/__init__.py | UTF-8 | 9,915 | 2.515625 | 3 | [
"MIT"
] | permissive | from .mp_functions import print_exception, is_parent_process_alive, mark_task_done, LoopQueueSize, \
stop_event_loop, run_loop, process_event, run_event_loop, run_consumer_loop, QUEUE_TIMEOUT
from .events import Event, CacheEvent, CacheObjectEvent, SaveVarEvent, VarEvent
from .mp_proxy import ProxyEvent, proxy_output_handler, Proxy
from .event_loop import EventLoop
try:
from .async_event_loop import AsyncManager, AsyncEvent, AsyncEventLoop
except (ImportError, SyntaxError):
pass
from .pool import Pool
from multiprocessing import freeze_support
import importlib
def use(lib):
"""Change the multiprocessing library that is being used.
`multiprocess` and `multiprocessing_on_dill` are some alternatives, because normal pickling can be annoying.
Note:
This code does the same as
..code-block :: python
>>> import mp_event_loop
>>> # import multiprocess as mp
>>> import multiprocessing as mp
>>>
>>> mp_event_loop.EventLoop.alive_event_class = mp.Event
>>> mp_event_loop.EventLoop.queue_class = mp.JoinableQueue
>>> mp_event_loop.EventLoop.event_loop_class = mp.Process
>>>
>>> loop = mp_event_loop.EventLoop()
Args:
lib (str/module): String module name to load or the module you want to use. The module should have an Event,
JoinableQueue or Queue, and Process or Thread attributes to use.
"""
if isinstance(lib, str):
lib = importlib.import_module(lib)
try:
EventLoop.alive_event_class = lib.Event
except AttributeError as err:
print_exception(err, "Not able to change the alive_event_class (Event) using the library " + repr(lib))
try:
EventLoop.queue_class = lib.JoinableQueue
except AttributeError:
try:
EventLoop.queue_class = lib.Queue
except AttributeError as err:
print_exception(err, "Not able to change the queue_class (Queue) using the library " + repr(lib))
try:
EventLoop.event_loop_class = lib.Process
except AttributeError as err:
try:
EventLoop.event_loop_class = lib.Thread
except AttributeError:
print_exception(err, "Not able to change the event_loop_class (Process) using the library " + repr(lib))
# ========== Global Event Loop Functions ==========
DefaultEventLoop = EventLoop
GLOBAL_NAME = 'Global Event Loop'
__loop__ = None
def get_event_loop(output_handlers=None, event_queue=None, consumer_queue=None, initialize_process=None, name=None,
has_results=True):
"""Return the global event loop. If it does not exist create it. It will still need to be started or used as a
context manager using the `with` statement.
Args:
output_handlers (list/tuple/callable)[None]: Function or list of funcs that process executed events with results.
event_queue (Queue)[None]: Custom event queue for the event loop.
consumer_queue (Queue)[None]: Custom consumer queue for the consumer process.
initialize_process (function)[None]: Function to create and show widgets returning a dict of widgets and
variable names to save for use.
name (str)['main']: Event loop name. This name is passed to the event process and consumer process.
has_results (bool)[True]: Should this event loop create a consumer process to run executed events
through process_output.
"""
if name is None:
name = GLOBAL_NAME
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(name=name, event_queue=event_queue, consumer_queue=consumer_queue,
output_handlers=output_handlers, initialize_process=initialize_process,
has_results=has_results)
return __loop__
def add_output_handler(handler):
"""Add output handlers into the main global loop.
The handler must be a callable that returns a boolean. If the handler returns True no other handlers after will
be called.
Args:
handler (function/method): Returns True or False to stop propagating the event. Must take one event arg.
"""
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(GLOBAL_NAME)
return __loop__.add_output_handler(handler)
def insert_output_handler(index, handler):
"""Insert output handlers into the main global loop.
The handler must be a callable that returns a boolean. If the handler returns True no other handlers after will
be called.
Args:
index (int): Index position to insert the handler at.
handler (function/method): Returns True or False to stop propagating the event. Must take one event arg.
"""
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(GLOBAL_NAME)
return __loop__.insert_output_handler(index, handler)
def add_event(target, *args, has_output=None, event_key=None, cache=False, re_register=False, start_loop=True,**kwargs):
"""Add an event to the main global loop to be run in a separate process.
Args:
target (function/method/callable/Event): Event or callable to run in a separate process.
*args (tuple): Arguments to pass into the target function.
has_output (bool) [False]: If True save the executed event and put it on the consumer/output queue.
event_key (str)[None]: Key to identify the event or output result.
cache (bool) [False]: If the target object should be cached.
re_register (bool)[False]: Forcibly register this object in the other process.
start_loop (bool)[True]: If True start running the event loop.
**kwargs (dict): Keyword arguments to pass into the target function.
args (tuple)[None]: Keyword args argument.
kwargs (dict)[None]: Keyword kwargs argument.
"""
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(GLOBAL_NAME)
__loop__.add_event(target, *args, has_output=has_output, event_key=event_key, cache=cache,
re_register=re_register, **kwargs)
if start_loop and not __loop__.is_running():
__loop__.start()
def add_cache_event(target, *args, has_output=None, event_key=None, re_register=False, start_loop=True, **kwargs):
"""Add an event that uses cached objects to the main global loop.
Args:
target (function/method/callable/Event): Event or callable to run in a separate process.
*args (tuple): Arguments to pass into the target function.
has_output (bool) [False]: If True save the executed event and put it on the consumer/output queue.
event_key (str)[None]: Key to identify the event or output result.
re_register (bool)[False]: Forcibly register this object in the other process.
start_loop (bool)[True]: If True start running the event loop.
**kwargs (dict): Keyword arguments to pass into the target function.
args (tuple)[None]: Keyword args argument.
kwargs (dict)[None]: Keyword kwargs argument.
"""
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(GLOBAL_NAME)
__loop__.add_cache_event(target, *args, has_output=has_output, event_key=event_key,
re_register=re_register, **kwargs)
if start_loop and not __loop__.is_running():
__loop__.start()
def cache_object(*args, **kwargs):
"""Save an object in the separate processes, so the object can persist.
Args:
obj (object): Object to save in the separate process. This object will keep it's values between cache events
has_output (bool)[False]: If True the cache object will be a result passed into the output_handlers.
event_key (str)[None]: Key to identify the event or output result.
re_register (bool)[False]: Forcibly register this object in the other process.
"""
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(GLOBAL_NAME)
__loop__.cache_object(*args, **kwargs)
def is_running():
"""Return if the main global loop is running."""
global __loop__
return __loop__ is not None and __loop__.is_running()
def start():
"""Start running the main global loop."""
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(GLOBAL_NAME)
if not __loop__.is_running():
__loop__.start()
def run(events=None, output_handlers=None):
"""Run events on the global event loop and let the program continue.
Args:
events (list/tuple/Event): List of events to add to the event queue.
output_handlers (list/tuple/callable): Function or list of functions to add as an output handler.
"""
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(GLOBAL_NAME)
return __loop__.run(events=events, output_handlers=output_handlers)
def run_until_complete(events=None, output_handlers=None):
"""Run the global event loop until all of the events are complete.
Args:
events (list/tuple/Event): List of events to add to the event queue.
output_handlers (list/tuple/callable): Function or list of functions to add as an output handler.
"""
global __loop__
if __loop__ is None:
__loop__ = DefaultEventLoop(GLOBAL_NAME)
return __loop__.run_until_complete(events=events, output_handlers=output_handlers)
def wait():
"""Wait for the main global loop."""
global __loop__
if __loop__ is not None:
__loop__.wait()
def stop():
"""Stop the main global loop from running."""
global __loop__
if __loop__ is not None:
__loop__.stop()
def close():
"""Close the main global loop."""
global __loop__
if __loop__ is not None:
__loop__.close()
| true |
ecbfff56846546eaff8f394769d05ba84709d7ce | Python | ThorsteinnJonsson/SagaRNN | /main.py | UTF-8 | 1,837 | 2.703125 | 3 | [] | no_license | import argparse
from train import *
from generate import *
def get_args():
argparser = argparse.ArgumentParser()
argparser.add_argument('mode', type=str, help="Specify as \"train\" or \"generate\"")
argparser.add_argument('--pretrained_model', type=str, default="")
argparser.add_argument('--dataset_filename', type=str, default="data/icelandic_sagas.txt")
# Training-specific args
argparser.add_argument('--num_epochs', type=int, default=250)
argparser.add_argument('--batch_size', type=int, default=100)
argparser.add_argument('--chunk_len', type=int, default=200)
argparser.add_argument('--learning_rate', type=float, default=0.01)
# Generate-specific args
argparser.add_argument('--prediction_len', type=int, default=1000)
argparser.add_argument('--seed', type=str, default="A")
return argparser.parse_args()
def do_train(args):
print("Training...")
print("================================================")
trainer = Trainer()
# trainer = Trainer("saga_model.pt")
trainer.train(args.dataset_filename,
args.num_epochs,
args.batch_size,
args.chunk_len,
args.learning_rate)
print("================================================")
def do_generate(args):
print("Generating...")
print("================================================")
generate_sample(args.seed,
args.pretrained_model,
args.prediction_len,
args.dataset_filename)
print("================================================")
if __name__ == "__main__":
args = get_args()
if args.mode == "train":
do_train(args)
elif args.mode == "generate":
do_generate(args)
else:
print ("Mode \"" + args.mode + "\" not recognized. Please specify it as either \"train\" or \"generate\"") | true |
a244acf690876abf85f9114600dfacc7077dde85 | Python | coder9a/Python_Data_Structure | /Operations on Queue.py | UTF-8 | 965 | 4.71875 | 5 | [] | no_license | from Create_Queue import Queue
# Create empty Queue object
q = Queue()
# display menu
choice = 0
while choice<5:
print('Queue Operations')
print('1. Add Elements')
print('2. Delete Elements')
print('3. Search an Element')
print('4. Exit')
choice = int(input('Enter your choice : '))
if choice == 1:
element = float(input('Enter element : '))
q.add(element)
elif choice == 2:
element = q.delete()
if element == -1:
print('Queue is empty')
else:
print('Deleted element = ', element)
elif choice == 3:
element = input('Enter element : ')
pos = q.search(element)
if pos == -1:
print('Queue is empty ')
elif pos == -2:
print('Element not found in the Queue')
else:
print('Element found at position ', pos)
else:
break
print('Queue : ', q.display()) | true |
b0f270975eafa2c80727e7ce52ad34fb307d6d6d | Python | 540928898/LeetCodeMe | /Python/LeetCode/LeetCode61rotateList.py | UTF-8 | 506 | 2.734375 | 3 | [] | no_license |
from LeetCode.TreeProblem.TreeUtils import *
class Solution:
def rotateRight(self, head, k) :
if not head:
return head
N = 1
end = head
newHead = head
while(end.next):
N += 1
end = end.next
end.next = newHead
count = 1
xunhuan = N- k%N
while count <= xunhuan:
end = end.next
newHead = newHead.next
count += 1
end.next = None
return newHead
| true |
bea83c4aef079a51f645c60dbc93a5eded5dde37 | Python | Holovachko/Lab_7 | /ex 4.py | UTF-8 | 388 | 3.625 | 4 | [] | no_license | matrix = []
i = int(input('Кількість рядків = '))
j = int(input('кількість стовпців = '))
for m in range(i):
b = []
for m in range(j):
b.append(float(input('Введіть елементи матриці ')))
matrix.append(b)
for n in matrix:
if matrix.index(n)%2 !=0:
n.sort()
print(n)
else:
continue
| true |
460041b43b18b0be26ebb47f7bead64b265305d1 | Python | Revathy979/Vespa | /UllenAyya/src/cli/service/user_log_service.py | UTF-8 | 2,251 | 2.875 | 3 | [] | no_license | import time
import json
import os.path
from models.userlog import UserLog
from service.user_service import UserService
from datetime import datetime
class UserLogService(object):
"""
Manges User Log data
"""
user_logs = []
userservice=UserService()
def __init__(self):
if os.path.exists('data.json'):
with open("data.json") as out:
self.user_logs = json.load(out)
def punch_in(self, user_name):
user_log = UserLog(user_name,types ="in")
self.user_logs.append(user_log.as_serializable())
self.write_file()
return user_log.time_stamp
def punch_out(self, user_name):
user_log = UserLog(user_name,types ="out")
self.user_logs.append(user_log.as_serializable())
self.write_file()
return user_log.time_stamp
def get_by_user_name(self, user_name):
user_logs = list(filter(lambda x: x['user_name'] == user_name, self.user_logs))
date_time_log = list(map(self.map_date_time, user_logs))
logged_dates = list(map(lambda x: x['date'], date_time_log))
logged_unique_dates = list(dict.fromkeys(logged_dates))
user_timesheet = []
for date in logged_unique_dates:
in_times = [d["time"] for d in date_time_log if d['date'] == date and d["types"] == "in"]
out_times = [d["time"] for d in date_time_log if d['date'] == date and d["types"] == "out"]
out_time = "-"
if any(out_times):
out_time = max(out_times)
timesheet = {
"date" : date,
"in_time" : min(in_times),
"out_time" : out_time
}
user_timesheet.append(timesheet)
return user_timesheet
def write_file(self):
with open("data.json",'w') as out:
json.dump( self.user_logs, out)
def map_date_time(self, timestamp_log):
time_stamp = datetime.strptime(timestamp_log["time_stamp"], "%d/%m/%Y %H:%M:%S")
date_time_log={
"date":time_stamp.strftime("%d/%m/%Y"),
"time":time_stamp.strftime("%H:%M:%S"),
"types" :timestamp_log["types"]
}
return date_time_log
| true |
e5649292fb538ad3e1f162c5b519de66d7576722 | Python | hexalellogram/ict-but-python | /ICT-03/Easter.py | UTF-8 | 991 | 3.1875 | 3 | [] | no_license | class Easter:
def solveEaster(self):
a = self % 19
print("a = " + str(a))
b = self // 100
print("b = " + str(b))
c = self % 100
print("c = " + str(c))
d = b // 4
print("d = " + str(d))
e = b % 4
print("e = " + str(e))
f = (b + 8) // 25
print("f = " + str(f))
g = (b - f + 1) // 3
print("g = " + str(g))
h = (19 * a + b - d - g + 15) % 30
print("h = " + str(h))
i = c // 4
print("i = " + str(i))
k = c % 4
print("k = " + str(k))
r = (32 + 2 * e + 2 * i - h - k) % 7
print("r = " + str(r))
m = (a + 11 * h + 22 * r) // 451
print("m = " + str(m))
n = (h + r - 7 * m + 114) // 31
print("n = " + str(n))
p = (h + r - 7 * m + 114) % 31
print("p = " + str(p))
print("Easter in " + str(self) + " falls on " + (str(n)) + "/" + (str(p + 1)))
solveEaster(2020)
| true |
88d3ddaddbe583702406451c15ffc63c588895e3 | Python | ZhengWang1988/Git-Repository | /我的学习笔记/python进阶视频学习笔记.py | UTF-8 | 26,784 | 3.484375 | 3 | [] | no_license | # 如何在列表,字典,集合中根据条件筛选数据
from random import randint
data = [randint(-10,10) for _ in range(10)]
filter(lambda x:x >= 0, data)
[for x in data if x >= 0]
timeit filter(lambda x:x >= 0, data) #测试该步骤运行耗时
d = {x:randint(60,100) for x in range(1,21)} #学号:成绩
{k:v for k,v in d.iteritems() if v >= 90}
s = set(data)
{for x in s if x % 3 == 0}
# 如何为元组中的每个元素命名,提高程序可读性
students = ('Jim',22,'male','Jim@mail.com')
NAME,AGE,SEX,EMAIL = range(4)
print(students[NAME])
print(students[AGE])
from collections import namedtuple
namedtuple('Students',['name','age','sex','email'])
s1 = Students('Jim',22,'male','Jim@mail.com')
s2 = Students(name='Jim',age=22,sex='male',email='Jim@mail.com')
print(s1.name)
# 如何统计序列中元素出现的频度
from random import randint
data = [randint(0,20) for _ in range(30)]
c = dict.fromkeys(data,0)
for x in data:
c[x] += 1
from collections import Counter
c2 = Counter(data)
c2.most_common(3) #出现频度最高的前三个
# 筛选文章中单词出现频度最高的前十
import re
txt = open(filename).read()
c3 = Counter(re.split('\W+', txt))
c3.most_common(10)
# 如何根据字典中值的大小,对字典中的项排序
from random import randint
dicts = {k:randint(60,100) for k in 'abcdefg'}
# zip(dicts.values(),dicts.keys())
sorted(zip(dicts.itervalues(),dicts.iterkeys()))
sorted(dicts.items(),key=lambda x:x[1]) #把每一个元组传入sorted函数,并设置key为元组第二个值
# 如何快速找到多个字典中的公共键(key)
from random import randint,sample
sample('abcdefg', randint(3,6)) #随机从中取3--6个字符
s1 = {x:randint(1,4) for x in sample('abcdefg',randint(3,6))}
s2 = {x:randint(1,4) for x in sample('abcdefg',randint(3,6))}
s3 = {x:randint(1,4) for x in sample('abcdefg',randint(3,6))}
res = []
for k in s1:
if k in s2 and k in s3:
res.append(k)
s1.viewkeys & s2.viewkeys & s3.viewkeys #三个字典的公共键的集合
map(dict.viewkeys,[s1,s2,s3]) #由字典的键组成的列表
reduce(lambda a,b:a & b,map(dict.viewkeys,[s1,s2,s3]))
# 如何让字典保持有序?
from collections import OrderedDict
d = OrderedDict()
d['Jim'] = (1,24)
d['Lily'] = (2,27)
d['Leo'] = (3,31)
for k in d:
print(k)
=================================================
from time import time
from random import randint
from collections import OrderedDict
d = OrderedDict()
players = list('ABCDEFGH')
start = time()
for i in range(8):
input()
p = players.pop(randint(0,7 - i))
end = time()
print(i+1,p,end-start)
d[p] = (i+1,end-start)
print()
print('-'*30)
for k in d:
print(k,d[k])
=================================================
# 如何实现用户的历史记录功能(最多N条)
from collections import deque
N = randint(0,100)
history = deque([], 5) #长度为5的列表,遵循先进先出的原则
def guess(k):
if k == N:
print('right')
return True
if k < N:
print('less than N')
else:
print('greater than N')
return False
while True:
line = input('please input a number:')
if line.isdigit():
k = int(line)
history.append(k)
if guess(k):
break
elif line == 'history' or line == 'h?':
print(list(history))
import pickle
# pickle可以将队列(python对象)存入文件,再次运行程序时将其导入
pickle.dump(obj,open('history_file',w))
pickle.load(open('history_file'))
# 如何实现可迭代对象和迭代器对象
# 列表,字符串均为可迭代对象,可实现__iter__方法
lists = [1,2,3,4,5,6] #可迭代对象
iter(lists) #迭代器对象
=================================================
import requests
def getWeather(city):
r = requests.get('http://wthrcdn.etouch.cn/weather_mini?city=' + city)
data = r.json()['data']['forecast'][0]
return '%s : %s, %s ' % (city,data['low'],data['high'])
print(getWeather('北京'))
print(getWeather('上海'))
from collections import Iterable,Iterator
# 实现迭代器对象
class WeatherIterrator(Iterator):
def __init__(self,cities):
self.cities = cities
self.index = 0
def getWeather(self,city):
r = requests.get('http://wthrcdn.etouch.cn/weather_mini?city=' + city)
data = r.json()['data']['forecast'][0]
return '%s : %s, %s ' % (city,data['low'],data['high'])
def next(self):
if self.index == len(self.cities):
raise StopIteration
city = self.cities[self.index]:
self.index += 1
return self.getWeather(city)
# 实现可迭代对象
class WeatherIterable(Iterable):
def __init__(self,cities):
self.cities = cities
def __iter__(self):
return WeatherIterrator(self.cities)
=================================================
# 如何使用生成器函数实现可迭代对象
class PrimeNumber:
def __init__(self,start,end):
self.start = start
self.end = end
def isPrime(self,k): #判断传入的参数是否是素数
if k < 2:
return False
for i in range(2,k):
if k % i == 0:
return False
return True
def __iter__(self):
for k in range(self.start,self.end + 1):
if self.isPrime(k):
yield k #将这个范围内的所有值进行遍历,判断是否是素数,返回所有素数
# 如何进行反向迭代以及如何实现反向迭代
l = [1,2,3,4,5,6]
l.reverse() #会改变原列表
l[::-1] #会生成新列表,浪费内存
reversed(l) #生成一个反向的迭代器
class FloatRange:
def __init__(self,start,end,step=0.1):
self.start = start
self.end = end
self.step = step
def __iter__(self):
t = self.start
while t <= self.end:
yield t
t += self.step
def __reversed__(self):
t = self.end
while t >= self.start:
yield t
t -= self.step
# 如何对迭代器做切片操作
from itertools import islice
f = open(filename,'r',encoding='utf-8')
s = islice(f, 10,30) #生成一个迭代器(10行到30行)
islice(f,500) #从开始到500行
islice(f,10,None) #从第10行到最后
# 如何在一个for语句中迭代多个可迭代对象
from random import randint
yuwen = [randint(60,100) for _ in range(40)] #生成语文成绩列表
shuxue = [randint(60,100) for _ in range(40)]
english = [randint(60,100) for _ in range(40)]
# 内置函数zip,能将多个可迭代对象合并,每次迭代返回一个元组
zip([1,2,3,4],['a','b','c','d']) #[(1,'a'),(2'b),(3,'c'),(4,'d')]
for y,s,e in zip(yuwen,shuxue,english):
total.append(y + s + e)
# 标准库中的itertools.chain,能将多个可迭代对象连接
from itertools import chain
c1 = [randint(60,100) for _ in range(38)]
c2 = [randint(60,100) for _ in range(45)]
c3 = [randint(60,100) for _ in range(40)]
c4 = [randint(60,100) for _ in range(42)]
count = 0
for s in chain(c1,c2,c3,c4):
if s > 90:
count += 1
# 如何拆分含有多种分隔符的字符串
# res = s.split(',')
# map(lambda x:x.split('.'),res)
def mySplit(s, ds):
    """Split string *s* on every delimiter character in *ds*.

    Returns the list of non-empty fragments, e.g.
    mySplit('a,b.c', ',.') -> ['a', 'b', 'c'].
    """
    res = [s]
    for d in ds:
        t = []
        for fragment in res:
            # bug fix: the original used map() with a side-effecting lambda;
            # map is lazy in Python 3, so the splitting never actually ran.
            t.extend(fragment.split(d))
        res = t
    # bug fix: '[for x in res if x]' was a SyntaxError; filter out empty strings.
    return [x for x in res if x]
s = r'a,b.c;d|e/fg/hij]kl(mn,opqr{stu,vw`xyz'
print(mySplit(s,',.;|/]({`\t'))
import re
re.split(r'[,.;|/]({`\t]+', s)
# 如何判断字符串a是否以字符串b开头或结尾?
import os,stat
os.listdir('.') #列出当前目录下所有文件的列表
s = 'go.sh'
s.endswith(('.sh','.py')) #判断s是以.sh或.py结尾
[name for name in os.listdir('.') if name.endswith(('.sh','.py'))]
os.stat('e.py') #列出该文件的状态信息
# oct(os.stat('e.py').st_mode) 以八进制形式展示文件权限信息
# 如何调整字符串中文本的格式 2016-10-01 --> 10/01/2016
import re
log = open(filename).read()
re.sub('(\d{4})-(\d{2})-(\d{2})', r'\2/\3/\1', log)
re.sub('(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})', r'\g<month>/\g<day>/\g<year>', log)
# 如何将多个小字符串拼接成一个大的字符串
lists = ['ab','cd','ef','gh','hi','jk']
s = ''
for i in lists:
s += i
# (内存开销较大,不建议使用)
''.join(lists) <推荐使用该方法>
list2 = ['ab','cd',123,45]
''.join((str(x) for x in list2)) #生成器方式进行join拼接
# 如何对字符串进行左,右,居中对齐?
s = 'abc'
s.ljust(20)
s.ljust(20, '=')
s.rjust(20)
s.center(30)
format(s,'<20')
format(s,'>20')
format(s,'^20')
d = {'apple':200,'google':350,'facebook':165,'Android OS':124}
m = max(map(len,d.keys())) #取各个键的最大长度
for k in d:
print(k.ljust(m),':',d[k])
# 如何去掉字符串中不需要的字符
s = ' abc 123 '
s.strip() #去掉两头的空白字符
s.lstrip() #去掉左边的空白
s.rstrip() #去掉右边的空白
s = '+++abc---'
s.strip('+-') #去掉'+' 和'-'
s = 'abc:123'
s[:3] + s[4:]
s = '\tabc\txyz'
s.replace('\t', '')
s = '\r\tabc\t\rbvc\n'
import re
re.sub('[\t\r\n]', '', s)
s = 'abc1234567xyz'
import string
string.maketrans('abcxyz','xyzabc') #制造字符的映射关系 a映射x b映射y
s.translate(string.maketrans('abcxyz','xyzabc'))
# xyz1234567abc
# 如何读写文本文件
f = open('py3.txt','wt',encoding='utf-8')
f.write('你好')
f.close()
f = open('py3.txt','rt',encoding='utf-8')
print(f.read())
# 如何处理二进制文件
f = open('demo.wav','rb')
info = f.read(44)
import struct
struct.unpack('h', info[22:24]) #音频文件声道数
struct.unpack('i', info[4:28]) #采样频率
import array
n = (f.tell() - 44) / 2
buf = array.array('h', (0 for _ in range(n)))
f.seed(44) #文件指针指向数据部分
f.readinto(buf) #将数据读入buf中
# open函数想以二进制模式打开文件,指定mode参数为'b'
# 二进制数据可以用readinto,读入到提前分配好的buffer中,便于数据处理
# 解析二进制数据可以使用标准库中的struct模块的unpack方法
# 如何设置文件的缓冲
全缓冲:
# 普通文件默认的缓冲区是4096个字节
f = open('demo.txt','w',buffering=2048)
f.write('a'*1024) #tail实时查看文件无显示内容
f.write('b'*1024) #依然无内容显示
f.write('c') #文件写入内容超出缓冲区设置大小,文本内容显示出来
行缓冲:
f = open('demo2.txt','w',buffering=1)
f.write('abc')
f.write('\n')
f.write('xyz\n')
无缓冲:
f = open('demo2.txt','w',buffering=0)
f.write('123')
# 如何将文件映射到内存
1.在访问某些二进制文件时,希望能把文件映射到内存中,可以实现随机访问. (framebuffer设备文件)
2.某些嵌入式设备,寄存器被编址到内训地址空间,我们可以映射/dev/mem某范围,去访问这些寄存器
3.如果多个进程映射同一个文件,还能实现进程通信的目的.
# 如何使用临时文件?
from tempfile import TemporaryFile,NamedTemporaryFile
f = TemporaryFile() #创建一个临时文件对象
f.write('hello,world' * 10000) #向临时文件写入临时数据
f.seek(0) #文件指针指向临时文件头部
f.read(100) #读取数据
# 系统中找不到该文件,只能有该临时文件的对象来访问
ntf = NamedTemporaryFile()
print(ntf.name)
# 系统中临时文件目录中可以找到该临时文件,创建新的临时文件时之前的会被删除,可设置默认参数delete=False来保存之前的临时文件
# 如何读写csv数据?
from urllib import urlretrieve
urlretrieve('http://table.finance.yahoo.com/table.csv?s=000001.sz','pingan.csv')
import csv
rf = open('pingan.csv','rb')
reader = csv.reader(rf)
header = (reader.next()) #逐行打印
wf = open('pingan.csv','wb')
writer = csv.writer(wf)
writer.writerow(header) #写入头部
writer.writerow(reader.next())
wf.flush() #保存到文件中
================================================
import csv
with open('pingan.csv','rb') as rf:
reader = csv.reader(rf)
with open('pingan2.csv','wb') as wf:
writer = csv.writer(wf)
headers = reader.next()
writer.writerow(headers)
for row in reader:
if row[0] < '2016-01-01':
break
if int(row[5]) >= 50000000:
writer.writerow(row)
print('writing end')
================================================
# 如何读写json数据
# json.dumps() 和 json.loads() 的参数是字典.
# json.dump() 和 json.load() 的参数是文件.
with open('dump.json','wb') as f:
json.dump({'a':1,'b':2,'c':3},f)
# 如何解析简单的XML文档
from xml.etree.ElementTree import parse
f = open('demo.xml')
et = parse(f)
root = et.getroot() #获取根节点
root.tag #获取元素标签
for child in root:
print(child.get('name'))
# 如何读写excel文件?
import xlrd,xlwt
book = xlrd.open_workbook('demo.xlsx')
sheet = book.sheet_by_index(0) #根据索引获取excel文件的sheet
sheet = book.sheet_by_name('sheetname') #根据sheet名获取Excel文件的sheet
print(sheet.nrows)
print(sheet.ncols)
print(sheet.cell(0,0))
print(sheet.row(1))
wbook = xlwt.Workbook()
wsheet = wbook.add_sheet('sheet1')
wsheet.write(row,col,label)
wbook.save('output.xlsx')
================================================
import xlrd,xlwt
rbook = xlrd.open_workbook('demo.xlsx')
rsheet = rbook.sheet_by_index(0)
nc = rsheet.ncols
rsheet.put_cell(0,nc,xlrd.XL_CELL_TEXT,'总分',None)
for row in range(1,rsheet.nrows):
total = sum(rsheet.row_values(row,1))
rsheet.put_cell(row,nc,xlrd.XL_CELL_TEXT,total,None)
wbook = xlwt.Workbook()
wsheet = wbook.add_sheet(rsheet.name)
style = xlwt.easyxf('align:vertical center,horizontal center')
for r in range(rsheet.nrows):
for c in range(rsheet.ncols):
wsheet.write(r,c,rsheet.cell_values(r,c),style)
wbook.save('output.xlsx')
================================================
# 如何派生内置不可变类型并修改其实例化行为?
# 实际案例:想自定义一种新类型的元组,对于传入的可迭代对象,只保留其中int类型且值大于0的元素,例如:IntTuple([1,-1,'abc',6,['x','y'],3]) ==> (1,6,3) 要求IntTuple是内置tuple的子类,如何实现?
class IntTuple(tuple):
def __new__(cls,iterable):
g = (x for x in iterable if isinstance(x,int) and x > 0)
return super(IntTuple,cls).__new__(cls,g)
def __init__(self,iterable):
super(IntTuple,self).__init__(iterable)
# 如何为创建大量实例节省内存?
# 解决方案:定义类的__slots__属性,它是用来声明实例属性名字的列表.
import sys
sys.getsizeof(object, default) #查看对象的消耗内存大小
class Player(object):
__slots__ = ['id','name','age','job'] #绑定实例化属性,属性实例化之后无法拓展,达到节省内存消耗的目的
def __init__(self,id,name,age,job):
self.id = id
self.name = name
self.age = age
self.job = job
# 如何让对象支持上下文管理?
# 实际案例:我们实现了一个telnet客户端的类TelnetClient,调用实例的start()方法启动客户端与服务器交互,交互完毕后需调用cleanup()方法,关闭已连接的socket,以及将操作历史记录写入文件并关闭. 能否让TelnetClient的实例支持上下文管理协议,从而替代手工调用cleanup()方法???
解决方案:实现上下文管理协议,需定义实例的__enter__,__exit__方法,它们分别在with开始和结束时被调用
from telnetlib import Telnet
from sys import stdin,stdout
from collections import deque
class TelnetClient(object):
def __init__(self,addr,port=23):
self.addr = addr
self.port = port
self.tn = None
def start(self):
self.tn = Telnet(self.addr,self.port)
self.history = deque()
# user
t = self.tn.read_until('login:')
stdout.write(t)
user = stdin.readline()
self.tn.write(user)
# password
t = self.tn.read_until('Password:')
if t.startswith(user[:-1]):t = t[len(user) + 1:]
stdout.write(t)
self.tn.write(stdin.readline())
t = self.tn.read_until('$ ')
stdout.write(t)
while True:
uinput = stdin.readline()
if not uinput:
break
self.history.append(uinput)
self.tn.write(uinput)
t = self.tn.read_until('$ ')
stdout.write(t[len(uinput) + 1:])
# def cleanup(self):
# self.tn.close()
# self.tn = None
# with open(self.addr + '_history.txt','w') as f:
# f.writelines(self.history)
def __enter__(self):
self.tn = Telnet(self.addr,self.port)
self.history = deque()
return self
def __exit__(self,exc_type,exc_val,exc_tb):
self.tn.close()
self.tn = None
with open(self.addr + '_history.txt','w') as f:
f.writelines(self.history)
with TelnetClient('127.0.0.1') as client:
client.start()
# 如何创建可管理的对象属性?
# 使用调用方法在形式上不如访问属性简洁,能否在形式上是属性访问,实际上是调用方法?
# 解决方案:使用property函数为类创建可管理属性,fget/fset/fdel对应相应属性
from math import pi
class Circle(object):
def __init__(self,radius):
self.radius = radius
def getRadius(self):
return self.radius
def setRadius(self,value):
if not isinstance(value, (int, long, float)):
raise ValueError('wrong type.')
self.radius = float(value)
def getArea(self):
return self.radius ** 2 * pi
R = property(getRadius, setRadius)
# 如何让类支持比较操作
# 解决方案:比较符号运算重载,需要实现以下方法:__lt__,__le__,__gt__,__ge__,__eq__,__ne__ .
class Rectangle(object):
def __init__(self,w,h):
self.h = h
self.w = w
def area(self):
return self.w * self.h
def __lt__(self,obj):
return self.area() < obj.area()
def __le__(self,obj):
return self.area() <= obj.area()
def __gt__(self,obj):
return self.area() > obj.area()
def __ge__(self,obj):
return self.area() >= obj.area()
def __eq__(self,obj):
return self.area() == obj.area()
def __ne__(self,obj):
return self.area() != obj.area()
# 使用标准库下的functools下的类装饰器可以简化此过程
from functools import total_ordering
@total_ordering
class Rectangle(object):
def __init__(self,w,h):
self.h = h
self.w = w
def area(self):
return self.w * self.h
def __eq__(self,obj):
return self.area() == obj.area()
def __lt__(self,obj):
return self.area() < obj.area()
# 如何使用描述符对实例属性做类型检查?
# 实际案例:在某项目中,实现了一些类,希望能像静态类型语言那样对实例属性做类型检查
# 要求:1 可以对实例变量名指定类型 2 赋予不正确的类型时抛出异常
# 解决方案:使用描述符来实现需要类型检查的属性:分别实现__get__,__set__,__delete__ 方法,在__set__内使用isinstance函数做类型检查
class Attr(object):
"""docstring for Attr"""
def __init__(self, name,type_):
'''定义属性和对应的类型'''
self.name = name
self.type = type_
def __get__(self,instance,cls):
return instance.__dict__[self.name]
def __set__(self,instance,value):
if not isinstance(value,self.type_):
raise TypeError('expected an %s' % self.type_)
instance.__dict__[self.name] = value
def __delete__(self,instance):
del instance.__dict__[self.name]
class Person(object):
name = Attr('name', str)
age = Attr('age', int)
height = Attr('height', float)
# 如何在环状数据结构中管理内存?
# python中垃圾回收器通过引用计数来回收垃圾对象,某些环状数据结构存在对象间的循环引用,同时del掉引用的节点,两个对象不能被立即回收,该如何解决?
# 解决方案:使用标准库weakref,可以创建一种能访问对象但不增加引用计数的对象(类似于Objective-C中的弱引用)
import sys
sys.getrefcount(object) #查看对象的引用计数
import weakref
class Data(object):
"""docstring for Data"""
def __init__(self, value,owner):
# self.owner = owner
self.owner = weakref.ref(owner)
self.value = value
def __str__(self):
return "%s's data,value is %s" % (self.owner,self.value)
def __del__(self):
print('in Data.__del__')
class Node(object):
def __init__(self,value):
self.data = Data(value, self)
def __del__(self):
print('in Node.__del__')
node = Node(100)
del node
# 如何通过实例方法名字的字符串调用方法
# 实际案例:项目中代码使用了三个不同库中的图形类:Circle,Triangle,Rectangle. 每个类都有一个获取图形面积的接口(方法),但接口名字不同,我们可以实现一个统一的获取面积的函数,使用每种方法名进行尝试,调用相应类的接口
# 解决方案:1 使用内置函数getattr,通过名字在实例上获取方法对象,然后调用. 2 使用标准库中的operator下的methodcaller函数调用
class Circle(object):
pass
class Triangle(object):
pass
class Rectangle(object):
pass
def getArea(shape):
for name in ('area','getarea','get_area'):
f = getattr(shape, name, None)
if f:
return f()
s1 = Circle(2)
s2 = Rectangle(3,3)
s3 = Triangle(2,3,4)
shapes = [s1,s2,s3]
print(getArea,shapes)
from operator import methodcaller
s = 'abc123abc321'
s.find('abc', 3)
methodcaller('find','abc',4)
# 如何使用多线程?
====================================================
import csv
from xml.etree.ElementTree import Element,ElementTree
import requests
from StringIO import StringIO
from xml_pretty import pretty
def download(url):
response = requests.get(url,timeout=3)
if response.ok:
return StringIO(response.content)
def csvToXml(scsv,fxml):
reader = csv.reader(scsv)
headers = reader.next()
headers = map(lambda h:h.replace(' ',''),headers)
root = Element('Data')
for row in reader:
eRow = Element('Row')
root.append(eRow)
for tag,text in zip(headers,row):
e = Element(tag)
e.text = text
eRow.append(e)
pretty(root)
et = ElementTree(root)
et.write(fxml)
# if __name__ == "__main__":
# url = "http://table.finance.yahoo.com/table.csv?s=000001.sz"
# rf = download(url)
# if rf:
# with open('000001.xml','wb') as wf:
# csvToXml(rf,wf)
====================================================
def handle(sid):
print('Downloading...(%d)' % sid)
url = "http://table.finance.yahoo.com/table.csv?s=%s.sz"
url %= str(sid).rjust(6, '0') #股票代码000001,只需传入1即可,其它数字自动以0填充
rf = download(url)
if rf is None:return
print('Covert to XML...(%d)' % sid)
fname = str(sid).rjust(6, '0') + '.xml'
with open(fname,'wb') as wf:
csvToXml(rf, wf)
from threading import Thread
# 方法1:
t = Thread(target=handle,args=(1,))
t.start()
print('main thread')
# 方法2:
class MyThread(Thread):
def __init__(self,sid):
Thread.__init__(self)
self.sid = sid
def run(self):
handle(self.sid)
threadList = []
for i in range(1,11)
t = MyThread(i)
threadList.append(t)
t.start()
for t in threadList:
t.join() #阻塞函数,会让子线程全部退出之后主线程再退出
print('main thread')
# 如何线程间通信?
====================================================
import csv
from xml.etree.ElementTree import Element,ElementTree
import requests
from StringIO import StringIO
from xml_pretty import pretty
from threading import Thread
from Queue import Queue
class DownloadThread(Thread): #下载线程类
def __init__(self,sid):
Thread.__init__(self)
self.sid = sid
self.url = "http://table.finance.yahoo.com/table.csv?s=%s.sz"
self.url %= str(sid).rjust(6, '0')
def download(self,url):
response = requests.get(url,timeout=3)
if response.ok:
return StringIO(response.content)
def run(self): #线程入口方法
print('Download',self.sid)
data = self.download(self.url)
self.queue.put((self.sid,data))
class ConvertThread(Thread):
def __init__(self,queue):
Thread.__init__(self)
self.queue = queue
def csvToXml(self,scsv,fxml):
reader = csv.reader(scsv)
headers = reader.next()
headers = map(lambda h:h.replace(' ',''),headers)
root = Element('Data')
for row in reader:
eRow = Element('Row')
root.append(eRow)
for tag,text in zip(headers,row):
e = Element(tag)
e.text = text
eRow.append(e)
pretty(root)
et = ElementTree(root)
et.write(fxml)
def run(self):
while True:
sid,data = self.queue.get()
print('Convert',sid)
if sid == -1:
break
if data:
fname = str(sid).rjust(6, '0') + '.xml'
with open(fname,'wb') as wf:
self.csvToXml(data, wf)
q = Queue()
dThreads = [DownloadThread(i,q) for i in range(1,11)]
cThread = ConvertThread(q)
for t in dThreads:
t.start()
cThread.start()
for t in dThreads:
t.join()
q.put(-1,None)
====================================================
# 如何使用函数装饰器?
# 定义装饰器函数,用来生成一个在原函数基础添加了新功能的函数,替代原函数
def memo(func):
    """Memoizing decorator: caches results of *func* keyed by its positional args."""
    results = {}
    def wrap(*args):
        try:
            return results[args]
        except KeyError:
            value = func(*args)
            results[args] = value
            return value
    return wrap

@memo
def fibonacci(n):
    """Fibonacci numbers with fib(0) == fib(1) == 1."""
    if n <= 1:
        return 1
    return fibonacci(n - 1) + fibonacci(n - 2)
# 没有装饰器函数,需修改原函数以提高运算效率:
def fibonacci2(n, cache=None):
    """Fibonacci with an explicit memo dict (fib(0) == fib(1) == 1).

    *cache* defaults to None (a fresh dict per top-level call) to avoid the
    mutable-default-argument pitfall; recursive calls share the same dict.
    """
    if cache is None:
        cache = {}
    if n in cache:
        return cache[n]
    if n <= 1:
        return 1
    cache[n] = fibonacci2(n - 1, cache) + fibonacci2(n - 2, cache)
    return cache[n]  # bug fix: the original fell off the end and returned None for n >= 2
# 10个台阶的楼梯,从下面走到上面,一次只能迈1-3个台阶,且不能后退,走完楼梯共有多少种方法.
@memo
def climb(n,steps):
count = 0
if n == 0:
count = 1
elif n > 0:
for step in steps:
count += climb(n-step, steps)
return count
如何为被装饰的函数保存元数据?
# f.__name__ 函数的名字
# f.__doc__ 函数的文档字符串
# f.__moudle__ 函数所属模块名
# f.__dict__ 默认字典
# f.__defaults__ 默认参数元组
# 使用装饰器后,再使用上面这些属性访问时,看到的是内部包裹函数的元数据,原来函数的元数据便丢失掉了,应该如何解决?
====================================================
from functools import update_wrapper,wraps
def mydecrator(func):
    """Decorator that prints 'In wrapper!' before delegating to *func*.

    @wraps copies func's metadata (__name__, __doc__, __dict__, ...) onto the
    wrapper, so introspection still sees the original function.
    """
    @wraps(func)
    def wrapper(*args, **kargs):
        '''wrapper function'''
        print('In wrapper!')
        # bug fix: '*kargs' was a SyntaxError (needs **); also propagate the result.
        return func(*args, **kargs)
    return wrapper
@mydecrator
def example():
'''example function'''
print('In example')
print(example.__name__)
print(example.__doc__)
====================================================
# 如何实现属性可修改的函数装饰器?
为分析程序内哪些函数执行时间开销较大,定义一个带timeout的函数装饰器,装饰功能如下: 1 统计被装饰函数单次调用运行时间 2 时间大于参数timeout的,将此次函数调用记录到log日志中 3 运行时可修改timeout的值.
from functools import wraps
import time
import logging
def warn(timeout):
    """Decorator factory: log a warning when the wrapped call exceeds *timeout* seconds.

    The returned wrapper exposes wrapper.setTimeout(k) so the threshold can be
    changed at runtime (it rebinds the closed-over *timeout* via nonlocal).
    """
    def decorator(func):
        @wraps(func)  # preserve func's metadata on the wrapper
        def wrapper(*args, **kargs):
            start = time.time()
            res = func(*args, **kargs)
            used = time.time() - start
            if used > timeout:
                # logging.warning: logging.warn is a deprecated alias.
                logging.warning('%s:%s > %s', func.__name__, used, timeout)
            return res
        def setTimeout(k):
            # Rebind the enclosing 'timeout' so later calls use the new threshold.
            nonlocal timeout
            timeout = k
        wrapper.setTimeout = setTimeout
        return wrapper
    return decorator
from random import randint
@warn(1.5)
def test():
print("In test")
while randint(0, 1):
time.sleep(0.5)
for _ in range(30):
test()
| true |
4f2353ab1f7233791505d55dd371c23be599b0c3 | Python | shuuwook/PyMesh | /scripts/svg_to_mesh.py | UTF-8 | 909 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
""" Convert a svg file into 2D triangle mesh.
"""
import argparse
import pymesh
import numpy as np
def parse_args():
    """Parse the command line: an input SVG path and an output mesh path."""
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument("input_svg")
    parser.add_argument("output_mesh")
    return parser.parse_args()
def main():
    # Pipeline: load the SVG as a wire network, de-duplicate vertices,
    # triangulate the 2D region, and save the mesh with all its attributes.
    args = parse_args();
    wires = pymesh.wires.WireNetwork();
    wires.load_from_file(args.input_svg);
    # Merge exactly coincident vertices (tolerance 0.0) so the triangulator
    # receives a clean edge set.
    vertices, edges, __ = pymesh.remove_duplicated_vertices_raw(wires.vertices, wires.edges, 0.0);
    wires.load_from_data(vertices, edges);
    tri = pymesh.triangle();
    tri.points = wires.vertices;
    tri.segments = wires.edges;
    tri.run();
    mesh = tri.mesh;
    regions = tri.regions;  # per-face region ids reported by the triangulator
    mesh.add_attribute("regions");
    mesh.set_attribute("regions", regions);
    # Persist every attribute (including "regions") alongside the geometry.
    pymesh.save_mesh(args.output_mesh, mesh, *mesh.attribute_names);
# Script entry point.
if __name__ == "__main__":
    main();
| true |
07b79416c4b0beb3a0996ae59e064c66fd246789 | Python | ikunikunkun/Python_Calculation_Method | /code/2/_2_6.py | UTF-8 | 1,507 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import matplotlib.pyplot as plt
def _2_6(X, Y, ml, mr):
H = [X[i] - X[i-1] for i in range(1, len(X))]
alpha = [H[j-1]/(H[j-1]+H[j]) for j in range(1, len(H))]
beta = [H[j]/(H[j-1]+H[j]) for j in range(1, len(H))]
D = [3*(beta[j-1]/H[j-1]*(Y[j] - Y[j-1]) + alpha[j-1]/H[j]*(Y[j+1] - Y[j])) for j in range(1,len(H))]
M = [None]*len(X)
M[0], M[-1] = ml, mr
A = []
b = []
for j in range(1, len(H)):
if M[j-1] != None:
A.append([2, alpha[j-1]])
b.append(D[j-1] - beta[j-1]*M[j-1])
if M[j+1] != None:
A.append([beta[j-1], 2])
b.append(D[j-1] - alpha[j-1]*M[j+1])
A = np.mat(A)
b = np.mat(b).T
M[1:-1] = [float(i) for i in np.linalg.solve(A,b)]
return H, M
# Solve the equation
def foo(X, Y, H, M, x):
    """Evaluate the cubic Hermite spline (knots X, values Y, widths H, slopes M) at x."""
    # Locate the interval [X[j], X[j+1]] containing x: find the first knot
    # strictly greater than x, then step back one (same scan as before).
    j = 0
    for j in range(len(X)):
        if x < X[j]:
            break
    j -= 1
    # Normalised distances to the two endpoints of the interval.
    t0 = (x - X[j]) / H[j]
    t1 = (x - X[j+1]) / H[j]
    # Hermite basis combination of the endpoint values and slopes.
    y = ((1 + 2*t0) * t1**2 * Y[j]
         + (1 - 2*t1) * t0**2 * Y[j+1]
         + (x - X[j]) * t1**2 * M[j]
         + (x - X[j+1]) * t0**2 * M[j+1])
    return y
# Demo: fit the spline through four points with prescribed end slopes,
# evaluate it at x = 1.5, then plot the curve against the data points.
X = [-1, 0, 1, 2]
Y = [0, 0.5, 2, 1.5]
ml = 0.5   # prescribed slope at the left end
mr = -0.5  # prescribed slope at the right end
H, M = _2_6(X, Y, ml, mr)
print("M:", M)
x = 1.5
y = foo(X, Y, H, M, x)
print(y)
# Dense sample of the spline for plotting. NOTE: the comprehension below
# rebinds x, shadowing the scalar x evaluated above.
xx = [i for i in np.arange(-0.99, 1.99, 0.01)]
yy = [foo(X, Y, H, M, x) for x in xx]
plt.plot(X, Y, 'o')
plt.plot(xx, yy, '-')
plt.show() | true |
5d4d2770cbd791394f35d7ec700d6be7e79a4d2f | Python | gwax/mtg_ssm | /mtg_ssm/serialization/interface.py | UTF-8 | 2,473 | 2.703125 | 3 | [
"MIT"
] | permissive | """Interface definition for serializers."""
import abc
from pathlib import Path
from typing import ClassVar, Dict, List, Optional, Set, Tuple, Type
from mtg_ssm.containers.collection import MagicCollection
from mtg_ssm.containers.indexes import Oracle
class Error(Exception):
    """Base error for serializers."""


class UnknownDialect(Error):
    """Raised when an unregistered (extension, dialect) pair is requested.

    Bug fix: this previously inherited from Exception directly, so handlers
    catching the package's ``Error`` base missed it; it now sits in the same
    hierarchy as DeserializationError.
    """


class DeserializationError(Error):
    """Raised when there is an error reading counts from a file."""
class SerializationDialect(metaclass=abc.ABCMeta):
    """Abstract interface for mtg ssm serialization dialect."""

    # (extension, dialect, description) triples used for user-facing listings.
    _EXT_DIALECT_DOC: ClassVar[Set[Tuple[str, str, str]]] = set()
    # Maps (extension, dialect) -> implementing subclass; filled by __init_subclass__.
    _EXT_DIALECT_TO_IMPL: ClassVar[
        Dict[Tuple[str, str], Type["SerializationDialect"]]
    ] = {}

    # Concrete subclasses set both of these to register themselves automatically.
    extension: ClassVar[Optional[str]] = None
    dialect: ClassVar[Optional[str]] = None

    def __init_subclass__(cls: Type["SerializationDialect"]) -> None:
        """Auto-register any subclass that declares both extension and dialect."""
        super().__init_subclass__()
        if cls.extension is not None and cls.dialect is not None:
            cls._EXT_DIALECT_DOC.add(
                (cls.extension, cls.dialect, cls.__doc__ or cls.__name__)
            )
            cls._EXT_DIALECT_TO_IMPL[(cls.extension, cls.dialect)] = cls

    @abc.abstractmethod
    def write(self, path: Path, collection: MagicCollection) -> None:
        """Write print counts to a file."""

    @abc.abstractmethod
    def read(self, path: Path, oracle: Oracle) -> MagicCollection:
        """Read print counts from file."""

    @classmethod
    def dialects(
        cls: Type["SerializationDialect"],
    ) -> List[Tuple[str, Optional[str], Optional[str]]]:
        """List of (extension, dialect, description) of registered dialects."""
        return sorted(
            (ext, dial or "", doc or "") for ext, dial, doc in cls._EXT_DIALECT_DOC
        )

    @classmethod
    def by_extension(
        cls: Type["SerializationDialect"],
        extension: str,
        dialect_mappings: Dict[str, str],
    ) -> Type["SerializationDialect"]:
        """Get a serializer class for a given extension and dialect mapping.

        Falls back to the extension itself as the dialect when no mapping is
        given; raises UnknownDialect for unregistered pairs.
        """
        dialect = dialect_mappings.get(extension, extension)
        try:
            return cls._EXT_DIALECT_TO_IMPL[(extension, dialect)]
        except KeyError as err:
            raise UnknownDialect(
                f'File extension: "{extension}" dialect: "{dialect}" not found in registry'
            ) from err
| true |
7c1b88e55f88e59e04648899efa271d665664283 | Python | 1053274270/python | /爬虫/test.py | UTF-8 | 1,037 | 2.828125 | 3 | [] | no_license | import requests
import re
import time
import os,sys
#url='http://www.14epep.com/vodata/1608/play.html?1608-0-1'
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
def star():
    # Crawl six article pages and hand every .jpg URL found in each page to save().
    urls=['http://www.14epep.com/arts/{}.html'.format(str(i)) for i in range(393293,393299)]
    # Regex pulls the value of src="... .jpg" attributes out of the raw HTML.
    tp=re.compile(r'src.+?"(.+?jpg)"')
    for url in urls:
        print(url)
        html=requests.get(url,headers=headers)
        jpg=re.findall(tp,html.text)
        save(jpg)
def save(urls):
    """Download each image URL in *urls* into the hard-coded target folder.

    Files are named 0.jpg, 1.jpg, ... counting successful downloads only,
    so a failed URL does not leave a gap in the numbering.
    """
    i = 0
    for url in urls:
        print(url)
        try:
            tupian = requests.get(url, timeout=20)
            img = tupian.content
            # NOTE(review): destination directory is hard-coded and must already exist.
            with open('F:\\刘畅\\学习\\html\\tp\\'+str(0)+'\\'+str(i)+'.jpg','wb') as f:
                f.write(img)
            print(str(i)+',ok')
            i += 1
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate; download stays best-effort otherwise.
            print('no')
if __name__=='__main__':
print(1)
star()
print(1)
| true |
0ffc156f4de6e4d9b916276507a7b04067c69d9c | Python | FarmerB05/ProjectNegative42 | /gui/text.py | UTF-8 | 1,015 | 3.34375 | 3 | [] | no_license | import pygame
class Text:
    """A pygame text label rendered once and blitted centred inside a rect."""

    def __init__(self, screen, text, rect, underline=False, font='chalkduster.ttf', font_size=25, font_color=None):
        self.screen = screen
        self.text = text
        self.rect = rect
        self.font = font
        self.font_size = font_size
        # Default to white when no colour is supplied.
        self.font_color = (255, 255, 255) if font_color is None else font_color
        # Build the pygame font object and render the initial surface.
        # NOTE(review): SysFont expects a font *name*; confirm the .ttf default resolves as intended.
        pygame.font.init()
        self.font = pygame.font.SysFont(self.font, self.font_size)
        if underline:
            self.font.set_underline(True)
        self.font_text = self.font.render(str(self.text), True, self.font_color)

    def update(self):
        """Re-render the surface (call after changing self.text or self.font_color)."""
        self.font_text = self.font.render(str(self.text), True, self.font_color)

    def draw(self):
        """Blit the rendered text centred inside self.rect."""
        width = self.font_text.get_width()
        height = self.font_text.get_height()
        pos_x = self.rect[0] + self.rect[2] / 2 - width / 2
        pos_y = self.rect[1] + self.rect[3] / 2 - height / 2
        self.screen.blit(self.font_text, (pos_x, pos_y))
| true |
93e4f4009a070b97415f44c6b01351e4283ce94b | Python | TeaCanMakeMeDrunk/homework | /code/test--6.py | UTF-8 | 2,550 | 3.296875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 11:36:39 2018
练习6:
1.显示4个商品然后打印分割线,只要第一个36个商品信息
2.列出36个商品
3.获取所有的商品价格并且给商品排序,从高到底
4.按照销量排序
5.商品过滤,只要15天退款或者包邮的商品信息,显示
@author: admin558
"""
url='https://s.taobao.com/search?q=%E8%A3%99%E5%AD%90&imgfile=&commend=all&ssid=s5-e&search_type=item&sourceId=tb.index&spm=a21bo.2017.201856-taobao-item.1&ie=utf8&initiative_id=tbindexz_20170306&s=0&ajax=true'
import urllib.request as r#导入联网工具包,名为为r
data=r.urlopen(url).read().decode('utf-8','ignore')
import json#将字符串转换为字典
data=json.loads(data)
length=len(data['mods']['itemlist']['data']['auctions'])
#打印36个商品信息,每四个打印一个分割线
def printMods():
    """Print key fields of each auction, with a divider after every fourth item."""
    auctions = data['mods']['itemlist']['data']['auctions']
    for count in range(1, length + 1):
        item = auctions[count - 1]
        for field in ('view_price', 'view_sales', 'title', 'nick', 'item_loc'):
            print(item[field])
        if count % 4 == 0:
            print('----------------------------------------------------------')
#调用方法
printMods()
#获取所有的商品并且给商品排序,从高到底
def sortAll(value, name):
    """Print the values of auction field *value*, sorted from high to low.

    When *name* is the sales label, the field is a string such as '1234人付款';
    the trailing 3-character suffix is stripped before converting to int.
    """
    auctions = data['mods']['itemlist']['data']['auctions']
    priceList = []
    for i in range(length):
        if name == '商品销量':
            priceList.append(int(auctions[i][value][0:-3]))
        else:
            priceList.append(float(auctions[i][value]))
    print(name + '排序,从高到底:')
    # sorted(..., reverse=True) replaces the sort-then-reverse round trip.
    print(sorted(priceList, reverse=True))
#调用方法
sortAll('view_price','商品价格')
sortAll('view_sales','商品销量')
print('-----------------------------------------------------------------------------------')
#商品过滤,只要15天退款或者包邮的商品信息,显示
# Demo loop bucketing the first 15 auctions by price.
print('商品信息:')
for i in range(15):
    price=float(data['mods']['itemlist']['data']['auctions'][i]['view_price'])
    if price>80 and price<498:
        print('商品价格大于100')
    elif price<60 and price>40:
        continue
        # NOTE(review): this print is unreachable -- the 'continue' above skips it.
        print('商品价格在40到60之间')
    else:
        # The 'break' makes this while run at most once; it only fires when price == 499.0.
        while price==499.0:
            print('最大商品价格是499.0')
            break
| true |
07028a5b157c0874c540bb954459fab508ef38ea | Python | E2057SalihPoyraz/Python-Daily_Challenges | /corresponding-column-title.py | UTF-8 | 1,002 | 3.8125 | 4 | [] | no_license | # QUESTION: Level of Interview Question = Easy
# Given a positive integer, return its corresponding column title as appear in an Excel sheet.
# For example:
# 1 -> A
# 2 -> B
# 3 -> C
# ...
# 26 -> Z
# 27 -> AA
# 28 -> AB
# ...
# Example 1:
# Input: 1; Output: "A"
# Example 2:
# Input: 28; Output: "AB"
# Example 3:
# Input: 701; Output: "ZY"
# çözüm-1:
def reply(input):
    """Return the Excel column title for positive integer *input* (1 -> 'A', 28 -> 'AB').

    Bug fix: the original lookup-table version raised KeyError on every
    multiple of 26 (e.g. 52, whose title is 'AZ') and only handled titles up
    to two letters. This is the general bijective base-26 conversion; it
    still produces the same results for all previously-correct inputs.
    """
    title = ""
    n = input
    while n > 0:
        # Shift to 0-based so the remainder 0..25 maps onto 'A'..'Z'.
        n, rem = divmod(n - 1, 26)
        title = chr(ord('A') + rem) + title
    return title
# çözüm-2:
def convertToTitle(n):
    """Convert a positive column index to its Excel letter title (1 -> 'A', 28 -> 'AB')."""
    suffix_parts = []
    while n > 26:
        # Peel off the least-significant letter; the -1 makes the base bijective.
        suffix_parts.append(chr(65 + (n - 1) % 26))
        n = (n - 1) // 26
    # n is now in 1..26 and supplies the leading letter.
    return chr(64 + n) + "".join(reversed(suffix_parts))
ff8f80c5fd2fac88c3404d2f93c17b2508e7720d | Python | z3ntu/ReturnApplication | /main.py | UTF-8 | 2,667 | 2.546875 | 3 | [] | no_license | from __future__ import print_function
from time import sleep
import sys
import os
import requests
import subprocess
import logging
if sys.version_info >= (3, 0):
print("Python 3")
import configparser
else:
print("Python 2")
import ConfigParser
def works_again(batch_success_again):
    """Announce recovery and run the configured success script if it exists."""
    print("Works again.")
    logging.warning("Works again.")
    if not os.path.isfile(batch_success_again):
        print("Not calling binary because it was not found: " + batch_success_again)
        return
    subprocess.call(batch_success_again)
def fail(batch_fail, last_status="0"):
    """Handle a failed check: run the fail script unless the previous check already failed.

    Bug fix: the original read a global ``last_status`` that only ever existed
    as a local variable inside ``main``, so calling fail() raised NameError at
    runtime. The status is now an explicit parameter; the default "0"
    ("previously OK") preserves the original intent of firing the script on
    the first failure, and existing single-argument callers keep working.
    """
    if last_status != "1":
        print("Fail!")
        logging.error("Failed!")
        if os.path.isfile(batch_fail):
            subprocess.call(batch_fail)
        else:
            print("Not calling binary because it was not found: " + batch_fail)
    else:
        print("Fail but not calling script")
def main():
    """Poll *url* every *delay* seconds and run the success/fail scripts on transitions."""
    # Read settings from config.ini; the configparser API differs between Python 2 and 3.
    if sys.version_info >= (3, 0):
        config = configparser.ConfigParser()
        config.read('config.ini')
        delay = int(config['DEFAULT']['delay'])
        batch_success_again = config['DEFAULT']['batch_success_again']
        url = config['DEFAULT']['url']
        batch_fail = config['DEFAULT']['batch_fail']
    else:
        config = ConfigParser.ConfigParser()
        config.read('config.ini')
        delay = int(config.get("DEFAULT", "delay"))
        batch_success_again = config.get("DEFAULT", "batch_success_again")
        url = config.get("DEFAULT", "url")
        batch_fail = config.get("DEFAULT", "batch_fail")

    last_status = "0"
    logging.warning("Started.")
    if os.path.isfile(batch_success_again):
        subprocess.call(batch_success_again)
    else:
        print("Not calling binary because it was not found: " + batch_success_again)

    # Main loop
    while True:
        try:
            # get status
            response = requests.get(url)
            # trim output
            status = response.text.replace("\n", "")
        except Exception as e:
            print("Exception while getting the status. Treating as fail.")
            print(e)
            # Bug fix: this was the int 1, which never compares equal to the
            # string "1" below, so network errors were silently treated as OK.
            status = "1"
        # check current status
        if status == "1":  # fail
            fail(batch_fail)
        elif last_status == "1":  # success again
            works_again(batch_success_again)
        else:
            print("Works.")
        # save current status for next iteration
        last_status = status
        # sleep for the configured duration
        sleep(delay)
sleep(delay)
if __name__ == '__main__':
logging.basicConfig(filename='logging.log', level=logging.WARNING, format='%(asctime)s %(message)s')
main()
| true |
befe22c9e3107187bdf91a2a310a82e16fbe65ae | Python | MeetGandhi/Computing | /Programs27Oct/writedata.py | UTF-8 | 395 | 3.625 | 4 | [] | no_license | # Write data to File
# Append a "total" column (sum of the marks in columns 3 and 4) to every data
# row of Input.txt and write the result to Output.txt, copying the header row.
with open("Input.txt", "r") as f, open("Output.txt", "w") as of:
    # The 'with' blocks guarantee both files are closed even on error.
    header = f.readline()  # header row carries no mark data
    of.write(header)
    for line in f:
        lst = line.split()
        total = int(lst[2]) + int(lst[3])  # int() converts the mark strings
        # rstrip("\n"): the original sliced off the last character, which ate
        # a digit when the final line had no trailing newline.
        newline = line.rstrip("\n") + " " + str(total) + "\n"
        of.write(newline)
| true |
45bea9ab77e8f51d6bab851408def63261e9fe95 | Python | root221/scan | /stitch.py | UTF-8 | 5,332 | 2.734375 | 3 | [] | no_license | import cv2
import numpy as np
import pickle
MIN_MATCH_COUNT = 200
class Stitcher:
    def __init__(self,img_list=None,H_lst=None):
        # img_list: grid (list of rows) of images to stitch.
        # H_lst: optional precomputed homographies laid out to match img_list.
        self.img_list = img_list
        self.H_lst = H_lst
    def stitch_all_images(self):
        """Stitch the single row of images in self.img_list left-to-right.

        Homographies come from self.H_lst when provided, otherwise they are
        estimated pairwise with find_homography. Returns the final panorama.
        NOTE(review): only the single-row case (len(self.img_list) == 1) is
        handled by the loop; confirm multi-row input is never passed.
        """
        if len(self.img_list) == 1:
            for i in range(len(self.img_list[0])-1):
                img1 = self.img_list[0][i]
                img2 = self.img_list[0][i+1]
                if not self.H_lst:
                    H = self.find_homography(img1,img2)
                else:
                    H = self.H_lst[0][i]
                (result,offsety) = self.stitch(img1,img2,"horizontal",H)
                # Accumulate: the stitched result becomes the left image of the next pair.
                self.img_list[0][i+1] = result
        return self.img_list[len(self.img_list)-1][len(self.img_list[0])-1]
    def stitch(self,img1,img2,direction,H,blend=1):
        """Warp img2 by homography H and blend it onto img1.

        direction is "horizontal" (left-to-right) or anything else (top-to-bottom).
        Returns (result, offset_y). The overlap strip is alpha-blended in 10
        bands from img1 (alpha 1->0) to the warped img2 (alpha 0->1).
        NOTE(review): the band slicing uses '/' on ints (e.g. overlap_width/10),
        which is integer division only under Python 2 -- under Python 3 these
        become float indices and raise TypeError; confirm target interpreter.
        """
        # Map img2's four corners through H (homogeneous coords, normalised by w).
        top_left = np.dot(H,np.array([0,0,1]))
        top_right = np.dot(H,np.array([img2.shape[1],0,1]))
        top_right = top_right / top_right[-1]
        bottom_left = np.dot(H,np.array([0,img2.shape[0],1]))
        bottom_left = bottom_left / bottom_left[-1]
        bottom_right = np.dot(H,np.array([img2.shape[1],img2.shape[0],1]))
        bottom_right = bottom_right / bottom_right[-1]
        if(direction == "horizontal"):
            # warp image left to right
            height = int(min(bottom_right[1],bottom_left[1]))
            result = cv2.warpPerspective(img2,H,(int(min(bottom_right[0],top_right[0])),height ))
            offset_y = int(max(top_right[1],top_left[1]))
            if offset_y < 0:
                offset_y = 0
            # get two overlap subimages
            overlap_left = int(max(top_left[0],bottom_left[0]))
            overlap_right = img1.shape[1]
            # height - 1 ???
            subimg2 = result[offset_y:height-1,overlap_left:overlap_right].copy()
            subimg1 = img1[offset_y:height-1,overlap_left:overlap_right].copy()
            # alpha blending two overlap image
            overlap_width = overlap_right - overlap_left
            dst = subimg2.copy()
            for j in range(10):
                alpha = j * 0.1
                a = subimg1[:,(j * overlap_width/10) : ((j+1) * overlap_width/10)]
                b = subimg2[:,(j * overlap_width/10) : ((j+1) * overlap_width/10)]
                dst[:,(j * overlap_width/10) : ((j+1) * overlap_width/10)] = cv2.addWeighted(a,1 - alpha,b,alpha,0)
            min_height = min(result.shape[0],img1.shape[0])
            result[0:min_height, 0:img1.shape[1]] = img1[0:min_height,0:img1.shape[1]]
            result[offset_y:height-1,overlap_left:overlap_right] = dst
        else:
            # warp image top to bottom
            bottom = int(min(bottom_right[1],bottom_left[1]))
            result = cv2.warpPerspective(img2,H,(img1.shape[1],bottom))
            # get two overlap subimages
            overlap_top = int(max(top_right[1],top_left[1]))
            overlap_bottom = img1.shape[0]
            subimg2 = result[overlap_top:overlap_bottom,0:img2.shape[1]].copy()
            subimg1 = img1[overlap_top:overlap_bottom,0:img2.shape[1]].copy()
            # alpha blending two overlap image
            overlap_height = overlap_bottom - overlap_top
            delta = overlap_height / 10
            dst = subimg2.copy()
            for j in range(10):
                alpha = j * 0.1
                a = subimg1[ j * delta : (j+1) * delta,:]
                b = subimg2[ j * delta : (j+1) * delta,:]
                dst[j * delta : (j+1) * delta,:] = cv2.addWeighted(a,1 - alpha,b,alpha,0)
                #dst[j * delta : (j+1) * delta,:] = cv2.addWeighted(a,1 ,b,0,0)
            # paste img1 to result image
            result[0:img1.shape[0], 0:img1.shape[1]] = img1[0:img1.shape[0],0:img1.shape[1]]
            # paste overlap(blend) region to result image
            result[overlap_top:overlap_bottom,0:img2.shape[1]] = dst
            offset_y = 0
        return (result,offset_y)
    def find_homography(self,img1,img2):
        """Estimate the homography that maps img2 onto img1 using SIFT + RANSAC.

        Returns the 3x3 matrix H, or implicitly None when fewer than
        MIN_MATCH_COUNT good matches survive the ratio test (callers must
        handle a None result).
        NOTE(review): uses the OpenCV 2.4 factory API
        (cv2.FeatureDetector_create), which was removed in OpenCV 3+.
        """
        # get detector and descriptor
        detector = cv2.FeatureDetector_create("SIFT")
        extractor = cv2.DescriptorExtractor_create("SIFT")
        gray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
        # finds the keypoint in the image
        kps = detector.detect(gray)
        (kp1,des1) = extractor.compute(gray,kps)
        gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
        # finds the keypoint in the image
        kps = detector.detect(gray)
        (kp2,des2) = extractor.compute(gray,kps)
        # match key point
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1,des2, k=2)
        # Apply Lowe's ratio test: keep matches clearly better than the 2nd best
        good = []
        for m,n in matches:
            if m.distance < 0.8*n.distance:
                good.append(m)
        if len(good) > MIN_MATCH_COUNT:
            # src points come from img2, dst from img1, so H warps img2 -> img1
            src_pts = np.float32([ kp2[m.trainIdx].pt for m in good]).reshape(-1,1,2)
            dst_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            H,mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
            return H
    def drawMatch(self,img1,img2,kp1,kp2,good,mask,direction,filename):
        """Render the inlier matches between img1 and img2 on one canvas and save it.

        Images are placed side by side for "horizontal" stitching, otherwise
        stacked top/bottom; only matches whose RANSAC mask entry is truthy are
        drawn (green 1px lines). The result is written to `filename`.
        """
        if(direction == "horizontal"):
            w = img1.shape[1]  # x-offset of img2 inside the combined canvas
            vis = np.zeros((max(img1.shape[0],img2.shape[0]),img1.shape[1] + img2.shape[1],3),dtype="uint8")
            vis[0:img1.shape[0],0:img1.shape[1],:] = img1
            vis[0:img2.shape[0],img1.shape[1]:] = img2
            for (m,s) in zip(good,mask):
                if s:
                    pt1 = (int(kp1[m.queryIdx].pt[0]),int(kp1[m.queryIdx].pt[1]))
                    pt2 = (int(kp2[m.trainIdx].pt[0]+w),int(kp2[m.trainIdx].pt[1]))
                    cv2.line(vis, pt1, pt2, (0, 255, 0), 1)
        else:
            h = img1.shape[0]  # y-offset of img2 inside the combined canvas
            vis = np.zeros((img1.shape[0] + img2.shape[0],max(img1.shape[1],img2.shape[1]),3) ,dtype="uint8")
            vis[0:img1.shape[0],0:img1.shape[1]] = img1
            vis[img1.shape[0]:,0:img2.shape[1]] = img2
            for (m,s) in zip(good,mask):
                if s:
                    pt1 = (int(kp1[m.queryIdx].pt[0]), int(kp1[m.queryIdx].pt[1]))
                    pt2 = (int(kp2[m.trainIdx].pt[0]), int(kp2[m.trainIdx].pt[1])+h)
                    cv2.line(vis,pt1,pt2,(0,255,0),1)
        cv2.imwrite(filename,vis)
| true |
956cd66252489c54e3f00877c1181176380e0a2b | Python | Aasthaengg/IBMdataset | /Python_codes/p03359/s929930152.py | UTF-8 | 113 | 2.9375 | 3 | [] | no_license | month, day = map(int, input().split())
# Conditional-expression form of the original if/else.
cnt = month if day >= month else month - 1
print(cnt) | true |
1aadc9848d9eef7d3323798583f2034354ee2d03 | Python | winni2k/cortexpy | /src/cortexpy/test/driver/graph/serializer.py | UTF-8 | 2,506 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | import attr
from cortexpy.graph.contig_retriever import ContigRetriever
from cortexpy.graph.interactor import Interactor
from cortexpy.graph.parser.random_access import RandomAccess
from cortexpy.graph.serializer import unitig
from cortexpy.graph.traversal.engine import Engine
from cortexpy.test import builder as builder
from cortexpy.test.expectation.kmer import CollapsedKmerUnitgGraphExpectation
@attr.s(slots=True)
class SerializerTestDriver(object):
    """Builder-style test driver: assemble a cortex graph, then run either
    contig retrieval or graph traversal over it via run()."""
    graph_builder = attr.ib(attr.Factory(builder.Graph))
    contig_to_retrieve = attr.ib(None)
    retriever = attr.ib(None)
    traverse = attr.ib(False)  # flipped by traverse_with_start_kmer_and_colors
    retrieve = attr.ib(False)  # flipped by retrieve_contig
    traversal_start_kmer = attr.ib(None)
    traversal_colors = attr.ib((0,))

    def with_kmer_size(self, n):
        """Set the kmer size on the underlying graph builder (chainable)."""
        self.graph_builder.with_kmer_size(n)
        return self

    def with_kmer(self, *args):
        """Add a kmer record to the underlying graph builder (chainable)."""
        self.graph_builder.with_kmer(*args)
        return self

    def traverse_with_start_kmer_and_colors(self, start_kmer, *colors):
        """Arm run() to traverse from start_kmer over the given colors (chainable)."""
        self.traverse = True
        self.traversal_start_kmer = start_kmer
        self.traversal_colors = colors
        return self

    def retrieve_contig(self, contig):
        """Arm run() to retrieve the given contig's kmer graph (chainable)."""
        self.retrieve = True
        self.contig_to_retrieve = contig
        return self

    def run(self):
        """Execute the armed command and return the resulting kmer graph.

        Raises a plain Exception when neither retrieve_contig() nor
        traverse_with_start_kmer_and_colors() was called first.
        """
        if self.retrieve:
            self.retriever = ContigRetriever(self.graph_builder.build())
            return self.retriever.get_kmer_graph(self.contig_to_retrieve)
        elif self.traverse:
            traverser = Engine(RandomAccess(self.graph_builder.build()),
                               traversal_colors=self.traversal_colors)
            graph = traverser.traverse_from(self.traversal_start_kmer).graph
            return Interactor(graph) \
                .make_graph_nodes_consistent([self.traversal_start_kmer]) \
                .graph
        else:
            raise Exception("Need to load a command")
@attr.s(slots=True)
class CollapseKmerUnitigsTestDriver(object):
    """Wraps SerializerTestDriver: forwards builder calls to it, then
    collapses the resulting kmer graph into unitigs on run()."""
    serializer_driver = attr.ib(attr.Factory(SerializerTestDriver))

    def __getattr__(self, name):
        # Proxy any unknown attribute to the inner driver, but return *this*
        # driver from the generated method so chained calls stay on this class.
        serializer_method = getattr(self.serializer_driver, name)

        def method(*args):
            serializer_method(*args)
            return self

        return method

    def run(self):
        """Build the kmer graph, collapse its unitigs and wrap the result in
        a CollapsedKmerUnitgGraphExpectation for assertions."""
        kmer_graph = self.serializer_driver.run()
        collapser = unitig \
            .UnitigCollapser(kmer_graph) \
            .collapse_kmer_unitigs()
        return CollapsedKmerUnitgGraphExpectation(collapser.unitig_graph)
| true |
4f71b4e12a83edce3fb130da75cdebf21ec1c605 | Python | elmart/biicodemaps | /tests/test_builders.py | UTF-8 | 3,231 | 2.90625 | 3 | [] | no_license | import os
from biicodemaps.builders import (BCMStringMapBuilder, BCMFileMapBuilder,
RETStringMapBuilder, RETFileMapBuilder)
class TestBuildingFromBCMString:
    # Builds a map from an inline .bcm description and checks cities/roads.
    def test_works_with_correct_input(self):
        map_ = BCMStringMapBuilder('''
        # A sample map
        [cities]
        A, 0.5, -1.2
        B, -3.2, 0.8
        C, 5, 5
        [roads]
        A, B
        B, C
        ''').build()
        assert len(map_.cities) == 3
        assert len(map_.roads) == 2
        # B sits on both roads, so it must report two connections.
        city = map_.city('B')
        assert city.name == 'B' and city.x == -3.2 and city.y == 0.8
        assert len(city.roads) == 2
class TestBuildingFromBCMFile:
    # Smoke test: building from the bundled sample .bcm file must not raise.
    def test_loads_ok(self):
        BCMFileMapBuilder(os.path.join(os.path.dirname(__file__),
                                       'sample_map.bcm')).build()
class TestBuildingFromRETString:
    # Builds maps from ASCII-art RET descriptions ('o' = origin, 'x' = blocked)
    # and checks the derived city/road counts with and without diagonal roads.
    def test_diagonal(self):
        map_, spec = RETStringMapBuilder('''
        | |
        | o |
        | x |
        | x |
        | x |
        | |
        | x |
        | x |
        | xxx |
        | |
        | |
        ''').build()
        assert spec['origin'] == (2, 1)
        assert len(map_.cities) == 113
        assert len(map_.roads) == 363
        # blocked cells must not become cities
        for coords in [(3, -1), (4, -2), (5, -3), (0, -5),
                       (0, -6), (0, -7), (1, -7), (2, -7)]:
            assert not map_.city('%s:%s' % coords)

    def test_non_diagonal(self):
        map_, spec = RETStringMapBuilder('''
        | |
        | o |
        | x |
        | x |
        | x |
        | |
        | x |
        | x |
        | xxx |
        | |
        | |
        ''', diagonal=False).build()
        assert spec['origin'] == (2, 1)
        assert len(map_.cities) == 113
        # fewer roads than the diagonal variant (363) since diagonals are off
        assert len(map_.roads) == 192
        for coords in [(3, -1), (4, -2), (5, -3), (0, -5),
                       (0, -6), (0, -7), (1, -7), (2, -7)]:
            assert not map_.city('%s:%s' % coords)
class TestBuildingFromRETFile:
    # Smoke test: building from the bundled sample .ret file must not raise.
    def test_loads_ok(self):
        RETFileMapBuilder(os.path.join(os.path.dirname(__file__),
                                       'sample_map.ret')).build()
| true |
b5810ee901e7f85d904307646a80e84444b69312 | Python | kobeomseok95/codingTest | /boj/gold/16236.py | UTF-8 | 1,540 | 2.9375 | 3 | [] | no_license | from sys import stdin
from collections import deque
READ = lambda : stdin.readline().strip()
# 4-neighbour offsets (up, right, down, left)
dy, dx = [-1, 0, 1, 0], [0, 1, 0, -1]
INF = int(1e9)
n = int(READ())
arr = []
for _ in range(n):
    arr.append(list(map(int, READ().split())))
# the shark starts at size 2; the grid cell containing 9 marks its position
now_size, now_y, now_x = 2, 0, 0
for i in range(n):
    for j in range(n):
        if arr[i][j] == 9:
            now_y, now_x = i, j
arr[now_y][now_x] = 0
def bfs():
    """BFS distances from the shark's cell over cells it may pass (value <= size).

    Unreachable cells keep distance -1. Reads the module globals n, arr,
    now_size, now_y, now_x.
    """
    dist = [[-1] * n for _ in range(n)]
    queue = deque()
    queue.append((now_y, now_x))
    dist[now_y][now_x] = 0
    while queue:
        cy, cx = queue.popleft()
        for step in range(4):
            ty, tx = cy + dy[step], cx + dx[step]
            if not (0 <= ty < n and 0 <= tx < n):
                continue
            if dist[ty][tx] == -1 and arr[ty][tx] <= now_size:
                dist[ty][tx] = dist[cy][cx] + 1
                queue.append((ty, tx))
    return dist
def find(dist):
    """Pick the nearest edible fish (then top-most, then left-most).

    A fish is edible when 1 <= size < now_size. Returns (row, col, distance)
    or None when no edible fish is reachable. Reads globals n, arr, now_size.
    """
    best = INF
    best_y = best_x = 0
    for row in range(n):
        for col in range(n):
            reachable = dist[row][col] != -1
            if reachable and 1 <= arr[row][col] < now_size and dist[row][col] < best:
                best = dist[row][col]
                best_y, best_x = row, col
    if best == INF:
        return None
    return best_y, best_x, best
# Simulation: repeatedly BFS to the nearest edible fish until none remain,
# accumulating travel time in `result`.
result, ate = 0, 0
while True:
    value = find(bfs())
    if value == None:
        print(result)
        break
    else:
        # move the shark onto the fish, add the travel time, eat it
        now_y, now_x = value[0], value[1]
        result += value[2]
        arr[now_y][now_x] = 0
        ate += 1
        # the shark grows after eating as many fish as its current size
        if now_size <= ate:
            ate = 0
            now_size += 1
ddc01726446806d6b1a240851f86071a3661a91c | Python | mindt102/C4T_A03 | /Session 13/app.py | UTF-8 | 2,007 | 2.703125 | 3 | [] | no_license | from flask import Flask, redirect, render_template, request
import psycopg2
from secret import username, password, db_name
app = Flask(__name__)
@app.route('/')
def index():
    # Root route: static greeting used as a smoke test.
    return 'hello'
@app.route('/example')
def test():
    # Demo route returning a static string.
    return 'this is test'
@app.route('/redirect')
def test_redirect():
    # Demo of an external redirect response.
    return redirect('https://www.google.com/')
@app.route('/website', methods=["GET", "POST"])
def website():
    """Survey endpoint.

    GET  -> ensure the `answers` table exists, then render the form.
    POST -> store the submitted answers plus the client's platform/browser.
    """
    if request.method == "POST":
        form = request.form
        answer1 = form["question1"]
        answer2 = form["question2"]
        user_agent = request.user_agent
        src_string = 'postgresql://{}:{}@localhost:5432/{}'.format(username, password, db_name)
        conn = psycopg2.connect(src_string)
        # Parameterized query: never interpolate user input into SQL.
        # The previous str.format() version was vulnerable to SQL injection.
        sql = '''
        insert into answers(answer1, answer2, platform, browser) values (%s, %s, %s, %s)
        '''
        cursor = conn.cursor()
        cursor.execute(sql, (answer1, answer2, user_agent.platform, user_agent.browser))
        conn.commit()
        conn.close()
        return render_template("test.html")
    elif request.method == "GET":
        src_string = 'postgresql://{}:{}@localhost:5432/{}'.format(username, password, db_name)
        conn = psycopg2.connect(src_string)
        # Static DDL (no user input), so plain execute is fine here.
        sql = '''
        CREATE TABLE if not exists answers
        (
            id serial primary key,
            answer1 varchar,
            answer2 varchar,
            platform varchar,
            browser varchar
        )
        '''
        cursor = conn.cursor()
        cursor.execute(sql)
        conn.commit()
        conn.close()
        return render_template("test.html")
# @app.route('/test/<name>')
# def test_name(name):
# return 'my name is {}'.format(name)
if __name__ == '__main__':
    # Flask development server; use a real WSGI server in production.
    app.run()
f6b06ec350d92a658b45756253b5e0ed130378fa | Python | skirat/Awesome_Python_Scripts | /EthicalHackingScripts/password-cracker/password-cracker.py | UTF-8 | 1,124 | 3.578125 | 4 | [
"MIT"
] | permissive | import hashlib
print("**************PASSWORD CRACKER ******************")
# Flag recording whether the password was recovered from the wordlist.
pass_found = 0
input_hash = input("Enter the hashed password:")
pass_doc = input("\nEnter passwords filename including path(root / home/):")
try:
    # Catch only OSError (file missing/unreadable) instead of a bare except,
    # so KeyboardInterrupt and real bugs are not silently swallowed.
    pass_file = open(pass_doc, 'r')
except OSError:
    print("Error:")
    print(pass_doc, "is not found.\nPlease give the path of file correctly.")
    quit()
# `with` guarantees the wordlist handle is closed (the original leaked it).
with pass_file:
    # comparing the input_hash with the MD5 hashes of the words in the file
    for word in pass_file:
        # hash the UTF-8 bytes of the word with the trailing newline stripped
        digest = hashlib.md5(word.encode('utf-8').strip()).hexdigest()
        if digest == input_hash:
            print("Password found.\nThe password is:", word)
            pass_found = 1
            break
# if password is not found.
if not pass_found:
    print("Password is not found in the", pass_doc, "file")
print('\n')
print("***************** Thank you **********************")
| true |
fe9a1baf871de1f786ec8c77e4cedfa525aff93e | Python | chenm001/pydgin | /scripts/get-test-list.py | UTF-8 | 871 | 3.140625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
#=========================================================================
# get-test-list.py
#=========================================================================
# Generates a list of tests in a directory
usage = """Usage:
 ./get-test-list.py dir [extension]
 Prints the list of tests in a python-friendly fashion in dir. Extension
 can be specified to determine which files are tests (which will be
 stripped). By default, the extension is ".d"."""
import os
import sys
# Require at least the directory argument (Python 2 script).
if len( sys.argv ) < 2:
    print usage
    sys.exit(1)
dir = sys.argv[1]
ext = sys.argv[2] if len( sys.argv ) >= 3 else ".d"
ext_len = len( ext )
# Keep only files carrying the extension, then strip it and sort.
ext_files = filter( lambda f : f.endswith( ext ), os.listdir( dir ) )
test_files = sorted( map( lambda f : f[:-ext_len], ext_files ) )
print "tests = [\n  ",
print ",\n  ".join( map( '"{}"'.format, test_files ) )
print "]"
| true |
3a9e607618428f7bb2778b92188cad653c74b17e | Python | nameusea/pyGreat | /application/ctrlmobile/t04test.py | UTF-8 | 1,167 | 2.546875 | 3 | [] | no_license | # 根据元素获取坐标
# python+uiautomator+adb dump(Android手机自动化) 根据文本寻找所在坐标并点击
# https://blog.csdn.net/u014520313/article/details/79218897
# ! -*- coding:utf-8 -*-
# ! /usr/bin/python
import tempfile
import os
import re
import xml.etree.cElementTree as et
import time
import random
def tap_coord_by_name_id(deviceid, attrib_name, text_name):
    """Dump the current Android UI hierarchy via uiautomator and tap the
    centre of every node whose `attrib_name` attribute equals `text_name`.

    NOTE(review): the pull destination path under E: is hard-coded, and
    os.popen is fire-and-forget (no error checking) -- confirm before reuse.
    """
    time.sleep(6)
    os.popen('adb -s' + ' ' + deviceid + ' ' + 'shell uiautomator dump --compressed /data/local/tmp/uidump.xml')
    os.popen('adb -s' + ' ' + deviceid + ' ' + r'pull data/local/tmp/uidump.xml E:\code\Smart\uidump.xml')
    source = et.parse("uidump.xml")
    root = source.getroot()
    for node in root.iter("node"):
        if node.attrib[attrib_name] == text_name:
            # bounds look like "[x1,y1][x2,y2]"; extract the four integers
            bounds = node.attrib["bounds"]
            pattern = re.compile(r"\d+")
            coord = pattern.findall(bounds)
            # tap the midpoint of the bounding box
            Xpoint = (int(coord[2]) - int(coord[0])) / 2.0 + int(coord[0])
            Ypoint = (int(coord[3]) - int(coord[1])) / 2.0 + int(coord[1])
            os.popen('adb -s' + ' ' + deviceid + ' ' + 'shell input tap %s %s' % (str(Xpoint), str(Ypoint)))
3cf41c899bf28d0c594964d1f80fcd5f08279637 | Python | kakkarotssj/Algorithms | /Backtracking/knightstour.py | UTF-8 | 1,345 | 3.296875 | 3 | [] | no_license | from sys import stdin, stdout
ti = lambda : stdin.readline().strip()  # read one stripped input line
ma = lambda fxn, ti : map(fxn, ti.split())  # map fxn over whitespace-split tokens
ol = lambda arr : stdout.write(' '.join(str(i) for i in arr) + '\n')  # print space-separated
os = lambda i : stdout.write(str(i) + '\n')  # print one value (shadows the os module name)
olws = lambda arr : stdout.write(''.join(str(i) for i in arr) + '\n')  # print with no separator
def printchess(chess):
    # Print the 8x8 move-number board, one row per line (Python 2 print syntax).
    for i in range(8):
        for j in range(8):
            print chess[i][j],
        print ""
def is_cell_valid(chess, nextx, nexty):
    """A move target is valid when it lies on the 8x8 board and is unvisited (-1)."""
    on_board = 0 <= nextx <= 7 and 0 <= nexty <= 7
    return on_board and chess[nextx][nexty] == -1
def utilknighttour(x, y, move_no, chess, movex, movey):
    """Backtracking step: try to place move number `move_no` starting from (x, y).

    Returns True once all 64 squares are numbered; otherwise tries each of the
    8 knight moves in order, recursing and undoing the move on failure.
    """
    if move_no == 64:
        return True
    for k in range(8):
        cand_x = x + movex[k]
        cand_y = y + movey[k]
        if is_cell_valid(chess, cand_x, cand_y):
            chess[cand_x][cand_y] = move_no
            if utilknighttour(cand_x, cand_y, move_no + 1, chess, movex, movey):
                return True
            # dead end: un-mark the square and try the next move
            chess[cand_x][cand_y] = -1
    return False
def knighttour():
    """Solve the knight's tour from (0, 0) on an 8x8 board and print the board."""
    # the 8 knight move offsets, paired element-wise
    movex = [2, 1, -1, -2, -2, -1, 1, 2]
    movey = [1, 2, 2, 1, -1, -2, -2, -1]
    chess = [[-1 for _ in range(8)] for _ in range(8)]
    chess[0][0] = 0
    move_no = 1
    x, y = 0, 0
    if not utilknighttour(x, y, move_no, chess, movex, movey):
        os("Solution does not exist.")  # `os` is the stdout-writer lambda above
    else:
        printchess(chess)
def main():
    # Script entry point.
    knighttour()

if __name__ == '__main__':
    main()
| true |
efc6c83b220d2e34a0d7feb93649fd13230d2eb1 | Python | sauravgsh16/DataStructures_Algorithms | /g4g/ALGO/Searching/Coding_Problems/27_find_closest_pair_from_2_sorted_arrays.py | UTF-8 | 644 | 3.90625 | 4 | [] | no_license | ''' Find closest pair to a given number x, from two sorted arrays '''
def find_pair(arr1, arr2, x):
    """Return one element from each sorted array whose sum is closest to x.

    Two-pointer scan: start at the smallest of arr1 and the largest of arr2,
    stepping whichever pointer moves the sum towards x.
    """
    best_gap = 2**32
    best_i = best_j = 0
    i, j = 0, len(arr2) - 1
    while i < len(arr1) and j >= 0:
        total = arr1[i] + arr2[j]
        gap = abs(total - x)
        if gap < best_gap:
            best_i, best_j, best_gap = i, j, gap
        if total > x:
            j -= 1
        else:
            i += 1
    return arr1[best_i], arr2[best_j]
# Demo inputs (Python 2 print syntax): expected outputs are (1, 30) and (7, 40).
arr1 = [1, 4, 5, 7]
arr2 = [10, 20, 30, 40]
x = 32
arr3 = [1, 4, 5, 7]
arr4 = [10, 20, 30, 40]
y = 50
print find_pair(arr1, arr2, x)
print find_pair(arr3, arr4, y)
| true |
b706c80097e939691b2e6af90709d855fe1841d7 | Python | goddoe/chatbot-engine | /src/chatbot_server/chatbot_rest_instance_server.py | UTF-8 | 2,279 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
import os
import pickle
import argparse
from socket import *
from flask import Flask, render_template, session, request
from flask_restful import Resource, Api, reqparse
from datetime import datetime
from chatbot.chatbot import Chatbot
app = Flask(__name__)
api = Api(app)
# Paths are rooted at the CE_SRC environment variable (must be set).
resource_name_list_path = os.environ['CE_SRC'] + '/data/chatbot_info/resource_name_list.pickle'
chatbot_instance_dir_path = os.environ['CE_SRC'] + '/data/chatbot_instance'
def init_arg_parser():
    """Build a flask-restful RequestParser with one argument per resource
    name found in the pickled resource_name_list file."""
    with open(resource_name_list_path, "rb") as f:
        resource_name_list = pickle.load(f)
    ps = reqparse.RequestParser()
    for resource_name in resource_name_list:
        ps.add_argument(resource_name)
    return ps
class Chatbot_rest(Resource):
    """REST resource exposing per-user chatbot instances.

    Each request loads the user's serialized chatbot from disk, runs one
    turn of dialogue and saves the instance back.
    NOTE(review): the instance path is built directly from the URL's
    user_id with no sanitisation -- a crafted id could escape the instance
    directory (path traversal); validate user_id before reuse.
    """
    def __init__(self):
        # Path of the currently loaded instance file; set by load_chatbot().
        self.chatbot_instance_path = ''
        pass

    def __del__(self):
        pass

    def get(self,user_id):
        """Handle GET where user_id is '<id>_<query>'; returns the bot's reply."""
        user_id, query = user_id.split('_')
        print(query)
        self.load_chatbot(user_id)
        chatbot =self.chatbot
        print("receive : " + str(query))
        result = chatbot.talk(query)
        print("response : " + str(result))
        self.save_chatbot()
        return result

    def post(self,user_id):
        """Handle POST with JSON body {'message': {'text': ...}}; replies with
        the bot's message plus a fixed 200 code and empty parameter dict."""
        self.load_chatbot(user_id)
        msg_from_user= dict(request.get_json(force=True))
        print("receive : " + str(msg_from_user))
        msg = msg_from_user['message']['text']
        msg_to_user = self.chatbot.talk(msg)
        msg_to_user['code'] = 200
        msg_to_user['parameter'] = {}
        print("response : " + str(msg_to_user))
        self.save_chatbot()
        return msg_to_user

    def load_chatbot(self,user_id):
        """Load the user's chatbot from <instance_dir>/<user_id>.cbinstance."""
        self.chatbot_instance_path = chatbot_instance_dir_path + '/'+str(user_id)
        self.chatbot_instance_path += '.cbinstance'
        self.chatbot = Chatbot()
        self.chatbot.load(self.chatbot_instance_path)

    def save_chatbot(self):
        """Persist the current chatbot back to its instance file."""
        self.chatbot.save(self.chatbot_instance_path)
# Module-level parser (currently unused by the resource) and route registration.
parser = init_arg_parser()
api.add_resource(Chatbot_rest, '/chatbotinstance/<string:user_id>')
def main():
    # Run the REST server on all interfaces, port 6070 (debug mode).
    app.run(host='0.0.0.0', port=6070, debug=True)
    #while True:
    #    print(chatbot.talk(input()))

if __name__ == '__main__':
    main()
| true |
3b7a8c5b58b4a330bab40bde2f53aa36c7a7e7be | Python | codingblocks/get-outta-here | /src/scenes/cards/click_buffer.py | UTF-8 | 528 | 2.75 | 3 | [
"MIT"
] | permissive | import pygame
from src.config import MOUSE_CLICK_BUFFER
class ClickBufferer:
    """Debounces left mouse clicks: a click only registers when at least
    MOUSE_CLICK_BUFFER ms have passed since the last accepted click."""
    def __init__(self):
        # Start in the past so the very first click is accepted immediately.
        self.last_click = pygame.time.get_ticks() - MOUSE_CLICK_BUFFER

    def buffer_clicked(self):
        # NOTE(review): returns True when a buffered click fires, False when the
        # button is up, but implicitly None when pressed within the buffer
        # window -- callers relying on truthiness are unaffected.
        if pygame.mouse.get_pressed()[0]:
            ticks = pygame.time.get_ticks()
            able_to_click = self.last_click + MOUSE_CLICK_BUFFER < ticks
            if able_to_click:
                self.last_click = ticks
                return True
        else:
            return False
| true |
30bdad90f8f6e53a163233815cd7191925c58e98 | Python | jlr-academy/MP-Rachana-Joshi | /menu_MP.py | UTF-8 | 8,323 | 2.96875 | 3 | [] | no_license | from os import system
from typing import List
import dict_Functions_MP
import db_Functions
import csv
import pymysql
import exporttocsv
# In-memory data stores; only `orders` is still csv-backed (see __main__),
# products/couriers now live in the database.
products =[]
couriers = []
orders =[]
def _save_and_exit():
    """Persist all data (products/couriers via DB export, orders via csv) and quit."""
    dict_Functions_MP.clear_screen()
    exporttocsv.export_products_to_csv_d()
    exporttocsv.export_couriers_to_csv_d()
    dict_Functions_MP.save_dict(orders,"orders.csv")
    print("\nData saved. Thanks for visiting, bye!\n")
    exit()


def main_menu():
    """Top-level menu (recursion-based loop): save/exit, or dispatch to the
    product, courier or order sub-menus."""
    dict_Functions_MP.clear_screen()
    menu = input('''
    Please enter your choice:
    -------------------------
    0 Save & Exit
    1 Product Menu
    2 Courier Menu
    3 Orders Menu
    -------------------------
    ''')
    # ' ' and '0' both meant save-and-exit; the duplicated save/exit bodies
    # of the original are factored into _save_and_exit().
    if menu in (' ', '0'):
        _save_and_exit()
    elif menu == '1':
        dict_Functions_MP.clear_screen()
        ask_choice_prod()
    elif menu == '2':
        dict_Functions_MP.clear_screen()
        ask_choice_courier()
    elif menu == '3':
        dict_Functions_MP.clear_screen()
        ask_choice_order()
    else:
        print("********** Incorrect choice, please enter choice between 0 - 3 **********")
        main_menu()
#Function to ask choice from the user for product menu
def ask_choice_prod():
    """Product sub-menu: list/create/update/delete products (DB-backed).

    ' ' saves everything and exits; '0' returns to the main menu.
    NOTE(review): the final `else` binds only to the last `if`, so after
    returning from a recursive call the "Incorrect choice" branch can fire.
    """
    dict_Functions_MP.clear_screen()
    print("\n********** Product Menu **********\n")
    prod =input('''
    Enter your choice:
    ------------------------------
    0 Main Menu
    1 Print product list
    2 Create a new product
    3 Update an existing product
    4 Delete an existing product
    ------------------------------
    ''')
    if prod == ' ':
        dict_Functions_MP.clear_screen()
        exporttocsv.export_products_to_csv_d()
        exporttocsv.export_couriers_to_csv_d()
        #dict_Functions_MP.save_dict(products,"products.csv")
        #dict_Functions_MP.save_dict(couriers,"couriers.csv")
        dict_Functions_MP.save_dict(orders,"orders.csv")
        print("\nData saved. Thanks for visiting, bye!")
        exit()
    if prod == '0':
        main_menu()
    if prod == '1':
        dict_Functions_MP.clear_screen()
        #dict_Functions_MP.print_prod(products)
        db_Functions.print_products_d()
        input("\nPlease press any key to continue...")
        ask_choice_prod()
    if prod == '2':
        dict_Functions_MP.clear_screen()
        db_Functions.create_product_d()
        #dict_Functions_MP.create_product(products)
        input("\nPlease press any key to continue...")
        ask_choice_prod()
    if prod == '3':
        dict_Functions_MP.clear_screen()
        db_Functions.update_product_d()
        #dict_Functions_MP.update_product(products)
        input("\nPlease press any key to continue...")
        ask_choice_prod()
    if prod == '4':
        dict_Functions_MP.clear_screen()
        db_Functions.delete_product_d()
        #dict_Functions_MP.del_product(products)
        input("\nPlease press any key to continue...")
        ask_choice_prod()
    else:
        print("********** Incorrect choice, please enter choice between 0 - 4 **********")
        ask_choice_prod()
#Function to ask choice from the user for courier menu
def ask_choice_courier():
    """Courier sub-menu: list/create/update/delete couriers (DB-backed).

    ' ' saves everything and exits; '0' returns to the main menu.
    NOTE(review): the final `else` binds only to the last `if`, so after
    returning from a recursive call the "Incorrect choice" branch can fire.
    """
    dict_Functions_MP.clear_screen()
    print("\n********** Courier Menu **********\n")
    courier =input('''
    Enter your choice:
    ------------------------------
    0 Main Menu
    1 Print courier list
    2 Create a new courier
    3 Update an existing courier
    4 Delete an existing courier
    ------------------------------
    ''')
    if courier == ' ':
        dict_Functions_MP.clear_screen()
        exporttocsv.export_products_to_csv_d()
        exporttocsv.export_couriers_to_csv_d()
        #dict_Functions_MP.save_dict(products,"products.csv")
        #dict_Functions_MP.save_dict(couriers,"couriers.csv")
        dict_Functions_MP.save_dict(orders,"orders.csv")
        print("\nData saved. Thanks for visiting, bye!")
        exit()
    if courier == '0':
        main_menu()
    if courier == '1':
        dict_Functions_MP.clear_screen()
        #dict_Functions_MP.print_courier(couriers)
        db_Functions.print_courier_d()
        input("\nPlease press any key to continue...")
        ask_choice_courier()
    if courier == '2':
        dict_Functions_MP.clear_screen()
        #dict_Functions_MP.create_courier(couriers)
        db_Functions.create_courier_d()
        input("\nPlease press any key to continue...")
        ask_choice_courier()
    if courier == '3':
        dict_Functions_MP.clear_screen()
        #dict_Functions_MP.update_courier(couriers)
        db_Functions.update_courier_d()
        input("\nPlease press any key to continue...")
        ask_choice_courier()
    if courier == '4':
        dict_Functions_MP.clear_screen()
        #dict_Functions_MP.del_courier(couriers)
        db_Functions.delete_courier_d()
        input("\nPlease press any key to continue...")
        ask_choice_courier()
    else:
        print("********** Incorrect choice, please enter choice between 0 - 4 **********")
        ask_choice_courier()
#Function to ask choice from the user for order menu
def ask_choice_order():
    """Order sub-menu: list/create/update/delete orders (csv-backed lists).

    ' ' saves everything and exits; '0' returns to the main menu.
    NOTE(review): the menu offers choices 0-5 but the error message still
    says "between 0 - 4"; the final `else` binds only to the last `if`.
    """
    dict_Functions_MP.clear_screen()
    print("\n********** Order Menu **********\n")
    order =input('''
    Enter your choice:
    ------------------------------
    0 Main Menu
    1 Print orders list
    2 Create a new order
    3 Update an existing order status
    4 Update an existing order
    5 Delete an existing order
    ------------------------------
    ''')
    if order == ' ':
        dict_Functions_MP.clear_screen()
        exporttocsv.export_products_to_csv_d()
        exporttocsv.export_couriers_to_csv_d()
        #dict_Functions_MP.save_dict(products,"products.csv")
        #dict_Functions_MP.save_dict(couriers,"couriers.csv")
        dict_Functions_MP.save_dict(orders,"orders.csv")
        print("\nFiles saved. Thanks for visiting, bye!")
        exit()
    if order == '0':
        main_menu()
    if order == '1':
        dict_Functions_MP.clear_screen()
        dict_Functions_MP.print_order(orders)
        dict_Functions_MP.print_grp_order(orders)
        #dict_Functions_MP.print_order_grp(orders)
        input("\nPlease press any key to continue...")
        ask_choice_order()
    if order == '2':
        dict_Functions_MP.clear_screen()
        dict_Functions_MP.create_order(orders,couriers,products)
        input("\nPlease press any key to continue...")
        ask_choice_order()
    if order == '3':
        dict_Functions_MP.clear_screen()
        dict_Functions_MP.update_order_status(orders)
        input("\nPlease press any key to continue...")
        ask_choice_order()
    if order == '4':
        dict_Functions_MP.clear_screen()
        dict_Functions_MP.update_order(orders,couriers,products)
        input("\nPlease press any key to continue...")
        ask_choice_order()
    if order == '5':
        dict_Functions_MP.clear_screen()
        dict_Functions_MP.del_order(orders)
        input("\nPlease press any key to continue...")
        ask_choice_order()
    else:
        print("********** Incorrect choice, please enter choice between 0 - 4 **********")
        ask_choice_order()
#load all files into their respective lists.
# If the files do not exist or are empty, then the lists are empty
if __name__ == '__main__':
    #products = dict_Functions_MP.load_dict("products.csv")
    #couriers = dict_Functions_MP.load_dict("couriers.csv")
    #cursor = dict_Functions_MP.load_env_var_d(0)
    # Only orders are csv-backed now; products/couriers live in the database.
    orders = dict_Functions_MP.load_dict("orders.csv")
    main_menu()
| true |
7e0d9d973e4387d5054df33d3e9dab6ef2f5f5d4 | Python | skymoonfp/python_learning | /python_project/mythread/multi_process.py | UTF-8 | 1,978 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
**************************
文件: multi-process.py
IDE: PyCharm
创建时间: 2019/5/27 17:24
@author: skymoon
"""
import time
from multiprocessing import Pool
from threading import Thread
from mytime.timing import timer
def f(x):
    """Square x after a short sleep that simulates blocking (I/O-bound) work.

    With no blocking the single-process path wins; with blocking, the pool wins.
    """
    time.sleep(0.05)
    squared = x * x
    return squared
@timer
def multiprocessing(n):
    """Square 0..99 on a pool of n worker processes.

    f must be a module-level function so it can be pickled for the pool.
    """
    with Pool(n) as p:
        a = p.map(f, list(range(100)))
    print(a)
@timer
def singleprocessing():
    """Square 0..99 sequentially in the current process (timing baseline)."""
    a = list(map(f, list(range(100))))
    print(a)
class MyThread(Thread):
    """Thread subclass that runs target(args) and exposes the return value.

    Bug fix: the original invoked the target inside __init__, so the work ran
    synchronously on the creating thread and start() executed nothing. The
    call now happens in run(), i.e. on the worker thread. Call get_result()
    only after join().
    """
    def __init__(self, target, args):
        super(MyThread, self).__init__()
        # Private names avoid clobbering threading.Thread internals.
        self._fn = target
        self._fn_arg = args
        self.result = None

    def run(self):
        # Executed on the worker thread once start() is called.
        self.result = self._fn(self._fn_arg)

    def get_result(self):
        """Return the target's return value (valid after join())."""
        return self.result
# @timer
# def multithreading():
# a = []
# for i in range(100):
# for j in range(5):
# t = MyThread(func=f, x=i)
# t.start()
# a.append(t.get_result())
# i += 1
# print(a)
#
# @timer
# def multithreading(n):
# with Thread(n) as p:
# a = p.map(f, list(range(100)))
# print(a)
#
@timer
def multithreading():
    """Thread-based timing experiment.

    NOTE(review): only the first 12 items of `lis` are ever processed, and the
    final nested loop prints each of the 12 results 100 times -- this looks
    like unfinished experiment code rather than a faithful parallel map.
    """
    lis = list(range(100))
    threads = []
    for i in range(12):
        t = MyThread(target=f, args=lis[i])
        threads.append(t)
    for i in range(12):
        threads[i].start()
    for i in range(12):
        threads[i].join()
    for i in range(100):
        for j in range(12):
            print(threads[j].get_result())
if __name__ == '__main__':
    # Recorded timings (seconds) for each configuration are kept as comments.
    # multiprocessing(1)  # 5.28586483001709
    # print()
    # multiprocessing(5)  # 1.3174772262573242
    # print()
    multiprocessing(12)  # 1.148735761642456
    # print()
    # multiprocessing(20)  # 1.372330665588379
    # print()
    # singleprocessing()  # 5.072439432144165
    # print()
    multithreading()
| true |
3378f7d06818bfea5aa3ef6e3910c788a3f31d87 | Python | yiyingli/Web-Security-Academy-Series | /sql-injection/lab-13/sqli-lab-13.py | UTF-8 | 1,065 | 2.546875 | 3 | [] | no_license | import sys
import requests
import urllib3
import urllib
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
proxies = {'http': 'http:127.0.0.1:8080', 'https': 'https://127.0.0.1:8080'}
def blind_sqli_check(url):
sqli_payload = "' || (SELECT pg_sleep(10))--"
sqli_payload_encoded = urllib.parse.quote(sqli_payload)
cookies = {'TrackingId': 'fY3mWGvtddfW37rS' + sqli_payload_encoded, 'session': '3tjAqEsmAUv1oSufDDKMp8Dpr9LKqwcd'}
r = requests.get(url, cookies=cookies, verify=False, proxies=proxies)
if int(r.elapsed.total_seconds()) > 10:
print("(+) Vulnerable to blind-based SQL injection")
else:
print("(-) Not vulnerable to blind based SQL injection")
def main():
if len(sys.argv) != 2:
print("(+) Usage: %s <url>" % sys.argv[0])
pring("(+) Example: %s www.example.com" % sys.argv[0])
sys.exit(-1)
url = sys.argv[1]
print("(+) Checking if tracking cookie is vulnerable to time-based blind SQLi....")
blind_sqli_check(url)
if __name__ == "__main__":
main() | true |
0f7d3e29c432006e6553b8d999340b5da147fcd1 | Python | Heisenber-Y/learn_python | /day1_15/day6.py | UTF-8 | 2,989 | 4.21875 | 4 | [] | no_license | #函数和模块的使用
"""
输入M和N计算C(M,N)
"""
# m=int(input('m=: '))
# n=int(input('n=: '))
#
# fm=1
# for num in range(1,m+1):
# fm *= num
# fn=1
# for num in range(1,n+1):
# fm *= num
# fmn=1
# for num in range(1,m-n+1):
# fmn *= fmn
# print(fm // fn //fmn)
"""
求阶乘
:param num: 非负整数
:return: num的阶乘
"""
# num=int(input("请输入: "))
# def factorial(num):
# result=1
# for n in range(1,num+1):
# result *=n
# return result
#
#
# print(factorial(num))
"""
摇色子
:param n: 色子的个数
:return: n颗色子点数之和
"""
# from random import randint
#
#
# def roll_dice(n=2):
# total=0
# for i in range(n):
# total += randint(1,6)
# return total
#
# def add(a=0,b=0,c=0):
# return a+b+c
# # 如果没有指定参数那么使用默认值摇两颗色子
# print(roll_dice())
# # 摇三颗色子
# print(roll_dice(3))
# print(add())
# print(add(1))
# print(add(1, 2))
# print(add(1, 2, 3))
# # 传递参数时可以不按照设定的顺序进行传递
# print(add(c=50, a=100, b=200))
# 在参数名前面的*表示args是一个可变参数
# 即在调用add函数时可以传入0个或多个参数
# def add(*args):
# total=0
# for var in args:
# total+=var
# return total
# print(add())
# print(add(1))
# print(add(1, 2))
# print(add(1, 2, 3))
# print(add(1, 3, 5, 7, 9))
#用模块管理函数
"""对于任何一种编程语言来说,给变量、函数这样的标识符起名字都是一个让人头疼的问题,
因为我们会遇到命名冲突这种尴尬的情况。最简单的场景就是在同一个.py文件中定义了两个同名函数,
由于Python没有函数重载的概念,那么后面的定义会覆盖之前的定义,也就意味着两个函数同名函数实际上只有一个是存在的。"""
# def foo():
# print('hello-world')
# def foo():
# print('haiyoushui ')
#
# foo()
#
# from model1 import foo
#
# foo()
#
# from model2 import foo
# foo()
# from model1 import foo
# from model2 import foo
# foo()
# print('---'.format(__name__))
# def foo():
# pass
# def bar():
# pass
#
# if __name__=="__main__":
# print('call foo()'.format(__name__))
# foo()
# print('call bar()')
# bar()
#我们来讨论一下Python中有关变量作用域的问题。
# def foo():
# b='hello'
# def bar():
# c=True
# print(a)
# print(b)
# print(c)
# bar()
#
# if __name__ =='__main__':
# a=100
# foo()
#通过函数调用修改全局变量`a`的值,但实际上下面的代码是做不到的
# def foo():
# a=200
# print(a)
#
# if __name__=='__main__':
# a=100
# foo()
# print(a)
#全局变量
# def foo():
# global a
# a=200
# print(a)
#
# if __name__=="__main__":
# a=100
# foo()
# print(a)
def main():
    """Placeholder entry point for this lesson's (commented-out) exercises."""
    marker = "--"
    print(marker)

if __name__ == '__main__':
    main()
| true |
8465e0816f778e3befeba79635aade75218a427a | Python | fangulob/CursoPython | /Calculadora.py | UTF-8 | 540 | 4.28125 | 4 | [] | no_license | num1=int(input("Digite número A :"))
num2 = int(input("Digite número B :"))
suma = num1 + num2
resta = num1 - num2
multiplicacion = num1 * num2
potencia = num1 ** num2
print("Suma: " + str(num1) + " + " + str(num2) + " = " + str(suma))
print("Resta: " + str(num1) + " - " + str(num2) + " = " + str(resta))
print("Multiplicacion: " + str(num1) + " * " + str(num2) + " = " + str(multiplicacion))
# Guard: the original crashed with ZeroDivisionError when B was 0.
if num2 != 0:
    division = num1 / num2
    print("Division: " + str(num1) + " / " + str(num2) + " = " + str(division))
else:
    print("Division: " + str(num1) + " / " + str(num2) + " = indefinida (division por cero)")
print("Potencia: " + str(num1) + " ^ " + str(num2) + " = " + str(potencia))
| true |
6777bf867db6a7c48b4068f3698a19a92bf7c078 | Python | madlechuck/lpthw | /Ex13b.py | UTF-8 | 309 | 3.015625 | 3 | [] | no_license | from sys import argv
# Python 2 exercise script: unpack the script name and first name from argv.
script, name = argv
print "\n"
# %r shows the name with quotes; the trailing comma keeps raw_input on the same line.
print "Hello %r, what is you last name? " % (str(name)),
last_name = raw_input()
# Three banners of ten dotted lines each (trailing commas suppress newlines).
print ".-.-.-.\n" * 10,
print ".-.-.-.\n" * 10,
print ".-.-.-.\n" * 10,
print "Hello %s %s" % (name, last_name)
# Three single-line banners, each ten segments wide.
print ".-.-.-." * 10
print ".-.-.-." * 10
print ".-.-.-." * 10
| true |
b024c376a56adfa4678374b72718e87c21c5dc56 | Python | guohuacao/Python-Database-Web-Access | /json/json_project.py | UTF-8 | 592 | 2.8125 | 3 | [] | no_license | import json
import urllib

# Python 2 exercise: fetch a JSON comment feed and sum every "count" field.
url = 'http://python-data.dr-chuck.net/comments_187133.json'
Count = 0
Sum = 0
# while True:
#     address = raw_input('Enter location: ')
#     if len(address) < 1 : break
#
#     url = serviceurl + urllib.urlencode({'sensor':'false', 'address': address})
print 'Retrieving', url
uh = urllib.urlopen(url)
data = uh.read()
print 'Retrieved',len(data),'characters'
#print data
info = json.loads(data)
#print 'User count:', len(info)
#print json.dumps(info, indent = 4)
lst = info["comments"]
print len(lst)
# Accumulate the integer "count" of every comment record.
for item in lst:
    Sum += int(item["count"])
print Sum
| true |
46235c93922ae52bdfdf356785ea8f70eaaa0f5f | Python | antinwo/wex | /info.py | UTF-8 | 2,777 | 3.03125 | 3 | [] | no_license | import httplib2
import json
import operator
import time
from functools import reduce
from decimal import Decimal
# parse dict
def get_from_dict(data_dict, map_list):
    """Walk a nested mapping: return data_dict[k0][k1]... for the keys in map_list."""
    current = data_dict
    for key in map_list:
        current = current[key]
    return current
# get prices info for chosen pair
def url(pair):
    """Return the wex.nz ticker endpoint URL for the given currency pair."""
    return 'https://wex.nz/api/3/ticker/' + pair
# info url with list of available pairs
pairs_url = 'https://wex.nz/api/3/info'
http = httplib2.Http()
# One-off request: list the tradable pairs and show the exchange server time.
pair_response, pair_content = http.request(pairs_url, 'GET')
pairs = json.loads(pair_content.decode('utf-8'))
print("Available pair list: ", list(pairs["pairs"].keys()))
print("Server time :", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(get_from_dict(pairs, ["server_time"]))))
user_input = input("Please enter pair symbol: ")
# previous poll's parsed payload (empty dict before the first poll)
previous_data = {}
change = Decimal(0)
percent_change = Decimal(0)
data_content = False  # replaced by the raw HTTP body after the first poll
# Poll the ticker every 30 s: echo the previous snapshot, fetch a fresh one,
# and print the absolute/relative change between their "last" prices.
while True:
    try:
        if previous_data:
            change = Decimal(0)  # NOTE(review): redundant — recomputed below
            # Re-parse the previous response body to echo the old snapshot.
            data = json.loads(data_content.decode('utf-8'))
            print("prev High " + user_input + ": ", get_from_dict(data, [user_input, "high"]))
            print("prev Low " + user_input + ": ", get_from_dict(data, [user_input, "low"]))
            print("prev Last " + user_input + ":", get_from_dict(data, [user_input, "last"]))
            print("prev Updated:",
                  time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(get_from_dict(data, [user_input, "updated"]))))
        data_response, data_content = http.request(url(user_input), 'GET')
        data = json.loads(data_content.decode('utf-8'))
        # calculating price change between the new and previous "last" price
        if user_input in previous_data:
            change = Decimal(
                get_from_dict(data, [user_input, "last"])) - Decimal(get_from_dict(previous_data, [user_input, "last"]))
            percent_change = (Decimal(
                get_from_dict(data, [user_input, "last"])) / Decimal(
                get_from_dict(previous_data, [user_input, "last"]))-1)*100
        else:
            change = Decimal(0)
            percent_change = Decimal(0)
        print("High " + user_input + ": ", get_from_dict(data, [user_input, "high"]))
        print("Low " + user_input + ": ", get_from_dict(data, [user_input, "low"]))
        print("Last " + user_input + ":", get_from_dict(data, [user_input, "last"]))
        print("Updated:",
              time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(get_from_dict(data, [user_input, "updated"]))))
        print("Absolute change: ", change.quantize(Decimal('0.001')))
        print("Relative change: ", percent_change.quantize(Decimal('0.00001')), "%")
        previous_data = data
        time.sleep(30)
    except KeyError as key_err:
        # Unknown pair symbol: the API payload lacks the requested key.
        print("There's no such pair: ", key_err)
| true |
5efd317c8b23238577e1731ff6214fa670d27f13 | Python | zabcdefghijklmnopqrstuvwxy/leecode | /LeeCode/topic456/python/topic456.py | UTF-8 | 354 | 2.625 | 3 | [] | no_license | class Solution:
def find132pattern(self, nums: List[int]) -> bool:
stack = []
Min=float('-inf')
for i in range(len(nums)-1,-1,-1):
if nums[i] < Min:
return True
while stack and nums[i] > stack[-1]:
Min=stack.pop()
stack.append(nums[i])
return False | true |
494124dd0326e21bc02eac2d03e7494a63e2fb87 | Python | MathAdventurer/Data_Mining | /week7/Plot_decision_boundary.py | UTF-8 | 3,635 | 3.28125 | 3 | [
"MIT"
] | permissive | #coding=utf8
"""
Created on Thu Mar 12 17:48:23 2020
@author: Neal LONG
Hint max() is a built-in function in Python
"""
import pickle
import matplotlib.pyplot as plt
import numpy as np
def hinge_loss(f_x,y_true,margin=1):
    """
    Hinge loss max(0, margin - y_true*f_x) for a linear discriminant score
    f_x and true label y_true; zero once the example is classified with at
    least `margin` of confidence.
    """
    slack = margin - y_true * f_x
    return slack if slack > 0 else 0
def zero_one_loss(f_x,y_true):
    """
    Zero-one loss: 0 when the score f_x agrees in sign with the label
    y_true (f_x*y_true >= 0 counts as correct), otherwise 1.
    """
    return 0 if f_x*y_true >= 0 else 1
# Load precomputed 2-D features and binary labels.
# NOTE(review): 'Q2_fetures.pkl' looks like a typo of "features", but the
# string must match the on-disk filename — confirm before renaming.
with open('Q2_fetures.pkl','rb') as rf:
    X = pickle.load(rf)
with open('Q2_labels.pkl','rb') as rf:
    Y_true = pickle.load(rf)
# Remap labels from {0, 1} to {-1, +1} for the margin-based losses below.
Y_true[Y_true==0]=-1
print(len(X),len(Y_true))
def linear_func(W,X):
    """
    Affine score w0 + w1*x0 + w2*x1 of a 2-d point X under weights W
    (W[0] is the intercept).
    """
    w0, w1, w2 = W[0], W[1], W[2]
    return w0 + w1*X[0] + w2*X[1]
def boundary_line(W,x):
    """Solve w0 + w1*x + w2*y = 0 for y — the decision boundary at x."""
    return -(W[0] + W[1]*x) / W[2]
# Weight tuples are (w0, w1, w2): intercept, x-coefficient, y-coefficient.
W = (-0.45236953,2.23604794, -3.94803128)
#f(x) = -0.45236953+2.23604794*X[0]-3.94803128*X[1] = 0
# ->3.94803128*X[1] = -0.45236953+2.23604794*X[0]
# y = (-0.45236953+2.23604794*x)/3.94803128
plt.figure(1, figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1], c=Y_true)
#generate dense plots
s = np.arange(min(X[:, 0]),max(X[:, 0]),0.1)
#generate the corresponding y for each z in s
t = []
for z in s:
    t.append((-0.45236953+2.23604794*z)/3.94803128)
#plt.plot(s, t,label = 'W')
#
# W1/W2 are the reference boundary W3 with the intercept shifted; the
# second set of assignments below overrides this first set (±0.17 shift).
W1 = (-0.762686,1.50126098,-2.3948365 )
W2 = (-0.422686,1.50126098,-2.3948365 )
W3 = (-0.59862686,1.50126098,-2.3948365)
# W1 = (-0.5986268-1,1.50126098,-2.3948365 )
# W2 = (-0.5986268+1,1.50126098,-2.3948365 )
# W3 = (-0.59862686,1.50126098,-2.3948365 )
W1 = (-0.59862686-0.17,1.50126098,-2.3948365 )
W2 = (-0.59862686+0.17,1.50126098,-2.3948365 )
W3 = (-0.59862686,1.50126098,-2.3948365 )
# Draw the three candidate decision boundaries over the scatter plot.
for W, label in zip((W1,W2,W3), ('W1','W2','W3')):
    # zip((W1,W2,W3), ('W1','W2','W3')) pairs up: (W1,'W1'), (W2,'W2'), (W3,'W3')
    t = [boundary_line(W, x) for x in s]
    plt.plot(s, t, label = label)
plt.legend()
plt.show()
# #class zip(object)
#  |  zip(*iterables) --> zip object
#  |
#  |  Return a zip object whose .__next__() method returns a tuple where
#  |  the i-th element comes from the i-th iterable argument.  The .__next__()
#  |  method continues until the shortest iterable in the argument sequence
#  |  is exhausted and then it raises StopIteration.
# pairs up elements at the same position across the iterables
# zip.__next__() is equivalent to next(); both raise StopIteration once iteration is exhausted
#Compute zero_one_loss (total misclassification count) per candidate boundary
print("\nZero one loss:")
for W, label in zip((W1,W2,W3), ('W1','W2','W3')): # parallel unpacking via zip
    zero_one_loss_total = 0
    for i in range(len(X)):
        x_i = X[i]
        f_x_i=linear_func(W,x_i)
        y_i = Y_true[i]
        loss = zero_one_loss(f_x_i,y_i)
        if loss >0:  # redundant guard for 0/1 loss, but harmless
#            print(i,f_x_i,y_i,loss)
            zero_one_loss_total+=loss
    print(label, zero_one_loss_total)
#Compute hinge loss (total slack, margin = 1) per candidate boundary
print("\nHinge loss:")
for W, label in zip((W1,W2,W3), ('W1','W2','W3')):
    hinge_loss_total = 0
    for i in range(len(X)):
        x_i = X[i]
        f_x_i=linear_func(W,x_i)
        y_i = Y_true[i]
        loss = hinge_loss(f_x_i,y_i,1)
        if loss >0:
            hinge_loss_total+=loss
    print(label, hinge_loss_total)
| true |
781ba0cac90a2304372c17ae87bd5fc3eed7fe9e | Python | varennes/1dwalk | /analysis.py | UTF-8 | 1,274 | 2.59375 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
def format(value):
    """Render a number with exactly three decimal places.

    NOTE: shadows the builtin `format`; the name is kept for compatibility.
    """
    return "{0:.3f}".format(value)
# Read the run parameters from input.txt: first line is a header, then
# runTotal, N, L, v — one value per line.  (Python 2 print statements.)
f = open( 'input.txt', 'r')
content = [x.strip('\n') for x in f.readlines()]
content.pop(0)
content = [ float(x) for x in content]
f.close()
runTotal = int(content[0])
N = int(content[1])
L = content[2]
v = content[3]
print ' '
print runTotal
print N
print L
print v
tMean = [];
tSdev = [];
# For every 10th count (10, 20, ..., up to N) read its samples from
# tRunXXX.dat (filename zero-padded to 3 digits) and record mean/stddev.
for i in range(9,N,10):
    filename = 'tRun00' + str(i+1) + '.dat'
    if (i+1) >= 10:
        filename = 'tRun0' + str(i+1) + '.dat'
    if (i+1) >= 100:
        filename = 'tRun' + str(i+1) + '.dat'
    f = open(filename,'r')
    tRun = [ float(x.strip('\n')) for x in f.readlines()]
    tMean.append(np.mean(tRun))
    tSdev.append(np.std(tRun))
    f.close()
# Write "<n> <mean> <stddev>" rows to fpt_v0_<100*v>.dat.
vStr = '%.0f' % (v*100)
filename = 'fpt_v0_' + vStr + '.dat'
fo = open(filename, 'w')
j = 0
for i in range(9,N,10):
    s = str(i+1) + ' ' + str(tMean[j]) + ' ' + str(tSdev[j]) + '\n'
    fo.write(s)
    j += 1
#    fo.write(str(formatted))
fo.close()
# n = [ i+1 for i in range(N)]
# plt.errorbar( n, tMean, yerr=tSdev)
# plt.xlim([min(n)-1, max(n)+1])
# # plt.xticks(n)
# plt.xlabel('N')
# plt.ylabel('FPT')
# plt.show()
#
# plt.errorbar( n, tMean, yerr=tSdev)
# plt.xscale('log')
# plt.yscale('log')
# plt.show()
| true |
201101ab6a3cc0f048850bd34973407a9f6ee493 | Python | tarajano/udemy_python_oop | /time_class.py | UTF-8 | 237 | 3.171875 | 3 | [] | no_license |
import datetime
from date_class import Date
class Time(Date):
    """Date subclass that adds wall-clock time helpers."""
    def get_time(self):
        """Return the current local time formatted as HH:MM:SS."""
        now = datetime.datetime.today()
        return now.strftime('%H:%M:%S')
    def print_me(self):
        """Print the current date followed by the current time."""
        print('%s %s' % (self.get_date(), self.get_time()))
| true |
7e73f676b0c04ddc15e04d23267655a6c1e897dc | Python | SyedSajjadHaider/Tools_scripts | /function_extract.py | UTF-8 | 1,170 | 3.09375 | 3 | [] | no_license | #READ ME :- wherever path "/path/of/file/file.c" is given change it with your own file.c path
# This script extract functions from .c file and print it on the terminal
# You can redirect it to the file for example -> python3 file.c > all_fun_def.c
# The [extract] function gets the 'start' and 'end' of function line number , you can do with whatever you like
# you can contact me at sajjads26@gmail.com
import re
import linecache
def extract(start, end, path="/path/of/file/file.c"):
    """Print lines `start`..`end` (1-based, inclusive) of `path` to stdout.

    `path` defaults to the original hard-coded source file so existing
    callers keep working; passing it explicitly makes the helper reusable.
    Lines are printed verbatim — linecache keeps their trailing newlines,
    so end='' avoids double spacing.
    """
    for i in range(start, end + 1):
        print(linecache.getline(path, i), end='')
def count( line,start ):
    """Scan file.c from line `start`, balancing '{' / '}' counts; when the
    brace depth first returns to zero, print the whole function body via
    extract() and stop.

    NOTE(review): the `line` parameter is never read — it is immediately
    shadowed by the loop variable.  The source path is hard-coded, and the
    file handle `f` is never closed on any path.
    """
    f = open("/path/of/file/file.c","r")
    end=0
    counter = 0
    for line in f:
        end=end+1  # current 1-based line number
        if(end > start):
            if ('{' in line):
                counter = counter + 1
            if ('}' in line):
                counter = counter - 1
                if ( counter == 0):
                    extract(start,end )
                    return
# Driver: find lines that look like C function signatures — they contain
# "(" and end with ")\n" — whose NEXT line is exactly "{\n", then dump the
# whole function via count()/extract().
f = open("/path/of/file/file.c","r+")
start=0
flag1=0  # NOTE(review): never used
for line in f:
    start=start+1  # 1-based line counter
    if ('(' in line) and line.endswith(')\n') :
        if linecache.getline("/path/of/file/file.c",start+1) == '{\n':
            count(line,start)
| true |
fad0ed45a12e1de2a22c6292c6fdbe79ef65c30a | Python | Natacha7/Python | /Condicional/Diferente.py | UTF-8 | 304 | 4 | 4 | [] | no_license | #Leer dos número y decir si son iguales o no
def son_iguales(a, b):
    """Return 'son iguales' when a equals b, otherwise 'son diferentes'."""
    return "son iguales" if a == b else "son diferentes"
# Read two integers and report whether they are equal.
a = int(input("Digite Numero1: "))
b = int(input("Digite Numero2: "))
respuesta = son_iguales(a, b)
print("Los números son: ", respuesta)
| true |
8370ae64d9d71ab71d2780554b14be68b10306db | Python | KevinJW/OpenColorIO | /src/bindings/python/DocStrings/LogTransform.py | UTF-8 | 589 | 2.640625 | 3 | [
"BSD-3-Clause",
"CC-BY-4.0",
"BSD-2-Clause",
"Zlib"
] | permissive | # SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
# Documentation-only stub: every method body is `pass`.  These docstrings
# appear to be attached to the real PyOpenColorIO.LogTransform binding by
# the build (this file lives under bindings/python/DocStrings) — confirm
# before editing docstring text, as it is the shipped API documentation.
class LogTransform:
    """
    LogTransform
    """
    def __init__(self):
        pass
    def getBase(self):
        """
        getBase()
        Returns the base of :py:class:`PyOpenColorIO.LogTransform`.
        """
        pass
    def setBase(self, base):
        """
        setBase(base)
        Sets the base in :py:class:`PyOpenColorIO.LogTransform`.
        :param base: base of log transform
        :type base: float
        """
        pass
| true |
fe40e24853c4d4418707a1eca156857ec07777cd | Python | Fence/Documents | /DRL_data/Actions to Sequence/Learning High-level Planning from Text/hierarchical_planning/code/feature_computation/FeatureSpace.py | UTF-8 | 724 | 3.109375 | 3 | [] | no_license |
class FeatureSpace:
def __init__(self):
self.dFeatureToIndex = {};
self.dIndexToFeature = {};
# svm light requires feature indexes to start at 1
self.iIndex = 1;
def FeatureIndex(self, sFeature):
if sFeature in self.dFeatureToIndex:
return self.dFeatureToIndex[sFeature];
else:
self.dFeatureToIndex[sFeature] = self.iIndex;
self.dIndexToFeature[self.iIndex] = sFeature;
self.iIndex += 1;
return self.iIndex-1;
fs = FeatureSpace();
def FeatureIndex(sFeature):
return fs.FeatureIndex(sFeature);
def FeatureString(iIndex):
return fs.dIndexToFeature[iIndex];
def MaxIndex():
return fs.iIndex;
| true |
fa352ecc8fe21024b6283165e296478c1c5aaa4b | Python | bangyanz/pythonprojects1 | /oop.py | UTF-8 | 2,836 | 4.0625 | 4 | [] | no_license | class Animal(object):
def __init__(self):
print
"Animal created"
def whoAmI(self):
print
"Animal"
def eat(self):
print
"Eating"
class Dog(Animal):
    """Animal subclass demonstrating method overriding and new methods.

    Fixed: all three methods had `print` and the message on separate lines
    (mangled Python-2 print statements) and printed nothing useful.
    """
    def __init__(self):
        # Animal.__init__(self)  # deliberately left unchained in the original
        print("Dog created")
    def whoAmI(self):
        print("Dog")
    def bark(self):
        print("Woof!")
# Demo: Dog overrides whoAmI()/adds bark(), but inherits eat() from Animal.
d = Dog()
d.eat()
class Book(object):
    """Tutorial class showing the __str__, __len__ and __del__ hooks.

    Fixed: __init__ and __del__ had `print` and the message on separate
    lines (mangled Python-2 print statements) and printed nothing useful.
    """
    def __init__(self, title, author, pages):
        print("A book is created")
        self.title = title
        self.author = author
        self.pages = pages
    def __str__(self):
        return "Title:%s , author:%s, pages:%s " % (self.title, self.author, self.pages)
    def __len__(self):
        # len(book) reports the page count.
        return self.pages
    def __del__(self):
        # Runs when the instance is garbage-collected.
        print("A book is destroyed")
# Demo instance; constructing it triggers the __init__ print above.
book = Book("Python Rocks!", "Jose Portilla", 159)
class Vehicle(object):
    """A vehicle for sale by Jeffco Car Dealership.

    Attributes:
        wheels: Integer number of wheels.
        miles: Integral miles driven on the vehicle.
        make: Make of the vehicle as a string.
        model: Model of the vehicle as a string.
        year: Integral year the vehicle was built.
        sold_on: Date the vehicle was sold, or None while unsold.
    """
    base_sale_price = 0

    def __init__(self, wheels, miles, make, model, year, sold_on):
        """Return a new Vehicle object."""
        self.wheels, self.miles = wheels, miles
        self.make, self.model = make, model
        self.year, self.sold_on = year, sold_on

    def sale_price(self):
        """Return the asking price as a float; 0.0 once the vehicle is sold."""
        return 0.0 if self.sold_on is not None else 5000.0 * self.wheels

    def purchase_price(self):
        """Return what we would pay to buy the vehicle; 0.0 until it is sold."""
        if self.sold_on is None:
            return 0.0
        return self.base_sale_price - 0.10 * self.miles
class Car(Vehicle):
    """Vehicle sold as a car (base sale price 8000)."""
    def __init__(self, wheels, miles, make, model, year, sold_on):
        """Return a new Car object."""
        self.wheels, self.miles = wheels, miles
        self.make, self.model = make, model
        self.year, self.sold_on = year, sold_on
        self.base_sale_price = 8000
class Truck(Vehicle):
    """Vehicle sold as a truck (base sale price 10000)."""
    def __init__(self, wheels, miles, make, model, year, sold_on):
        """Return a new Truck object."""
        self.wheels, self.miles = wheels, miles
        self.make, self.model = make, model
        self.year, self.sold_on = year, sold_on
        self.base_sale_price = 10000
# Demo of the Vehicle hierarchy.
a = Car(3, 1, 'dodge', '11', '2012', '21')
b = a.purchase_price()
# NOTE(review): the bare `print` + bare expression pairs below look like
# mangled Python-2 `print b` / `print a.wheels`; as written they print at
# most blank lines and discard the values.
print
b
print
a.wheels
| true |
7d391b1bd7bd14937f63130e98bb50ec0092cacf | Python | bill666500/algorithms | /dp/word_break.py | UTF-8 | 362 | 3.40625 | 3 | [] | no_license | def word_break(s, word_dict):
"""
:type s: str
:type word_dict: Set[str]
:rtype: bool
"""
f = [False] * (len(s)+1)
f[0] = True
for i in range(1, len(s)+1):
for j in range(0, i):
if f[j] and s[j:i] in word_dict:
f[i] = True
break
return f[-1]
s = "keonkim"
dic = ["keon", "kim"]
print(word_break(s, dic))
| true |
046197d873dfcaeed989c776b6c5a56762fe21b0 | Python | yamengzhou/Machine_Learning | /deeplearning/projects/pytorch/src/numpy_vs_pytorch/pytorch_version.py | UTF-8 | 1,069 | 3.09375 | 3 | [] | no_license | # simple network by using pytorch
import torch
def main():
    # Train a 2-layer MLP (D_in -> H -> D_out) on random data with a
    # hand-written forward/backward pass (no autograd, old torch API).
    dtype = torch.FloatTensor
    # N is batch size
    N = 64
    # D_in is input dimension
    D_in = 1000
    # H is hidden dimension
    H = 100
    # D_out is output dimension
    D_out = 10
    learning_rate = 1e-6
    # Random input/target batch and randomly initialised weight matrices.
    x = torch.randn(N, D_in).type(dtype)
    y = torch.randn(N, D_out).type(dtype)
    w1 = torch.randn(D_in, H).type(dtype)
    w2 = torch.randn(H, D_out).type(dtype)
    for t in range(500):
        # forward pass
        h = x.mm(w1)
        h_relu = h.clamp(min=0)  # ReLU
        y_pred = h_relu.mm(w2)
        # compute loss (sum of squared errors over the batch)
        loss = (y_pred - y).pow(2).sum()
        print t, loss  # NOTE(review): Python-2 print statement; prints the loss tensor
        # backpropagation (manual gradients for the two weight matrices)
        grad_y_pred = 2.0 * (y_pred - y)
        grad_h_relu = grad_y_pred.mm(w2.t())
        grad_w2 = h_relu.t().mm(grad_y_pred)
        grad_h = grad_h_relu.clone()
        grad_h[h < 0] = 0  # gradient of ReLU: zero where pre-activation < 0
        grad_w1 = x.t().mm(grad_h)
        # update weights (plain SGD step)
        w1 -= grad_w1 * learning_rate
        w2 -= grad_w2 * learning_rate
if __name__ == '__main__':
    main()
bb38e5bfeb123b4230c23e7d62318d6f548d68fe | Python | BeehiveSystems/PracticePython | /2 - Odd or Even.py | UTF-8 | 285 | 4.375 | 4 | [] | no_license | number = input("Enter a number and I will tell you if it is odd or even: ")
# Parse the typed value once; the original converted it three times and
# left an unused `remainder` variable behind.  Behavior (including the
# ValueError on non-numeric input) is unchanged.
value = int(number)
if value % 4 == 0:
    print("The number is even and divisible by 4.")
elif value % 2 == 1:
    print("The number is odd.")
else:
    print("The number is even.")
| true |
95de1982e31b7d671b618363e0fcd9637716e155 | Python | hzy95/fastaTools | /selectBed.py | UTF-8 | 2,088 | 2.984375 | 3 | [] | no_license | """
Created on Mon Nov 19 10:01:48 2018
@author: liuyuan
"""
import argparse,sys
import random
def getopt():
    '''Parse the command-line options.

    -key/--keyword  substring matched against the BED name column (required)
    -b/--bed        input BED file, opened for reading (required)
    -o/--output     output file, opened for writing (required)
    -n/--number     optional sample size; when given, a random subset is kept
    '''
    parser=argparse.ArgumentParser(
        description='selectBed.py: A program to get select bed file accroding seqname',
        usage='selectBed.py -b bedfile -o outputfile -key keyword -n number'
    )
    parser.add_argument('-key','--keyword',type=str,help="The name of sequences which you want wo get",required=True)
    parser.add_argument('-b','--bed',type=argparse.FileType('r'),help="bed file path",required=True)
    parser.add_argument('-o','--output',type=argparse.FileType('w'),help="output file path",required=True)
    parser.add_argument('-n','--number',type=int,help="The number of the sequence you want get")
    args=parser.parse_args()
    return args
def transform_line(line):
    '''Rebuild a split BED record as one string: every field is followed by
    a tab, and the line is terminated with a newline.'''
    return ''.join(field + '\t' for field in line) + '\n'
def getAll(args):
    '''Return (lazily, as a map object) every reformatted BED line whose
    4th column — the name field — contains args.keyword.'''
    keyword = args.keyword
    records = [raw.strip().split()
               for raw in args.bed.readlines() if raw.strip()]
    matched = [record for record in records if keyword in record[3]]
    return map(transform_line, matched)
def getSub(args):
    '''Return args.number lines drawn at random — reproducibly, with a
    fixed seed of 100 — from everything getAll() matches.'''
    matched = list(getAll(args))
    random.seed(100)
    chosen = random.sample(range(len(matched)), args.number)
    return [matched[idx] for idx in chosen]
if __name__=='__main__':
    args=getopt()
    print ("start")
    # With -n: write a random subset; without -n: write every matching line.
    if args.number:
        result=getSub(args)
        args.output.writelines(result)
    else:
        result=getAll(args)
        args.output.writelines(result)
    print ('finished')
4f95e0b68b4baf30dbb5e1cfe1a27e977ec28622 | Python | miferreiro/CDAP-Map-Reduce | /Exercise 2/CombinedQuestions/join_mapper.py | UTF-8 | 588 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
# ---------------------------------------------------------------------------
# This mapper accepts <card, section price> values and makes an append of all elements
# ---------------------------------------------------------------------------
# Hadoop-streaming mapper: re-key each 6-field input record for the join.
for line in sys.stdin:
    line = line.strip()
    key_value = line.split("\t")
    # If the input does not have six fields, it is discarded
    if len(key_value) == 6:
        #<card, section price>
        # Emit the 6th field (card) as the key, then fields 4 and 5.
        print( '%s\t%s\t%s' % (key_value[5], key_value[3], key_value[4]) )
    else:
        continue
| true |
d9f3d51366746f6fa349e6aff64c09c8ff624d85 | Python | emadehsan/hacks | /delParseObjects.py | UTF-8 | 1,165 | 3.46875 | 3 | [] | no_license | '''
Python Script to automate task of
Deleting all the Parse Objects in a Class
@author Emad Ehsan
'''
import httplib
import json
# Insert addresses & credentials here
address = '<IP:PORT>'                    # Parse server "host:port" placeholder
parseUrl = '/parse/classes/<ClassName>'  # REST path of the class to purge
parseAppId = '<APP_ID>'
headers = {'X-Parse-Application-Id': parseAppId}
objs = {}  # most recent batch of objects fetched by getObjs()
'''
Single Request to Parse returns around 100 objects
'''
def getObjs():
    '''Fetch one batch of objects (Parse returns roughly 100 per request)
    into the module-level `objs`; return True while the batch is non-empty.
    Python 2 (httplib).'''
    global objs
    conn = httplib.HTTPConnection(address)
    conn.request('GET', parseUrl, None, headers)
    resp = conn.getresponse()
    jsonData = resp.read()
    pyData = json.loads(jsonData)
    objs = pyData["results"]
    if (len(objs) > 0):
        return True
    return False
'''
Delete all objects received in a single request
'''
def delMultipleObjs():
    '''Issue one DELETE request per object in the current `objs` batch.'''
    # Now make delete request seperate for each object
    for c in objs:
        conn2 = httplib.HTTPConnection(address)
        url = parseUrl + '/' + c["objectId"]
        conn2.request('DELETE', url, None, headers)
        resp2 = conn2.getresponse()
        print 'Del: ' + c["objectId"] + ', resp: ' + resp2.read()
def delAll():
    '''Repeatedly fetch-and-delete batches until the class is empty.'''
    i = 1
    while getObjs():
        print str(i) + 'th GET'
        delMultipleObjs()
        i += 1
    print 'Done!'
if __name__ == '__main__':
    delAll()
d3400a0469b134711285c233112794bf40f5083f | Python | axd8911/Leetcode | /wei_ruan_gao_pin/0003_Longest_Substring_Without_Repeating_Characters.py | UTF-8 | 705 | 3.265625 | 3 | [] | no_license | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
#做一个dict,里面保存的是每个字母的当前index
#如果当前字母存在于字典,并且序列大于front,那就需要把front更新成那个序列的下一个,并且把需要更新成当前index
front = 0
maxLength = 0
res = ''
dict = collections.defaultdict(int)
for i in range(len(s)):
if s[i] in dict and dict[s[i]] >= front:
front = dict[s[i]]+1
dict[s[i]] = i
if i-front+1>maxLength:
res = s[front:i+1]
maxLength = i-front+1
return maxLength
| true |
f4795eba5b525fb7bf90e864ff20ec16fa1619a7 | Python | JamCrumpet/email_generator | /test3.py | UTF-8 | 1,278 | 3.3125 | 3 | [] | no_license | import pandas as pd
import random
# read CSV files and saves as dataframes
df_domains = pd.read_csv("domains.csv")
df_female_first_name = pd.read_csv("female_first_names.csv")
df_last_names = pd.read_csv("last_names.csv")
df_male_first_name = pd.read_csv("male_first_names.csv")
# extract necessary columns
column_domains = df_domains["domain"]
column_female_first_name = df_female_first_name["name"]
column_last_name = df_last_names["lastname"]
column_male_first_name = df_male_first_name["name"]
# pick random values from column
# NOTE(review): random.choice on a Series relies on positional [] access,
# which only works while the index is the default RangeIndex — confirm.
rd_domain = random.choice(column_domains)
rd_female_first_name = random.choice(column_female_first_name)
rd_last_name = random.choice(column_last_name)
rd_male_first_name = random.choice(column_male_first_name)
symbols = ["-", "_", "."]
# Random emails with female first name: three formats (plain, numbered,
# symbol-separated), all sharing the same drawn name/surname/domain.
rd_fe1 = rd_female_first_name + rd_last_name + "@" + rd_domain
rd_fe2 = rd_female_first_name + str(random.randrange(81,99)) + "@" + rd_domain
rd_fe3 = rd_female_first_name + random.choice(symbols) + rd_last_name + "@" + rd_domain
rd_fe = rd_fe1, rd_fe2, rd_fe3
# Build three more random addresses.
group = []
for name in range(3):
    # Fixed: the original called random.choice on the DataFrames themselves;
    # random.choice indexes the frame by position, which selects by column
    # label (KeyError / wrong values) instead of drawing a name.  Draw from
    # the already-extracted columns, as the code above does.
    rd_full_email = random.choice(column_female_first_name) + "@" + random.choice(column_domains)
    group.append(rd_full_email)
print(group)
b0315b86cda4ff7844a4743bdc18dc9bfdb8a504 | Python | CoderQingli/MyLeetCode | /24. Swap Nodes in Pairs.py | UTF-8 | 313 | 3.328125 | 3 | [] | no_license | def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
tmp = ListNode(0)
tmp.next = head
res = tmp
while tmp.next and tmp.next.next:
a = tmp.next
b = tmp.next.next
tmp.next, b.next, a.next = b, a, b.next
tmp = a
return res.next | true |
e379cc1d38f288531037be435c4588a162258b3c | Python | Hiranmayee94/Big-Mart-Python | /Bigmart.py | UTF-8 | 3,623 | 2.96875 | 3 | [] | no_license | #importing the data
import pandas as pd
import numpy as np
import os
# Work from the project folder so the relative CSV paths below resolve.
# NOTE(review): os.chdir returns None, so `wkdir` is always None.
wkdir = os.chdir('C:\\Users\\hi\\Desktop\\Data Science\\Python\\Big mart')
train=pd.read_csv('Train_Data.csv')
test=pd.read_csv('Test_Data.csv')
#summarising the data
summary=train.describe()
train['Outlet_Size'].value_counts()
#finding the missing values
train.isnull().sum()
train.info()
#imputing the missing values
# 12.60 is a hard-coded fill for Item_Weight (presumably its mean/median —
# TODO confirm); 'Medium' fills missing Outlet_Size.
new_item_wt=np.where(train['Item_Weight'].isnull(),12.60,train['Item_Weight'])
#overriding the column
train['Item_Weight']=new_item_wt
new_item_os=np.where(train['Outlet_Size'].isnull(),'Medium',train['Outlet_Size'])
train['Outlet_Size']=new_item_os
#checking if missing values are imputed
train.info()
#importing sklearn packages
from sklearn.preprocessing import LabelEncoder
LE=LabelEncoder()
#converting the categorical variables into signals
train['Item_Type']=LE.fit_transform(train['Item_Type'])
train['Item_Type'].value_counts()
train['Outlet_Size']=LE.fit_transform(train['Outlet_Size'])
train['Outlet_Size'].value_counts()
train['Outlet_Type']=LE.fit_transform(train['Outlet_Type'])
train['Outlet_Type'].value_counts()
#handling inconsistent values
train['Item_Fat_Content'].value_counts()
# Collapse spelling variants into the two canonical categories.
train['Item_Fat_Content'].replace('LF','Low Fat',inplace=True)
train['Item_Fat_Content'].replace('low fat','Low Fat',inplace=True)
train['Item_Fat_Content'].replace('reg','Regular',inplace=True)
train['Item_Fat_Content'].value_counts()
#converting the categorical variables into signals
train['Item_Fat_Content']=LE.fit_transform(train['Item_Fat_Content'])
train['Item_Fat_Content'].value_counts()
train['Outlet_Location_Type']=LE.fit_transform(train['Outlet_Location_Type'])
train['Outlet_Location_Type'].value_counts()
#no of years in business
train['Outlet_Establishment_Year'].value_counts()
train['noofyears']=2018-train['Outlet_Establishment_Year']
#dividing the data into Dependant and Independant
train.info()
Y=train['Item_Outlet_Sales']
X=train[['Item_Weight','Item_Fat_Content','Item_Visibility','Item_Type','Item_MRP','noofyears',
         'Outlet_Size','Outlet_Location_Type','Outlet_Type']]
#applying linear and logistic regression
import statsmodels.api as sm
model_lm=sm.OLS(Y,X).fit()
model_lm.summary()
from sklearn import linear_model
lm=linear_model.LinearRegression()
model=lm.fit(X,Y)
preds_LR=model.predict(X)
from sklearn.metrics import mean_squared_error
# NOTE(review): every RMSE below is computed on the training data itself
# (no train/test split), so the scores are optimistic.
rmse_LR=np.sqrt(mean_squared_error(Y,preds_LR))
print(rmse_LR)
########### Applying random forest ################ use RandomForestClassifier for categorical
from sklearn.ensemble import RandomForestRegressor
rf=RandomForestRegressor(n_estimators=500)
model_rf=rf.fit(X,Y)
preds_rf=model_rf.predict(X)
rmse_RF=np.sqrt(mean_squared_error(Y,preds_rf))
print(rmse_RF)
################### Applying supoort vector machine ################
from sklearn.svm import SVR
svr_r=SVR(kernel='rbf')
model_svr=svr_r.fit(X,Y)
preds_svr=model_svr.predict(X)
rmse_svr=np.sqrt(mean_squared_error(Y,preds_svr))
print(rmse_svr)
# Second SVR with a polynomial kernel; the svr_r / model_svr / preds_svr
# variables from the RBF run are overwritten.
from sklearn.svm import SVR
svr_r=SVR(kernel='poly')
model_svr=svr_r.fit(X,Y)
preds_svr=model_svr.predict(X)
rmse_svr=np.sqrt(mean_squared_error(Y,preds_svr))
print(rmse_svr)
##################### Applying Neural Network######################
from sklearn.neural_network import MLPRegressor
MLP=MLPRegressor(activation='relu',max_iter=100,hidden_layer_sizes=(10,10,10))
MLP.fit(X,Y)
preds_mlp=MLP.predict(X)
from sklearn.metrics import mean_squared_error
rmse=np.sqrt(mean_squared_error(Y,preds_mlp))
print(rmse)
8789131161032f32d5aea06386b8e07de4f5c6d7 | Python | andrewrizk/ie_pandas | /src/ie_pandas/DataFrame.py | UTF-8 | 5,943 | 3.609375 | 4 | [
"MIT"
] | permissive | import logging
import numpy as np
import matplotlib.pyplot as plt
class DataFrame:
    # Minimal pandas-like DataFrame backed by a 2-D numpy object array.
    def __init__(self, data, cols=None, index=None):
        """Dataframe class takes an input of types: list of lists, numpy arrays, a dictionary of lists, and a dictionary of numpy arrays and returns a dataframe with the specified input. The class method also works with an optional argument of column names and row names as list."""
        if isinstance(data, np.ndarray) and data.dtype.type is np.str_:
            logging.warning(
                'All values in the dataframe are strings, if you wish to avoid this add dtype="object" inside the numpy array'
            )
        elif isinstance(data, list):
            data = np.array(data, dtype=object)
        elif isinstance(data, dict):
            # Dict input: keys become column names; rows are rebuilt by
            # reading the per-column sequences position by position.
            cols = list(data.keys())
            matrix = []
            for ind in range(len(data[cols[0]])):
                row = [data[col][ind] for col in cols]
                matrix.append(row)
            data = np.array(matrix, dtype=object)
        if cols is None:
            # Default column names are stringified positions "0", "1", ...
            cols = [str(col) for col in list(range(len(data[0])))]
        if index is None:
            index = list(range(len(data)))
        self.cols = cols
        self.index = index
        self.data = data
    def __getitem__(self, items):
        """Used to map the specified index to the corresponding values within the dataframe"""
        # A list of column names or a single name selects columns; anything
        # else (ints, slices, tuples) is forwarded to numpy indexing.
        if isinstance(items, list):
            cols = [self.cols.index(item) for item in items]
            return self.data[:, cols]
        elif isinstance(items, str):
            cols = self.cols.index(items)
            return self.data[:, cols]
        return self.data[items]
    def formatted_frame(self):
        # Tab-separated text rendering: header row, then "<index> |<row>" lines.
        string = "\t" + "\t".join(map(str, self.cols)) + "\n"
        for ind, row in enumerate(self.data):
            string += str(self.index[ind]) + " |\t" + "\t".join(map(str, row)) + "\n"
        return string
    def __str__(self):
        """Used as a representation for the class object"""
        return self.formatted_frame()
    def __repr__(self):
        """Used as a representation of the class object"""
        return self.formatted_frame()
    def get_row(self, row):
        """Returns selected row from a dataframe by specifying row index"""
        # String arguments are resolved through the row labels first.
        if isinstance(row, str):
            row = self.index.index(row)
        return self.data[row].tolist()
    def __setitem__(self, index, value):
        """Used to alter/update the values in the specified index to new values"""
        self.data[index] = value
    def num_cols(self):
        """Returns the numeric columns in a dataframe as an array of lists"""
        # Record the column position once per float()-convertible value...
        lst = []
        for i in range(len(self[1])):  # NOTE(review): uses row 1 for the column count — assumes >= 2 rows
            for j in self[:, i]:
                try:
                    float(j)
                    lst.append(i)
                except:  # NOTE(review): bare except silently skips non-numeric values
                    pass
        # ...then keep only the columns where every row converted.
        lst_indices = []
        for i in lst:
            if lst.count(i) == len(self[:, 1]):
                lst_indices.append(i)
        lst_indices = list(set(lst_indices))
        self_float = self[:, lst_indices].astype("float64")
        return self_float
    def min(self):
        """Returns a list of the minimum values for each of the numeric columns in a dataframe"""
        self_float = self.num_cols()
        mins = []
        for i in range(len(self_float[1])):
            mins.append(self_float[:, i].min())
        mins = [int(i) if i == int(i) else float(i) for i in mins]  # integral -> int, else float
        return mins
    def max(self):
        """Returns a list of the maximum values for each of the numeric columns in a dataframe"""
        self_float = self.num_cols()
        maxs = []
        for i in range(len(self_float[1])):
            maxs.append(self_float[:, i].max())
        maxs = [int(i) if i == int(i) else float(i) for i in maxs]
        return maxs
    def mean(self):
        """Returns a list of column means for all numeric columns in a dataframe"""
        self_float = self.num_cols()
        mean_lst = []
        for i in range(len(self_float[1])):
            mean_lst.append(self_float[:, i].mean())
        mean_lst = [int(i) if i == int(i) else float(i) for i in mean_lst]
        return mean_lst
    def median_from_list(self, lst):
        """Returns the median of a sorted list/column taking into consideration whether the column has an even or odd number of values"""
        sortedLst = sorted(lst)
        lstLen = len(lst)
        index = (lstLen - 1) // 2
        if lstLen % 2:
            return sortedLst[index]
        else:
            # Even length: average the two middle elements.
            return (sortedLst[index] + sortedLst[index + 1]) / 2.0
    def median(self):
        """Returns a list of column medians for all numeric columns in a dataframe. The function appends median items computed from the median_from_list function and forms a list of medians"""
        self_float = self.num_cols()
        median_lst = []
        for i in range(len(self_float[1])):
            median_lst.append(self.median_from_list(self_float[:, i]))
        median_lst = [int(i) if i == int(i) else float(i) for i in median_lst]
        return median_lst
    def sum(self):
        """Returns a list of column summation for all numeric columns in a dataframe"""
        self_float = self.num_cols()
        summed = []
        for i in range(len(self_float[1])):
            summed.append(self_float[:, i].sum())
        summed = [int(i) if i == int(i) else float(i) for i in summed]
        return summed
    def visualize(self, col1, col2):
        """To return a plot, graphically showing relationship between 2 numerical columns."""
        # Reject the call unless every value in both columns is numeric.
        lst = list(np.concatenate((col1, col2)))
        for i in lst:
            try:
                float(i)
            except:  # NOTE(review): bare except; `return print(...)` yields None
                return print('Please enter numerical columns only.')
        plt.plot(col1, col2)
        plt.plot(col1, col2, 'o')  # overlay point markers on the line
        plt.show()
af4450f5aec4c82c560c2b9ed227c0169022bb97 | Python | XiomRB/tytus | /parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Sql_select/Select.py | UTF-8 | 888 | 3.109375 | 3 | [
"MIT"
] | permissive | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class Select(Instruccion):
    # Constructor args: dist, tipo, lcol, lcol2, linners, where, lrows,
    # plus (linea, columna) — presumably the source position for error
    # reporting; confirm against the Instruccion base class.
    def __init__(self, dist, tipo, lcol, lcol2, linners, where, lrows, linea, columna):
        Instruccion.__init__(self,tipo,linea,columna)
        self.dist = dist
        self.lcol = lcol
        self.lcol2 = lcol2
        self.linners = linners
        self.where = where
        self.lrows = lrows
    def ejecutar(self, tabla, arbol):
        # Execute the SELECT against the symbol table / AST context.
        super().ejecutar(tabla,arbol)
        if(self.lcol == "*"):
            # show every column: delegate to the FROM clause to build the table
            val = ""
            val = self.lcol2.devolverTabla(tabla,arbol)
            # NOTE(review): `val` is assigned but never used or returned.
        else:
            # show selected columns only — not implemented yet
            print("mostrar por columna")
'''
instruccion = Select("hola mundo",None, 1,2)
instruccion.ejecutar(None,None)
'''
76ecd1078f356e9a1a19dc61ddb32f36625e7426 | Python | qdzzyb2014/LeetCode | /algorithms/InvertBinaryTree/InvertBinaryTree.py | UTF-8 | 878 | 3.703125 | 4 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Three equivalent ways to mirror a binary tree in place.

    Nodes only need `.left` / `.right` attributes (see the commented
    TreeNode definition above).  All three variants now return the
    mutated root.
    """
    # @param {TreeNode} root
    # @return {TreeNode}
    def invertTree(self, root):
        """Recursive inversion: swap the inverted subtrees at every node."""
        if not root:
            return
        root.right, root.left = self.invertTree(root.left), self.invertTree(root.right)
        return root
    # DFS
    def invertTree2(self, root):
        """Iterative DFS with an explicit stack.

        Fixed: now returns the (mutated) root like the other two variants;
        previously it always returned None.
        """
        stack = [root]
        while stack:
            node = stack.pop()
            if node:
                node.left, node.right = node.right, node.left
                stack.extend([node.left, node.right])
        return root
    def invertTree3(self, root):
        """Iterative BFS (list used as a queue; pop(0) is O(n) per dequeue)."""
        queue = [root]
        while queue:
            node = queue.pop(0)
            if node:
                node.left, node.right = node.right, node.left
                queue.append(node.left)
                queue.append(node.right)
        return root
6446af2698056b817808758011829646ab01c541 | Python | Sirkirill/facetracker-backend | /facein_api/common/permissions/permissions.py | UTF-8 | 2,513 | 2.515625 | 3 | [
"MIT"
] | permissive | from rest_framework.permissions import BasePermission
from profiles.models import User
class IsSuperUser(BasePermission):
    """Grants access exclusively to authenticated superusers."""

    message = 'User is not a superuser.'

    def has_permission(self, request, view):
        user = request.user
        if user and user.is_authenticated:
            return user.is_superuser
        return False

    def has_object_permission(self, request, view, obj):
        # Object-level access mirrors the view-level check.
        return self.has_permission(request, view)
class IsAdmin(BasePermission):
    """Grants access only to company admins.

    Object-level checks additionally require the target user object to
    belong to the same company as the requesting admin.
    """

    message = 'User is not an admin of the company.'

    def has_permission(self, request, view):
        user = request.user
        if user and user.is_authenticated:
            return user.is_admin
        return False

    def has_object_permission(self, request, view, obj):
        if self.has_permission(request, view) and isinstance(obj, User):
            return obj.company_id == request.user.company_id
        return False
class IsSecurityGuide(BasePermission):
    """Grants access only to security guides.

    Object-level checks additionally require the target user object to
    belong to the same company as the requesting security guide.
    """

    message = 'User is not a security guide.'

    def has_permission(self, request, view):
        user = request.user
        if user and user.is_authenticated:
            return user.is_security
        return False

    def has_object_permission(self, request, view, obj):
        if self.has_permission(request, view) and isinstance(obj, User):
            return obj.company_id == request.user.company_id
        return False
class IsSameCompany(BasePermission):
    """Grants object access only when the object belongs to the user's company."""

    message = 'User is not able to access data about other companies.'

    def has_object_permission(self, request, view, obj):
        user = request.user
        if not (user and user.is_authenticated):
            return False
        return isinstance(obj, User) and user.company_id == obj.company_id
return False
class IsOwner(BasePermission):
    """Grants object access only to the user the object represents."""

    def has_object_permission(self, request, view, obj):
        return isinstance(obj, User) and request.user == obj
| true |
85cedc36d5dd1c9b091dbb507a5787b1434f0fd2 | Python | garvitkhurana/Beautiful_soup_web_scraping | /web_scraping_mining.py | UTF-8 | 822 | 3.296875 | 3 | [] | no_license | import nltk
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string
import requests
# Scrape the visible text of a user-supplied page, drop stop words and
# punctuation tokens, then report the position of a search word.
url = input("Enter the website: ")
r = requests.get(url)
soup = BeautifulSoup(r.text, "html.parser")  # explicit parser avoids bs4's warning
text = soup.get_text(strip=True)
tokens = text.split()  # split() already returns a list; no comprehension needed

# Combine NLTK's English stop words with any user-supplied extras.
stop_words = stopwords.words('english')
new_stop_words = stop_words + input("Enter stop words: ").split()

# Use a set: membership tests are O(1) instead of scanning a list per token,
# and string.punctuation iterates directly (no index loop required).
stop_words_with_punctuation = set(new_stop_words) | set(string.punctuation)

clean_tokens = [t for t in tokens if t not in stop_words_with_punctuation]

key = input("Enter the word to be found: ")
if key not in clean_tokens:
    print("Word not found\n")
else:
    print("The word {} is found at {}".format(key, clean_tokens.index(key)))
| true |
5afee5cb37a4b7f6a5c13afaa1b08c45881127ae | Python | kojo-gene/python-tutorials | /lecture45/lecture45prac.py | UTF-8 | 329 | 3.984375 | 4 | [] | no_license | from random import randint
# Draw a random number, compare it with a user-supplied integer, and
# print whichever is larger (or a message when they are equal).
rand = randint(0, 5)
print(rand)
userNum = input("Enter an integer")
try:
    # Convert once; only this line can raise for a non-integer entry.
    value = int(userNum)
except ValueError:
    # Narrowed from a bare except: other errors should not be silenced.
    print("Please run the program again and enter an integer")
else:
    if rand > value:
        print(rand)
    elif value > rand:
        print(value)
    else:
        print("The number are the same")
ab3f0f0ff62d8bd50d79e0ffcde2beca51e477e9 | Python | alyslma/HackerRank | /Python/Strings/SwapCase.py | UTF-8 | 841 | 4.46875 | 4 | [] | no_license | # https://www.hackerrank.com/challenges/swap-case/problem
# You are given a string and your task is to swap cases. In other words, convert all lowercase letters to uppercase letters and vice versa.
# Examples: Www.HackerRank.com → wWW.hACKERrANK.COM || Pythonist 2 → pYTHONIST 2
################################################################
# Using the swapcase() function
def swap_case(s):
    """Return a copy of *s* with uppercase and lowercase letters exchanged."""
    return str.swapcase(s)
if __name__ == '__main__':
    # Read one line from stdin, swap its case, and echo the result.
    print(swap_case(input()))
# Not using the swapcase() function
def swap_case(s):
    """Return a copy of *s* with the case of each letter inverted.

    Characters with no case (digits, punctuation, spaces) pass through
    unchanged, since lower()/upper() leave them as-is.
    """
    # Build via join: O(n) instead of the quadratic repeated string
    # concatenation of the original loop. The per-character rule is
    # identical: already-uppercase (or caseless) -> lower(), else upper().
    return "".join(
        c.lower() if c == c.upper() else c.upper()
        for c in s
    )
if __name__ == '__main__':
    # Script entry point: case-swap a single line read from stdin.
    text = input()
    print(swap_case(text))
| true |
26fcb7f42c1fb596639964d6ebfce10bab9786cc | Python | shopopalo/math | /investment/forms.py | UTF-8 | 462 | 2.515625 | 3 | [] | no_license | from django import forms
class FirstForm(forms.Form):
    """Investment setup form: number of companies and investment options."""

    # Digits 1-9 as identical (value, label) pairs for the dropdowns;
    # only builtins are referenced, so the generator is safe in class scope.
    my_choices = tuple((str(n), str(n)) for n in range(1, 10))

    number_of_companies = forms.ChoiceField(choices=my_choices, label='Кількість компаній')
    number_of_rows = forms.ChoiceField(choices=my_choices, label='Кількість можливих варіантів інвестування')
| true |