blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d27dc89abd880d383c1dec450a1e623e4cb70dbf | Python | devscheffer/SenacRS-Algoritmos-Programacao-3 | /task_2/Class/date_struct.py | UTF-8 | 1,158 | 3.09375 | 3 | [] | no_license | from os import getcwd
from sys import path
cwd = getcwd()
path.append(cwd)
from Task_2.Function.auxiliar import fn_check_day, fn_check_month, fn_check_year, fn_Inicializar_Data
class cls_Date_Struct:
    """Date holder whose components are validated on every assignment.

    year/month/day go through property setters that delegate to the
    fn_check_* helpers; -999 acts as the "unset" sentinel default.
    """

    def __init__(self, year: int = -999, month: int = -999, day: int = -999):
        # Assignment order matters: day validation reads year and month.
        self.year = year
        self.month = month
        self.day = day

    @property
    def year(self) -> int:
        return self.__year

    @year.setter
    def year(self, year: int) -> None:
        self.__year = fn_check_year(year)

    @property
    def month(self) -> int:
        return self.__month

    @month.setter
    def month(self, month: int) -> None:
        self.__month = fn_check_month(month)

    @property
    def day(self) -> int:
        return self.__day

    @day.setter
    def day(self, day: int) -> None:
        # Day validity depends on year/month (month length, leap years),
        # so both must already be assigned — __init__ guarantees that.
        self.__day = fn_check_day(year=self.year, month=self.month, day=day)

    @property
    def data(self):
        # BUG FIX: the original setter was declared as `def data(self):` with
        # no value parameter, so `obj.data = x` raised TypeError and __data
        # was never written, which in turn broke this getter.  The packed
        # date is now computed on demand from the validated components.
        return fn_Inicializar_Data(year=self.year, month=self.month, day=self.day)

    def mtd_Inicializar_Data(self, year: int, month: int, day: int) -> int:
        """Assign all three components (with validation) and return the
        packed date value produced by fn_Inicializar_Data."""
        self.year = year
        self.month = month
        self.day = day
        return fn_Inicializar_Data(year=year, month=month, day=day)
| true |
1381dbfe69edf0752ad8f6ecce01f4369b56e355 | Python | Autodiscovery/mycelium | /mycelium_utils/dronekit_conn.py | UTF-8 | 1,753 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
import time
import dronekit
class DronekitConnector:
    """Thin convenience wrapper around a dronekit vehicle connection."""

    def __init__(self,
                 connection_string,
                 connection_baudrate=921600,
                 timeout=30,
                 source_system=1,
                 source_component=0):
        # Blocks until the vehicle answers or `timeout` seconds elapse.
        self.conn = dronekit.connect(ip=connection_string,
                                     baud=connection_baudrate,
                                     timeout=timeout,
                                     source_system=source_system,
                                     source_component=source_component)
        # Cached mission; populated lazily by get_mission()/fetch_mission().
        self.mission = None

    def arm(self, timeout=10):
        """Wait up to `timeout` seconds for the vehicle to become armable,
        then switch to GUIDED mode and arm."""
        waited = 0
        while not self.conn.is_armable and waited < timeout:
            waited += 1
            time.sleep(1)
        self.set_mode('GUIDED')
        self.conn.arm()

    def disarm(self, timeout=10):
        self.conn.disarm(timeout=timeout)

    def set_mode(self, mode):
        """Request a flight-mode change and return the mode name reported
        one second later."""
        self.conn.mode = dronekit.VehicleMode(mode)
        time.sleep(1)  # give the autopilot a moment to apply the change
        return self.conn.mode.name

    def get_mode(self):
        return self.conn.mode.name

    def reboot(self):
        # BUG FIX: this method previously called self.reboot(), which
        # recursed forever and never touched the vehicle.  Delegate to the
        # underlying connection instead.
        self.conn.reboot()
        # TODO(review): confirm the vehicle actually rebooted (original
        # left the same "check if rebooted" note).

    def get_mission(self, update=False):
        """Return the cached mission; download a fresh copy first when
        update=True.  May return None if never fetched."""
        if update:
            self.mission = self.conn.commands
            self.mission.download()
            self.mission.wait_ready()
        return self.mission

    def fetch_mission(self):
        """Force a fresh mission download and return it."""
        return self.get_mission(update=True)

    def send_to_waypoint(self, waypoint):
        """Jump the mission to the given waypoint index (fetching the
        mission first if needed)."""
        if self.mission is None:
            self.fetch_mission()
        # TODO(review): validate that `waypoint` is a legal index before use.
        self.mission.next(waypoint)

    def get_gps(self):
        """Return [lat, lon, fix_type] from the global frame, or a list of
        Nones when no location is available."""
        gps = self.conn.location.global_frame
        if gps:
            return [gps.lat, gps.lon, gps.fix_type]
        return [None, None, None]

    def disconnect(self):
        self.conn.close()
| true |
5b0744f0726480ebfe47a2e1d5a82fd63fd3e0c1 | Python | geokodzilla/gcalc | /calc.py | UTF-8 | 1,116 | 3.1875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import math as m
from point import Point
def roznice(d, delta, odl):
    """Scale the coordinate difference `delta` by the ratio of the measured
    distance `d` to the segment length `odl` (orthogonal-measurement helper)."""
    ratio = delta / odl
    return d * ratio
def domiary(numer, sp, ep, odcieta, rzedna):
    """Compute a new point from orthogonal measurements.

    `sp`/`ep` are the start/end points of the reference line, `odcieta`
    the chainage along it and `rzedna` the signed lateral offset.
    Returns a Point labelled str(numer) with coordinates rounded to 2 dp.
    """
    # Presumably dist() caches the sp->ep segment length in sp.d, which the
    # helper calls below read — TODO confirm against the Point class.
    sp.dist(ep)
    dx = ep.x - sp.x
    dy = ep.y - sp.y
    along_x = roznice(odcieta, dx, sp.d)
    along_y = roznice(odcieta, dy, sp.d)
    offset_x = roznice(m.fabs(rzedna), dy, sp.d)
    offset_y = roznice(m.fabs(rzedna), dx, sp.d)
    # The offset is applied on one side of the line or the other depending
    # on the sign of `rzedna`.
    side = 1 if rzedna >= 0 else -1
    new_x = sp.x + along_x - side * offset_x
    new_y = sp.y + along_y + side * offset_y
    return Point(str(numer), round(new_x, 2), round(new_y, 2))
def przeciecie(numer, sp, ep, sp2, ep2):
    """Return the intersection point of line sp-ep with line sp2-ep2.

    Each line is defined by two points; the result is a Point labelled
    str(numer) with coordinates rounded to 2 decimal places.  Vertical or
    parallel lines raise ZeroDivisionError (unchanged from the original).
    """
    # FIX: the original named the slopes `l` and `m`; `m` shadowed the
    # module-level `import math as m` alias and `l` is easily misread as
    # `1`.  Renamed for clarity — the arithmetic is unchanged.
    slope_a = (ep.y - sp.y) / (ep.x - sp.x)
    slope_b = (ep2.y - sp2.y) / (ep2.x - sp2.x)
    xN = (sp2.y - sp.y + (slope_a * sp.x) - (slope_b * sp2.x)) / (slope_a - slope_b)
    yN = sp.y + slope_a * (xN - sp.x)
    return Point(str(numer), round(xN, 2), round(yN, 2))
| true |
b30144b4d74b5ce3129c77b69cfc3fb1710b835b | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_96/880.py | UTF-8 | 929 | 2.6875 | 3 | [] | no_license | #
import re
import math
# Batch solver: reads T cases from B-large.in and writes one answer line per
# case to B-large.out (mirrored on stdout).  Each case line holds n, s, p
# followed by n score totals; the answer counts totals that clear the
# thresholds derived from p, allowing at most s of the borderline ones.
with open("B-large.in", "r") as fin, open("B-large.out", "w") as fout:
    rows = [row.strip() for row in fin.readlines()]
    case_count = int(rows[0])
    for case_no in range(1, case_count + 1):
        fields = rows[case_no].split(" ")
        n = int(fields[0])
        s = int(fields[1])
        p = int(fields[2])
        totals = [int(v) for v in fields[3:]]
        # Borderline threshold (max(..., p) guards the small-p corner case)
        # and the threshold above which no special case is needed.
        surprise_floor = max(p * 3 - 4, p)
        normal_floor = p * 3 - 3
        certain = 0
        borderline = 0
        for j in range(n):
            total = totals[j]
            if total >= surprise_floor:
                if total > normal_floor:
                    certain += 1
                else:
                    borderline += 1
        answer = certain + min(borderline, s)
        print(f"Case #{case_no}: {answer}", file=fout)
        print(f"Case #{case_no}: {answer}")
| true |
33db9d0fc1c62b8edf3e8600e89dd753e0f0f5fe | Python | PaavoR/Rad-Racer | /testing.py | UTF-8 | 753 | 2.5625 | 3 | [] | no_license | import cv2
import numpy as np
import pyscreenshot as ImageGrab
from PIL import Image
import process_image
# Grab a 700x500 region of the screen and run it through the lane-detection
# pre-processing pipeline: grayscale -> Canny edges -> Gaussian blur -> ROI mask.
original_image = np.array(ImageGrab.grab(bbox=(100,100,800,600)))
processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
# Detect edges in the grayscale image.
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
# Region-of-interest polygon vertices.
# NOTE(review): coordinates appear to be relative to the grabbed bbox above —
# verify if the capture region changes.
vertices = np.array([[120,450],[120,380],[320,380],[475,350],[675,400],[675,450],
], np.int32)
# Blur the edge image to make line finding more robust.
processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
# Keep only the region of interest from the whole image.
processed_img = process_image.roi(processed_img, [vertices])
im = Image.fromarray(processed_img)
# im.save('test-image-roi.jpg')
| true |
0055054a06b714defc9552297a093463ec98fd15 | Python | xiezhuokai/RubiksNet | /rubiksnet/dataset/config.py | UTF-8 | 3,323 | 2.5625 | 3 | [
"MIT"
] | permissive | import os
def return_ucf101(root_path):
    """UCF-101 metadata: (class count, train list, val list, frame root,
    frame-file name pattern)."""
    return (
        101,
        "ucf101/label/train.txt",
        "ucf101/label/val.txt",
        os.path.join(root_path, "ucf101/rgb"),
        "img_{:05d}.jpg",
    )
def return_hmdb51(root_path):
    """HMDB-51 metadata: (class count, train list, val list, frame root,
    frame-file name pattern)."""
    return (
        51,
        "hmdb/label/train.txt",
        "hmdb/label/val.txt",
        os.path.join(root_path, "hmdb/rgb"),
        "img_{:05d}.jpg",
    )
def return_somethingv1(root_path):
    """Something-Something v1 metadata.

    The first element is a *path* to the category-name file (relative to the
    root), not a class count — return_dataset() distinguishes the two.
    """
    return (
        "somethingv1/label/category.txt",
        "somethingv1/label/train_videofolder.txt",
        "somethingv1/label/val_videofolder.txt",
        os.path.join(root_path, "somethingv1/rgb"),
        "{:05d}.jpg",
    )
def return_somethingv2(root_path):
    """Something-Something v2 metadata.

    Like v1, the first element is the relative path of the category-name
    file; frames use a 6-digit name pattern here.
    """
    return (
        "somethingv2/label/category.txt",
        "somethingv2/label/train_videofolder.txt",
        "somethingv2/label/val_videofolder.txt",
        os.path.join(root_path, "somethingv2/rgb"),
        "{:06d}.jpg",
    )
def return_kinetics(root_path):
    """Kinetics-400 metadata: (class count, train list, val list, frame root,
    frame-file name pattern)."""
    return (
        400,
        "kinetics/labels/train_videofolder.txt",
        "kinetics/labels/val_videofolder.txt",
        os.path.join(root_path, "kinetics/images"),
        "img_{:05d}.jpg",
    )
def return_dataset(dataset, root_path):
    """Resolve a dataset name into concrete paths and a class count.

    Returns (n_class, train_list_path, val_list_path, frames_root, prefix).
    Raises ValueError for an unknown dataset name.
    """
    resolvers = {
        "something": return_somethingv2,
        "somethingv2": return_somethingv2,
        "somethingv1": return_somethingv1,
        "ucf101": return_ucf101,
        "hmdb": return_hmdb51,
        "kinetics": return_kinetics,
    }
    if dataset not in resolvers:
        raise ValueError("Unknown dataset " + dataset)
    categories, train_list, val_list, frames_root, prefix = resolvers[dataset](root_path)
    train_list = os.path.join(root_path, train_list)
    val_list = os.path.join(root_path, val_list)
    if isinstance(categories, str):
        # Category names live in a text file under root_path, one per line.
        label_file = os.path.join(root_path, categories)
        with open(label_file) as handle:
            class_names = [line.rstrip() for line in handle.readlines()]
    else:
        # Only the class count is known; keep placeholder names.
        class_names = [None] * categories
    return len(class_names), train_list, val_list, frames_root, prefix
| true |
15ca9ed534ebe46e05b4ae79f29024fb6525123d | Python | Zer0xPoint/LeetCode | /Top Interview Questions/1.Array/Valid Sudoku.py | UTF-8 | 1,611 | 3.375 | 3 | [] | no_license | from typing import List
from collections import defaultdict
class Solution:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True if no row, column or 3x3 sub-grid contains a repeated
        digit.  Empty cells are "." and are ignored; the board need not be
        solvable, only conflict-free."""
        seen_rows = [set() for _ in range(9)]
        seen_cols = [set() for _ in range(9)]
        seen_boxes = [set() for _ in range(9)]
        for r in range(9):
            for c in range(9):
                digit = board[r][c]
                if digit == ".":
                    continue
                # Same sub-grid numbering as the original: row band plus
                # 3 * column band gives a unique id in 0..8.
                box = (r // 3) + (c // 3) * 3
                if (digit in seen_rows[r]
                        or digit in seen_cols[c]
                        or digit in seen_boxes[box]):
                    return False
                seen_rows[r].add(digit)
                seen_cols[c].add(digit)
                seen_boxes[box].add(digit)
        return True
if __name__ == "__main__":
    # Manual check: this board repeats "8" in column 0 (rows 0 and 3),
    # so the script prints False.
    s = Solution()
    sudoku = [["8", "3", ".", ".", "7", ".", ".", ".", "."],
              ["6", ".", ".", "1", "9", "5", ".", ".", "."],
              [".", "9", "1", ".", ".", ".", ".", "6", "."],
              ["8", ".", ".", ".", "6", ".", ".", ".", "3"],
              ["4", ".", ".", "8", ".", "3", ".", ".", "1"],
              ["7", ".", ".", ".", "2", ".", ".", ".", "6"],
              [".", "6", ".", ".", "3", ".", "2", "8", "."],
              [".", ".", ".", "4", "1", "9", ".", ".", "5"],
              [".", ".", ".", ".", "8", ".", ".", "7", "9"]]
    print(s.isValidSudoku(sudoku))
| true |
664da15456dc398e5011ebc07d63848995a707a4 | Python | CodeKul/Python-Dec-2018-CrashCourse | /Variables.py | UTF-8 | 469 | 4.09375 | 4 | [] | no_license | # Hello World program in Python
# Teaching demo: variables, rebinding, basic types and type introspection.
# single line comment
"""
This is
Multiline
comment
"""
# The same name can be rebound to values of different types.
var = 10
var = "Codekul"
var = 'Codekul'
# Triple-quoted strings may span lines and contain both quote characters.
var = '''"'Codekul'"
The "Gurukul" for Coders!'''
print(var)
x = 20
print(x)
a = 10
b = 20
d = 10.20
c = a + b
# Python has no true constants; UPPER_CASE is only a naming convention.
CONST = 100
print(CONST)
print(c)
# int + float promotes the result to float.
c = a + d
print(c)
e = False
print(e)
# type() reflects whatever the name is currently bound to.
print(type(var))
var = 10
print(type(var))
print(type(x))
print(type(a))
print(type(b))
print(type(c))
print(type(d))
print(type(e))
8334bffe4e1a2ddeae4f57e7a241b0239e5645bb | Python | hunmin-hub/Python-Study | /0119/10798_T1012.py | UTF-8 | 269 | 2.578125 | 3 | [] | no_license | board=list()
# Read 5 words from stdin into `board` (initialised on the line above),
# pad each to width 15 with empty strings, then emit the characters
# column by column, skipping the padding (Baekjoon 10798 style).
for _ in range(0,5) :
    A=list(input())
    N=len(A)
    # Pad the row out to 15 cells so column iteration never overruns.
    for t_blank in range(15-N) :
        A.append('')
    board.append(A)
temp=""
# Walk columns first, rows second, collecting only real characters.
for i in range(0,15) :
    for j in range(0,5) :
        if board[j][i]!='' :
            temp+=board[j][i]
print(temp)
70485f4cbed707931883d6973d60dec29a043b01 | Python | AdamPayne238/Data-Structures | /binary_search_tree/binary_search_tree.py | UTF-8 | 3,971 | 4.125 | 4 | [] | no_license | import sys
sys.path.append('../queue_and_stack')
from dll_queue import Queue
from dll_stack import Stack
# Q breadth for search.
# Stack depth for search. Top to bottom, left to right
# Binary Search Tree is a node-based binary tree data structure which has the following properties:
# The left subtree of a node contains only nodes with keys lesser than the node’s key.
# The right subtree of a node contains only nodes with keys greater than the node’s key.
# The left and right subtree each must also be a binary search tree.
# Insert Value
# If no root node, insert as root node (first in tree)
# If node being inserted is greater than root node
# Move right
# If node being inserted is less than root node
# Move left
# If node has traversed and no more nodes to compare. Insert here.
# Find Value
# If no node at root: return false
# Compare value to root
# if smaller:
# Go left. Look at node there
# If Greater or ==:
# Go right.
# Get Max
# If no right child. Return this value
# Otherwise continue right in the tree to find the largest node
# Left and right are children of node
class BinarySearchTree:
    """Classic unbalanced binary search tree node.

    Values smaller than a node live in its left subtree, larger values in
    the right subtree; insert() silently ignores duplicates.
    """

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def insert(self, value):
        """Walk down to the correct leaf position and attach a new node
        there (duplicates are dropped)."""
        if value > self.value:
            if self.right is not None:
                return self.right.insert(value)
            self.right = BinarySearchTree(value)
        elif value < self.value:
            if self.left is not None:
                return self.left.insert(value)
            self.left = BinarySearchTree(value)

    def contains(self, target):
        """Return True when `target` occurs somewhere in this subtree."""
        if target == self.value:
            return True
        subtree = self.right if target > self.value else self.left
        return False if subtree is None else subtree.contains(target)

    def get_max(self):
        """Return the largest value, i.e. the right-most node's value."""
        node = self
        while node.right is not None:
            node = node.right
        return node.value

    def for_each(self, cb):
        # Not implemented yet — kept as a stub, as in the original file.
        pass

    # --- Day 2 / stretch stubs (unimplemented in the original) ---

    def in_order_print(self, node):
        pass

    def bft_print(self, node):
        pass

    def dft_print(self, node):
        pass

    def pre_order_dft(self, node):
        pass

    def post_order_dft(self, node):
        pass
| true |
374e3cfd38d4cc1979136bf5ecf2491489578e25 | Python | Stefan4472/Duckytown-Robot | /Python/interface_test.py | UTF-8 | 517 | 2.78125 | 3 | [] | no_license | import time
from arduino_interface import ArduinoInterface, PiToArduinoPacket, ArduinoToPiPacket
def callback(arg1, arg2, arg3):
    """Debug callback for ArduinoInterface: echo the three response fields."""
    message = 'Got callback with args {} {} {}'.format(arg1, arg2, arg3)
    print(message)
# Manual smoke test for ArduinoInterface over a USB serial port.
a_int = ArduinoInterface('/dev/ttyACM0', 115200, timeout=0.0)
time.sleep(2)  # settle delay after opening the serial port
a_int.serial_port.flushInput()
# Fire 200 echo requests; each response should invoke `callback`.
for i in range(200):
    a_int.echo(i, 0, 0, callback)
    a_int.process_buffer(debug=False)
#input('Press Enter\n')
#a_int.echo(10, 0, 0, callback)
# Keep draining the serial buffer forever (interrupt with Ctrl-C).
while True:
    a_int.process_buffer(debug=False)
| true |
4063c16cced1703ccf0b0cb6cca4d448bd0dc350 | Python | sumie-dh/RPi-thermostat | /thermostate.py | UTF-8 | 1,993 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
import wiringpi
import subprocess
import time
import tempita
# Temperature thresholds in the comparisons below.
# NOTE(review): the loop repeats 35/37 as literals instead of reusing these
# names — keep them in sync if the thresholds change.
cool_temp = 35
hot_temp = 37
from subprocess import call
#wiringpi.wiringPiSetup()
wiringpi.wiringPiSetupGpio()
#wiringpi.wiringPiSetupPhys()
# GPIO pin 27 drives the output (presumably a relay); starts switched off.
wiringpi.pinMode(27 , 1)
wiringpi.digitalWrite(27, 0)
# Mail template; the state machine below sends one mail per state change.
tmpl=tempita.Template.from_filename ("test.mail")
state="start"
while True:
    # Read the current temperature via the external `checktemp` command.
    p=subprocess.Popen(["checktemp"],stdout=subprocess.PIPE)
    (out,rr)=p.communicate()
    if p.returncode !=0:
        # Sensor read failed: switch the output off and mail once.
        print 'error'
        wiringpi.digitalWrite(27, 0)
        if state !='error':
            state="error"
            mail=tmpl.substitute(subject="error",body="Error - temperature is not readable")
            p=subprocess.Popen(["sendmail","yourmail@here.com"],stdin=subprocess.PIPE)
            p.communicate(mail)
    else:
        # A literal zero reading is treated as a sensor error too.
        if float(out) == 0:
            print 'error'
            wiringpi.digitalWrite(27, 0)
            if state !='error':
                state="error"
                mail=tmpl.substitute(subject="error",body="Error - null" +out)
                p=subprocess.Popen(["sendmail","yourmail@here.com"],stdin=subprocess.PIPE)
                p.communicate(mail)
        # Below 35: normal state, output on.
        if float(out) <35 and float(out) !=0:
            print 'ok'
            if state !='ok':
                state="ok"
                mail=tmpl.substitute(subject="ok",body="Back to normal state - temperature is " +out)
                p=subprocess.Popen(["sendmail","yourmail@here.com"],stdin=subprocess.PIPE)
                p.communicate(mail)
            wiringpi.digitalWrite(27, 1)
        # Above 37: overheating, output off.
        if float(out) >37:
            print 'prehrati'
            wiringpi.digitalWrite(27, 0)
            if state !='prehrati':
                state="prehrati"
                mail=tmpl.substitute(subject="overheating",body="NOK - Overheating - temprature is " +out)
                p=subprocess.Popen(["sendmail","yourmail@here.com"],stdin=subprocess.PIPE)
                p.communicate(mail)
        # Between 35 and 37 inclusive: warning band, no output change here.
        if not (float(out) >37) and not (float(out) <35):
            print 'waiting'
            if state !='waiting':
                state="waiting"
                mail=tmpl.substitute(subject="waiting",body="NOK - Near overheating - temperature is " +out)
                p=subprocess.Popen(["sendmail","yourmail@here.com"],stdin=subprocess.PIPE)
                p.communicate(mail)
    print out
    time.sleep (1)
| true |
930e8d5ffa0e8b80ff365faa2300bb84ba47c3aa | Python | ashish-bisht/Ds-and-Algo | /backtracking/permute.py | UTF-8 | 1,435 | 3.84375 | 4 | [] | no_license | # def permute(nums):
# """
# :type nums: List[int]
# :rtype: List[List[int]]
# """
# # here we need a global wise list, each time we just append to the result
# rslt = []
# count = 0
# def dfs(temp, elements, count):
# count += 1
# print(f'count is {count}')
# # gather rslt
# if len(elements) == 0:
# rslt.append(temp[:]) # still remember to use temp[:]
# for e in elements:
# temp.append(e)
# # backtrack
# next_elements = elements[:]
# next_elements.remove(e)
# dfs(temp, next_elements, count)
# temp.pop()
# dfs([], nums, count) # first is the current result
# print(rslt)
# return rslt
# nums = [1, 2, 3]
# print(permute(nums))
def get_permutation(array):
    """Return every permutation of `array` as a list of lists.

    An empty input yields an empty result (no [[]] entry).
    """
    results = []
    permutations_helper(array, [], results)
    return results


def permutations_helper(array, current_permutation, permutations):
    """Recursively move elements from `array` into `current_permutation`,
    appending each completed non-empty permutation to `permutations`."""
    if not array:
        # Record the finished permutation, but skip the empty one produced
        # when get_permutation() is called with an empty list.
        if current_permutation:
            permutations.append(current_permutation)
        return
    for idx, picked in enumerate(array):
        remaining = array[:idx] + array[idx + 1:]
        permutations_helper(remaining, current_permutation + [picked], permutations)
# Quick demo: print all six permutations of [1, 2, 3].
array = [1, 2, 3]
print(get_permutation(array))
| true |
643c7f2e6ea273337f0ea656f4c569e25433f844 | Python | alxgnussin/sf-alx5 | /csv_to_db.py | UTF-8 | 1,086 | 2.8125 | 3 | [] | no_license | import os
import csv
import psycopg2
# Input CSV files and the Postgres connection string (read from the
# DATABASE_URL environment variable; None if unset).
meals = "meals_4.csv"
cat = "categories.csv"
conn_str = os.environ.get("DATABASE_URL")
def csv_reader(file):
    """Read a semicolon-delimited CSV file and return rows as lists of strings.

    Each physical line is parsed by csv.reader, re-joined with commas and then
    split on ';' — preserving the original parsing quirk exactly.
    """
    rows = []
    with open(file, "r") as handle:
        for record in csv.reader(handle):
            rows.append(",".join(record).split(';'))
    return rows
def insert_to_categories():
    """Bulk-load category titles (column 1 of the categories CSV) into the
    p5_categories table."""
    rows = csv_reader(cat)
    connection = psycopg2.connect(conn_str)
    cursor = connection.cursor()
    # Parameterized statement: psycopg2 handles quoting of the title value.
    statement = 'INSERT INTO "p5_categories" ("title") VALUES (%s)'
    for row in rows:
        cursor.execute(statement, [row[1]])
    connection.commit()
    connection.close()
def insert_to_meals():
    """Bulk-load meal rows (columns 1-5 of the meals CSV: title, price,
    description, picture, category id) into the p5_meals table."""
    rows = csv_reader(meals)
    connection = psycopg2.connect(conn_str)
    cursor = connection.cursor()
    # Parameterized statement; values are bound per row below.
    statement = ('INSERT INTO "p5_meals" ("title", "price", "description", "picture", "category_id") '
                 'VALUES (%s, %s, %s, %s, %s)')
    for row in rows:
        cursor.execute(statement, [row[1], row[2], row[3], row[4], row[5]])
    connection.commit()
    connection.close()
# Script entry point: load categories before meals (meal rows carry a
# category_id column — presumably referencing the categories; verify schema).
insert_to_categories()
insert_to_meals()
| true |
2bd25d2283841e90dc559dd564a417fb4b45adcf | Python | fitrialif/ANN-1 | /tests/MultiLayerPerceptronTest.py | UTF-8 | 7,825 | 2.640625 | 3 | [] | no_license | import gzip
import unittest
import numpy as np
import six.moves.cPickle as pickle
from ann.Layers import InputLayer, LogisticRegressionLayer, LinearRegressionLayer, HiddenLayer, InvalidDimensionError, \
LeNetConvPoolLayer
from ann.MultiLayerPerceptron import MultiLayerPerceptron, InvalidNetworkError, InvalidDataError, \
NoDataSetFoundError, NoNumpyArrayError
def _format_data_set(data_set):
    """Stack the (inputs, targets) pair column-wise so that row i of the
    result is [input_i, target_i]."""
    features = data_set[0].tolist()
    targets = data_set[1].tolist()
    return np.asarray([features, targets]).T
def _load_data(data_set):
    """Load a gzipped pickle of (train, valid, test) splits and return the
    second and third splits formatted as [input, target] rows.

    SECURITY NOTE(review): pickle.load executes code embedded in the file —
    only open trusted data sets.
    """
    with gzip.open(data_set, 'rb') as f:
        _, train_set, test_set = pickle.load(f)  # use validation set instead of training set to speed up test time
    return _format_data_set(train_set), _format_data_set(test_set)
class MultiLayerPerceptronTest(unittest.TestCase):
    """End-to-end tests for MultiLayerPerceptron: construction-time network
    validation, training-time input validation, and learning on XOR / MNIST."""

    def test_invalid_network_specification(self):
        # A two-layer spec (input straight to output) is rejected at
        # construction time — presumably a hidden layer is required; see
        # MultiLayerPerceptron for the exact rule.
        # Given
        network_specification = [InputLayer([2]), LinearRegressionLayer(2)]
        # Then
        self.assertRaises(InvalidNetworkError, MultiLayerPerceptron,
                          seed=1234,
                          network_specification=network_specification)

    def test_invalid_data_set_format(self):
        # A training row without a target part must raise InvalidDataError.
        # Given
        training_set = [[[1]]]
        multilayer_perceptron_regressor = MultiLayerPerceptron(seed=1234,
                                                               network_specification=[InputLayer([1]), HiddenLayer(2),
                                                                                      LinearRegressionLayer(1)])
        # Then
        self.assertRaises(InvalidDataError, multilayer_perceptron_regressor.train, training_set)

    def test_missing_data_set(self):
        # Training on an empty data set must raise NoDataSetFoundError.
        # Given
        training_set = []
        multilayer_perceptron_regressor = MultiLayerPerceptron(seed=1234,
                                                               network_specification=[InputLayer([1]), HiddenLayer(2),
                                                                                      LinearRegressionLayer(1)])
        # Then
        self.assertRaises(NoDataSetFoundError, multilayer_perceptron_regressor.train, training_set)

    def test_no_numpy_array(self):
        # Plain Python lists (instead of a numpy array) must be rejected.
        # Given
        training_set = [[[1, 2, 3, 4], [1, 2]]]
        multilayer_perceptron_regressor = MultiLayerPerceptron(seed=1234,
                                                               network_specification=[InputLayer([4]), HiddenLayer(2),
                                                                                      LinearRegressionLayer(2)])
        # Then
        self.assertRaises(NoNumpyArrayError, multilayer_perceptron_regressor.train, training_set)

    def test_invalid_input_size(self):
        # Input vectors of length 2 fed to an InputLayer([3]) network.
        # Given
        training_set = np.array([[[1, 1], [2]]])
        network_specification = [InputLayer([3]), HiddenLayer(2), LinearRegressionLayer(1)]
        multilayer_perceptron_regressor = MultiLayerPerceptron(seed=1234,
                                                               network_specification=network_specification)
        # Then
        self.assertRaises(InvalidDimensionError, multilayer_perceptron_regressor.train, training_set)

    def test_invalid_output_size_regressor(self):
        # Target of length 1 fed to a 2-unit regression output layer.
        # Given
        training_set = np.array([[[1, 1], [2]]])
        network_specification = [InputLayer([2]), HiddenLayer(2), LinearRegressionLayer(2)]
        multilayer_perceptron_regressor = MultiLayerPerceptron(seed=1234,
                                                               network_specification=network_specification)
        # Then
        self.assertRaises(InvalidDimensionError, multilayer_perceptron_regressor.train, training_set)

    def test_invalid_output_size_classifier(self):
        # Same mismatch as above, named for the classifier path.
        # Given
        training_set = np.array([[[1, 1], [2]]])
        network_specification = [InputLayer([2]), HiddenLayer(2), LinearRegressionLayer(2)]
        multilayer_perceptron_classifier = MultiLayerPerceptron(seed=1234,
                                                                network_specification=network_specification)
        # Then
        self.assertRaises(InvalidDimensionError, multilayer_perceptron_classifier.train, training_set)

    def test_network_initialization(self):
        # The spec should be stored with one hidden layer of size 3 and an
        # output layer of size 2 (inspects private attributes).
        # Given
        network_specification = [InputLayer([4]), HiddenLayer(3), LinearRegressionLayer(2)]
        # When
        multilayer_perceptron_regressor = MultiLayerPerceptron(seed=1234,
                                                               network_specification=network_specification)
        # Then
        self.assertEqual(1, len(multilayer_perceptron_regressor._network_specification[1:-1]))
        self.assertEqual(3, multilayer_perceptron_regressor._network_specification[1].size)
        self.assertEqual(2, multilayer_perceptron_regressor._output_layer.size)

    def test_XOR_problem_regression(self):
        # A 2-2-1 regressor must learn XOR to within 1e-4 after 1000 epochs
        # with the fixed seed.
        # Given
        network_specification = [InputLayer([2]), HiddenLayer(2), LinearRegressionLayer(1)]
        training_set = np.asarray([[[0.0, 0.0], [0.0]],
                                   [[0.0, 1.0], [1.0]],
                                   [[1.0, 0.0], [1.0]],
                                   [[1.0, 1.0], [0.0]]
                                   ])
        multilayer_perceptron_regressor = MultiLayerPerceptron(seed=1234,
                                                               network_specification=network_specification)
        # When
        multilayer_perceptron_regressor.train(training_set, iterations=1000, learning_rate=0.1)
        # Then
        self.assertTrue(multilayer_perceptron_regressor.predict([[0, 0]])[0] < 0.0001)
        self.assertTrue(multilayer_perceptron_regressor.predict([[0, 1]])[0] > 0.9999)
        self.assertTrue(multilayer_perceptron_regressor.predict([[1, 0]])[0] > 0.9999)
        self.assertTrue(multilayer_perceptron_regressor.predict([[1, 1]])[0] < 0.0001)
        self.assertTrue(multilayer_perceptron_regressor.test(training_set) < 0.0001)

    def test_XOR_problem_classification(self):
        # A 2-4-2 classifier must classify XOR perfectly after 100 epochs
        # with the fixed seed.  NOTE(review): the fourth assertion repeats
        # input [0.0, 0.0]; [1.0, 1.0] is never checked directly.
        # Given
        network_specification = [InputLayer([2]), HiddenLayer(4), LogisticRegressionLayer(2)]
        training_set = np.asarray([[[0.0, 0.0], 0],
                                   [[0.0, 1.0], 1],
                                   [[1.0, 0.0], 1],
                                   [[1.0, 1.0], 0]
                                   ])
        multilayer_perceptron_classifier = MultiLayerPerceptron(seed=1234,
                                                                network_specification=network_specification)
        # When
        multilayer_perceptron_classifier.train(training_set, iterations=100, learning_rate=0.1)
        # Then
        self.assertEqual(0, multilayer_perceptron_classifier.predict([[0.0, 0.0]]))
        self.assertEqual(1, multilayer_perceptron_classifier.predict([[0.0, 1.0]]))
        self.assertEqual(1, multilayer_perceptron_classifier.predict([[1.0, 0.0]]))
        self.assertEqual(0, multilayer_perceptron_classifier.predict([[0.0, 0.0]]))
        self.assertTrue(multilayer_perceptron_classifier.test(training_set) == 0)

    def test_mnist_classifier(self):
        # One epoch of a small conv net on MNIST must reproduce the exact
        # error rate 28.18 with the fixed seed.  Requires ../data/mnist.pkl.gz.
        # Given
        training_set, test_set = _load_data('../data/mnist.pkl.gz')
        network_specification = [InputLayer([28, 28]),
                                 LeNetConvPoolLayer(feature_map=2, filter_shape=(5, 5), pool_size=(2, 2)),
                                 HiddenLayer(50),
                                 LogisticRegressionLayer(10)]
        neural_network = MultiLayerPerceptron(seed=1234, network_specification=network_specification)
        # When
        neural_network.train(training_set=training_set, learning_rate=0.1, batch_size=500, iterations=1)
        # Then
        self.assertEqual(28.18, round(neural_network.test(test_set=test_set, batch_size=1000), 2))
| true |
65427f39a2e0da47ce4d0263db114fd02245684f | Python | rpmurph/misc-projects | /min-cost-path.py | UTF-8 | 1,133 | 3.359375 | 3 | [] | no_license |
#
# Dynamic-programming minimum path sum on a 5x5 grid (moves: right/down only).
# Each cell is replaced by the cheapest cumulative cost of reaching it, so the
# last element ends up holding the minimal top-left -> bottom-right path cost.
#
# FIXES vs. the original: Python-2 `print arr` replaced with the function
# form; the first-column slice hard-coded `5` instead of `rows`; the no-op
# `val == val` branch for idx 0 removed; the interior-cell comparison no
# longer adds arr[idx] to both sides.
arr = [131, 673, 234, 103, 18, 201, 96, 342, 965, 150, 630, 803,
       746, 422, 111, 537, 699, 497, 121, 956, 805, 732, 524, 37, 331]

rows = 5
columns = 5
length = len(arr)

# First-column indices (every `rows`-th cell after row 0) and first-row
# indices (cells 1..columns-1); cell 0 keeps its original value.
first_column = range(rows, length, rows)
first_row = range(1, columns)

for idx in range(length):
    if idx in first_row:
        # First row: can only arrive from the left.
        arr[idx] += arr[idx - 1]
    elif idx in first_column:
        # First column: can only arrive from above.
        arr[idx] += arr[idx - rows]
    elif idx != 0:
        # Interior cell: arrive from whichever neighbour (left or above)
        # is cheaper.
        arr[idx] += min(arr[idx - 1], arr[idx - rows])

print(arr)  # final DP table; arr[-1] is the minimum path cost
9174ced1a0d6f97dcee91b116e19f1bdf6bb1486 | Python | thulio/watchlogs | /tests/entities/test_log_stream.py | UTF-8 | 597 | 2.8125 | 3 | [
"MIT"
] | permissive | import unittest
from server.entities.log_stream import LogStream
class TestLogstream(unittest.TestCase):
    """Unit tests for the LogStream entity."""

    def test_from_dict(self):
        # from_dict should pick the stream name out of the AWS-style payload.
        payload = {'logStreamName': 'stream-name'}
        entity = LogStream.from_dict('some-group', payload)
        self.assertEqual(entity.name, 'stream-name')

    def test_eq(self):
        # Streams compare equal only when both group and name match.
        first = LogStream('some-group', 'name')
        second = LogStream('some-group', 'name')
        other_group = LogStream('some-other-group', 'name')
        self.assertEqual(first, second)
        self.assertNotEqual(first, other_group)
| true |
545c2a42a288713e518a6cca6110d9248fa65ab2 | Python | apurba420/sea-level-predictor | /SLP One.py | UTF-8 | 1,110 | 3.234375 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import linregress
import numpy as np
import seaborn as sns
df = pd.read_csv('sea-level.csv')
x = df['Year']
y = df['CSIRO Adjusted Sea Level']
plt.figure(figsize=(14,9))
plt.scatter(x, y)
plt.xlabel('Year')
plt.ylabel('CSIRO Adjusted Sea Level')
plt.title('Year Vs. CSIRO ASL')
plt.tight_layout()
line1 = linregress(x, y)
slope, intercept, r_value, p_value, std_err = line1
years_extended = x.append(pd.Series(range(2014, 2050)), ignore_index=True)
plt.plot(years_extended, years_extended*slope + intercept, color="blue")
plt.xlabel('Year')
plt.ylabel('CSIRO Adjusted Sea Level')
year_above_2000 = df.loc[df['Year']>=2000,'Year']
CSIRO_above_2000 = df.loc[df['Year']>=2000,'CSIRO Adjusted Sea Level']
line2 = linregress(year_above_2000,CSIRO_above_2000)
slope2, intercept2, r_value2, p_value2, std_err2 = line2
year_after_2000 = years_extended[years_extended>=2000]
plt.plot(year_after_2000,year_after_2000*slope2 + intercept2, color='red')
plt.xlabel('Year')
plt.ylabel('CSIRO Adjusted Sea Level')
plt.savefig('results.png')
| true |
3fc977c48ea583affdb21373a7ff84b97dc8f392 | Python | sunnycd/HackerRank-Python | /Sets/strictSuperSet.py | UTF-8 | 383 | 3.296875 | 3 | [] | no_license | #add all the sets to one big list and check for strict super set element by element
bigList = list()
result = True
a = set(map(int, raw_input().split()))
numberOfSetsToCheck = int(raw_input())
for x in range(numberOfSetsToCheck):
b = set(map(int, raw_input().split()))
bigList.append(b)
for item in bigList:
if not a.issuperset(item):
result = False
print result
| true |
7528cc436d07b28b49e5b699f3371a1bd28eed82 | Python | Naamu/ml-learning | /reference/understanding-ml-code/ch25-군집 분석/cluster_basics.py | UTF-8 | 5,702 | 2.953125 | 3 | [] | no_license | #
# 프로그램 이름: cluster_basics.py
# 작성자: Bong Ju Kang
# 설명: 군집 분석 이해하기
#
# 필요한 패키지
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from numpy.random import RandomState
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import silhouette_score, silhouette_samples
# Initial setup: output directory for the generated PNG figures.
png_path = "./data/png"
os.makedirs(png_path, exist_ok=True)
# Korean text output in matplotlib: Malgun Gothic renders Hangul; keep the
# minus sign as plain ASCII so it displays with this font.
plt.rcParams['font.family'] = 'Malgun Gothic'
plt.rcParams['axes.unicode_minus'] = False
#
# Gap-statistic worked example
#
# Build the data set: 1000 two-dimensional points around 10 blob centers.
X, y = make_blobs(n_samples=1000, n_features=2, centers=10, random_state=7)
X.shape
# (1000, 2)
# Visual sanity check of the data (figure text is rendered in Korean).
plt.figure(figsize=(7,6))
plt.scatter(X[:, 0], X[:,1], c=y, s=9)
plt.title("산점도와 군집(10개) 분포")
plt.xlabel(r"$x_1$")
plt.ylabel(r"$x_2$")
plt.colorbar()
plt.show()
plt.savefig(png_path+'/cluster_blob_scatter.png')
#
# Computing the gap statistic
#
# Hyper-parameters for the computation.
# Maximum number of clusters to try.
max_clusters = 20
# Number of reference distributions.
num_ref_dists = 10
# Shape of each reference sample: (number of samples, number of features).
num_features = 2
B = 100
num_ref_data_shape = (B, num_features)
# Placeholder for the gap statistic of each cluster count.
gap_stat = np.zeros(shape=(max_clusters,))
# For each candidate number of clusters:
for index, clusters in enumerate(np.arange(1, max_clusters+1)):
    # Placeholder for the WCSS of each reference distribution.
    ref_wcss = np.zeros(num_ref_dists)
    # For each reference distribution:
    for j in np.arange(num_ref_dists):
        # Draw the reference sample via (b-a)*uniform() + a, i.e. a uniform
        # bounding box around the observed data.
        random_dist = (np.max(X, axis=0) - np.min(X, axis=0)) * \
                      RandomState(j).random_sample(num_ref_data_shape) + \
                      np.min(X, axis=0).reshape(1, 2)
        # Fit k-means to the reference sample.
        km = KMeans(clusters)
        km.fit(random_dist)
        # Within-cluster sum of squares (WCSS).
        ref_wcss[j] = km.inertia_
    # Fit k-means to the original data.
    km = KMeans(clusters)
    km.fit(X)
    # WCSS of the original data.
    wcss = km.inertia_
    # Gap statistic: mean log reference WCSS minus log observed WCSS.
    gap_stat[index] = np.mean(np.log(ref_wcss)) - np.log(wcss)
print(gap_stat)
# Sample output:
# [-2.19660846 -1.9590175 -1.90628713 -1.95859784 -1.72469402 -1.33368902
#  -1.16974462 -0.94765692 -0.94574371 -0.9539331 -1.036548 -1.10812855
#  -1.18153949 -1.21712557 -1.27528754 -1.33497447 -1.34521287 -1.36448381
#  -1.42248713 -1.41095365]
# Plot the gap statistic against the number of clusters.
plt.figure(figsize=(7, 7))
plt.plot(np.arange(max_clusters), gap_stat)
plt.xticks(np.arange(max_clusters),np.arange(1, max_clusters+1) )
plt.grid()
plt.title('군집개수에 따른 격차통계량의 값')
plt.xlabel('군집 개수')
plt.ylabel('격차통계량 값')
plt.show()
plt.savefig(png_path+'/cluster_blob_gap.png')
# Scatter of the original data together with one reference distribution.
random_dist = (np.max(X, axis=0) - np.min(X, axis=0)) * \
              RandomState(0).random_sample(num_ref_data_shape) + \
              np.min(X, axis=0).reshape(1, 2)
plt.figure(figsize=(7,6))
plt.scatter(X[:, 0], X[:,1], s=9, label='데이터')
plt.scatter(random_dist[:, 0], random_dist[:,1], c='orange', s=7, label='무작위분포')
plt.title('원 데이터와 참조 분포 데이터')
plt.xlabel(r"$x_1$")
plt.ylabel(r"$x_2$")
plt.legend()
plt.show()
plt.savefig(png_path+'/cluster_blob_with_random_scatter.png')
#
# Computing silhouette statistics
#
# Additional import (silhouette_score is also imported at the top of the file).
from sklearn.metrics import silhouette_score
# Placeholder for the average silhouette of each cluster count.
sil_avg = np.zeros(shape=(max_clusters-1,))
# For each candidate number of clusters (silhouette requires k >= 2):
for index, clusters in enumerate(np.arange(2, max_clusters+1)):
    km = KMeans(clusters)
    km.fit(X)
    cluster_label = km.predict(X=X)
    sil_avg[index] = silhouette_score(X, cluster_label)
print(sil_avg)
# Sample output:
# [0.50187747 0.53630729 0.56448052 0.57390226 0.64018763 0.64664969
#  0.68289145 0.6187224 0.56237004 0.52021199 0.51151902 0.50893581
#  0.41552878 0.4317837 0.41651814 0.37374767 0.3740731 0.35672327
#  0.34760964]
# Plot the average silhouette value against the number of clusters.
plt.figure(figsize=(7, 7))
plt.plot(np.arange(max_clusters-1), sil_avg)
plt.xticks(np.arange(max_clusters-1), np.arange(2, max_clusters+1) )
plt.grid()
plt.title('군집 개수에 따른 실루엣 평균값')
plt.xlabel('군집 개수')
plt.ylabel('실루엣 평균값')
plt.show()
plt.savefig(png_path+'/cluster_blob_silhouette.png')
# Evaluate the chosen clustering: compare silhouette coefficients per cluster.
opt_clusters = 8
km = KMeans(opt_clusters)
km.fit(X)
cluster_label = km.predict(X=X)
silhouette_samples(X, cluster_label)
# Scatter plot of the chosen clustering.
colors = cm.nipy_spectral(cluster_label.astype(float) / max_clusters)
fig = plt.figure(figsize=(6,5))
ax = fig.add_subplot(1,1,1)
ax.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors, edgecolor='k')
plt.xlabel(r"$x_1$")
plt.ylabel(r"$x_2$")
plt.show()
plt.savefig(png_path+'/cluster_optimal_scatter.png')
# Silhouette curves next to the scatter plot.
# NOTE(review): np.float and np.str below were removed in NumPy 1.24 —
# replace with float()/str() when upgrading.
colors = cm.nipy_spectral(cluster_label.astype(float) / max_clusters)
fig = plt.figure(figsize=(12,5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1 = ax1.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors, edgecolor='k')
for i in np.arange(2, opt_clusters+1):
    # Per-cluster silhouette values, sorted in descending order.
    ith_sil_score = np.sort(silhouette_samples(X, cluster_label)[cluster_label==i])[::-1]
    pcolor = cm.nipy_spectral(np.float(i) / max_clusters)
    ax2 = plt.plot(ith_sil_score, c=pcolor, label=np.str(i))
ax2 = plt.xlabel('군집 내 표본 번호')
ax2 = plt.ylabel('군집 별 실루엣 계수값')
ax2 = plt.legend()
| true |
58ccfefb906910aeb1806d202c3d6b4a4c661235 | Python | GILJC/MyProject | /Python/study/PythonWorkspace/exercise_day2_turtle.py | UTF-8 | 1,263 | 4.1875 | 4 | [] | no_license | import turtle
t= turtle.Turtle() # -> create a Turtle() object
# turtle. refers to the turtle module; this creates a turtle object inside it.
t.shape('turtle') # -> set the drawing cursor's shape to a turtle
# Other shapes such as 'arrow' can be used instead of 'turtle'.
t.forward(100) # -> move the t object forward by 100
t.left(90) # -> rotate 90 degrees to the left (now facing left)
t.forward(50) # -> move 50 in that direction
t.forward(50)
t.left(90)
t.forward(100)
t.left(90)
t.forward(100)
# drawing a rectangle with turtle (the moves above)
t.right(30)
t.forward(50)
t.right(60)
t.forward(50)
t.right(60)
t.forward(50)
t.right(60)
t.forward(50)
t.right(60)
t.forward(50)
t.right(60)
t.forward(50)
# drawing a regular hexagon with side 50 (the moves above)
t.left(0)
t.forward(100)
t.left(120)
t.forward(100)
t.left(120)
t.forward(100)
# drawing a triangle with side 100 (the moves above)
# drawing a pizza (circle) with turtle
"""
import turtle
t = turtle.Turtle()
t.shape("turtle")
"""
t.shape('arrow')
radius = 100
t.circle(radius) # draw a circle with radius 100
radius = 200
t.circle(radius) # draw a circle with radius 200
| true |
807923abe125e25db38116cac1c8c31df04d4171 | Python | alehpineda/IntroAPython-Practicas | /U2 - Cadenas y consola/U2T2 - Metodos en las cadenas/U2T2P1.py | UTF-8 | 896 | 4.03125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
U2T2 - Metodos en las cadenas
Modificar las cadenas es util. Pero hacerlo a mano es una ...(inserte
palabra altisonante preferida). Sin embargo, Python trae de fabrica varios
metodos para hacer eso automaticamente.
"""
"""
Ej 01 - Cuatro metodos a la locura.
Ahora en los siguientes ejercicios hablaremos acerca de algunos metodos
utiles para manipular las cadenas.
Explicaremos mas a fondo los metodos en siguientes lecciones. Ahorita,
nos quedamos con que los metodos son piezas de codigo preprogramadas que
hacen tareas pre-establecidas.
Hablaremos de cuatro metodos en esta seccion:
1. len()
2. lower()
3. upper()
4. str()
Iniciaremos con len(), este metodo nos da el tamaño de una cadena.
"""
#Crea una variable 'perico' y asignale el valor 'Verde Militar'
#Ahora escribe len(perico) despues de print. Esto nos dara el numero de
# letras
| true |
10697dee6832ae4e0df8f44f12a8fef275cb2c38 | Python | KULDEEPMALIKM41/Practices | /Python/Python Basics/46.greater4.py | UTF-8 | 367 | 4.0625 | 4 | [] | no_license | #greater no. in fore values by if else.
# Prompt for the four values to compare (same prompts, same order).
values = {
    'a': int(input('Enter no. a \t')),
    'b': int(input('Enter no. b \t')),
    'c': int(input('Enter no. c \t')),
    'd': int(input('Enter no. d \t')),
}
# Report a value only when it is strictly greater than every other one;
# ties produce no output, exactly like the original four comparisons.
for name, current in values.items():
    if all(current > other for label, other in values.items() if label != name):
        print('Greater No. is ' + name)
3c6ba885ad8eaf3a8e70617d161d010d2f2121d5 | Python | dxc13762525628/concurrent | /desiger_model/action_model/observer_model.py | UTF-8 | 1,929 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2021/7/7 14:46
# @Author : dxc
# @File : observer_model.py
"""
观察者模式
定义对象间的一种一对多的依赖关系,当一个对象的状态发生改变时,所有依赖它的对象都得到通知并自动更新 观察者又叫发布订阅模式
"""
from abc import ABCMeta, abstractmethod
class Observer(metaclass=ABCMeta):
    """
    Observer interface: concrete subscribers must implement update(notice).
    """
    @abstractmethod
    def update(self, notice): # notice is the publisher (message) object being pushed
        pass
class Notice:
    """
    Abstract publisher: keeps the list of subscribed observers and pushes
    itself to each one when notify() is called.
    """
    def __init__(self):
        # Registered observers; each must provide update(notice).
        self.observer = []

    def attach(self, objs):
        """
        Subscribe an observer to this publisher.
        :param objs: object implementing update(notice)
        :return: None
        """
        self.observer.append(objs)

    def detach(self, objs):
        """
        Unsubscribe a previously attached observer.
        :param objs: the observer to remove
        :return: None
        """
        self.observer.remove(objs)

    def notify(self):
        """
        Push this publisher instance to every subscribed observer.
        :return: None
        """
        for subscriber in self.observer:
            subscriber.update(self)
class StaffNotice(Notice):
    """
    Concrete publisher: notifies all attached observers whenever the company
    info is replaced through the property setter.
    """
    def __init__(self, company_info=None):
        super().__init__()
        # Private backing field for the published company information.
        self.__company_info = company_info
    @property
    def company_info(self):
        return self.__company_info
    @company_info.setter
    def company_info(self, info):
        self.__company_info = info
        # Push the new state to every subscribed observer.
        self.notify()
class Staff(Observer):
    """
    Concrete observer (receiver): caches the latest company info pushed to it.
    """
    def __init__(self):
        # Last company information received from the publisher.
        self.company_info = None
    def update(self, notice):
        self.company_info = notice.company_info
if __name__ == '__main__':
    # Demo: two staff observers subscribe to one staff notice board.
    notice = StaffNotice("初始公司信息")
    s1 = Staff()
    s2 = Staff()
    notice.attach(s1)
    notice.attach(s2)
    # Assigning the property triggers notify(), pushing the text to s1 and s2.
    notice.company_info = "今年业绩好 发奖金"
    print(s1.company_info)
    print(s2.company_info)
| true |
de9c17488fd2f879db721cdeb924d065251a2d80 | Python | Vlad-Radz/distributed_pong | /game/server.py | UTF-8 | 4,305 | 2.890625 | 3 | [] | no_license | import socket
import uuid
from threading import Thread
import queue
import pickle
import subprocess
# TODO: needs to be run through isort for the right import sorting
# TODO: use structlog for better logging
import pika
from game.player_config import PlayerConfig
class Orchestrator:
    """Accepts player TCP connections, hands each one its PlayerConfig, and
    broadcasts the full player list once everyone has connected.

    NOTE(review): relies on the module-level global ``expected_players``
    (defined in the __main__ block) — confirm before reusing this class.
    """
    def __init__(self, host: str, port: int, my_queue: queue.Queue):
        self.host = host
        self.port = port
        self.config_players_queue = my_queue
        self.connected_players_queue = queue.Queue()
        # AF_INET & AF_INET6: address (and protocol) families
        # SOCK_STREAM means that it is a TCP socket.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((self.host, self.port)) # Bind the socket to address
        # Enable a server to accept connections
        # number of unaccepted connections that the system will allow before refusing new connections
        self.socket.listen(expected_players)
        print("Waiting for a connection")
    def orchestrate(self):
        """Accept connections until the expected number of players joined,
        then send each connected socket the full list of player configs."""
        # Set up a queue in message broker for future communication between players
        # TODO: abstraction from message broker needed; probably using abstract base classes and / or facade pattern
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self.host))
        channel = connection.channel()
        channel.exchange_declare(exchange='moves', exchange_type='direct')
        sockets = []
        while True:
            # TODO: maybe the State pattern would be good here (using Enum)?
            # `conn` is a new socket object usable to send and receive data on the connection
            # `address` is the address bound to the socket on the other end of the connection.
            conn, addr = self.socket.accept()
            sockets.append(conn)
            print(conn, type(conn))
            print("Connected to: ", addr)
            # Start a new thread and return its identifier.
            # The thread executes the function function with the argument list args (which must be a tuple).
            # TODO: asyncio implementation might be better
            # NOTE(review): start() immediately followed by join() serializes
            # the handshakes, so this thread adds no concurrency.
            thread = Thread(target=self.handle_connection, args=(conn,))
            thread.start()
            thread.join()
            if self.connected_players_queue.qsize() == expected_players:
                print("Required number of players were connected.")
                for conn in sockets:
                    conn.sendall(pickle.dumps(list(self.connected_players_queue.queue)))
                print("Connection Closed")
                # NOTE(review): only the last `conn` from the loop above is
                # closed here; the other sockets in `sockets` stay open.
                conn.close()
    def handle_connection(self, conn: socket.socket):
        """Send the next unassigned PlayerConfig to this player and record
        the player as connected."""
        config_player = self.config_players_queue.get_nowait()
        # TODO: pickle is not the best tool, since can be used only for Python and has security issues.
        conn.send(pickle.dumps(config_player))
        self.config_players_queue.task_done()
        self.connected_players_queue.put_nowait(config_player)
    # TODO: refactor methods, better division of responsibilities
    # TODO: idea with second queue is not perfect - any other structure with shared data? look into asyncio
    # TODO: implement max possible number of players
if __name__ == "__main__":
    # TODO: to env vars
    # First local IP reported by `hostname -I` (Linux-specific command).
    host = subprocess.check_output("hostname -I", shell=True).decode("utf-8").split(" ")[0]
    print(host)
    # host = "192.168.178.47"
    port = 5555
    expected_players = 2
    # Fixed paddle configs for the two players; exactly one may start.
    player_left = PlayerConfig(
        uuid=uuid.uuid4(),
        side='left',
        coord_x=20,
        coord_y=200,
        eligible_to_start=True)
    player_right = PlayerConfig(
        uuid=uuid.uuid4(),
        side='right',
        coord_x=670,
        coord_y=200,
        eligible_to_start=False) # TODO: the choice of eligible to start should happen automatically and exclude possibility of double assignment
    # Not needed now, because not implemented yet
    player_up = ...
    player_down = ...
    # I use FIFO queue since order is important for this game (from my perspective)
    players_queue = queue.Queue(maxsize=expected_players)
    players_queue.put(player_left)
    players_queue.put(player_right)
    server = Orchestrator(host=host, port=port, my_queue=players_queue)
    server.orchestrate()
| true |
db66ee01e001c1b42e02cff4b55c3da28609443e | Python | sangyeel/zzStock | /File/GetCompanyTodayStock.py | UTF-8 | 1,165 | 2.609375 | 3 | [] | no_license | import pandas
import requests
import io
import bs4
import re
import os
# Point Django at the project's settings module and bootstrap the app
# registry before any ORM models are used.
os.environ.setdefault("DJANGO_SETTINGS_MODULE","ssStock.settings")
import django
django.setup()
class GetCompanyTodayStockData:
    """Scrapes today's closing price for one stock from Naver Finance."""
    # Daily-price page; the stock code and page number are appended below.
    __URL = 'https://finance.naver.com/item/sise_day.nhn?code='
    # Browser-like User-Agent so Naver serves the regular HTML page.
    __HEADER = { 'User-Agent' : ('Mozilla/5.0 (Windows NT 10.0;Win64; x64)\AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98\Safari/537.36') }
    def __init__(self,stockCode):
        # Fetch eagerly so the price is available right after construction.
        self.stockCode = stockCode
        self.todayStockPrice = self.get_today_stock_data()
    def get_today_stock_data(self):
        """Download page 1 of the daily price table and return one value from
        the "종가" (closing price) column."""
        targetUrl = GetCompanyTodayStockData.__URL + self.stockCode + '&page=1'
        res = requests.get(targetUrl,headers = GetCompanyTodayStockData.__HEADER)
        tempDataFrameList = pandas.read_html(res.text)
        tempDataFrame = tempDataFrameList[0] #Naver stock has 2table, one is stock data other one is pagenation table
        tempDataFrame = tempDataFrame.dropna() #drop 'na' data
        # NOTE(review): .loc[1, ...] selects the row *labelled* 1, assumed to
        # be the latest trading day after dropna() — confirm against the page.
        return tempDataFrame.loc[1,"종가"]
if __name__ == '__main__':
    # Smoke test with ticker 005930.
    TodayStock = GetCompanyTodayStockData('005930')
    print(TodayStock.todayStockPrice)
75bb272b96b64a45ec6eb734961964805a304727 | Python | zibrahim/MIMICTimeSeriesProcessing | /PneumoniaPythonDataProcessing/3-AggregateTimeSeries4Hours.py | UTF-8 | 3,372 | 2.921875 | 3 | [] | no_license | import pandas as pd
import os
from Processing.Dictionaries import aggregation, outcomes, id
from Processing.CleanTimeSeries import remove_alpha, remove_nacolumns
def main():
    """Bucket each patient's hourly time series into 4-hour windows, drop
    patients whose key lab columns are entirely missing, and write the
    aggregated CSV to Data/TimeSeriesAggregated.csv."""
    time_series = pd.read_csv("Data/TimeSeriesUpto0.csv")
    time_series = remove_alpha(time_series)
    time_series = remove_nacolumns(time_series)
    patient_ids = time_series['PatientID'].unique()
    #2. Create a new column, call it FourHourIndex
    time_series['FourHourIndex'] = -1
    new_time_series = pd.DataFrame(columns=time_series.columns)
    #3. Create a new column that aggregates every 4 hours into 1
    for p in patient_ids:
        patient_slice = time_series.loc[time_series.PatientID ==p,]
        # NOTE(review): reset_index() returns a new frame; the result is
        # discarded here, so this line is a no-op as written.
        patient_slice.reset_index()
        lower_limit = 0
        upper_limit = 3
        flag = False
        # Label each run of 4 consecutive rows with its window's start index.
        while (flag == False):
            if upper_limit >= len(patient_slice.index) :
                flag = True
                patient_slice.iloc[lower_limit:len(patient_slice.index),patient_slice.columns.get_loc('FourHourIndex')] = lower_limit
            else:
                patient_slice.iloc[lower_limit:upper_limit+1,patient_slice.columns.get_loc('FourHourIndex')] = lower_limit
                lower_limit = lower_limit + 4
                upper_limit = upper_limit + 4
        # Skip patients whose key lab columns contain no values at all.
        cvo2 = patient_slice['CentralvenousO2Saturation']
        creactiveprot = patient_slice['Creactiveprotein']
        if cvo2.isnull().values.all() or creactiveprot.isnull().values.all():
            print(" paitent", p, "has all nan CentralvenousO2Saturation")
        else:
            new_time_series = new_time_series.append(patient_slice, ignore_index=True)
    # Round-trip through CSV so dtypes are re-inferred uniformly.
    new_time_series.to_csv("Data/new_time_series_0.csv", index=False)
    new_time_series = pd.read_csv("Data/new_time_series_0.csv")
    os.remove("Data/new_time_series_0.csv")
    int_columns = [ "Day", "Hour", "Age",
                    "Mortality3Days", "Mortality7Days","Mortality14Days","Mortality30Days",
                    "OrdinalHour", "FourHourIndex"]
    new_time_series[int_columns] = new_time_series[int_columns].astype(int)
    na_columns = set(new_time_series.columns) - set(int_columns)
    na_columns = na_columns - set(['PatientID'])
    # NOTE(review): unlike na_columns, float_columns is not filtered, so it
    # still contains 'PatientID' — verify the float cast below is intended.
    float_columns = list(set(new_time_series.columns) - set(int_columns))
    new_time_series[float_columns] = new_time_series[float_columns].astype(float)
    # Aggregate every (patient, 4-hour window) group with the per-column
    # functions declared in the shared `aggregation` dictionary.
    aggregate_series = new_time_series.groupby(['PatientID', 'FourHourIndex']).aggregate(aggregation)
    #print(aggregate_series['PO2/FIO2'].isnull().sum() * 100 /len(aggregate_series['PO2/FIO2']))
    # 1. Identify columns where PO2/FIO2 is null but both FIO2 and PO2 are not null
    #matches = aggregate_series['PO2/FIO2'].isnull() & aggregate_series['FiO2'].notnull() & aggregate_series['PO2'].notnull()
    # 2. Calculate PO2/FIO2 for the columns using the individual PO2 and FIO2 values
    #aggregate_series.loc[matches, 'PO2/FIO2'] = aggregate_series.loc[matches, 'PO2']/aggregate_series.loc[matches, 'FiO2']
    #print(aggregate_series['PO2/FIO2'].isnull().sum() * 100 /len(aggregate_series['PO2/FIO2']))
    #print("dim before remove na ", aggregate_series.shape)
    aggregate_series.dropna(axis=1, how='all', inplace=True)
    #print("dim after remove na ", aggregate_series.shape)
    aggregate_series.to_csv("Data/TimeSeriesAggregated.csv", index=False)
# Script entry point.
if __name__ == "__main__" :
    main()
726e01c067000dc2411c765c454ce3dfb828eae8 | Python | fnannizzi/homophone_error_correction | /generate_training_data.py | UTF-8 | 3,639 | 3.15625 | 3 | [] | no_license | #! /usr/bin/env python
from __future__ import division
import sys, os, nltk, homophone_error_correction, time
# move all the texts into one big file
def consolidate_text():
    """Concatenate every .txt file under ./texts into raw_training_data.txt.

    Fixes: the source path was built with a hard-coded '/' separator (now
    os.path.join, portable across platforms) and the loop variable shadowed
    the name ``file``.
    """
    with open("raw_training_data.txt", 'w+') as outfile:
        for name in os.listdir("texts"):
            if name.endswith(".txt"):
                with open(os.path.join("texts", name)) as infile:
                    # Stream line by line to keep memory use flat.
                    for line in infile:
                        outfile.write(line)
# clean up the text for processing
# format into training samples
def format_for_training():
    """Tokenize the raw corpus, POS-tag each sentence, and emit one training
    example (four surrounding POS-tag features) per homophone occurrence.

    Python 2 script (uses the print statement); relies on NLTK's punkt
    tokenizer and the project's HomophoneErrorCorrection helper.
    """
    t0 = time.time()
    hec = homophone_error_correction.HomophoneErrorCorrection()
    with open("raw_training_data.txt") as textfile:
        raw_text = textfile.read().replace('\n', ' ').replace('\r', ' ')
    # tokenize the text into sentences
    sentence_tokenizer=nltk.data.load('nltk:tokenizers/punkt/english.pickle')
    sentences = sentence_tokenizer.tokenize(raw_text)
    for s in sentences:
        # tokenize the sentence into words
        raw_words = s.split(' ')
        words = []
        # clean up the text some more before we add POS tags
        for w in raw_words:
            if w.isspace():
                continue
            w = w.lower()
            w = w.replace('.', '')
            w = w.replace('?', '')
            w = w.replace(',', '')
            w = w.replace(';', '')
            w = w.replace(':', '')
            w = w.replace('"', '')
            if not w:
                continue
            words.append(w)
        # add POS tags to the words
        pos_tagged_words = nltk.pos_tag(words)
        len_words = len(words)
        for index, w in enumerate(words):
            # check to see if word is one of the homophones we're searching for
            h_type = hec.find_homophone_type(w)
            if h_type != -1:
                # the current word is one of the types of homophones we're looking for
                # determine the class of the homophone (within its type)
                h_class = hec.find_homophone_class(w)
                # find the features for this training example
                # find the 2-preceding tag ("null_tag" when out of range)
                if index > 1:
                    pre_pre_tag = pos_tagged_words[index - 2][1]
                else:
                    pre_pre_tag = "null_tag"
                # find the preceding tag
                if index > 0:
                    pre_tag = pos_tagged_words[index - 1][1]
                else:
                    pre_tag = "null_tag"
                # find the succeeding tag
                if index < (len_words - 1):
                    post_tag = pos_tagged_words[index + 1][1]
                else:
                    post_tag = "null_tag"
                # find the 2-succeeding tag
                if index < (len_words - 2):
                    post_post_tag = pos_tagged_words[index + 2][1]
                else:
                    post_post_tag = "null_tag"
                # create the feature vector
                features = [hec.pos_tag_lookup(pre_pre_tag), hec.pos_tag_lookup(pre_tag), hec.pos_tag_lookup(post_tag), hec.pos_tag_lookup(post_post_tag)]
                # add the training example to the corresponding set of training examples
                hec.add_training_example(h_type, h_class, features)
    # write the training data to a file
    hec.write_data_to_file("training_data.txt")
    t1 = time.time()
    # output the total time needed to generate training data
    print "Generating training data took {0} minutes".format((t1-t0)/60)
# Script entry: build the raw corpus, then derive training_data.txt from it.
consolidate_text()
format_for_training()
27b2a282314f118c4f88bd07a13c154b5dd7ac5e | Python | dsilvers/hangar-scripts-2015 | /probes.py | UTF-8 | 1,554 | 2.90625 | 3 | [
"BSD-3-Clause"
] | permissive | from api import api
import random
import sys
from w1thermsensor import W1ThermSensor
"""
Script has two modes:
Testing mode. Add anything as an argument when running this script to use
random data and existing sensors in the API.
python probes.py test
Production mode. Reads any temperature sensors hooked up.
python probes.py
"""
class FakeSensor:
    """Stand-in for a 1-wire probe, used in testing mode.

    Mirrors the W1ThermSensor interface (``id`` plus ``get_temperature()``)
    and reports a random reading between 10.00 and 30.00 degrees.
    """
    id = "fake"
    value = 0
    def get_temperature(self):
        """Return a pseudo-random temperature with 0.01-degree resolution."""
        hundredths = random.randint(1000, 3000)
        return hundredths / 100.0
if len(sys.argv) > 1:
    """ Testing mode. Grab all sensors from the API and use those """
    all_api_sensors = api("probes")
    sensors = []
    for api_sensor in all_api_sensors:
        print api_sensor
        # Wrap each API-registered probe in a fake sensor carrying its serial.
        sensor = FakeSensor()
        sensor.id = api_sensor['serial']
        sensors.append(sensor)
else:
    """ Production mode. Use all sensors detected by W1ThermSensor. """
    sensors = W1ThermSensor.get_available_sensors()
for sensor in sensors:
    """ Actually send the data to the API. """
    # Look up the probe record matching this sensor's serial number.
    api_sensor = api("probes", method="get", data={'serial': sensor.id})
    if len(api_sensor) == 1:
        id = api_sensor[0]["id"]
        value = round(sensor.get_temperature(), 2)
        print "===== #{} - {}: {}'C".format(id, sensor.id, value)
        print api("probedata", method="post", data={
            'probe': id,
            'value': value,
        })
        print ""
    else:
        print "### API has no probe setup for serial '{}'".format(sensor.id)
        print ""
14ff728a5a32b46c6458e239155732e6c6042507 | Python | bkerdzaia/nand2tetris | /projects/10/JackAnalyzer.py | UTF-8 | 3,385 | 3.421875 | 3 | [] | no_license | #!/usr/bin/evn python3
from JackTokenizer import JackTokenizer
from CompilationEngine import CompilationEngine
import os
import sys
"""
The analyzer program operates on a given source, where source is either a file name
of the form Xxx.jack or a directory name containing one or more such files. For
each source Xxx.jack file, the analyzer goes through the following logic:
1. Create a JackTokenizer from the Xxx.jack input file.
2. Create an output file called Xxx.xml and prepare it for writing.
3. Use the CompilationEngine to compile the input JackTokenizer into the output file.
"""
# makes terminal
def generate_terminal(terminal_type, value):
    """Render one terminal token as a single XML line, e.g.
    generate_terminal('keyword', 'class') -> '<keyword> class </keyword>' + newline."""
    return "<{0}> {1} </{0}>\n".format(terminal_type, value)
# generates a xml tree
def generate_xml_token_code(filename):
    """Tokenize one Xxx.jack file and write its token stream as XML to
    '<dir>2/XxxT.xml'.

    Fix: the original never closed either file handle (the output file in
    particular was left open); both are now managed with ``with`` so they
    are closed even on error.
    """
    name = os.path.basename(filename).replace(".jack", "T.xml")
    out_dir = os.path.dirname(filename) + "2"
    with open(os.path.join(out_dir, name), "w") as xml, open(filename) as stream:
        xml.write("<tokens>\n")
        token = JackTokenizer(stream)
        while token.has_more_tokens():
            token.advance()
            tok_type = token.token_type()
            if tok_type == token.KEYWORD:
                xml.write(generate_terminal("keyword", token.key_word()))
                print("keyword:", token.key_word())
            elif tok_type == token.SYMBOL:
                xml.write(generate_terminal("symbol", token.symbol()))
                print("symbol:", token.symbol())
            elif tok_type == token.STRING_CONST:
                xml.write(generate_terminal("stringConstant", token.string_val()))
                print("string const:", token.string_val())
            elif tok_type == token.INT_CONST:
                xml.write(generate_terminal("integerConstant", token.int_val()))
                print("int const:", token.int_val())
            elif tok_type == token.IDENTIFIER:
                xml.write(generate_terminal("identifier", token.identifier()))
                print("identifier:", token.identifier())
            else:
                print("not valid token:", tok_type)
        xml.write("</tokens>")
# generate a xml tree for tokenizer module
def generate_xml_token_dir(dirname):
    """Run the tokenizer XML generator on every .jack file in *dirname*.

    Fixes: suffix test via endswith(".jack") instead of a substring search,
    and os.path.join instead of a hard-coded Windows '\\' separator.
    """
    for entry in os.listdir(dirname):
        if entry.endswith(".jack"):
            generate_xml_token_code(os.path.join(dirname, entry))
# filename = "ArrayTest\Main.jack"
# generate_xml_token_code(filename)
# generate_xml_token_dir("ExpressionlessSquare")
# Create a JackTokenizer from the Xxx.jack input file.
# Pass an output file name called Xxx.xml to compilation engine and prepare it for writing.
# Use the CompilationEngine to compile the input JackTokenizer into the output file.
def generate_xml_code_parser(dirname, filename):
    """Compile dirname/filename (a .jack file) into a parse-tree XML written
    to '<dirname>2/<name>.xml'.

    Fixes: portable path handling via os.path.join (was hard-coded '\\'),
    and the input file is closed via ``with`` even if compilation fails.
    """
    out_path = os.path.join(dirname + "2", filename.replace(".jack", ".xml"))
    with open(os.path.join(dirname, filename)) as source:
        tokenizer = JackTokenizer(source)
        engine = CompilationEngine(tokenizer, out_path)
        engine.compile_class()
    engine.close()
# generate a xml tree for parser module
def generate_xml_parser(dirname):
    """Run the parser XML generator on every .jack file in *dirname*.

    Fix: suffix test via endswith(".jack") instead of a substring search,
    matching the intent of 'files ending in .jack'.
    """
    for entry in os.listdir(dirname):
        if entry.endswith(".jack"):
            generate_xml_code_parser(dirname, entry)
# main program
if __name__ == '__main__':
    # Expect exactly one argument: the directory containing .jack sources.
    if len(sys.argv) != 2:
        raise Exception("Not valid argument number")
    try:
        generate_xml_parser(sys.argv[1])
        print("finished successfully")
    except Exception as e:
        # NOTE(review): prints the error and exits 0; the traceback is lost.
        print(e)
| true |
ad84ec14c9600b5d5ae3f2aea864790a7e730538 | Python | Traverse-Technology/python_basic | /Function/function_args.py | UTF-8 | 110 | 3 | 3 | [] | no_license | def my_function(*kids):
    print("The youngest child is " + kids[2])  # third positional argument; raises IndexError with fewer than 3 args
# Demo call with three names; prints the last one ("Maw Maw").
my_function("Kyaw Kyaw", "Zaw Zaw", "Maw Maw")
b6c8a29e66e977e5bf745fbd5dc6539e5da19d03 | Python | pppppass/NumPDE | /P01Elliptic/ProblemPart2.py | UTF-8 | 4,176 | 2.671875 | 3 | [] | no_license |
# coding: utf-8
# In[2]:
import time
import shelve
import numpy
import scipy.sparse
import exts
# In[3]:
def get_ana_sol(size):
    """Sample the analytic solution
    u(x, y) = log((x+1)^2 + y^2)/2 + arctan(y/(x+1))
    on the uniform (size+1) x (size+1) grid over [0, 1]^2.

    Fix: dropped the unused locals (the ``n`` alias was only a rename and
    the grid spacing ``h`` was never used).
    """
    g = numpy.linspace(0.0, 1.0, size + 1)
    x, y = g[:, None], g[None, :]
    return numpy.log((x + 1.0)**2 + y**2) / 2.0 + numpy.arctan(y / (x + 1.0))
# In[4]:
def solve_sol(size, tol, max_=50000):
    """Assemble the 5-point finite-difference system on the (size+1)-point
    grid and solve it with the external CG solver (exts.solve_cg_infty_wrapper).

    Returns (solution grid, elapsed seconds, iteration count); `tol` is the
    solver tolerance and `max_` caps the iteration count.
    """
    n = size
    h = 1.0 / n
    # Five stencil diagonals, stored as (diagonal, x-index, interior y-index).
    data = numpy.zeros((5, n+1, n-1))
    data[0, :, :] = 4.0
    # Halved coefficients on the first/last x-rows (boundary rows — the h-scaled
    # bdry1/bdry3 terms below suggest flux conditions there; confirm).
    data[0, [0, -1], :] = 2.0
    data[1, :, :] = -1.0
    data[1, [0, -1], :] = -1.0 / 2.0
    data[1, :, 0] = 0.0
    data[2, :, :] = -1.0
    data[3, :, :] = -1.0
    data[3, [0, -1], :] = -1.0 / 2.0
    data[3, :, -1] = 0.0
    data[4, :, :] = -1.0
    mat = scipy.sparse.dia_matrix((data.reshape(5, -1), [0, 1, n-1, -1, -n+1]), ((n+1)*(n-1), (n+1)*(n-1))).tocsr()
    del(data)
    g = numpy.linspace(0.0, 1.0, n+1)
    # NOTE(review): x and y below are never used in this function.
    x, y = g[:, None], g[None, :]
    # Boundary data for the four edges of the unit square.
    bdry1 = (1.0 - g) / (1.0 + g**2)
    bdry2 = numpy.log(g + 1.0)
    bdry3 = (2.0 - g) / (4.0 + g**2)
    bdry4 = numpy.log((g + 1.0)**2 + 1) / 2.0 + numpy.pi / 2.0 - numpy.arctan(g + 1.0)
    sol = numpy.zeros((n+1, n+1))
    # Fixed values are written directly into the first and last columns.
    sol[:, 0] = bdry2
    sol[:, -1] = bdry4
    # Right-hand side: h-scaled edge terms plus couplings to the fixed columns.
    vec = numpy.zeros((n+1, n-1))
    vec[0, :] -= h * bdry1[1:-1]
    vec[-1, :] += h * bdry3[1:-1]
    vec[1:-1, 0] += bdry2[1:-1]
    vec[0::n, 0] += bdry2[0::n] / 2.0
    vec[1:-1, -1] += bdry4[1:-1]
    vec[0::n, -1] += bdry4[0::n] / 2.0
    start = time.time()
    # The wrapper iterates in place on sol[:, 1:-1] until the tolerance `tol`
    # is met or `max_` iterations are reached, returning the iteration count.
    ctr = exts.solve_cg_infty_wrapper((n-1)*(n+1), mat.data, mat.indices, mat.indptr, vec, sol[:, 1:-1], tol, max_)
    end = time.time()
    return sol, end - start, ctr
# In[5]:
# Accumulators: [0] errors, [1] extrapolated errors, [2] increments, [3] tolerance sweep.
res = [[], [], [], []]
n_list = [4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
tol = 1.0e-11
# In[5]:
# Convergence study on power-of-two grids, with Richardson extrapolation
# against the previous (half-resolution) solution.
sol_old = None
for n in n_list:
    ana = get_ana_sol(n)
    sol, elap, ctr = solve_sol(n, tol)
    print("n = {} solved, {:.5f} seconds and {} iterations".format(n, elap, ctr))
    err = numpy.linalg.norm((sol - ana).flatten(), numpy.infty)
    print("Error is {:.5e}".format(err))
    res[0].append((n, err, elap, ctr))
    if sol_old is not None:
        ana_old = get_ana_sol(n//2)
        sol_ext = (4.0 * sol[::2, ::2] - sol_old) / 3.0
        err_ext = numpy.linalg.norm((sol_ext - ana_old).flatten(), numpy.infty)
        print("Extrapolated error is {:.5e}".format(err_ext))
        res[1].append((n//2, err_ext))
        del(sol_ext)
        del(ana_old)
        # Increment between successive resolutions (observed order estimate).
        inc = sol[::2, ::2] - sol_old
        inc_norm = numpy.linalg.norm(inc.flatten(), numpy.infty)
        res[2].append((n//2, inc_norm))
        del(inc)
        del(sol_old)
    sol_old = sol
    del(sol)
    del(ana)
del(sol_old)
# In[6]:
# Repeat the error measurement on non-power-of-two grid sizes.
n_list = [3, 6, 11, 23, 45, 91, 181, 362, 724, 1448, 2896]
# In[7]:
for n in n_list:
    ana = get_ana_sol(n)
    sol, elap, ctr = solve_sol(n, tol)
    print("n = {} solved, {:.5f} seconds and {} iterations".format(n, elap, ctr))
    err = numpy.linalg.norm((sol - ana).flatten(), numpy.infty)
    res[0].append((n, err, elap, ctr))
    del(sol)
    del(ana)
# In[6]:
# Sweep the solver tolerance over a range of grid sizes.
n_list = [4, 8, 16, 32, 64, 128, 256, 512]
tol_list = [1.0e-2, 1.0e-3, 1.0e-4, 1.0e-5, 1.0e-6, 1.0e-7, 1.0e-8, 1.0e-9, 1.0e-10]
# In[7]:
for n in n_list:
    ana = get_ana_sol(n)
    for tol in tol_list:
        sol, elap, ctr = solve_sol(n, tol)
        print("tol = {:.1e} solved, {:.5f} seconds and {} iterations".format(tol, elap, ctr))
        err = numpy.linalg.norm((sol - ana).flatten(), numpy.infty)
        res[3].append((n, tol, err, elap, ctr))
        del(sol)
    print("n = {} finished".format(n))
    del(ana)
# In[10]:
# Persist all measurements, keyed by experiment kind and parameters.
with shelve.open("Result") as db:
    for e in res[0]:
        db[str((2, "error", e[0]))] = e[1:]
    for e in res[1]:
        db[str((2, "extrapolate", e[0]))] = e[1:]
    for e in res[2]:
        db[str((2, "order", e[0]))] = e[1:]
    for e in res[3]:
        db[str((2, "tolerance", e[0], e[1]))] = e[2:]
# In[14]:
# Save one high-resolution analytic/numeric pair for later plotting.
n = 512
tol = 1.0e-11
# In[15]:
ana = get_ana_sol(n)
sol, _, _ = solve_sol(n, tol)
# In[16]:
numpy.save("Result3.npy", ana)
numpy.save("Result4.npy", sol)
| true |
c52073d3520093610963c1fa582e1235de980c4a | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_136/1987.py | UTF-8 | 740 | 3.171875 | 3 | [] | no_license |
def cookie_clicker(C,F,X):
    """Return the minimum time to accumulate X cookies, starting at 2
    cookies/s, when a farm costing C cookies adds F cookies/s.

    Greedy rule: buy another farm only while (time to afford the farm plus
    time to win at the faster rate) beats winning at the current rate.

    Fixes: the original capped the search at 10**7 iterations and, if that
    cap was hit, fell out of the loop returning a total that was missing
    the final waiting time; the loop index was also unused.
    """
    rate = 2.0
    current_wait = 0.0
    while True:
        nowait = X / rate
        ifwait = (C / rate) + (X / (rate + F))
        if ifwait < nowait:
            # Buying is profitable: wait for the farm, then reconsider.
            current_wait += C / rate
            rate += F
        else:
            # No further farm helps; finish waiting at the current rate.
            return current_wait + nowait
# Python 2 driver: read T cases of "C F X" from stdin, solve each one.
T=int(raw_input())
solutions = []
for t in xrange(T):
    three = map(float, raw_input().strip().split())
    C = three[0]
    F = three[1]
    X = three[2]
    sol = cookie_clicker(C,F,X)
    form = "Case #%d: %8f" %(t+1, sol)
    solutions.append(form)
# Write all answer lines, avoiding a trailing newline after the last case.
with open('cookie_ouput.txt', 'w') as f:
    for s in solutions[:-1]:
        f.write(s)
        f.write("\n")
    f.write(solutions[-1])
81e211ec9d4f8bf23108231329b5a8207230abdc | Python | strands-project/strands_morse | /bham/src/lift_controller.py | UTF-8 | 2,557 | 2.875 | 3 | [] | no_license | #! /usr/bin/env python3
"""
An over simplified lift controller.
Controlls the lift in morse using a socket connection, and subscribes to:
/lift_sim/[command|call]floor : std_messages/Bool
where floor is [B|G|1|2].
The topics correspond to a user pressing the "call" buton on the outside of the lift on floor, or the "command" button on the inside of the lift to got to "floor". The lift travels in the order requested, and waits 8 seconds before closing the door.
"""
import sys
import rospy
from std_msgs.msg import Bool
import pymorse
import imp
rospy.init_node('lift_controller')
# Floor labels; doors[i] names the door on floor index i, and the morse
# platform floor is addressed as i-1 in the main loop below.
doors=['B','G','1','2']
# FIFO of requested floor indices; element 0 is the floor currently served.
lift_commands=[0]
def on_floor_call_command(data, args):
    # args = (button type: "call" or "command", floor index into doors).
    # NOTE(review): `type` shadows the builtin of the same name.
    (type,floor)=args
    """ On the press of the call or inside command button for lift..."""
    rospy.loginfo("[Lift Controller] Lift %s: To floor %s"%(type, doors[floor]))
    # Ignore requests for floors that are already queued.
    if floor in lift_commands:
        return
    lift_commands.append(floor)
# Subscribe to the call (outside) and command (inside) button topics for
# every floor; the callback receives (button type, floor index).
for t in ["call","command"]:
    for i in range(0,4):
        rospy.Subscriber("/lift_sim/%s%s"%(t,doors[i]), Bool, on_floor_call_command, callback_args=(t,i))
morse = None
# Outer loop: (re)connect to Morse; inner loop: serve queued floor requests.
while not rospy.is_shutdown():
    rospy.loginfo("[Lift Controller] Waiting for Morse...")
    if pymorse.Morse._asyncore_thread is not None:
        # This is a strange hack that is required because of some bug in pymorse.
        # The async thread created by pymorse needs to be forced to recreate
        # when we start a new connection.
        rospy.loginfo("[Lift Controller] Crikey")
        pymorse.Morse._asyncore_thread.join()
        pymorse.Morse._asyncore_thread = None
    try:
        with pymorse.Morse() as morse:
            rospy.loginfo ("[Lift Controller] Ready.")
            while not rospy.is_shutdown():
                if len(lift_commands)>1:
                    # Close the door on the current floor, drop it from the
                    # queue, travel to the next floor, open, then hold 8 s.
                    rospy.loginfo("[Lift Controller] Closing door %s"%doors[lift_commands[0]])
                    morse.rpc('lift.door%s'%doors[lift_commands[0]],'change_door_state',0)
                    lift_commands=lift_commands[1:]
                    rospy.loginfo("[Lift Controller] Moving to floor %s"%doors[lift_commands[0]])
                    morse.rpc('lift.platform','move_to_floor',lift_commands[0]-1)
                    rospy.loginfo("[Lift Controller] Opening door %s"%doors[lift_commands[0]])
                    morse.rpc('lift.door%s'%doors[lift_commands[0]],'change_door_state',1)
                    rospy.sleep(8)
    except Exception as e:
        rospy.loginfo("[Lift Controller] " + str(e) + " : will retry.")
        rospy.sleep(0.5)
| true |
5f3b4f78efe561457977a302e3909721b1b70873 | Python | egenedy97/Photo-Editor | /imageViewer.py | UTF-8 | 7,180 | 2.78125 | 3 | [] | no_license |
from tkinter import Frame, Canvas, CENTER, ROUND
from PIL import Image, ImageTk
import cv2
import imutils
import numpy as np
class ImageViewer(Frame):
    def __init__(self, master=None):
        """Gray viewer frame holding the canvas the image is drawn on."""
        Frame.__init__(self, master=master, bg="gray", width=600, height=400)
        # Tk PhotoImage currently rendered (kept to prevent garbage collection).
        self.shown_image = None
        self.x = 0
        self.y = 0
        # Crop rectangle corners, in canvas coordinates.
        self.crop_start_x = 0
        self.crop_start_y = 0
        self.crop_end_x = 0
        self.crop_end_y = 0
        # Canvas id of the rubber-band rectangle drawn while cropping.
        self.rectangle_id = 0
        # Scale factor from displayed pixels back to image pixels.
        self.ratio = 0
        # Landmark points (and their marker dot ids) for the perspective transform.
        self.coord = []
        self.dot =[]
        self.canvas = Canvas(self, bg="gray", width=1000, height=600)
        self.canvas.place(relx=0.5, rely=0.5, anchor=CENTER)
    def show_image(self, img=None):
        """Render *img* (default: the app's processed image) on the canvas,
        downscaled to fit the frame while preserving the aspect ratio."""
        # NOTE(review): clear_canvas is not defined in the visible part of
        # this class — confirm it exists elsewhere.
        self.clear_canvas()
        if img is None:
            image = self.master.processedImage.copy()
        else:
            image = img
        # OpenCV stores BGR; Tk/PIL expect RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        height, width, channels = image.shape
        ratio = height / width
        new_width = width
        new_height = height
        # Shrink only when the image is larger than the frame.
        if height > self.winfo_height() or width > self.winfo_width():
            if ratio < 1:
                new_width = self.winfo_width()
                new_height = int(new_width * ratio)
            else:
                new_height = self.winfo_height()
                new_width = int(new_height * (width / height))
        self.shown_image = cv2.resize(image, (new_width, new_height))
        self.shown_image = ImageTk.PhotoImage(Image.fromarray(self.shown_image))
        # Remember the display-to-image scale for crop coordinate mapping.
        self.ratio = height / new_height
        self.canvas.config(width=new_width, height=new_height)
        self.canvas.create_image(new_width / 2, new_height / 2, anchor=CENTER, image=self.shown_image)
    def ActiveCropping(self):
        """Enable interactive cropping: bind press/drag/release handlers."""
        self.canvas.bind("<ButtonPress>", self.start_crop)
        self.canvas.bind("<B1-Motion>", self.crop)
        self.canvas.bind("<ButtonRelease>", self.end_crop)
        self.master.cropState = True
    def DeactiveCropping(self):
        """Disable cropping: remove the handlers bound by ActiveCropping."""
        self.canvas.unbind("<ButtonPress>")
        self.canvas.unbind("<B1-Motion>")
        self.canvas.unbind("<ButtonRelease>")
        self.master.cropState = False
    def start_crop(self, event):
        """Record the canvas position where the crop drag began."""
        self.crop_start_x = event.x
        self.crop_start_y = event.y
    def crop(self, event):
        """Redraw the rubber-band rectangle as the mouse is dragged."""
        if self.rectangle_id:
            self.canvas.delete(self.rectangle_id)
        self.crop_end_x = event.x
        self.crop_end_y = event.y
        self.rectangle_id = self.canvas.create_rectangle(self.crop_start_x, self.crop_start_y,
                                                         self.crop_end_x, self.crop_end_y, width=1)
def end_crop(self, event):
if self.crop_start_x <= self.crop_end_x and self.crop_start_y <= self.crop_end_y:
start_x = int(self.crop_start_x * self.ratio)
start_y = int(self.crop_start_y * self.ratio)
end_x = int(self.crop_end_x * self.ratio)
end_y = int(self.crop_end_y * self.ratio)
elif self.crop_start_x > self.crop_end_x and self.crop_start_y <= self.crop_end_y:
start_x = int(self.crop_end_x * self.ratio)
start_y = int(self.crop_start_y * self.ratio)
end_x = int(self.crop_start_x * self.ratio)
end_y = int(self.crop_end_y * self.ratio)
elif self.crop_start_x <= self.crop_end_x and self.crop_start_y > self.crop_end_y:
start_x = int(self.crop_start_x * self.ratio)
start_y = int(self.crop_end_y * self.ratio)
end_x = int(self.crop_end_x * self.ratio)
end_y = int(self.crop_start_y * self.ratio)
else:
start_x = int(self.crop_end_x * self.ratio)
start_y = int(self.crop_end_y * self.ratio)
end_x = int(self.crop_start_x * self.ratio)
end_y = int(self.crop_start_y * self.ratio)
x = slice(start_x, end_x, 1)
y = slice(start_y, end_y, 1)
self.master.processedImage = self.master.processedImage[y, x]
self.show_image()
def flippingImage(self ,var ):
self.master.processedImage = cv2.flip(self.master.processedImage,var)
self.show_image()
def RotatingImage(self , val) :
self.master.processedImage = imutils.rotate(self.master.processedImage, angle=val)
self.show_image()
def EqualizeImage(self):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
self.master.processedImage = clahe.apply(self.master.processedImage)
self.show_image()
def ActiveTransform(self) :
self.canvas.bind("<Button 1>",self.insertCoords)
self.canvas.bind("<Button 3>",self.removeCoords)
def DeactiveTransform(self):
self.canvas.unbind("<Button 1>",self.insertCoords)
self.canvas.unbind("<Button 3>",self.removeCoords)
    def insertCoords(self ,event):
        """Left-click handler: collect a corner point; after four points, run the warp."""
        self.coord.append([event.x, event.y])
        r=3
        # Draw a small red marker at the clicked corner so it can be undone later.
        self.dot.append(self.canvas.create_oval(event.x - r, event.y - r, event.x + r, event.y + r, fill="#ff0000"))
        if (len(self.coord) == 4):
            # Four corners collected: compute the perspective warp and display it.
            self.Transformer()
            self.canvas.delete("all")
            self.canvas.create_image(0,0,image=self.result,anchor="nw")
            # NOTE(review): self.result is an ImageTk.PhotoImage (set in
            # Transformer), while other methods treat processedImage as a
            # cv2/numpy array -- verify this assignment is intended.
            self.master.processedImage = self.result
def removeCoords(self, event=None):
del self.coord[-1]
self.canvas.delete(self.dot[-1])
del self.dot[-1]
    def Transformer(self):
        """Warp the quadrilateral given by the four clicked corners to a rectangle.

        Corner order implied by pts2 below: coord[0]=top-left,
        coord[1]=top-right, coord[2]=bottom-left, coord[3]=bottom-right.
        Stores the warped cv2 image in self.result_cv and a Tk-displayable
        copy in self.result.
        """
        # Mark the four chosen corners on the source image.
        cv2.circle(self.master.processedImage , tuple(self.coord[0]), 5, (0, 0, 255), -1)
        cv2.circle(self.master.processedImage , tuple(self.coord[1]), 5, (0, 0, 255), -1)
        cv2.circle(self.master.processedImage , tuple(self.coord[2]), 5, (0, 0, 255), -1)
        cv2.circle(self.master.processedImage , tuple(self.coord[3]), 5, (0, 0, 255), -1)
        # Output width/height: the longer of the two opposite edges.
        widthA = np.sqrt(((self.coord[3][0] - self.coord[2][0]) ** 2) + ((self.coord[3][1] - self.coord[2][1]) ** 2))
        widthB = np.sqrt(((self.coord[1][0] - self.coord[0][0]) ** 2) + ((self.coord[1][1] - self.coord[0][1]) ** 2))
        maxWidth = max(int(widthA), int(widthB))
        heightA = np.sqrt(((self.coord[1][0] - self.coord[3][0]) ** 2) + ((self.coord[1][1] - self.coord[3][1]) ** 2))
        heightB = np.sqrt(((self.coord[0][0] - self.coord[2][0]) ** 2) + ((self.coord[0][1] - self.coord[2][1]) ** 2))
        maxHeight = max(int(heightA), int(heightB))
        # Debug leftover: prints the clicked corners.
        print(self.coord)
        pts1 = np.float32(self.coord)
        pts2 = np.float32([[0, 0], [maxWidth-1, 0], [0, maxHeight-1], [maxWidth-1, maxHeight-1]])
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        self.result_cv = cv2.warpPerspective(self.master.processedImage, matrix, (maxWidth,maxHeight))
        # Convert BGR -> RGB for PIL/Tk display.
        result_rgb = cv2.cvtColor(self.result_cv, cv2.COLOR_BGR2RGB)
        self.result = ImageTk.PhotoImage(image = Image.fromarray(result_rgb))
    def clear_canvas(self):
        """Remove every item currently drawn on the canvas."""
        self.canvas.delete("all")
| true |
8f6dd38ae32d53aafc49060ee7753509cf7154a7 | Python | seattlegirl/leetcode | /palindrome-number.py | UTF-8 | 1,109 | 4.125 | 4 | [] | no_license | #coding=utf-8
"""
判断一个整数是否是回文数。回文数是指正序(从左向右)和倒序(从右向左)读都是一样的整数。
示例 1:
输入: 121
输出: true
示例 2:
输入: -121
输出: false
解释: 从左向右读, 为 -121 。 从右向左读, 为 121- 。因此它不是一个回文数。
示例 3:
输入: 10
输出: false
解释: 从右向左读, 为 01 。因此它不是一个回文数。
进阶:
你能不将整数转为字符串来解决这个问题吗?
不使用字符串来判断,也就是不使用额外的空间。
可以将原整数翻转,看反转后的整数是否和原来整数相等。
"""
class Solution:
    def isPalindrome(self, x):
        """
        :type x: int
        :rtype: bool
        """
        # Negatives never read the same backwards, and a trailing zero
        # could only be matched by a leading zero (only 0 itself has one).
        if x < 0 or (x != 0 and x % 10 == 0):
            return False
        # Peel digits off the tail into tail_reversed until it holds at
        # least half of them.
        tail_reversed = 0
        while x > tail_reversed:
            tail_reversed = tail_reversed * 10 + x % 10
            x = x // 10
        # Even digit count: halves match exactly.  Odd count: the middle
        # digit sits at the bottom of tail_reversed and is dropped.
        return x == tail_reversed or tail_reversed // 10 == x
if __name__ == "__main__":
print Solution().isPalindrome(11)
| true |
99cc3482154b1e57ccca419941b95075453568bb | Python | avidekar/python-assignments | /trap_rain_water.py | UTF-8 | 670 | 3.9375 | 4 | [] | no_license | # Given n non-negative integers representing an elevation map where the width of each bar is 1,
# compute how much water it is able to trap after raining.
# Example:
#
# Input: [0,1,0,2,1,0,1,3,2,1,2,1]
# Output: 6
def calculate_rain_water(height):
    """Compute trapped rain water for elevation map *height* (bars of width 1).

    Water above bar i is min(max height to its left, max height to its
    right) - height[i], clamped at 0.  Precomputed suffix maxima plus a
    running left maximum make this O(n) instead of the original O(n^2)
    repeated max() scans.  Prints the total (as before) and also returns it.
    """
    if len(height) < 3:
        # Too few bars to trap anything; matches the original's empty loop.
        print(0)
        return 0
    # suffix_max[i] = max(height[i:])
    suffix_max = [0] * len(height)
    suffix_max[-1] = height[-1]
    for index in range(len(height) - 2, -1, -1):
        suffix_max[index] = max(height[index], suffix_max[index + 1])
    volume = 0
    max_left = height[0]
    for index in range(1, len(height) - 1):
        potential = min(max_left, suffix_max[index + 1]) - height[index]
        volume += max(0, potential)
        max_left = max(max_left, height[index])
    print(volume)
    return volume
# Example from the problem statement; expected trapped volume is 6.
height = [0,1,0,2,1,0,1,3,2,1,2,1]
calculate_rain_water(height)
1d0ac9c29dca2c8310d1f2c6240ff51e9cf40ab4 | Python | zenatureza/IA | /Missionarios_Canibais_ArthurPinheiro/Classes.py | UTF-8 | 15,933 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""@package Classes
Documentação para o módulo de classes.
"""
## Class representing a node of the search space.
#
# A state is a tuple ([missLeft, cannLeft], [missRight, cannRight], boatBank).
class Nodo:
    ## Constructor:
    # @brief Initialises the node parameters (state, parent reference and generating operator).
    # @param estado Node state.
    # @param pai Reference to the parent node (None for the root).
    # @param operador Operator (missionaries, cannibals) that generated the node.
    def __init__(self, estado, pai=None, operador=None):
        self.estado = estado
        self.pai = pai
        self.operador = operador
        ## @var self.estado
        # @brief State of the current node.
        ## @var self.pai
        # @brief Reference to the node that generated this one.
        ## @var self.operador
        # @brief Operator that generated the current node.
    ## Apply operator <M,C>, where 0 < (M + C) <= TAM_BARCO.
    # M: number of missionaries carried by the boat.
    # C: number of cannibals carried by the boat.
    # @param self Reference to the node.
    # @param operador Operator producing the new node.
    # @param possiveisOperadores Unused here; kept for call-site compatibility.
    # @return Node generated by applying the operator.
    def funcSucessor(self, operador, possiveisOperadores):
        novoEstado = self.transportar(self.estado, operador)
        novoNodo = Nodo(novoEstado, self, operador)
        return novoNodo
    ## Generate the children of this node via the successor function.
    # @param self Reference to the node.
    # @return All nodes reachable through the valid operators.
    def gerarEstados(self):
        possiveisOperadores = self.definirOpValidos(self.estado)
        return [self.funcSucessor(operadorAtual, possiveisOperadores)
                for operadorAtual in possiveisOperadores]
    ## Operators that achieved the goal.
    # @param self Reference to the node.
    # @return Sequence of operators used from the root to this node.
    def mostrarPlanoResultante(self):
        return [node.operador for node in self.caminho()[1:]]
    ## List of node references along the path that reached this node.
    # @param self Reference to the node.
    # @return List of nodes ordered from the initial state to this one.
    def caminho(self):
        nodo, caminho_de_nodos = self, []
        while nodo:
            caminho_de_nodos.append(nodo)
            nodo = nodo.pai
        return list(reversed(caminho_de_nodos))
    ## Check whether the given state is the goal state.
    # @param estado State to compare with the goal.
    # @param sObjetivo Reference (goal) state.
    # @return True iff every item of `estado` equals the matching item of `sObjetivo`.
    def testeDeObjetivo(self, estado, sObjetivo):
        if all(itemDoEstadoAtual == itemDoEstadoObjetivo for (itemDoEstadoAtual, itemDoEstadoObjetivo) in zip(estado, sObjetivo)):
            return True
        else:
            return False
    ## Generate the state produced by an operator, i.e. simulate carrying
    # x missionaries and y cannibals to the opposite bank.
    # @param estado Starting state for the crossing.
    # @param opAtual Operator giving the boat load.
    # @return State resulting from the crossing.
    def transportar(self, estado, opAtual):
        margens = self.obterMargens(estado, opAtual)
        ME = margens[0]
        MD = margens[1]
        novaMargem = margens[2]
        novoEstado = (ME, MD, novaMargem)
        return novoEstado
    ## Check the head count on each river bank.
    # @param margens Configuration of both banks.
    # @return False when cannibals outnumber missionaries on a bank that
    # still holds missionaries; True when both banks are safe.
    def verificarMargens(self, margens):
        me = margens[0]
        md = margens[1]
        missionariosNaDireita = md[0]
        canibaisNaDireita = md[1]
        missionariosNaEsquerda = me[0]
        canibaisNaEsquerda = me[1]
        if (missionariosNaDireita > 0):
            if (missionariosNaDireita < canibaisNaDireita):
                return False # INVALID!
        if (missionariosNaEsquerda > 0):
            if (missionariosNaEsquerda < canibaisNaEsquerda):
                return False # INVALID!
        return True # VALID!
    ## Select only the operators valid for the given state.
    # @param estado State used to determine the valid operators.
    # @return List of possible operators.
    def definirOpValidos(self, estado):
        margemAtual = (estado[2])
        if margemAtual == "MargemDireita":
            indiceMargem = 1
        else:
            indiceMargem = 0
        numMissionarios = estado[indiceMargem][0]
        numCanibais = estado[indiceMargem][1]
        operadores = self.opIniciais()
        possiveisOperadores = self.testarTransporte(operadores, numMissionarios, numCanibais)
        """ Second validation pass checks every operator for:
        - Does applying it produce a bank where cannibals > missionaries?
            - If so, the operator is discarded
            - Otherwise it is kept as a possible operator. """
        possiveisOperadores = self.testarOperadores(possiveisOperadores, estado)
        return possiveisOperadores
    ## Simulate each operator, removing those whose use leaves a bank with
    # more cannibals than missionaries.
    # @param operadores List of operators to test.
    # @param estado State to test against.
    # @return Operators that survived this validation pass.
    def testarOperadores(self, operadores, estado):
        for opAtual in operadores[:]:
            if (self.transporteGeraMaisCanibaisQueMissionarios(opAtual, estado) == True):
                operadores.remove(opAtual)
        return operadores
    ## Whether applying the operator leads to the invalid situation of
    # cannibals outnumbering missionaries on a bank.
    # @param opAtual Operator to test.
    # @param estado State holding the bank configurations.
    # @return False when the crossing is safe; True when missionaries would die.
    def transporteGeraMaisCanibaisQueMissionarios(self, opAtual, estado):
        margens = self.obterMargens(estado, opAtual)
        if self.verificarMargens(margens) == True:
            return False
        else:
            return True
    ## Bank configurations resulting from applying an operator.
    # @param estado State holding the current bank configurations.
    # @param opAtual Operator to apply.
    # @return Bank configurations after applying opAtual.
    def obterMargens(self, estado, opAtual):
        configBarco = opAtual
        numMissionariosTransportados = configBarco[0]
        numCanibaisTransportados = configBarco[1]
        # People currently on the left bank
        missionariosNaMargemEsquerda = estado[0][0]
        canibaisNaMargemEsquerda = estado[0][1]
        # People currently on the right bank
        missionariosNaMargemDireita = estado[1][0]
        canibaisNaMargemDireita = estado[1][1]
        # Perform the crossing (move the people and flip the boat's bank)
        if estado[2] == "MargemEsquerda":
            missionariosNaMargemEsquerda -= numMissionariosTransportados
            canibaisNaMargemEsquerda -= numCanibaisTransportados
            missionariosNaMargemDireita += numMissionariosTransportados
            canibaisNaMargemDireita += numCanibaisTransportados
            novaMargem = "MargemDireita"
        else:
            missionariosNaMargemEsquerda += numMissionariosTransportados
            canibaisNaMargemEsquerda += numCanibaisTransportados
            missionariosNaMargemDireita -= numMissionariosTransportados
            canibaisNaMargemDireita -= numCanibaisTransportados
            novaMargem = "MargemEsquerda"
        MD = [missionariosNaMargemDireita, canibaisNaMargemDireita]
        ME = [missionariosNaMargemEsquerda, canibaisNaMargemEsquerda]
        margens = (ME, MD, novaMargem)
        return margens
    ## Generate the initial operator pool from the problem parameters.
    # Note: `break` (not `continue`) aborts the inner loop at the first
    # empty/oversize/invalid combination, so pure-cannibal moves (0, c)
    # are only produced by the trailing loop.
    # @return List of candidate operators (valid and invalid).
    def opIniciais(self):
        operadores = []
        for numMissAtual in range(0, Busca.N_MISS+1):
            for numCaniAtual in range(0, Busca.N_CAN+1):
                # Skip degenerate operators
                numPessoasNoBarco = numMissAtual + numCaniAtual
                if (numPessoasNoBarco == 0):
                    break
                if (numPessoasNoBarco > Busca.TAM_BARCO):
                    break
                # Assumption: more cannibals than missionaries aboard is invalid
                if (numMissAtual > 0) and (numCaniAtual > numMissAtual):
                    break
                else:
                    opAtual = (numMissAtual, numCaniAtual)
                    operadores.append(opAtual)
        for i in range (1, Busca.TAM_BARCO+1):
            opAtual = (0,i)
            operadores.append(opAtual)
        return operadores
    ## Drop operators that would carry more people than the current bank
    # actually holds (e.g. moving N+1 cannibals from a bank with N).
    # @param operadores Operators to validate.
    # @param numMissAtual Missionaries on the bank where the boat is.
    # @param numCaniAtual Cannibals on the bank where the boat is.
    # @return Operators validated by this test.
    def testarTransporte(self, operadores, numMissAtual, numCaniAtual):
        for opAtual in operadores[:]:
            if (opAtual[0] > numMissAtual) or (opAtual[1] > numCaniAtual):
                operadores.remove(opAtual)
        return operadores
## FIFO queue implementation required by the breadth-first search.
#
# pop() never shrinks the backing list; it advances a front index
# instead, trading memory for O(1) dequeues.
class FilaFIFO:
    ## Constructor: empty backing list and front index.
    def __init__(self):
        self.fila = []
        self.inicio = 0
    ## Number of elements still queued.
    # Also gives the object correct truthiness: previously the instance
    # was always truthy, so ``while fila:`` loops never detected
    # exhaustion and an unsolvable search crashed with IndexError in pop().
    def __len__(self):
        return len(self.fila) - self.inicio
    ## Enqueue every child node in `items`, logging operator and state.
    # print() keeps the statement valid on both Python 2 and 3.
    def expandir(self, items):
        for item in items:
            print("Operador: " + str(item.operador) + " - " + "Estado: " + str(item.estado))
            self.append(item)
    ## Enqueue a single item.
    def append(self, item):
        self.fila.append(item)
    ## Dequeue and return the oldest item (FIFO order).
    def pop(self):
        itemRemover = self.fila[self.inicio]
        self.inicio += 1
        return itemRemover
## Class implementing breadth-first search with a FIFO queue.
#
# Problem parameters live in the class attributes below.  Python 2 print
# statements (and Portuguese user-facing strings) are kept as-is.
class Busca:
    ## Constructor:
    # @brief Initialises the variables needed by the search (initial and
    # goal states, FIFO frontier and root node).
    # @param self Reference to the search object.
    def __init__(self):
        self.estadoInicial = self.setEstadoInicial()
        self.estadoObjetivo = self.setEstadoObjetivo()
        self.fila = FilaFIFO()
        self.nodoInicial = Nodo(self.estadoInicial)
        ## @var self.estadoInicial
        # @brief Initial configuration of the problem.
        ## @var self.estadoObjetivo
        # @brief Goal configuration of the problem.
        ## @var self.fila
        # @brief FIFO frontier.
        ## @var self.nodoInicial
        # @brief Root node of the search.
    ## Number of missionaries (program input).
    N_MISS = 3
    ## Number of cannibals (program input).
    N_CAN = 3
    ## How many people fit in the boat (program input).
    TAM_BARCO = 2
    ## Bank where the boat initially sits (program input).
    MARGEM_INICIAL = "Esquerda"
    @classmethod
    ## Normalise MARGEM_INICIAL to "MargemEsquerda"/"MargemDireita";
    # invalid input falls back to the left bank.
    # @param cls Reference to the Busca class.
    def checkMargemInicial(cls):
        if "Esquerda" in Busca.MARGEM_INICIAL or "esquerda" in Busca.MARGEM_INICIAL:
            Busca.MARGEM_INICIAL = "MargemEsquerda"
        elif "Direita" in Busca.MARGEM_INICIAL or "direita" in Busca.MARGEM_INICIAL:
            Busca.MARGEM_INICIAL = "MargemDireita"
        else:
            print "A margem inicial inserida é inválida. Portanto, MargemEsquerda será assumida."
            Busca.MARGEM_INICIAL = "MargemEsquerda"
    @classmethod
    ## Goal bank, i.e. the opposite of the initial bank.
    # @param cls Reference to the Busca class.
    # @return Bank of the goal state.
    def margemFinal(cls):
        if (Busca.MARGEM_INICIAL == "MargemEsquerda"):
            return "MargemDireita"
        else:
            return "MargemEsquerda"
    ## Build the initial state from the initial bank.
    # @return Initial state configuration.
    def setEstadoInicial(self):
        Busca.checkMargemInicial()
        if Busca.MARGEM_INICIAL == "MargemEsquerda":
            return ([Busca.N_MISS, Busca.N_CAN], [0,0], Busca.MARGEM_INICIAL)
        else:
            return ([0,0], [Busca.N_MISS, Busca.N_CAN], Busca.MARGEM_INICIAL)
    ## Build the goal state from the initial bank.
    # @return Goal state configuration.
    def setEstadoObjetivo(self):
        if Busca.MARGEM_INICIAL == "MargemEsquerda":
            return ([0,0], [Busca.N_MISS, Busca.N_CAN], "MargemDireita")
        else:
            return ([Busca.N_MISS, Busca.N_CAN], [0,0], "MargemEsquerda")
    ## Breadth-first search: returns the goal node, or None when the
    # frontier is exhausted.
    def buscaEmLargura(self):
        # Seed the frontier with the root node
        self.fila.append(self.nodoInicial)
        while self.fila:
            nodoAtual = self.fila.pop()
            # ~DEBUG
            print "\nEstado sendo visitado: "
            print nodoAtual.estado
            if nodoAtual.testeDeObjetivo(nodoAtual.estado, self.estadoObjetivo) == True:
                print "Estado final encontrado!"
                return nodoAtual
            else:
                print "Estados gerados: "
                self.fila.expandir(nodoAtual.gerarEstados())
        return None
| true |
e46475f246d414a590596e1159db25123c02465f | Python | brentrwilliams/CPE458 | /Lab3/HMAC-Timing/TaskIV.py | UTF-8 | 2,054 | 2.6875 | 3 | [] | no_license | import urllib2
import time
def two_space_hex(hex_str):
    """Return the digits of a ``hex()`` string, zero-padded to width two.

    '0x5' -> '05', '0xff' -> 'ff'.
    """
    digits = hex_str[2:]
    return digits.zfill(2)
def taskIVA():
    """Recover a 40-hex-char (20-byte) MAC via a timing side channel, byte by byte.

    Python 2 only (urllib2/xrange/print statements).  Each correct byte is
    assumed to add roughly 0.05s to the server's response time -- TODO
    confirm against the target implementation.
    """
    # Candidate MAC: recovered prefix + byte under test + zero padding.
    mac = '0' * 40
    knownVals = ''
    numFound = 0
    testVal = 0
    lastTime = 0
    maxTime = 0
    maxVal = ''
    while(True):
        testMac = knownVals + two_space_hex(hex(testVal)) + mac[(numFound + 1)*2:]
        totVals = []
        for i in xrange(0, 5):
            start = time.time()
            response = urllib2.urlopen('http://localhost:8080/?q=foo&mac=' + testMac)
            stop = time.time()
            if(response.read().find("Invalid signature") == -1):
                # Server accepted the MAC: print it and stop.
                print testMac
                quit()
            totVals.append(stop - start)
        totVals.sort()
        # Median of five samples to smooth out network jitter.
        tot = totVals[2]
        # Partial key recovered during development: 85b0118f691ab66f68fe
        if tot >= 0.05 * (numFound + 1): #and tot < 0.05 * (numFound + 1):
            # Response slow enough: accept this byte and move to the next.
            numFound += 1
            knownVals += two_space_hex(hex(testVal))
            testVal = 0
            lastTime = tot
            print "tot time: " + str(tot)
            print "lower bound: " + str(0.023 * (numFound) + .005)
            print "upper bound: " + str(0.04 * (numFound))
            print str(numFound) + "/20:" + testMac
        elif testVal == 255:
            # All 256 values tried without a hit: undo the previous byte
            # and resume scanning from its value.
            print 'Backtracking...'
            if numFound > 0:
                numFound-= 1
                print 'old: ' + knownVals
                testVal = ord(knownVals[-2:].decode("hex"))
                knownVals = knownVals[:-2]
                print 'new: ' + knownVals
        else:
            testVal+= 1
    # Unreachable: the while True loop only exits via quit() above.
    html = response.read()
    print html
def main():
    """Entry point: run the timing attack."""
    taskIVA()
if __name__ == '__main__':
    main()
class ListCreator():
    """Maintains a list of ``[column_name_N, column_type_N]`` form-field pairs."""

    def __init__(self):
        # Row 0 always exists; add/del keep the numbering dense.
        self.lst = [['column_name_0', 'column_type_0']]
        self.col_name = 'column_name_'
        self.col_type = 'column_type_'

    def add_element(self):
        """Append the next numbered name/type pair and return the list."""
        suffix = str(len(self.lst))
        self.lst.append([self.col_name + suffix, self.col_type + suffix])
        return self.lst

    def del_element(self):
        """Drop the last pair (never the initial row) and return the list.

        Collapses the original's two identical-return branches.
        """
        if len(self.lst) > 1:
            self.lst.pop()
        return self.lst
# Shared module-level instance.
mass = ListCreator()
| true |
b21e5e4f4cd880df8612c2c87a1d3b032aff8cfd | Python | PsychoinformaticsLab/pliers | /pliers/tests/test_utils.py | UTF-8 | 3,958 | 2.59375 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | from types import GeneratorType
from os.path import join
import numpy as np
import pytest
from pliers.stimuli import VideoStim
from pliers.filters import FrameSamplingFilter
from pliers.utils import batch_iterable, flatten_dict, resample
from pliers.extractors import RMSExtractor
from pliers import config
from .utils import get_test_data_path
@pytest.mark.skip(reason="tqdm prevents normal stdout/stderr capture; need to"
                  "figure out why.")
def test_progress_bar(capfd):
    """Progress bar text should appear on stderr only when the option is on."""
    video_dir = join(get_test_data_path(), 'video')
    video = VideoStim(join(video_dir, 'obama_speech.mp4'))
    conv = FrameSamplingFilter(hertz=2)
    # Save and restore the global option so other tests are unaffected.
    old_val = config.get_option('progress_bar')
    config.set_option('progress_bar', True)
    derived = conv.transform(video)
    out, err = capfd.readouterr()
    assert 'Video frame:' in err and '100%' in err
    config.set_option('progress_bar', False)
    derived = conv.transform(video)
    out, err = capfd.readouterr()
    assert 'Video frame:' not in err and '100%' not in err
    config.set_option('progress_bar', old_val)
def test_batch_iterable():
    """batch_iterable should lazily yield lists of the requested size."""
    items = [1, 2, 3, 4, 5, 6, 7, 8]
    batches = batch_iterable(items, 2)
    assert isinstance(batches, GeneratorType)
    assert len(list(batches)) == 4
    batches = batch_iterable(items, 4)
    assert len(list(batches)) == 2
    batches = batch_iterable(items, 4)
    first_half = next(batches)
    assert isinstance(first_half, list)
    assert first_half == [1, 2, 3, 4]
    second_half = next(batches)
    assert isinstance(second_half, list)
    assert second_half == [5, 6, 7, 8]
def test_flatten_dict():
    """flatten_dict should join nested keys, honouring prefix and separator."""
    nested = {'a': 5, 'b': {'c': 6, 'd': 1}}
    assert flatten_dict(nested) == {'a': 5, 'b_c': 6, 'b_d': 1}
    prefixed = flatten_dict(nested, 'prefix', '.')
    assert prefixed == {'prefix.a': 5, 'prefix.b.c': 6, 'prefix.b.d': 1}
def test_resample():
    """resample should low-pass filter and re-grid the long-format feature frame."""
    ext = RMSExtractor()
    res = ext.transform(join(get_test_data_path(), 'audio/homer.wav'))
    df = res.to_df(format='long')
    # Test downsample
    downsampled_df = resample(df, 3)
    assert np.allclose(downsampled_df.iloc[0].onset, 0)
    assert np.allclose(downsampled_df.iloc[1].onset, 0.33333)
    assert set(downsampled_df.columns) == {
        'duration', 'onset', 'feature', 'value'}
    assert downsampled_df['feature'].unique() == 'rms'
    # This checks that the filtering has happened. If it has not, then
    # this value for this frequency bin will be an alias and have a
    # very different amplitude
    assert downsampled_df[downsampled_df.onset == 0]['value'].values[0] != \
        df[df.onset == 0]['value'].values[0]
    # NOTE(review): identical assertion repeated below -- the second
    # occurrence adds no coverage.
    assert downsampled_df[downsampled_df.onset == 0]['value'].values[0] != \
        df[df.onset == 0]['value'].values[0]
    assert np.allclose(downsampled_df[downsampled_df.onset == 2]['value'].values[0],
                       0.2261582761938699, rtol=1e-03)
    # Test upsample
    ext = RMSExtractor(frame_length=1500, hop_length=1500,)
    res = ext.transform(join(get_test_data_path(), 'audio/homer.wav'))
    df = res.to_df(format='long')
    upsampled_df = resample(df, 10)
    assert np.allclose(upsampled_df.iloc[0].onset, 0)
    assert np.allclose(upsampled_df.iloc[1].onset, 0.1)
    assert set(upsampled_df.columns) == {
        'duration', 'onset', 'feature', 'value'}
    assert upsampled_df['feature'].unique() == 'rms'
    # This checks that the filtering has happened. If it has not, then
    # this value for this frequency bin will be an alias and have a
    # very different amplitude
    assert upsampled_df[upsampled_df.onset == 0]['value'].values[0] != \
        df[df.onset == 0]['value'].values[0]
    assert upsampled_df[upsampled_df.onset == 0]['value'].values[0] != \
        df[df.onset == 0]['value'].values[0]
    # Value will be slightly different at 2s with different sampling
    assert np.allclose(
        upsampled_df[upsampled_df.onset == 2]['value'].values[0], 0.25309)
| true |
83068d924f32b9c430ff3dadcabc4709a315b6ab | Python | jennyChing/leetCode | /350.py | UTF-8 | 836 | 3.5625 | 4 | [] | no_license | '''
350. Intersection of Two Arrays II QuestionEditorial Solution
Given two arrays, write a function to compute their intersection.
'''
class Solution(object):
    def intersect(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]

        Each common value appears min(count in nums1, count in nums2)
        times.  Counter replaces the two hand-rolled counting loops.
        """
        from collections import Counter
        counts1 = Counter(nums1)
        counts2 = Counter(nums2)
        intersection = []
        for value, count in counts1.items():
            intersection.extend([value] * min(count, counts2.get(value, 0)))
        return intersection
| true |
9f8ffd3882e7db0be5974bdfec0d7ac94eea3bd7 | Python | sanketag/startxlabs-assignment | /Online machine test (Startxlabs).py | UTF-8 | 2,039 | 3.125 | 3 | [] | no_license | '''
Write a function which takes two arguments: a list of customers and the number of open cash registers. Each customer is represented by an i
nteger which indicates the amount of time needed to checkout. Assuming that customers are served in their original order, your function should
output the minimum time required to serve all customers.
Examples:
get_checkout_time([5, 1, 3], 1) should return 9
get_checkout_time([10, 3, 4, 2], 2) should return 10 because while the first register is busy serving customer[0] the second register can serve
all remaining customers.
'''
def get_checkout_time(li, num):
    """Minimum time to serve all customers in `li` with `num` registers.

    Recursively simulates the registers: `uplist` holds the remaining
    service time of the customers currently being served and `x`
    (closed over) is the index of the next unserved customer in `li`.

    NOTE(review): num == 0 returns 0 even when customers exist, and an
    empty `li` with num > 0 reaches max([]) -- confirm these edge cases
    are out of scope.
    """
    x = num
    def fun(uplist, flag):
        nonlocal x
        # flag == 0: no more waiting customers -- finish the slowest one.
        if flag==0:
            return max(uplist)
        else:
            # Advance time by the smallest remaining job; every register
            # that hits zero takes the next waiting customer.
            temp1 = min(uplist)
            zero = uplist.count(temp1)
            dlist = [i-temp1 for i in uplist if i-temp1 != 0]
            if len(li)>x+zero:
                dlist+=li[x:x+zero]
                x+=zero
                return temp1+fun(dlist,1)
            else:
                # Queue exhausted: seat everyone remaining and drain.
                dlist+=li[x:]
                return temp1+fun(dlist,0)
    if len(li)>num:
        return fun(li[:num],1)
    elif num==0:
        return 0
    else:
        return fun(li[:],0)
# Examples from the problem statement above (expected: 9 and 10).
print(get_checkout_time([5, 1, 3], 1))
print(get_checkout_time([10, 3, 4, 2], 2))
print(get_checkout_time([12,3,4,57,72,135,435],3))
# (leftover scratch notes from debugging -- kept as comments)
# 12,3,3
# 9,
def fun(st):
    """Return 'OK' when the bracket string `st` is balanced, else 'NOT OK'."""
    depth = 0
    for ch in st:
        if ch == '[':
            depth += 1
        else:
            depth -= 1
        # A closing bracket with no open partner fails immediately.
        if depth < 0:
            return 'NOT OK'
    return 'OK' if depth == 0 else 'NOT OK'
# Unbalanced example; expected output: NOT OK
print(fun('[[][]]]['))
def fun(li):
    """Largest product after dropping zeros and, when the number of
    negatives is odd, the negative closest to zero."""
    nonzero = [value for value in li if value != 0]
    negatives = [value for value in nonzero if value < 0]
    if len(negatives) % 2 != 0:
        # max() of the negatives is the one closest to zero.
        nonzero.remove(max(negatives))
    product = 1
    for value in nonzero:
        product *= value
    return product
# Drops the 0 and the odd surplus negative (-3); expected: 84654097850
print(fun([23, 25, 343, -3, -43, 23, -434, 0]))
| true |
d5f6dfa4d3b4dbbc9622fafb035abd4f80f9d3f2 | Python | SushmaBR/FirstPython | /IntegerToDigits.py | UTF-8 | 234 | 3.671875 | 4 | [] | no_license | x=int(input("Enter the 6 digit number"))
n1=int((x/100000) % 10)
n2=int((x/10000) % 10)
n3=int((x/1000) % 10)
n4=int((x/100) % 10)
n5=int((x/10) % 10)
n6=x % 10
print("6 digit number is divided into digits",n1,n2,n3,n4,n5,n6)
| true |
fcd31bc3337575c9b081e7543fbb6116f576240c | Python | jldupont/jldaws | /src/jldaws/tools_logging.py | UTF-8 | 3,526 | 2.75 | 3 | [] | no_license | """
Created on 2012-01-20
@author: jldupont
"""
import os, sys, logging, hashlib
from logging.handlers import SysLogHandler
import types
# Custom log level: sits between DEBUG (10) and INFO (20).
PROGRESS=15
def pprint_kv(k, v, align=20):
    """Print ``k : v`` with the key left-padded to *align* characters.

    The print() call form keeps this working on both Python 2 and 3
    (the original bare print statement was Python-2 only).
    """
    fmt = "%-" + str(align) + "s : %s"
    print(fmt % (k, v))
def info_dump(d, align):
    """Log each key/value pair in *d* at INFO level, keys left-padded to *align*.

    Accepts either a mapping or a list of (key, value) pairs.
    isinstance() replaces the Python-2-only ``types.DictionaryType`` /
    ``types.ListType`` comparisons, making this valid on Python 3 and
    accepting subclasses as well.
    """
    fmt = "%-" + str(align) + "s : %s"
    if isinstance(d, dict):
        for key in d:
            logging.info(fmt % (key, d[key]))
    if isinstance(d, list):
        for el in d:
            key, value = el
            logging.info(fmt % (key, value))
def setloglevel(level_name):
    """Set the root logger's level from a case-insensitive level name.

    Accepts the standard level names plus the custom "progress" level.

    >>> import logging
    >>> setloglevel("info")
    >>> logging.debug("test")

    Raises:
        Exception: if *level_name* is not a known level.
    """
    try:
        logger = logging.getLogger()
        name = level_name.upper()
        if name == "PROGRESS":
            logger.setLevel(PROGRESS)
        else:
            logger.setLevel(getattr(logging, name))
    # `as e` form replaces the Python-2-only `except Exception, e:` syntax.
    except Exception as e:
        raise Exception("Invalid log level name: %s (%s)" % (level_name, e))
class FilterDuplicates(logging.Filter):
    """Suppress repeated log messages.

    Everything before the first ':' marker is the event signature.
    - All DEBUG records pass.
    - All "progress" reports pass.
    - Messages containing ':' (contextual info) pass.
    - Other messages pass once; the first occurrence is prefixed with '*'
      and later identical messages are dropped.

    NOTE: ``occured`` is a class attribute, so the seen-set is shared by
    every instance of this filter (original behaviour, kept).
    """
    occured = []

    def filter(self, record):
        if record.levelname == "DEBUG":
            return 1
        msg = record.getMessage()
        # startswith() accepts a tuple: one call covers all three spellings.
        if msg.startswith(("progress", "Progress", "PROGRESS")):
            return 1
        try:
            bits = msg.split(":")
            if len(bits) > 1:
                return 1
            # Hash the message so the seen-set stays small regardless of
            # message length; encode() keeps md5 happy on Python 3 too.
            signature_hash = hashlib.md5(msg.encode("utf-8")).hexdigest()
            if signature_hash in self.occured:
                return 0
            self.occured.append(signature_hash)
            record.msg = "*" + msg
        except Exception as e:
            # Best effort: a de-duplication failure must never block logging.
            print(e)
        return 1
def enable_duplicates_filter():
    """Install a FilterDuplicates instance on the root logger."""
    logging.getLogger().addFilter(FilterDuplicates())
## ================================================================================================
def _get_fname():
name=os.path.basename(sys.argv[0])
cmdname=os.environ.get("CMDNAME", name)
return "%-12s" % cmdname
# Shared log-line format; the padded command name comes from _get_fname().
FORMAT='%(asctime)s - '+_get_fname()+' - %(levelname)s - %(message)s'
def setup_basic_logging():
    """Configure root logging at INFO and install the custom PROGRESS level."""
    logging.basicConfig(level=logging.INFO, format=FORMAT)
    logging.addLevelName(PROGRESS, "PROGRESS")
    # Give every Logger instance a .progress() method for the custom level.
    def levelProgress(self, message, *args, **kwargs):
        if self.isEnabledFor(PROGRESS):
            self._log(PROGRESS, message, args, **kwargs)
    logging.Logger.progress=levelProgress
    # Module-level convenience mirroring logging.info / logging.debug.
    def progress(msg, *args, **kwargs):
        """
        Log a message with severity 'PROGRESS' on the root logger.
        """
        logging.getLogger().progress(msg, *args, **kwargs)
    logging.progress=progress
def setup_syslog():
    """Attach a syslog handler (INFO and above) using the shared FORMAT to the root logger."""
    handler = SysLogHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(FORMAT))
    logging.getLogger().addHandler(handler)
if __name__=="__main__":
os.environ["CMDNAME"]="test"
setup_basic_logging()
setup_syslog()
logging.info("Test tools_logging...")
| true |
1f96f151a6a211a5909a6f3982ca42799435d5cc | Python | hktamzid/Python-Mosh | /conditional.py | UTF-8 | 107 | 2.59375 | 3 | [] | no_license | print(30>20)
print(30<20)
print(30>=20)
print(30<=20)
print(30!=20)
print(30==20)
print("hamba"=="hamba")
| true |
cd65ea29d4ace7f68962d47919cef5e96ea47ceb | Python | ipsolar/serapis | /serapis/tests/test_adder.py | UTF-8 | 692 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding=utf-8
"""
Collection of tests.
Tests methods need to start with "test_", otherwise you're free to do
whatever you want here.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "Manuel Ebert"
__copyright__ = "Copyright 2015, summer.ai"
__date__ = "2015-12-17"
__email__ = "manuel@summer.ai"
from serapis.preprocess import clean_and_qualify_term
import codecs
def test_disqualify():
    """Every term in the disqualified-words fixture must be rejected."""
    with codecs.open("serapis/tests/data/words_disqualified.txt", 'r', 'utf-8') as wordlist:
        for word in wordlist.readlines():
            assert not clean_and_qualify_term(word), "Word '{}' falsely marked as valid".format(word.strip())
| true |
43f2865697e8b012d96aa3dc01846ca86c7e8d55 | Python | akshatmalik/akshat.ndun-gmail.com | /dynamic_programming/word_break.py | UTF-8 | 1,517 | 3.25 | 3 | [] | no_license | class Solution:
# https://www.interviewbit.com/problems/distinct-subsequences/
# @param A : string
# @param B : list of strings
# @return an integer
def find_make(self, A, a_index, B, b_index, dp):
if dp[a_index][b_index] == -1:
if len(B) == b_index:
return 1
if a_index == len(A) and len(B) != b_index:
return 0
if a_index != len(A) and len(B) == b_index:
return 0
if A[a_index] == B[b_index]:
dp[a_index][b_index] = self.find_make(A, a_index + 1, B, b_index + 1, dp) + \
self.find_make(A, a_index + 1, B, b_index, dp)
else:
dp[a_index][b_index] = self.find_make(A, a_index + 1, B, b_index, dp)
return dp[a_index][b_index]
def numDistinct(self, A, B):
dp = [[-1 for _ in range(len(B) + 1)] for _ in range(len(A) + 1) ]
x = self.find_make(A, 0, B, 0, dp)
return x
if __name__ == "__main__":
import cProfile
pr = cProfile.Profile()
pr.enable()
x = Solution()
p = x.numDistinct("rabbbit", "rabbit")
print("ans ", p)
assert p == 3
x = Solution()
p = x.numDistinct("abc", "abc")
print("ans ", p)
assert p == 1
x = Solution()
p = x.numDistinct("aaaababbababbaabbaaababaaabbbaaabbb", "bbababa")
print("ans ", p)
assert p == 22113
pr.disable()
# after your program ends
pr.print_stats(sort="calls")
| true |
b6d3fd86da2eb1978c2b38d0b45e3e9ddebad22a | Python | Felipehonorato1/DEEPLEARNING | /ANN/BREASTCANCER/breastregistroisolado.py | UTF-8 | 1,020 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import pandas as pd
import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
# Load the breast-cancer feature matrix (30 columns) and the binary labels.
previsoes = pd.read_csv('entradas-breast.csv')
classe = pd.read_csv('saidas-breast.csv')
# Dense 30 -> 8 -> 8 -> 1 binary classifier with dropout between layers.
classificador = Sequential()
classificador.add(Dense(units = 8, activation = 'relu', kernel_initializer = 'normal', input_dim= 30))
classificador.add(Dropout(0.2))
classificador.add(Dense(units = 8, activation = 'relu', kernel_initializer = 'normal'))
classificador.add(Dropout(0.2))
classificador.add(Dense(units = 1, activation = 'sigmoid'))
classificador.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['binary_accuracy'])
classificador.fit(x = previsoes, y =classe, batch_size = 10, epochs = 100)
# One hand-written record (30 features, same column order) to classify after training.
novo = np.array([[15.80, 8.34, 118, 900 , 0.10, 0.26, 0.08, 0.134, 0.178, 0.20, 0.05, 1098, 0.87, 4500,
                 145.2,0.005,0.04,0.05,0.015,0.03,0.007,23.15,16.64,178.5,2018,0.14,0.185,0.84,158,0.363]])
previsao = classificador.predict(novo) | true |
536ebf0dd24ba8362d7158681ee216f9d2c9ae4f | Python | alsanta/For_Loop_Basic_1 | /for_loop_basic1.py | UTF-8 | 500 | 3.609375 | 4 | [] | no_license | for x in range(0,151):
    print(x)
# Every fifth number from 5 to 1000 (inclusive).
for x in range(5,1001,5):
    print(x)
# "Coding" on multiples of 5 that are not multiples of 10, "Coding Dojo" on multiples of 10.
for x in range(1,101):
    if x % 5 == 0 and x % 10 != 0:
        print("Coding")
    elif x % 10 == 0:
        print("Coding Dojo")
    else:
        print(x)
# Sum of all odd numbers from 0 to 500000.
count = 0
for x in range (0,500001):
    if x % 2 != 0:
        count += x
print (count)
# Count down from 2018 by 4s; every value is even since 2018 is even.
for x in range(2018,0,-4):
    if x % 2 == 0:
        print(x)
# Multiples of `mult` between lowNum and highNum (inclusive).
lowNum = 2
highNum = 9
mult = 3
for x in range(lowNum,highNum + 1):
    if x % mult == 0:
        print (x)
| true |
3ef3d6817f036e262a45ded7625272c2dbfcee9e | Python | mandos1995/online_judge | /BOJ/전체문제/10171_고양이.py | UTF-8 | 148 | 2.75 | 3 | [] | no_license | '''
문제 :
\ /\
) ( ')
( / )
\(__)|
출력하기
'''
# solution: print the cat ASCII art; backslashes and the quote are escaped
# so the literals render as \ and ' in the output
print('\ /\\')
print(' ) ( \')')
print('( / )')
print(' \\(__)|') | true |
64d38023040fd3f5b9a0acee7c64a0422e8f6aa9 | Python | ausaki/data_structures_and_algorithms | /leetcode/maximum-nesting-depth-of-two-valid-parentheses-strings/397370388.py | UTF-8 | 498 | 2.9375 | 3 | [] | no_license | # title: maximum-nesting-depth-of-two-valid-parentheses-strings
# detail: https://leetcode.com/submissions/detail/397370388/
# datetime: Fri Sep 18 14:55:06 2020
# runtime: 48 ms
# memory: 14.5 MB
class Solution:
    def maxDepthAfterSplit(self, seq: str) -> List[int]:
        """Split a valid parentheses sequence into two groups by nesting parity.

        Assigning each paren at depth d to group d % 2 halves the maximum
        nesting depth across the two resulting subsequences.
        """
        labels = []
        level = 0
        for ch in seq:
            if ch == '(':
                level += 1
                labels.append(level % 2)
            else:
                # label with the depth *before* leaving this level
                labels.append(level % 2)
                level -= 1
        return labels
df4015fc03b779efa1cb6ef1377c9d125e35a341 | Python | ChristianLemke/Master_Practical_Course_Data_Analysis_with_Python | /lib/csv_reader.py | UTF-8 | 2,595 | 3 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import csv
from copy import deepcopy
class CSV_reader(object):
    """Loads the ARTigo tag/metadata CSV files and exposes them as dicts.

    The per-file getters cache their results on the instance, so repeated
    calls do not re-read the files.
    """

    def __init__(self):
        # Raw row caches (lists of lists, exactly as csv.reader yields them).
        self.tags = []
        self.uniqueTags = []
        self.metadata = []
        # Dict-shaped caches built lazily by the get_*_dic() methods.
        self.metadata_dic = {}
        self.tags_dic = {}
        self.data_dic = {}
        # Default input file locations.
        self.path_tags = "../data/artigo-tags.csv"
        self.path_metadata = "../data/artigo-path.csv" if False else "../data/artigo-metadata.csv"
        self.path_image_path = "../data/artigo-path.csv"
        self.path_uniqueTags = "../data/uniqueTags.csv"
        # Column labels for the metadata / tags files; index 0 is the join key.
        self.metadata_labels = ['picture_id', 'metadata_year', 'metadata_name', 'metadata_surname', 'metadata_location']
        self.tags_labels = ['picture_id', 'tag_tag', 'tag_count']
        self.delimiter = ','  # NOTE(review): never passed to csv.reader; confirm intent

    def get(self, path):
        """Read the CSV file at *path* and return its rows as a list of lists.

        BUGFIX: the file was previously opened in 'rb', which makes csv.reader
        fail on Python 3 (it requires text mode, opened with newline='').
        """
        rows = []
        with open(path, 'r', encoding='utf-8', newline='') as csvfile:
            for row in csv.reader(csvfile):
                rows.append(row)
        return rows

    def get_metadata(self):
        """Return (and cache) the raw metadata rows."""
        if not self.metadata:
            self.metadata = self.get(self.path_metadata)
        return self.metadata

    def get_metadata_dic(self):
        """Return {picture_id: {label: value}} built from the metadata rows."""
        if not self.metadata_dic:
            for data in self.get_metadata():
                entry = {}
                for i in range(1, len(self.metadata_labels)):
                    entry[self.metadata_labels[i]] = data[i]
                self.metadata_dic[data[0]] = entry
        return self.metadata_dic

    def get_tags(self):
        """Return (and cache) the raw tag rows."""
        if not self.tags:
            self.tags = self.get(self.path_tags)
        return self.tags

    def get_uniqueTags(self):
        """Return (and cache) the raw unique-tag rows."""
        if not self.uniqueTags:
            self.uniqueTags = self.get(self.path_uniqueTags)
        return self.uniqueTags

    def get_tags_dic(self):
        """Return {picture_id: {tag: count}}.

        NOTE(review): as in the original, a picture with several tag rows
        keeps only its LAST row here (each assignment replaces the inner
        dict), whereas get_data_dic() accumulates all tags.  Confirm which
        behaviour callers rely on before changing it.
        """
        if not self.tags_dic:
            for data in self.get_tags():
                self.tags_dic[data[0]] = {data[1]: data[2]}
        return self.tags_dic

    def get_data_dic(self):
        """
        All data in a dictionary, e.g.:
        {5988: {'metadata_location': 'Chicago (Illinois)',
                'metadata_name': 'Paul',
                'metadata_surname': 'Gauguin',
                'metadata_year': '1892',
                'tags': {'ABSTRAKT': '1', 'AQUARELL': '2', ...}},
         ...}
        """
        # Rebuilt on every call (deepcopy so the metadata cache stays tag-free).
        self.data_dic = deepcopy(self.get_metadata_dic())
        for entry in self.data_dic:
            self.data_dic[entry]["tags"] = {}
        for tag in self.get_tags():
            if tag[0] in self.data_dic:
                self.data_dic[tag[0]]["tags"].update({tag[1]: tag[2]})
        return self.data_dic
| true |
82406d922c94c96736ce47a34044276ded603060 | Python | Areizen/Android-Malware-Sandbox | /plugins/cipher_plugin/lib/Cipher.py | UTF-8 | 1,187 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import datetime
import re
import socket
from urllib.parse import urlparse
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey, DateTime
from lib.model.database.Database import Database
Base = Database.get_declarative_base()
class Cipher(Base):
    """SQLAlchemy ORM model recording a single observed cipher operation:
    the algorithm, key material, IV, operation mode, the input/output values
    and the call stack it was captured from."""
    __tablename__ = 'cipher'
    id = Column(Integer, primary_key=True)
    # Row creation time; the callable default is evaluated at insert time.
    date = Column(DateTime, default=datetime.datetime.utcnow)
    algorithm = Column(String)
    key = Column(String)
    iv = Column(String)
    opmode = Column(String)
    input_value = Column(String)
    output_value = Column(String)
    stack = Column(String)
    # Owning application (foreign key into the application table).
    application_id = Column(Integer, ForeignKey('application.id'))
    def __init__(self, algorithm, key, iv, opmode, input_value, output_value, stack):
        # `date` and `application_id` are left to the column default / later assignment.
        self.algorithm = algorithm
        self.key = key
        self.iv = iv
        self.opmode = opmode
        self.input_value = input_value
        self.output_value = output_value
        self.stack = stack
    def __repr__(self):
        return f'<Cipher(id={self.id},algorithm="{self.algorithm}",key="{self.key}",opmode="{self.opmode}",input_value="{self.input_value}",output_value="{self.output_value}")>'
| true |
a870c56083594b93ec4467d96c88f6de90eb840f | Python | VPetras/ctu-turtle-robot | /workspace/src/task_01_package/src/listener.py | UTF-8 | 1,445 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python2
from sensor_msgs.msg import LaserScan
import matplotlib.pyplot as plt
import numpy as np
import rospy
class ScanCollector():
    """Subscribes to the ROS 'scan' LaserScan topic; the callback is a
    skeleton meant to filter, store and eventually plot range data."""
    def __init__(self):
        # Initialize Subscriber node for topic name "/scan"
        rospy.init_node('listener')
        self.subscriber = rospy.Subscriber('scan', LaserScan, self.scan_callback)
        # Initialize filter and plot parameters
        self.plot_min_angle = np.radians(-30)  # keep only beams within +/-30 degrees
        self.plot_max_angle = np.radians(30)
        self.plot_length = 500  # number of stored values that triggers plotting (see step 4 below)
        self.start_time = 0
        # TODO: initialize the necessary variables to store data:
    def plot_ranges(self, x, y):
        """Plot collected distances *y* (metres) against timestamps *x* (seconds)."""
        plt.plot(x, y)
        plt.xlabel('time [s]')
        plt.ylabel('distance [m]')
        plt.title('LaserScan Data')
        plt.show()
    def scan_callback(self, msg):
        """Per-message LaserScan handler; all processing steps are still TODO."""
        # 1) TODO: Parse input message, extract all needed data:
        # Example:
        # angle_min = msg.angle_min
        # angle_max = msg.angle_max
        # 2) TODO: Filter out erroneous data from scan
        # TODO: Discard unwanted angles
        # TODO: Ignore wrong ranges
        # 3) TODO: store filtered data and message timestamp.
        # ...
        # 4) TODO: plot ranges if the number of stored values had reached the number specified in the plot_length
        # self.plot_ranges(timestamps, data)
        return
if __name__ == '__main__':
    # Start the subscriber node; rospy.spin() (next line) blocks on the ROS event loop.
    sc = ScanCollector()
rospy.spin() | true |
31a102dd2c27d5b788dd0d73c04f49c06f08146d | Python | time2do/python | /ch08/value_error.py | UTF-8 | 313 | 3.265625 | 3 | [] | no_license | while True:
    try:  # non-numeric input makes int() raise ValueError, so guard the cast
        x = int(input("숫자를 입력하세요: "))
        print(x)
        break
    except ValueError:  # entered something other than a number: show the warning below and re-prompt
print("숫자가 아닙니다. 다시 입력하세요.") | true |
7b4c70f90fefeda820db4bc36922933ea3c558a3 | Python | aimalz/justice | /justice/similarity_model/model.py | UTF-8 | 3,424 | 2.734375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""tf.estimator.Estimator model function for the similarity model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import layers
def extract_similarity_vector(window_data, params, name):
    """Extracts a vector that represents a window of data.

    Currently it first builds diffs from all neighboring flux values.

    :param window_data: <float64>[batch_size, window_size, 3] tensor of input
        light curve data. Currently contains time values, flux, and flux error.
    :param params: Model parameters.
    :param name: String name "left" or "right".
    :returns: Normalized similarity tensor.
    """
    batch_size = params["batch_size"]
    window_size = params["window_size"]
    dropout_rate = params["dropout_keep_prob"]
    symmetric = params.get("symmetric", True)
    layer1_dim, layer2_dim = params["layer_sizes"]
    assert window_data.shape == (batch_size, window_size, 3)
    # <float64>: [batch_size, window_size]
    time_values = window_data[:, :, 0]
    flux_values = window_data[:, :, 1]
    # <float64>: [batch_size, window_size - 1]
    # Finite-difference slope of flux w.r.t. time between neighbouring samples.
    flux_diffs = flux_values[:, 1:] - flux_values[:, :-1]
    time_diffs = time_values[:, 1:] - time_values[:, :-1]
    diffs = flux_diffs / time_diffs
    # With symmetric=True both the "left" and "right" towers share one variable
    # scope (AUTO_REUSE), i.e. both windows are embedded with the same weights.
    reuse = tf.AUTO_REUSE if symmetric else False
    scope_name = "extract_vector" if symmetric else "extract_vector_{}".format(name)
    with tf.variable_scope(scope_name, reuse=reuse):
        layer1 = layers.fully_connected(diffs, layer1_dim, activation_fn=tf.nn.relu)
        layer1 = tf.nn.dropout(layer1, keep_prob=dropout_rate)
        layer2 = layers.fully_connected(layer1, layer2_dim, activation_fn=tf.nn.relu)
    # Normalize the vectors. If vector magnitude is causing precision issues, we could
    # add a regularization loss.
    layer2_norm = tf.expand_dims(tf.norm(layer2, axis=1), axis=1)
    assert layer2_norm.shape == (batch_size, 1)
    # Display mean norm (across batch) in TensorBoard.
    tf.summary.scalar("{}_norm".format(name), tf.reduce_mean(layer2_norm))
    tf.summary.scalar("{}_layer2_min".format(name), tf.reduce_min(layer2))
    return layer2 / layer2_norm
def model_fn(features, labels, mode, params):
    """tf.estimator model_fn wiring the two similarity towers together.

    :param features: dict with 'left' and 'right' window tensors (plus a
        'goal' similarity target outside of predict mode).
    :param labels: unused; the target arrives via features['goal'].
    :param mode: one of tf.estimator.ModeKeys.
    :param params: hyperparameter dict (learning rate / decay settings, etc.).
    :returns: a tf.estimator.EstimatorSpec for the requested mode.
    """
    embedding_left = extract_similarity_vector(features['left'], params, "left")
    embedding_right = extract_similarity_vector(features['right'], params, "right")
    # Dot product of two unit-normalized vectors: a cosine-style similarity score.
    predictions = tf.reduce_sum(embedding_left * embedding_right, axis=1)
    loss = None
    train_op = None
    if mode != tf.estimator.ModeKeys.PREDICT:
        loss = tf.losses.mean_squared_error(
            labels=features['goal'], predictions=predictions)
        if mode == tf.estimator.ModeKeys.TRAIN:
            global_step = tf.train.get_global_step()
            decayed_lr = tf.train.exponential_decay(
                params.get('learning_rate', 1e-3),
                global_step,
                params['lr_decay_steps'],
                params.get('lr_decay_rate', 0.96),
                staircase=False)
            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=global_step,
                optimizer=tf.train.AdamOptimizer,
                learning_rate=decayed_lr,
            )
    return tf.estimator.EstimatorSpec(
        mode=mode, predictions=predictions, loss=loss, train_op=train_op)
| true |
aee51ca2dcc5ccced1d06e1a9e44bfd54e795f5d | Python | figueiredorodrigo/Exercicios-Guanabara | /Desafio110/moeda.py | UTF-8 | 1,452 | 3.671875 | 4 | [] | no_license | def dobro(num=int, formatar=False):
    """Return twice *num*; formatted as Brazilian currency when *formatar* is True."""
    # NOTE(review): the def line's default is `num=int` (the *type* int, not a
    # number), so calling dobro() with no argument raises TypeError on `num *= 2`;
    # the sibling helpers use num=0 — confirm and align.
    num *= 2
    if not formatar:
        return num
    else:
        return real(num)
def metade(num=0, formatar=False):
    """Return half of *num*; formatted as Brazilian currency when *formatar* is True."""
    half = num * 0.5
    return real(half) if formatar else half
def infla(num=0, aumento=0, formatar=False):
    """Return *num* increased by *aumento* percent; currency-formatted when *formatar* is True."""
    increased = num + num * (aumento / 100)
    return real(increased) if formatar else increased
def dim(num=0, diminuicao=0, formatar=False):
    """Return *num* decreased by *diminuicao* percent; currency-formatted when *formatar* is True."""
    decreased = num - num * (diminuicao / 100)
    return real(decreased) if formatar else decreased
def real(num=0, cambio='R$'):
    """Format *num* as a currency string, using a comma as the decimal separator."""
    return '{}{}'.format(cambio, num).replace('.', ',')
def resumo(num=0, aumento=0, diminuicao=0, cambio='R$'):
    """Print a formatted analysis of a price: the value itself, its double,
    its half, and the value after an *aumento* percent increase and a
    *diminuicao* percent decrease.

    :param num: price to analyse
    :param aumento: percentage increase applied to *num*
    :param diminuicao: percentage decrease applied to *num*
    :param cambio: currency symbol prefix
    """
    dobro_val = num * 2
    metade_val = num * 0.5
    com_aumento = num + (num * aumento / 100)
    com_reducao = num - (num * diminuicao / 100)
    separador = '--' * 20
    print(separador)
    print('Análise'.center(40))
    print(separador)
    corpo = (f'\nPreço digitado: {cambio}{num}'
             f'\nDobro: {cambio}{dobro_val}'
             f'\nMetade: {cambio}{metade_val}'
             f'\nAumentando {aumento}%: {cambio}{com_aumento}'
             f'\nDiminuindo {diminuicao}%: {cambio}{com_reducao}')
    # The comma substitution applies to the whole report, as in the original
    # (implicit f-string concatenation binds before .replace()).
    print(corpo.replace('.', ','))
e984e850a7773e901d7b1ec3d45932356a6d1bc4 | Python | smarahara92/Mobility-Management-in-Software-Defined-Campus-Networks | /Network.py | UTF-8 | 3,486 | 2.5625 | 3 | [] | no_license | import pox.openflow.discovery as discovery
import HostDBEvents
class Network (HostDBEvents):
    """
    The logical representation of the Network.

    Tracks connected switches by dpid, a port-adjacency matrix (``topology``)
    and an all-pairs path matrix (``pathgraph``) recomputed with
    Floyd-Warshall whenever links change.
    """
    def __init__(self):
        self.dpidlist = []    # dpid per matrix column index
        self.switchlist = {}  # dpid -> Switch object (created on ConnectionUp)
        self.topology = {}    # dpid -> [outgoing port or None, per dpid index]
        self.pathgraph = {}   # dpid -> [intermediate dpid or None, per dpid index]

    def _handle_ConnectionUp(self, event):
        """Register a new switch and grow every adjacency row to the new size."""
        self.dpidlist.append(event.dpid)
        self.switchlist[event.dpid] = None  # TODO: create the Switch obj here
        totalnoswitches = len(self.dpidlist)
        for adjlist in self.topology.values():
            while len(adjlist) < totalnoswitches:
                adjlist.append(None)
        # BUGFIX: the new switch never got its own adjacency row, so
        # _handle_LinkEvent raised KeyError on self.topology[dpid].
        self.topology[event.dpid] = [None] * totalnoswitches

    def _handle_ConnectionDown(self, event):
        """Remove a switch, drop its column from every row, recompute paths."""
        try:
            index = self.dpidlist.index(event.dpid)
            self.dpidlist.remove(event.dpid)
            self.switchlist.pop(event.dpid)
        except ValueError:
            # Unknown dpid: nothing to clean up.
            return
        self.topology.pop(event.dpid)
        for adjlist in self.topology.values():
            adjlist.pop(index)
        # BUGFIX: was an unqualified call to floyd_warshall_algorithm().
        self.floyd_warshall_algorithm()

    def _handle_LinkEvent(self, event):
        """Record or clear the ports of a discovered link and recompute paths."""
        try:
            dpid2index = self.dpidlist.index(event.link.dpid2)
            dpid1index = self.dpidlist.index(event.link.dpid1)
        except ValueError:
            return
        if event.added:
            # BUGFIX: the original guarded these assignments with
            # `is not None`, which can never hold for a link that was not
            # already recorded — so no link was ever added.
            self.topology[event.link.dpid1][dpid2index] = event.link.port1
            self.topology[event.link.dpid2][dpid1index] = event.link.port2
            self.floyd_warshall_algorithm()
        else:
            self.topology[event.link.dpid1][dpid2index] = None
            self.topology[event.link.dpid2][dpid1index] = None
            self.floyd_warshall_algorithm()

    def __get_init_Cost_and_Path_Matrices(self):
        """Build the initial Floyd-Warshall matrices: cost 1 for a direct
        link, 0xFFFFFFFF ("infinity") otherwise, and an all-None path matrix."""
        costmatrix = {}
        pathmatrix = {}
        for key, adjlist in self.topology.items():
            costadjlist = []
            pathadjlist = []
            for value in adjlist:
                pathadjlist.append(None)
                costadjlist.append(1 if value is not None else 0xFFFFFFFF)
            costmatrix[key] = costadjlist
            pathmatrix[key] = pathadjlist
        return costmatrix, pathmatrix

    def floyd_warshall_algorithm(self):
        """All-pairs shortest paths over the known dpids; stores the
        intermediate-node matrix in self.pathgraph.

        BUGFIX: the original definition was missing ``self`` and called the
        private matrix initializer and instance attributes unqualified, so
        any invocation raised at runtime.
        """
        costmatrix, pathmatrix = self.__get_init_Cost_and_Path_Matrices()
        for stageindex, stage in enumerate(self.dpidlist):
            stageadjlist = costmatrix[stage]
            for node in self.dpidlist:
                nodeadjlist = costmatrix[node]
                for neighbourindex in range(len(self.dpidlist)):
                    curstagepathcost = nodeadjlist[stageindex] + stageadjlist[neighbourindex]
                    if curstagepathcost < nodeadjlist[neighbourindex]:
                        nodeadjlist[neighbourindex] = curstagepathcost
                        pathmatrix[node][neighbourindex] = stage
        self.pathgraph = pathmatrix

    def getPath(self, srcdpid, dstdpid):
        """Return the list of Switch objects from src to dst, or None.

        NOTE(review): pathgraph stores a Floyd-Warshall *intermediate* node,
        so this linear walk is only guaranteed to terminate/describe adjacent
        hops for the update order used above — kept as in the original.
        """
        try:
            dstdpidIndex = self.dpidlist.index(dstdpid)
            path = []
            while srcdpid != dstdpid:
                path.append(self.switchlist[srcdpid])
                srcdpid = self.pathgraph[srcdpid][dstdpidIndex]
            path.append(self.switchlist[dstdpid])
            return path
        except Exception:
            # Unknown dpid or no recorded path: report "no path" instead of raising.
            return None

    def hostAdded(self, hostentry):
        """Hook for a newly learned host; nothing to do yet."""
        pass

    def hostUpdated(self, oldentry, updatedentry):
        """Treat an update as remove-then-add.

        BUGFIX: the original called hostRemoved/hostAdded unqualified.
        """
        self.hostRemoved(oldentry)
        self.hostAdded(updatedentry)

    def hostRemoved(self, hostentry):
        """Drop the host's flow rule on its switch.

        Assumes hostentry[2] is the dpid and hostentry[0]/[1] are the flow
        rule keys — confirm against the HostDB entry layout.
        """
        switch = self.switchlist[hostentry[2]]
        switch.removeFlowRule(hostentry[0], hostentry[1])
| true |
5e64f1fdb6ab42d318627ea9a537d34c9832c25d | Python | sabcodes/Python_Programs | /simple/loops.py | UTF-8 | 1,751 | 3.25 | 3 | [] | no_license | monday_temp = [9.1, 8.8, 7.6]
# Round each Monday temperature to the nearest whole degree.
for i in monday_temp:
    print(round(i))
print('done')
# title() on single characters simply uppercases each letter of 'hello'.
for letter in 'hello':
    print(letter.title())
#colors = [11, 34, 98, 43, 45, 54, 54]
#for i in colors:
# print(i)
#colors = [11, 34, 98, 43, 45, 54, 54]
#for i in colors:
# if i>50:
# print(i)
#colors = [11, 34.1, 98.2, 43, 45.1, 54, 54]
#for i in colors:
# if isinstance(i, int):
# print(i)
#colors = [11, 34.1, 98.2, 43, 45.1, 54, 54]
#for i in colors:
# if isinstance(i, int) and i>50:
# print(i)
print('----------------------------------------------------------------')
#grades ={"barno": 10, "marry":20, "ash": 30}
#for i in grades.items():
# print(i)
#phone_numbers = {"John Smith": "+37682929928", "Marry Simpons": "+423998200919"}
#for key, value in phone_numbers.items():
# print('{}: {}'.format(key, value))
# Print each phone number with '+' replaced by the international 00 prefix.
phone_numbers = {"John Smith": "+37682929928", "Marry Simpons": "+423998200919"}
for value in phone_numbers.values():
    print('{}'.format(value.replace('+', '00')))
print('--------------while loop---------------------')
# Count down from 3 to 1.
a = 3
while a > 0:
    print(a)
    a = a-1
#username = ''
#while username != 'pypy':
# username = input("enter usernae: ")
#while True:
# user = input("Enter: ")
# if user == 'barno':
# break
# else:
# continue
# NOTE(review): the bare string in parentheses below is a no-op expression —
# it was probably meant to be passed to print().
('----------------------------set example--------------------')
# set() drops the duplicate 1.
a = [1,1,2,3]
print(set(a))
# Accumulator for the sentence-maker loop below.
response = ''
def sentece_maker(s):
    """Capitalize *s* and terminate it with '? ' when it starts with a
    question word ('how', 'what', 'when' — matched against the raw,
    uncapitalized input), otherwise with '. '."""
    question_starts = ("how", "what", "when")
    sentence = s.capitalize()
    punctuation = '? ' if s.startswith(question_starts) else '. '
    return sentence + punctuation
# Read lines until the user types '\end', appending each as a formed sentence.
while True:
    user = input("Enter: ")
    if user == '\end':
        break
    else:
        response = response +sentece_maker(user)
print(response)
| true |
aeb72ed03efb575fddea18aa53cfe9ef124fbb9b | Python | jeffrean/scanner | /scanner.py | UTF-8 | 2,208 | 2.890625 | 3 | [] | no_license | import numpy as np
import cv2
def laser_threshold_image(frame):
    '''
    Isolate likely laser-line pixels in a BGR *frame* and return a binary mask.

    Combines three cues: (red-ish HSV color mask OR brightness threshold)
    AND a Canny edge response on the hue channel.
    This function uses upper color #FFA2C7 and lower color #E64A79.
    '''
    #color: HSV in-range mask around a dark-red reference color
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dark_red = cv2.cvtColor(np.uint8([[[187,47,85]]]),cv2.COLOR_RGB2HSV)[0][0]
    lower_red = np.uint8([[[dark_red[0] - 50, dark_red[1] - 50, dark_red[2] - 50]]])
    upper_red = np.uint8([[[dark_red[0] + 90, dark_red[1] + 90, dark_red[2] + 90]]])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    #brightness: blurred grayscale pixels above a fixed threshold
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (9, 9), 0)
    thresh = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY)[1]
    #edge: Canny on the hue channel
    edge = cv2.Canny(cv2.split(hsv)[0], 50, 100)
    mask_thresh = cv2.bitwise_or(mask, thresh)
    return cv2.bitwise_and(mask_thresh, edge)
def linear_interpolate(laser_centers):
    """Fill zero-valued gaps in *laser_centers* in place by interpolating
    linearly between the nonzero values bounding each gap.

    A trailing gap (array ends on zeros) is left unfilled, as in the original.
    """
    in_gap = False
    boundaries = []  # flat list of (gap start, gap end) index pairs
    for idx in range(len(laser_centers) - 1):
        if laser_centers[idx] != 0 and laser_centers[idx + 1] == 0 and not in_gap:
            in_gap = True
            boundaries.append(idx)
        elif laser_centers[idx + 1] != 0 and in_gap:
            in_gap = False
            boundaries.append(idx + 1)
    if in_gap:
        # Gap runs to the end of the array: close it on itself so the
        # fill loop below becomes a no-op for it.
        boundaries.append(boundaries[-1])
    for k in range(0, len(boundaries), 2):
        left, right = boundaries[k], boundaries[k + 1]
        rise = right - left
        run = laser_centers[right] - laser_centers[left]
        slope = rise / run
        x = laser_centers[left]
        for j in range(left + 1, right):
            if np.isinf(slope):
                # Vertical slope: carry the previous value forward.
                laser_centers[j] = laser_centers[j - 1]
            else:
                # Step x by 1/slope per index (same arithmetic as the original).
                x = (j - (j - 1) + slope * x) / slope
                laser_centers[j] = int(x)
def laser_centers_of_mass(frame):
    """Return, per row of the binary image *frame*, the (floored) mean column
    index of its nonzero pixels.

    Centers falling in the left half of the image are zeroed out; rows with no
    nonzero pixel trigger a linear interpolation pass over the whole result.
    """
    centers = np.zeros(frame.shape[0])
    has_gap = False  # set when some row contains no laser pixel
    for r in range(frame.shape[0]):
        hits = 0
        for c in range(frame.shape[1]):
            if frame[r][c] != 0:
                centers[r] += c
                hits += 1
        if hits == 0:
            has_gap = True
        else:
            centers[r] //= hits
            if centers[r] < (len(frame[0]) / 2):
                centers[r] = 0
    if has_gap:
        linear_interpolate(centers)
    return centers
def calculate_depth():
    """Placeholder: depth reconstruction from laser centers is not implemented yet."""
    pass
6d7da0f35a659873086ed671bee3e84ef50e0109 | Python | DigimundoTesca/mini-examples | /python/control-PID.py | UTF-8 | 8,507 | 3.984375 | 4 | [] | no_license | from datetime import datetime, timedelta
"""
Controlador PID
TODO: Obtener el Control Proporcional, Integral y Derivativo
La fórmula está dada por:
( (K1 * (Sp + Mp + Ap) ) + (K2 * ( Sum(Ts) - Sum(Tp) ) / Dr) + Dp ) / 3
En donde
K1 = Constante Proporcional
K2 = Constante Integral
K3 = Constante Derivativa
Sp = Promedio por día de la Semana
Mp = Promedio por dia del Mes
Ap = Promedio por día del Año
Ts = Suma del total de elementos en los días de la semana
Tp = Suma del total de elementos promedios en todas las semanas registradas
Dr = Días restantes -> 7 - Ts
Dp = Promedio derivativo ( Mínimos cuadrados )
*** K1, K2, K3 Son constantes que se van ajustando manualmente al final de la
predicción inicial, de acuerdo a los valores reales obtenidos ***
"""
def main():
    """Run the prediction and hold the PID tuning constants.

    k1/k2/k3 are the proportional / integral / derivative gains.  They all
    start at 1 and are meant to be adjusted by hand once predictions can be
    compared with real sales: e.g. if the proportional term yields 5 but the
    real next-day sales were 7, raise k1 so that k1 * 5 approaches 7;
    likewise lower a constant whose term overshoots (e.g. a derivative term
    of 13 against a real value of 7 calls for k3 < 1).
    """
    k1 = 1
    k2 = 1
    k3 = 1
    # Predict for tomorrow: e.g. if today is Sunday May 21st, the day sent to
    # the controller is Monday May 22nd.
    day_to_predict = datetime.today() + timedelta(days=1)
    control_p = get_control_proporcional(day_to_predict)
def get_control_proporcional(day_to_predict: datetime, product_object: object = None):
    """Proportional control term for the sales prediction.

    The full design averages three quantities for *product_object*:
      Sp: average units sold on the same weekday as *day_to_predict* (implemented);
      Mp: average units sold on the same day-of-month (TODO);
      Ap: average units sold on the same day-of-year (TODO).

    BUGFIX: the original definition did not parse — the signature was
    `(day_to_predict:datetime, :object)` (the body references
    `product_object`, so that parameter name is restored here), the import
    was the invalid `from helpers import Helper()`, and the body used the
    typos `day_topredict` / `total_days_dicts`.
    """
    # Local import: `helpers` is a project module supplying weekday helpers.
    from helpers import Helper
    helper = Helper()
    # get_number_day() returns 0 for Monday .. 6 for Sunday; +1 aligns it
    # with datetime.isoweekday() (1 = Monday .. 7 = Sunday).
    number_day = helper.get_number_day(day_to_predict) + 1
    all_tickets_details = TicketDetail.objects.select_related('ticket').all()
    tickets_details_list = []
    total_days_dict = {}
    # Keep only ticket details created on the target weekday, and record each
    # distinct calendar date (dict keys are unique) to count how many such
    # days have sales at all.
    for ticket_detail in all_tickets_details:
        if ticket_detail.ticket.created_at.isoweekday() == number_day:
            tickets_details_list.append(ticket_detail)
            total_days_dict[ticket_detail.ticket.created_at.strftime('%d-%m-%Y')] = True
    # Sum the units of the requested product sold across those days.
    total_elements = 0
    for ticket_detail in tickets_details_list:
        if ticket_detail.cartridge.id == product_object.id:
            total_elements += ticket_detail.quantity
    # Average units per matching weekday; guard against no recorded days.
    day_average = total_elements / len(total_days_dict) if total_days_dict else 0.0
    # TODO: compute the day-of-month (Mp) and day-of-year (Ap) averages and
    # return their sum with Sp, as described in the module docstring.
    return day_average
def get_control_integral():
    """Integral control term (not implemented yet; returns None).

    Intended behaviour: return (units of the product sold so far this week,
    Ts, minus the product's average weekly sales, Tp) divided by the number
    of days remaining in the week (Dr = 7 - days already evaluated).

    Example: 20 gelatins sold by Wednesday against a weekly average of 50
    gives Ts = 20, Tp = 50, Dr = 7 - 3 = 4.
    """
def get_control_derivativo():
    """Derivative control term (not implemented yet; returns None).

    Intended behaviour: fit last week's per-weekday sales with least squares
    and return the fitted ("predicted") value for the weekday being forecast.
    E.g. from actual sales {Mon: 15, Tue: 5, Wed: 9, ...} the fit yields a
    smoothed value per weekday; forecasting for a Tuesday uses the fitted
    Tuesday value.
    """
if __name__ == '__main__':
    # Script entry point: run the prediction driver.
    main()
| true |
20196aadd40301e64d811ddace6aadbe5d34aa8f | Python | wbkifun/my_research | /Schrodinger-SSFM/raw_data/plot_from_npy.py | UTF-8 | 872 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import sys
# NOTE: Python 2 script (print statements, integer division).
psi_fn = sys.argv[1]
# The k-space file is expected alongside, named with a 'k' prefix.
kpsi_fn = 'k' + psi_fn
psi = np.load(psi_fn)
kpsi = np.load(kpsi_fn)
nx, ny = psi.shape
snx = 1024  # width (in samples) of the zoomed window around the centre
print psi.shape
print kpsi.shape
abs_kpsi = np.abs(kpsi)
print abs_kpsi.min(), abs_kpsi.max()
print abs_kpsi.argmin(), abs_kpsi.argmax()
# argmax is a flat index; recover the (row, col) of the |kpsi| peak.
kpsi_maxi = abs_kpsi.argmax()/ny
kpsi_maxj = abs_kpsi.argmax()%ny
print kpsi_maxi, kpsi_maxj
print abs_kpsi[kpsi_maxi, kpsi_maxj]
print abs_kpsi[:,0]
print abs_kpsi[:,1]
# Interactive figure: |psi| (zoomed around the centre), |kpsi|, and a line
# cut through column 1 of |kpsi|.
plt.ion()
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(2,2,1)
im1 = plt.imshow(np.abs(psi).T, origin='lower')
ax1.set_xlim(nx/2 - snx/2, nx/2 + snx/2)
ax2 = fig.add_subplot(2,2,2)
im2 = plt.imshow(abs_kpsi.T, origin='lower')
#ax2.set_xlim(kpsi_maxi - snx/2, kpsi_maxi + snx/2)
ax3 = fig.add_subplot(2,1,2)
l3, = plt.plot(abs_kpsi[:,1])
plt.show()
| true |
3672b1c49cfe3d9e92ff90221e44b3dcfd3d359a | Python | Agonis1/Onlineshop | /main.py | UTF-8 | 9,146 | 3 | 3 | [] | no_license | shopping = [{"id": 1001, "Name": "HP-AE12", "Available": 100, "Price": 25000, "Original_Price": 24000},
{"id": 1002, "Name": "DELL", "Available": 100, "Price": 35000, "Original_Price": 34000},
{"id": 1003, "Name": "ASUS", "Available": 100, "Price": 28000, "Original_Price": 27000},
{"id": 1004, "Name": "APPLE", "Available": 100, "Price": 60000, "Original_Price": 59000},
{"id": 1005, "Name": "ACER", "Available": 100, "Price": 24000, "Original_Price": 23000},
{"id": 1006, "Name": "SAMSUNG", "Available": 100, "Price": 35000, "Original_Price": 34000},
{"id": 1007, "Name": "OPPO", "Available": 100, "Price": 15000, "Original_Price": 14000},
{"id": 1008, "Name": "XAOMI", "Available": 100, "Price": 45000, "Original_Price": 44000},
{"id": 1009, "Name": "HUAWEI", "Available": 100, "Price": 20000, "Original_Price": 19000},
{"id": 1010, "Name": "VIVO", "Available": 100, "Price": 12000, "Original_Price": 11000}]
# NOTE(review): `shopping1` is an *alias* of `shopping`, not a copy — mutating
# either list mutates both.  `temp` and `order` are module-level scratch state.
shopping1 = shopping
temp = []
order = ""
def adminLoginWindow():
    """Print the admin menu."""
    menu_lines = (
        "===========================",
        "1.Display Menu",
        "2.Add Product",
        "3.Remove Product",
        "4.Products goods available",
        "5.Total Income",
        "6.Logout",
        "======================",
    )
    for line in menu_lines:
        print(line)
def addproducts():
    """Interactively read product records and append them to the global catalogue,
    then redisplay the admin table."""
    how_many = int(input("Enter the no.of.items need to be added : "))
    for _ in range(how_many):
        # Prompts are issued in the same order as the original (dict literals
        # evaluate their values left to right).
        product = {
            "id": int(input("Enter id : ")),
            "Name": input("Enter Name : "),
            "Available": int(input("Enter Available : ")),
            "Price": int(input("Enter Price : ")),
            "Original_Price": int(input("Enter the original price : ")),
        }
        shopping.append(product)
    adminDisplayMenuWindow()
def removeproducts():
    """Decrement the stock of the product whose id the admin enters.

    BUGFIX: the original compared whole product dicts against the entered id
    (always False), attempted `d -= 1` on a dict, and decided found/not-found
    by comparing len(temp) with a dict — so it never removed anything and
    always printed the success message, while also leaking every product into
    the module-level `temp` list on each call.
    """
    dressId = int(input("Enter the id need to be deleted : "))
    found = False
    for d in shopping1:
        if d["id"] == dressId:
            found = True
            print("Deleting item....")
            d["Available"] -= 1
            break
    if not found:
        print(f"{dressId} not found")
    else:
        print(f"{dressId}'s one available is removed from the list")
    adminDisplayMenuWindow()
def availableproducts():
    """Print per-product availability and the grand total of goods in stock."""
    running_total = 0
    print("\n")
    for product in shopping:
        print(f'{product["Name"]} = {product["Available"]}')
        running_total += product["Available"]
    print("\nTotal available goods is : ", running_total)
def userLoginWindow():
    """Print the user menu."""
    menu_lines = (
        "=====================\n",
        "1.Display Menu",
        "2.Place order",
        "3.Cancel order",
        "4.Logout",
        "\n======================",
    )
    for line in menu_lines:
        print(line)
def placeOrder():
    """Interactively place an order for one product, matched by id.

    NOTE(review): several defects left as-is in this documentation pass:
    - `order_number` is a local reset to 10 on every call, so every order is
      reported as number 11;
    - the `order = '...'` line lacks the f prefix, so it stores the literal
      template text (and shadows the module-level `order`);
    - `if ["id"] != p_id` compares a literal list with an int, so it is
      always True and fires once per non-matching product;
    - `user_id()` is not defined anywhere visible in this file.
    """
    order_number = 10
    userDisplayMenuWindow()
    p_id = int(input("\nEnter the id : "))
    for d in shopping:
        if d["id"] == p_id:
            print("\nId\tName\tAvailable\tPrice")
            print("=============================================================")
            print(f'{d["id"]}\t{d["Name"]}\t{d["Available"]}\t\t{d["Price"]}')
            order = '{d["id"]}\t{d["Name"]}\t{d["Available"]}\t\t{d["Price"]}'
            conform = input("\nDo you want to place an order on the above shown product : Y/N ")
            if conform == 'Y' or conform == 'y':
                print("\nSuccessfully placed the order on the product {} {}".format(d["id"], d["Name"]))
                order_number += 1
                print("Your order number is : ", order_number)
                d ["Available"] -= 1
                break
            elif conform == 'N' or conform == 'n':
                print("The order is not placed. You can carry on with you purchase. Happy shopping!")
                break
            else:
                print("\nYou have entered wrong option. Please enter again\n")
                conform = input("\nDo you want to place an order on the above shown product : Y/N ")
                break
        if ["id"] != p_id:
            print("\nYou have entered invalid id. Please enter valid id\n")
            user_id()
    print("\nAvailable products : \n")
    userDisplayMenuWindow()
def cancelOrder():
    """Cancel a placed order: report whether the given product id exists."""
    found = False
    order_id = int(input("Enter the order id : "))  # fixed: ids are stored as ints
    for d in shopping:
        if d["id"] == order_id:  # fixed: original compared the list literal ["id"] to the input
            found = True
            break
    if not found:  # fixed: original tested `len(temp) == d` (list length vs. a dict)
        print(f'{order_id} is not found')
    else:
        print(f'{order_id} is removed from the placed order')
    # NOTE(review): stock is not restored on cancel — confirm whether
    # d["Available"] should be incremented here
def userChoiceOptions ():
    """Dispatch one user-menu choice, then re-prompt via recursion.

    NOTE(review): the menu loops by calling itself recursively, so a long
    session keeps growing the call stack.
    """
    choice = int(input("Please enter user choice : "))
    if choice == 1:
        userDisplayMenuWindow()
        print("\n===================================\n")
        userLoginWindow()
        print("\n==================================\n")
        userChoiceOptions()
    elif choice == 2:
        placeOrder()
        print("\n==================================\n")
        userLoginWindow()
        print("\n===================================\n")
        userChoiceOptions()
        print("\n===================================\n")
    elif choice == 3:
        cancelOrder()
        print("\n==================================\n")
        userLoginWindow()
        print("\n===================================\n")
        userChoiceOptions()
    elif choice == 4:
        logoutwindow()
    else:
        print("Invalid Choice")
def userDisplayMenuWindow():
    """Print the product catalogue as a tab-separated table."""
    # NOTE(review): the header names five columns but each row prints only
    # four — the Original Price value is never shown.
    print("Id\tName\tAvailable\tPrice\tOriginal Price")
    print("===================================")
    for d in shopping:
        print(f'{d["id"]}\t{d["Name"]}\t{d["Available"]}\t\t{d["Price"]}')
def adminDisplayMenuWindow():
    """Print the product catalogue for the admin (same layout as the user view)."""
    # NOTE(review): header lists five columns, rows print four — see
    # userDisplayMenuWindow, which this duplicates verbatim.
    print("Id\tName\tAvailable\tPrice\tOriginal Price")
    print("===================================")
    for d in shopping:
        print(f'{d["id"]}\t{d["Name"]}\t{d["Available"]}\t\t{d["Price"]}')
def logoutwindow():
    """Log out by returning to the top-level login prompt."""
    login()
def adminOptions():
    """Dispatch one admin-menu choice, then re-prompt via recursion.

    NOTE(review): loops by recursing into itself, growing the call stack;
    `adminLoginWindow` and `addproducts` are expected to be defined earlier
    in this module.
    """
    choice = int(input("Please enter user choice : "))
    if choice == 1:
        adminDisplayMenuWindow()
        print("\n===================================\n")
        adminLoginWindow()
        print("\n==================================\n")
        adminOptions()
    elif choice == 2:
        adminDisplayMenuWindow()
        print("\n==================================\n")
        addproducts()
        print("\n===================================\n")
        adminLoginWindow()
        print("\n===================================\n")
        adminOptions()
    elif choice == 3:
        adminDisplayMenuWindow()
        print("\n==================================\n")
        removeproducts()
        print("\n===================================\n")
        adminLoginWindow()
        print("\n===================================\n")
        adminOptions()
    elif choice == 4:
        adminDisplayMenuWindow()
        print("\n==================================\n")
        availableproducts()
        print("\n===================================\n")
        adminLoginWindow()
        print("\n===================================\n")
        adminOptions()
    elif choice == 5:
        adminDisplayMenuWindow()
        print("\n==================================\n")
        monthlyincome()
        print("\n===================================\n")
        adminLoginWindow()
        print("\n===================================\n")
        adminOptions()
    elif choice == 6:
        logoutwindow()
    else:
        print("Invalid Choice")
        print("\n===================================\n")
        adminLoginWindow()
        print("\n===================================\n")
        adminOptions()
def user_id():
    """Prompt for one product's details after showing the menu.

    NOTE(review): the record built in `d` is discarded when the function
    returns — presumably it was meant to be appended to `shopping`; confirm.
    """
    n = 1
    userDisplayMenuWindow()
    for i in range(n):
        new_id = int(input("Enter id : "))
        new_Name = input("Enter Name : ")
        new_Available = int(input("Enter id : "))  # NOTE(review): prompt says "id" but this reads the Available count
        new_Price = int(input("Enter Price : "))
        new_original = int(input("Enter original Price : "))
        d = [{"id": new_id, "Name": new_Name, "Available": new_Available, "Price": new_Price, "Original": new_original}]
def monthlyincome():
    """Print total income: available units times (sale price - original price)."""
    total = 0
    for d in shopping:
        # fixed: both terms used d["Price"], so the total was always 0; the
        # cost side is assumed to be the Original_Price that addproducts()
        # stores — TODO confirm the intended income formula
        total += ((d["Available"] * d["Price"]) - (d["Available"] * d["Original_Price"]))
    print("\nTotal income is : ", total)
def login():
    """Entry prompt: route to the admin or user menu after a password check."""
    tp = input("Login Admin/Login User Type A to Log in the Admin or U to Login in User : ")
    if tp == 'A' or tp == 'a':
        password = input("Enter the password : ")
        if password == "1234":
            # adminLoginWindow is expected to be defined earlier in this module
            adminLoginWindow()
            adminOptions()
        else:
            # NOTE(review): this branch is reached on a wrong password, but the
            # message says "Invalid User type" — wording looks copy-pasted.
            print("Invalid User type")
    elif tp == 'U' or tp == 'u':
        password = input("Enter the password : ")
        if password == "1234":
            userLoginWindow()
            userChoiceOptions()
        else:
            print("Invalid User type")
login()  # start the interactive application at the login prompt
| true |
5f69986a412cf0f123136f3a00b17411eeaddf1d | Python | jstutters/distqueue | /greentest.py | UTF-8 | 713 | 3.390625 | 3 | [] | no_license | from threading import Thread
from time import sleep
class Node():
    """A named worker that runs at most one timed job on a background thread.

    NOTE: this file is Python 2 (see the `print` statement below); it does
    not parse under Python 3 as written.
    """
    def __init__(self, n):
        self.name = n
        self.t = None  # the worker Thread, or None before the first job
    def do_work(self, st):
        # Launch test_func(st) on a fresh thread and remember it.
        self.t = Thread(target=self.test_func, args=(st,))
        self.t.start()
    def test_func(self, st):
        # Simulated job: sleep `st` seconds, then report completion.
        sleep(st)
        print self.name, st, 'done'
    def is_free(self):
        # Free when no job was ever started or the last one has finished.
        if not self.t:
            return True
        else:
            return not self.t.is_alive()
def main():
    """Hand jobs (sleep durations 1..9) to idle nodes, polling twice a second."""
    nodes = [Node('foo'), Node('bar'), Node('bob')]
    jobs = range(1, 10)  # a list under Python 2, so .pop() below works
    while jobs:
        for n in nodes:
            if n.is_free():
                # NOTE(review): if several nodes are free when one job remains,
                # pop() can raise IndexError on the emptied list — confirm.
                n.do_work(jobs.pop())
        sleep(0.5)
if __name__ == '__main__':
main()
| true |
6db7cee192d4f7412ee953108cfe6082da59aba6 | Python | jennyjkim/Intermediate-Programming | /program1/ndfa.py | UTF-8 | 3,516 | 2.890625 | 3 | [] | no_license | #Submitter: jisook5(Kim, Jisoo)
#Partner: yink3(Yin, Kevin)
# We certify that we worked cooperatively on this programming
# assignment, according to the rules for pair programming
import goody
from collections import defaultdict
from multiprocessing.managers import State
def read_ndfa(file : open) -> {str:{str:{str}}}:
    """Parse lines of 'state input target [input target ...]' into a table.

    Returns {state: {input-symbol: {target states}}}; if a state appears on
    several lines, only the first line's transitions are kept.
    """
    ndfa = {}
    for line in file:
        tokens = line.strip('\n').replace(' ', ';').split(';')
        state, pairs = tokens[0], tokens[1:]
        transitions = {}
        # Tokens alternate input-symbol / target-state; group them pairwise.
        for symbol, target in zip(pairs[::2], pairs[1::2]):
            transitions.setdefault(symbol, set()).add(target)
        if state not in ndfa:  # first occurrence wins, as in the original
            ndfa[state] = transitions
    return ndfa
def ndfa_as_str(ndfa : {str:{str:{str}}}) -> str:
    """Render the transition table, one line per state, with sorted targets.

    Also prints a title banner as a side effect, like the original.
    """
    print('Non-Deterministic Finite Automaton')
    lines = []
    for state in sorted(ndfa):
        pairs = [(symbol, sorted(targets)) for symbol, targets in sorted(ndfa[state].items())]
        lines.append('  {} transitions: {}\n'.format(state, pairs))
    return ''.join(lines)
def process(ndfa : {str:{str:{str}}}, state : str, inputs : [str]) -> [None]:
    """Simulate the NDFA from `state` over `inputs`.

    Returns [start_state, (input, {reachable states}), ...]; stops early when
    an input leads to an empty state set. Inputs that are not a transition
    symbol anywhere in the automaton are silently skipped.
    """
    final_list = [state]
    current_states = [state]
    # Collect every transition symbol used anywhere in the automaton.
    all_keys = set()
    for element in (ndfa.values()):
        list1 = (list((element).keys()))
        all_keys.update(list1)
    for move in inputs:
        if move in all_keys:
            appending_set = [move] #[1, {need these moves}]
            set_moves = set() # getting the {need these moves}
            for states in current_states:
                try:
                    the_move = ndfa[states][move]
                    set_moves.update(the_move)
                except:
                    # this state has no transition on the symbol; skip it
                    continue
            if len(set_moves) == 0:
                # dead end: record the empty set and stop the simulation
                appending_set.append(set_moves)
                final_list.append(tuple(appending_set))
                break
            appending_set.append(set_moves)
            final_list.append(tuple(appending_set))
            current_states = (list(set_moves))
        else:
            pass
    return(final_list)
def interpret(result : [None]) -> str:
    """Format a process() trace as human-readable simulation output.

    Also prints a "Starting new simulation" banner, like the original.
    """
    print('Starting new simulation')
    parts = ['Start state = {}\n'.format(result[0])]
    for move, states in result[1:]:
        parts.append('  Input = {}; new possible states = {}\n'.format(move, sorted(states)))
    parts.append('Stop state(s) = {}\n'.format(sorted(result[-1][1])))
    return ''.join(parts)
if __name__ == '__main__':
    # Interactive driver: load an automaton, then run one simulation per line
    # of the start-state/input file.
    file_name = str(input('Enter file with non-deterministic finite automaton: '))
    dictionary = read_ndfa(open(file_name))
    print(ndfa_as_str(dictionary))
    input_file = str(input('Enter file with the start-state and input: '))
    text = open(input_file)
    # Each line is a start state followed by the input symbols.
    # NOTE(review): neither file handle is closed — consider `with open(...)`.
    for line in text:
        line_list = line.strip('\n').replace(' ', ';').split(';')
        processed = process(dictionary, line_list[0], line_list[1::])
        print(interpret(processed))
    # For running batch self-tests
    print()
    import driver
    driver.default_file_name = "bsc4.txt"
    # driver.default_show_traceback = True
    # driver.default_show_exception = True
    # driver.default_show_exception_message = True
    driver.driver()
| true |
c7160d6d24166e616c8d8ba8f0adf75ff0a21f85 | Python | franzlst/surfrdf | /surf/test/resource/test_resource.py | UTF-8 | 7,447 | 2.59375 | 3 | [] | no_license | from builtins import str
from builtins import range
from builtins import object
from past.builtins import basestring
import pytest
import surf
from surf import Resource
from surf.rdf import URIRef
from surf.util import uri_split, error_message
from rdflib.term import Literal
from surf.util import error_message
@pytest.fixture
def store_session():
    """ Return initialized SuRF store and session objects. """
    # Both reader and writer use the in-memory rdflib backend, so each test
    # starts from an empty graph.
    store = surf.Store(reader="rdflib", writer="rdflib")
    session = surf.Session(store)
    return store, session
def test_empty_attribute_sync(store_session):
    """
    Test synchronization between empty attribute and rdf_direct.
    """
    _, session = store_session
    instance = session.get_resource("http://smth", surf.ns.OWL["Thing"])
    # presumably the single initial rdf_direct entry is the rdf:type triple — confirm
    assert len(instance.rdf_direct) == 1
    # Poke foaf_name so it gets initialized
    list(instance.foaf_name)
    # Append value
    instance.foaf_name.append("John")
    assert len(instance.rdf_direct) == 2
    assert len(instance.rdf_direct[surf.ns.FOAF["name"]]) == 1
def test_loaded_attribute_sync(store_session):
    """
    Test synchronization between loaded attribute and rdf_direct.
    """
    _, session = store_session
    instance = session.get_resource("http://smth", surf.ns.OWL["Thing"])
    instance.foaf_name = "John"
    instance.save()
    # Re-fetch the same subject so the attribute must be loaded from the store.
    instance = session.get_resource("http://smth", surf.ns.OWL["Thing"])
    # Load foaf_name
    list(instance.foaf_name)
    # rdf_direct should contain two attributes now
    assert len(instance.rdf_direct) == 2
    assert len(instance.rdf_direct[surf.ns.FOAF["name"]]) == 1
def test_class_mapping(store_session):
    """
    Test class mapping.
    """
    # Mapping an RDF class to a Python mixin should surface the mixin's
    # methods on both generated classes and their instances.
    class MyPerson(object):
        def get_something(self):
            pass
    _, session = store_session
    session.mapping[surf.ns.FOAF.Person] = [MyPerson]
    # Class-level tests.
    cls = session.get_class(surf.ns.FOAF.Person)
    assert issubclass(cls, surf.Resource)
    assert issubclass(cls, MyPerson)
    assert hasattr(cls, "get_something")
    # Instance-level tests.
    instance = session.get_resource("http://someuri", surf.ns.FOAF.Person)
    assert isinstance(instance, surf.Resource)
    assert isinstance(instance, MyPerson)
    assert hasattr(instance, "get_something")
def test_class_instances(store_session):
    """
    Test that dirty class instances are not lost to GC.
    """
    _, session = store_session
    # Class-level tests.
    cls = session.get_class(surf.ns.FOAF.Person)
    for i in range(0, 100):
        c = cls("http://test_instance_%d" % i)
        # Make some changes to instance to trigger its "dirty" state.
        c.rdfs_comment = "Test Instance %d" % i
    # All 100 dirty instances must survive until commit flushes them.
    assert len(Resource._dirty_instances) == 100
    session.commit()
    assert len(Resource._dirty_instances) == 0
def test_init_namespace(store_session):
    """
    Test resource initialization in specified namespace.
    """
    # The `namespace` argument must be accepted as a Namespace, a URIRef,
    # and a plain string, all resolving to the registered NSTEST prefix.
    _, session = store_session
    Person = session.get_class(surf.ns.FOAF.Person)
    surf.ns.register(nstest="http://example.com/ns#")
    # namespace is an instance of Namespace
    p = Person(namespace=surf.ns.NSTEST)
    ns, _ = uri_split(p.subject)
    assert ns == "NSTEST"
    # namespace is an instance of URIRef
    p = Person(namespace=URIRef("http://example.com/ns#"))
    ns, _ = uri_split(p.subject)
    assert ns == "NSTEST"
    # namespace is string
    p = Person(namespace="http://example.com/ns#")
    ns, _ = uri_split(p.subject)
    assert ns == "NSTEST"
def test_default_namespace(store_session):
    """
    Test resource initialization in the registered fallback namespace.
    """
    _, session = store_session
    Person = session.get_class(surf.ns.FOAF.Person)
    surf.ns.register_fallback("http://example.com/ns#")
    # With no namespace argument, the generated subject should live under
    # the fallback namespace.
    p = Person()
    assert str(p.subject).startswith("http://example.com/ns#")
def test_multiple_sessions(store_session):
    """
    Test that multiple sessions coexist correctly.
    """
    # Note: the fixture's session is unused here; this test builds its own.
    s1 = surf.Session(surf.Store(reader="rdflib"))
    P = s1.get_class(surf.ns.FOAF.Person)
    assert P.session == s1
    _ = surf.Session(surf.Store(reader="rdflib"))
    # Making another session shouldn't change session of already
    # instantiated classes and instances:
    assert P.session == s1
def test_instance(store_session):
    """
    Test Resource._instance().
    """
    # Smoke test: the call must simply not raise; any exception is turned
    # into an explicit pytest failure with its message.
    try:
        _, session = store_session
        Thing = session.get_class(surf.ns.OWL.Thing)
        subject = surf.ns.SURF.test1
        Thing._instance(subject, [surf.ns.OWL.Thing], store=Thing.store_key)
    except Exception as e:
        pytest.fail(error_message(e), pytrace=True)
@pytest.mark.skip(reason="type mapping hasn't been implemented yet")
def test_type_mapping(store_session):
    """
    Test that XSD types are mapped to Python types.
    """
    _, session = store_session
    Thing = session.get_class(surf.ns.OWL.Thing)
    t1 = Thing("http://t1")
    t1.surf_string_value = "text"
    t1.surf_bool_value = True
    t1.surf_float_value = 3.14
    t1.surf_int_value = 2010
    t1.save()
    # Reload the resource; attribute values should come back with native
    # Python types once type mapping is implemented.
    t1 = Thing("http://t1")
    assert type(t1.surf_string_value.first) == str
    assert type(t1.surf_bool_value.first) == bool
    assert type(t1.surf_float_value.first) == float
    assert type(t1.surf_int_value.first) == int
def test_dict_access():
    """
    Test that resources support dictionary-style attribute access.
    """
    # Both the "foaf_name" shorthand and the full FOAF.name URI must work
    # as subscription keys.
    session = surf.Session(surf.Store(reader="rdflib"))
    Person = session.get_class(surf.ns.FOAF.Person)
    person = Person()
    person.foaf_name = "John"
    # Reading
    assert person["foaf_name"].first == Literal(u"John")
    assert person[surf.ns.FOAF.name].first == Literal(u"John")
    # Writing
    person["foaf_name"] = "Dave"
    assert person.foaf_name.first == Literal(u"Dave")
    # Deleting
    del person["foaf_name"]
    assert person.foaf_name.first is None
def test_auto_load():
    """
    Test that session.auto_load works.
    """
    store = surf.Store(reader="rdflib", writer="rdflib")
    session = surf.Session(store, auto_load=True)
    Person = session.get_class(surf.ns.FOAF.Person)
    person = Person()
    person.foaf_name = "John"
    person.save()
    # With auto_load on, constructing from an existing subject should pull
    # the stored triples in immediately.
    same_person = Person(person.subject)
    # Check that rdf_direct is filled
    assert surf.ns.FOAF.name in same_person.rdf_direct
def test_query_attribute_unicode(store_session):
    """
    Test that query_attribute calls ResultProxy with string arguments.

    query_attribute sets up and returns ResultProxy instance. Here we test
    that it doesn't pass unicode keywords to it, these don't work
    in Python 2.6.2.
    """
    def mock_get_by(self, **kwargs):
        """ Verify that all passed keywords are strings. """
        for keyword in list(kwargs.keys()):
            assert isinstance(keyword, basestring), \
                "Passed non-string keyword: %s" % keyword
    _, session = store_session
    resource = session.get_resource("http://p1", surf.ns.FOAF.Person)
    RP = surf.resource.result_proxy.ResultProxy
    try:
        # Patch ResultProxy with mock get_by method
        original_get_by, RP.get_by = RP.get_by, mock_get_by
        resource.query_attribute(u"foaf_knows")
    except Exception as e:
        pytest.fail(error_message(e), pytrace=True)
    finally:
        # Regardless of results, revert our patch so other tests are not
        # affected.
        RP.get_by = original_get_by
| true |
96b6952eff6be7f9031b6408cb6b71bf254da3d1 | Python | Terminaator/chatbot | /oisbotServer/views/ois/courses.py | UTF-8 | 571 | 2.609375 | 3 | [] | no_license | import requests
def coursesId(id):
    """Return the latest version of course `id` from the ÕIS 2 API.

    :raises Exception: when the API answers with a non-200 status code.
    """
    request = requests.get('https://ois2dev.ut.ee/api/courses/' + id + '/versions/latest')
    statusCode = request.status_code
    if statusCode == 200:
        return request.json()
    # fixed: "reguest" typo, and the two-argument Exception rendered as a tuple
    raise Exception("request status code: {}".format(statusCode))
def getNCourses(n: int, start: int):
    """Return `n` courses from the ÕIS 2 API, starting at offset `start`.

    :raises Exception: when the API answers with a non-200 status code.
    """
    request = requests.get('https://ois2dev.ut.ee/api/courses?take=' + str(n) + '&start=' + str(start))
    statusCode = request.status_code
    if statusCode == 200:
        return request.json()
    # fixed: "reguest" typo, and the two-argument Exception rendered as a tuple
    raise Exception("request status code: {}".format(statusCode))
ed5628246f9ac709f295aedc360105810a8bf0d3 | Python | piinalpin/flask-crud | /app/module/controller.py | UTF-8 | 1,526 | 2.5625 | 3 | [] | no_license | from flask import render_template, request, redirect
from app import app
from .models import db, Mahasiswa
@app.route('/', methods=['GET','POST'])
def index():
    """List all Mahasiswa records; on POST, first insert a new one from the form."""
    if request.method == 'POST':
        name = request.form['name']
        nim = request.form['nim']
        try:
            mhs = Mahasiswa(nim=nim, name=name)
            db.session.add(mhs)
            db.session.commit()
        except Exception as e:
            # Insert failures are only logged; the page still renders.
            # NOTE(review): consider db.session.rollback() here so the session
            # does not stay in a failed state — confirm app behaviour.
            print("Failed to add data.")
            print(e)
    listMhs = Mahasiswa.query.all()
    print(listMhs)
    # enumerate(..., 1) hands the template 1-based row numbers.
    return render_template("home.html", data=enumerate(listMhs,1))
@app.route('/form-update/<int:id>')
def updateForm(id):
    """Render the edit form pre-filled with the record matching `id`."""
    # NOTE(review): first() returns None for an unknown id; the template must
    # tolerate a None `data` — confirm.
    mhs = Mahasiswa.query.filter_by(id=id).first()
    return render_template("form-update.html", data=mhs)
@app.route('/form-update', methods=['POST'])
def update():
    """Apply the submitted form values to an existing record, then go home."""
    if request.method == 'POST':
        id = request.form['id']
        name = request.form['name']
        nim = request.form['nim']
        try:
            mhs = Mahasiswa.query.filter_by(id=id).first()
            # NOTE(review): mhs is None for an unknown id — the resulting
            # AttributeError is swallowed by the except below.
            mhs.name = name
            mhs.nim = nim
            db.session.commit()
        except Exception as e:
            print("Failed to update data")
            print(e)
    return redirect("/")
@app.route('/delete/<int:id>')
def delete(id):
    """Delete the record matching `id`; errors are logged, never surfaced."""
    try:
        mhs = Mahasiswa.query.filter_by(id=id).first()
        # NOTE(review): db.session.delete(None) raises for unknown ids and is
        # silently swallowed below.
        db.session.delete(mhs)
        db.session.commit()
    except Exception as e:
        print("Failed delete mahasiswa")
        print(e)
    return redirect("/")
a9a5b67d3f2878fd8a8cebcb98bb64a500109c2b | Python | FRC-1123/FRC2018-1123 | /robot/commands/setforwardspeed.py | UTF-8 | 774 | 2.78125 | 3 | [
"MIT"
] | permissive | import wpilib
from wpilib.command import TimedCommand
import subsystems
class SetForwardSpeed(TimedCommand):
    """
    Drives forward at the given power for a given duration.
    """
    def __init__(self, power, timeout, squared=True):
        # The command's display name embeds the power and duration.
        super().__init__('Going forward at %f for %0.2fs' % (power, timeout), timeout)
        self.requires(subsystems.drivetrain)
        self.drivetrain = subsystems.drivetrain
        self.power = power
        self.squared = squared  # forwarded to tank_drive as its third argument
        self.timer = wpilib.Timer()
    def initialize(self):
        self.timer.start()
    def execute(self):
        # Equal power on both sides drives in a straight line.
        self.drivetrain.tank_drive(self.power, self.power, self.squared)
        # NOTE(review): delaying inside execute() stalls the scheduler for
        # 50 ms each iteration — confirm this is intended.
        self.timer.delay(0.05)
    def end(self):
        # Stop the drivetrain when the timed command expires.
        self.drivetrain.tank_drive(0.0, 0.0)
572aaaa76d4b281986733f6029459627572867a6 | Python | VillaEdwar/Ciclo_1_MisionTic2022_Python | /Unidad_4/5_funcion_map.py | UTF-8 | 212 | 3.40625 | 3 | [] | no_license | elevar_al_cubo = lambda n: n**3
print(elevar_al_cubo(3))
# map demo
# Build the list of numbers
lista_numeros: list = [2, 3, 4, 5, 6, 7, 8, 9]
# map(function_to_apply, an_iterable)
print(list(map(elevar_al_cubo, lista_numeros)))
104ea97e0930b36bca9673a4f94f1be9922b56e1 | Python | princeton-vl/SpatialSense | /baselines/unrel/spatial_features.py | UTF-8 | 1,785 | 2.765625 | 3 | [
"BSD-2-Clause"
] | permissive | import pdb
import json
import pickle
import numpy as np
import math
import random
from sklearn.mixture import GaussianMixture
def raw_spatial_feature(bbox_s, bbox_o):
    """Compute the 6-d spatial feature between subject and object boxes.

    Boxes are indexed [ymin, ymax, xmin, xmax]. The returned vector is, in
    order: x and y centre offsets normalised by the subject's scale, the
    relative scale, the two aspect ratios, and sqrt of the IoU.
    """
    y0_s, y1_s, x0_s, x1_s = bbox_s[0], bbox_s[1], bbox_s[2], bbox_s[3]
    y0_o, y1_o, x0_o, x1_o = bbox_o[0], bbox_o[1], bbox_o[2], bbox_o[3]
    width_s, height_s = x1_s - x0_s, y1_s - y0_s
    width_o, height_o = x1_o - x0_o, y1_o - y0_o
    # Scales (areas)
    area_s = width_s * height_s
    area_o = width_o * height_o
    # Centre-to-centre offset of the object relative to the subject
    dx = (x0_o + x1_o) / 2.0 - (x0_s + x1_s) / 2.0
    dy = (y0_o + y1_o) / 2.0 - (y0_s + y1_s) / 2.0
    # Intersection rectangle, clamped to zero extent when the boxes are disjoint
    inter_w = max(min(x1_s, x1_o) - max(x0_s, x0_o), 0)
    inter_h = max(min(y1_s, y1_o) - max(y0_s, y0_o), 0)
    area_i = inter_w * inter_h
    area_u = area_s + area_o - area_i
    return np.asarray(
        [
            dx / math.sqrt(area_s),
            dy / math.sqrt(area_s),
            math.sqrt(area_o / area_s),
            width_s / height_s,
            width_o / height_o,
            math.sqrt(area_i / area_u),
        ]
    )
if __name__ == "__main__":
    # Fit a 400-component GMM on spatial features of all train/valid relation
    # annotations and pickle it for later scoring.
    # NOTE(review): the two open() handles are never closed — consider `with`.
    data = json.load(open("../annotations.json"))
    X = []
    for img in data:
        if img["split"] == "test":
            continue
        for annot in img["annotations"]:
            X.append(
                raw_spatial_feature(annot["subject"]["bbox"], annot["object"]["bbox"])
            )
    random.shuffle(X)
    X = np.vstack(X)
    gmm = GaussianMixture(400, max_iter=100, verbose=1)
    gmm.fit(X)
    pickle.dump(gmm, open("gmm.pickle", "wb"))
| true |
b32734ea9250783eae4cbb017eb8ff9b2d9190c6 | Python | derickdeiro/curso_em_video | /aula_11-cores-no-terminal/pratica.py | UTF-8 | 304 | 3.65625 | 4 | [] | no_license | a = 3
b = 5
print('Os valores são \033[33m{}\033[m e \033[31m{}\033[m!!!'.format(a, b))
nome = 'Derick'
cores = {'limpa': '\033[m',
'azul': '\033[34m',
'amarelo': '\033[33m'}
print('Olá! Muito prazer em te conhecer, {}{}{}!!!' .format(cores['azul'], nome, cores['limpa']))
| true |
860afb8f4793fb0007d40ea980579fa0ec1496f1 | Python | ken626014896/python-practice | /正则表达式/匹配密码.py | UTF-8 | 132 | 2.75 | 3 | [] | no_license |
import re
# Password policy: 6-12 word characters with at least one uppercase letter,
# one lowercase letter and one digit.
pattern = re.compile(r'^(?=.*[A-Z])(?=.*[a-z])(?=.*[0-9])\w{6,12}$')
res = pattern.search('zxc12A3ken1')
# fixed: .group() on a failed search raises AttributeError; guard against None
if res:
    print(res.group())
else:
    print('no match')
2676bead71717c207bfedee914d1ca16fece2b8b | Python | HuangXiongjin/python_study | /python基础/8.函数基础/code/04-函数的返回值.py | UTF-8 | 1,772 | 4.40625 | 4 | [] | no_license | """---author---hxj""""
# 1. 什么是返回值
"""
返回值就是return关键字后表达式的值 (怎么确定函数的返回值)
返回值就是函数调用表达式的值 (怎么获取函数的返回值)
(python中所有函数都有返回值,默认是None)
1)什么是return
return是函数体中的关键字(只能在函数中使用), 作用有两个: 结束函数\确定函数返回值
a.结束函数: 执行函数体的时候只要遇到return函数直接结束
b.确定函数返回值: return 返回值(可以是具体的数据,声明过的变量,运算表达式)
2)什么是函数调用表达式
函数调用语句就是函数调用表达式, 例如: func1(10), max([1, 2]),
每个调用函数的语句都有结果,这个结果就是调用这个函数得到的返回值
"""
def func1():
    """Print 0 and 1, then exit early via a bare return once the counter hits 2."""
    counter = 0
    while counter < 10:
        if counter == 2:
            return
        print(counter)
        counter += 1
    print('里面: 函数结束')
print('func1:', func1())
def func2():
    """Demo: a bare `return` ends the function, so the prints after it never run."""
    print('=====')
    return
    print('++++++')  # unreachable
    print('------')  # unreachable
print('func2:', func2())
def func3():
    """Demo: no return statement ever executes, so the call evaluates to None."""
    if False:
        return 10
print('func3:', func3())
def func4():
    """Demo: always returns the constant 100."""
    return 100
print('func4:', func4())
# 2.怎么使用返回值
"""
想要用函数的返回值,就使用函数调用表达式的值。
普通数据能做的事情,函数调用表达式都可以做
"""
100
func4()
num = 100
num1 = func4()
print(num, num1)
list1 = [100, 200]
list2 = [func4(), 200]
print(list1, list2)
print(100 > 200, 100 * 2)
print(func4() > 200, func4()*2)
# 3.什么时候需要返回值
"""
初学者:看实现函数的功能会不会产生新的数据
return 返回值1,返回值2,...
"""
def sum1(num1, num2):
    """Return the sum of the two arguments."""
    result = num1 + num2
    return result
re = sum1(10, 20)
print(re)
list1 = [re, 20]
| true |
d593d1007d8bb780664d42687e1911fab508b921 | Python | kzbigboss/2021-epic-road-trip | /get_put_teslafi_data/app.py | UTF-8 | 3,817 | 2.78125 | 3 | [] | no_license | import os
import requests
import json
import boto3
import base64
from botocore.exceptions import ClientError
def lambda_handler(event, context):
    """Pull the latest TeslaFi feed and push it into the Kinesis data stream."""
    ## Get data from TeslaFi
    teslafi_token = get_secret()
    teslafi_url = "https://www.teslafi.com/feed.php?" + \
                  "token=" + teslafi_token # + \
                  # "&command=lastGoodTemp"
    teslafi_data = get_teslafi_data(teslafi_url)
    ## Put data into data stream
    teslafi_stream_name = get_env_var("teslafidatastream")
    put_response = put_teslafi_data(teslafi_data, teslafi_stream_name)
    print(put_response)
def get_env_var(variable_name):
    """
    Helper to grab one of the Lambda's environment variables.

    :param variable_name: string, name of the environment variable to read
    :return: string, the variable's value; exits the process if it is unset
    """
    try:
        variable = os.environ[variable_name]
    except KeyError:  # fixed: bare except also swallowed SystemExit/KeyboardInterrupt
        print("Environment variable name not found")
        exit()
    return variable
def get_secret():
    """
    Fetch the TeslaFi API token from AWS Secrets Manager.

    :return: string, the `teslafiapi` entry of the JSON secret
    """
    secret_name = "epicroadtripteslafiapi"
    region_name = "us-east-1"
    # Create a Secrets Manager client
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name
    )
    # We only handle the specific exceptions for the 'GetSecretValue' API.
    # See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
    # We rethrow the exception by default.
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        if e.response['Error']['Code'] == 'DecryptionFailureException':
            # Secrets Manager can't decrypt the protected secret text using the provided KMS key.
            raise e
        elif e.response['Error']['Code'] == 'InternalServiceErrorException':
            # An error occurred on the server side.
            raise e
        elif e.response['Error']['Code'] == 'InvalidParameterException':
            # You provided an invalid value for a parameter.
            raise e
        elif e.response['Error']['Code'] == 'InvalidRequestException':
            # You provided a parameter value that is not valid for the current state of the resource.
            raise e
        elif e.response['Error']['Code'] == 'ResourceNotFoundException':
            # We can't find the resource that you asked for.
            raise e
        else:
            # fixed: unknown error codes used to fall through and reach the
            # return below with `secret` unbound (NameError)
            raise e
    else:
        # Decrypts secret using the associated KMS CMK. Depending on whether
        # the secret is a string or binary, one of these fields is populated.
        if 'SecretString' in get_secret_value_response:
            secret = get_secret_value_response['SecretString']
        else:
            # fixed: the decoded value was stored in an unused name, leaving
            # `secret` unbound on the binary path
            secret = base64.b64decode(get_secret_value_response['SecretBinary'])
        return json.loads(secret)['teslafiapi']
def get_teslafi_data(url):
    """GET `url` and return the decoded JSON body (the TeslaFi feed payload)."""
    r = requests.get(url)
    return r.json()
def put_teslafi_data(payload, stream_name):
    """Write `payload` as one newline-terminated JSON record to a Kinesis stream."""
    data_stream = boto3.client('kinesis')
    response = data_stream.put_record(
        StreamName=stream_name,
        Data=json.dumps(payload).encode('utf-8') + b'\n',
        PartitionKey="whatever" # a partition key is required by the API even when unused
    )
    return response
| true |
6189a462075bb90c360d930eac226e6197c1b750 | Python | fLaVz/dataAnalytics | /test.py | UTF-8 | 3,046 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" TP2
https://openclassrooms.com/fr/courses/4297211-evaluez-et-ameliorez-les-performances-dun-modele-de-machine-learning/4308241-mettez-en-place-un-cadre-de-validation-croisee
http://scikit-learn.org/stable/modules/cross_validation.html#stratified-k-fold
"""
import numpy as np
import pandas as pd
from scipy.io.arff import loadarff
from sklearn.impute import SimpleImputer
import sklearn.preprocessing as skp
from sklearn.model_selection import train_test_split
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.dummy import DummyClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn import metrics
# from mlxtend.preprocessing import TransactionEncoder
# Load the data from the ARFF file and put it into a DataFrame
vote, meta = loadarff('labor.arff')
df = pd.DataFrame(vote)
# --------------------------
# NORMALISATION (Question 1)
# --------------------------
# Names of the numeric columns (i.e. the ones to normalise)
numeric_columns = [meta.names()[i] for i, t in enumerate(meta.types()) if t == 'numeric']
# Replace missing values with the column mean
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
df[numeric_columns] = imp_mean.fit_transform(df[numeric_columns])
# Standardise the numeric columns (zero mean, unit variance)
df[numeric_columns] = skp.StandardScaler().fit_transform(df[numeric_columns])
# print(df)
# ----------
# Question 3
# ----------
# X = df.loc[:, df.columns != 'class']
X = df[numeric_columns]
y = df['class']
lst_classif = [
    DummyClassifier(strategy="most_frequent"),
    GaussianNB(),
    tree.DecisionTreeClassifier(),
    LogisticRegression(solver="liblinear"),
    svm.SVC(gamma='scale'),
]
lst_classif_names = [
    'Dummy',
    'Naive Bayes',
    'Decision tree',
    'Logistic regression',
    'SVM'
]
def accuracy_score(lst_classif,lst_classif_names,X,y):
    # Report mean and spread of 5-fold cross-validation accuracy per classifier.
    for clf,name_clf in zip(lst_classif,lst_classif_names):
        scores = cross_val_score(clf, X, y, cv=5)
        print("Accuracy of "+name_clf+" classifier on cross-validation: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
def confusion_matrix(lst_classif,lst_classif_names,X,y):
    # Print cross-validated accuracy and the confusion matrix per classifier.
    # NOTE: this definition shadows sklearn.metrics.confusion_matrix imported
    # at the top of the file; the sklearn one stays reachable via `metrics.`.
    for clf,name_clf in zip(lst_classif,lst_classif_names):
        predicted = cross_val_predict(clf, X, y, cv=5)
        print("Accuracy of "+name_clf+" classifier on cross-validation: %0.2f" % metrics.accuracy_score(y, predicted))
        print(metrics.confusion_matrix(y, predicted))
accuracy_score(lst_classif,lst_classif_names,X,y)
confusion_matrix(lst_classif,lst_classif_names,X,y)
# ----------
# Question 4
# ----------
# Replace missing values with the most frequent value
# imp_mean = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
# print(df[:, df.columns not in numeric_columns])
# print(df.columns.all(numeric_columns))
| true |
e7eb0a582390f7916632cec439dcbd912ec70533 | Python | buxuele/100-days-of-code | /05/asyncio_example.py | UTF-8 | 448 | 3.359375 | 3 | [] | no_license | import asyncio
# 一个异步的小例子。
async def coroutine_1():
    """Demo task: announce itself, yield to the event loop for 4 s, then report back."""
    print("cor 1 is active on the event loop")
    print("cor 1 wait for 4s")
    await asyncio.sleep(4)  # suspends this task; coroutine_2 runs meanwhile
    print("cor 1 is back")
async def coroutine_2():
    """Demo task: announce itself, yield to the event loop for 5 s, then report back."""
    print("cor 2 is active on the loop")
    print("cor 2 wait for 5s")
    await asyncio.sleep(5)  # suspends this task; coroutine_1 runs meanwhile
    print("cor 2 is back")
# Drive both coroutines to completion concurrently on one event loop.
# NOTE(review): newer code typically uses asyncio.run(...) instead of
# get_event_loop()/run_until_complete.
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(coroutine_1(), coroutine_2()))
| true |
2cc960ddf64d374aafdcaa9f9836aa7bf80ad47e | Python | rebel47/PythonInOneVideoByCodeWithHarry | /strings.py | UTF-8 | 1,153 | 4.40625 | 4 | [] | no_license | # a = "ayaz's" # ---> use this if you have single quotes in your strings
# print(a)
# SLICING examples: a[start:stop:step]; negative step walks backwards
# a = "AyazAlam"
# print(a[0])
# print(a[1])
# print(a[1:3])
# print(a[0:10])
# print(a[::-1])
# print(a[-4:-1])
# print(a[0::1])
# print(a[0::2])
# print(a[0::3])
# print(a[0::-1])
# print(a[0::-2])
# # Concatenating two strings
# greeting = "Good Morning, "
# name = "Ayaz"
# c = greeting + name
# print(c)
# STRING FUNCTION demos on `story`
story = "Once upon a time there is a boy named Mohd Ayaz Alam who lives in Okhla, Delhi who was learning Python from Code With Harry Youtube channel whose name is Harrish Ali Khan"
print(len(story))
print(story.endswith("notes"))
print(story.endswith("Khan"))
print(story.count("is"))
print(story.capitalize()) #It will only capitalize the first letter in the whole string.
print(story.upper()) #This will capitalize all letters in the given strings.
print(story.lower()) #This will make all the letters lowers in the given strings.
print(story.find("Ayaz"))
print(story.replace("Ayaz", "Josh"))
# Escape Sequence Character demos: \n newline, \t tab, \' quote, \\ backslash
kahani = "Hey guys I am Ayaz and today \nI am going to talk about \t Python\' and only Python\\Python."
print(kahani)
2c16f41f30bffab8f5d52b1926477f3d416a46c1 | Python | cyclony/abbreviationSpider | /spider/PageSpider.py | UTF-8 | 1,640 | 2.90625 | 3 | [] | no_license | from bs4 import BeautifulSoup
import functools
import requests
import csv
@functools.lru_cache()
def get_soup(url):
    """Fetch `url` and return it parsed as BeautifulSoup, memoised per URL."""
    res = requests.get(url)
    # Force UTF-8 decoding regardless of the response headers.
    res.encoding = 'utf-8'
    # NOTE(review): no parser is named, so bs4 picks the "best available" one,
    # which can differ between machines — consider passing "html.parser".
    soup = BeautifulSoup(res.text)
    return soup
class PageSpider:
    """Base crawler for a paginated listing: walk the pages, collect rows,
    and append them to a per-product CSV file.

    Subclasses override get_next_page_url() and one_page_data_gen() to
    describe a concrete site.
    """

    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class; per-instance state is set in __init__.
    init_page_url = ''
    prod_name = ''
    downloaded_data = []

    def __init__(self, init_page_url):
        self.init_page_url = init_page_url
        # fixed: downloaded_data used to exist only as a mutable class
        # attribute, so every spider instance appended into one shared list
        self.downloaded_data = []

    def next_page_url_gen(self, soup):
        # Recursively follow "next" links until get_next_page_url is falsy.
        next_page_url = PageSpider.get_next_page_url(soup)
        if next_page_url:
            yield next_page_url
            yield from self.next_page_url_gen(get_soup(next_page_url))

    @staticmethod
    def get_next_page_url(soup):
        # Hook: return the next page's URL, or None on the last page.
        return None

    # Hook meant to be overridden by subclasses ("需要被替换").
    @staticmethod
    def one_page_data_gen(soup):
        # Hook: yield the data rows found on one page.
        yield None

    def page_soup_gen(self, init_url):
        soup = get_soup(init_url)
        for url in self.next_page_url_gen(soup):
            yield get_soup(url)

    # Yield the historical data rows reachable from init_page_url.
    # NOTE(review): the first page is only used for pagination links; its own
    # rows are never yielded — confirm whether that is intended.
    def n_page_data_gen(self, init_page_url):
        if init_page_url:
            for soup in self.page_soup_gen(init_page_url):
                yield from PageSpider.one_page_data_gen(soup)

    # Crawl all pages of this product's history and persist the rows.
    def download_data(self):
        for price_item in self.n_page_data_gen(self.init_page_url):
            self.downloaded_data.append(price_item)
        self.save_to_file()

    def save_to_file(self):
        # Append (not overwrite) the accumulated rows to "<prod_name>.csv".
        file_name = self.prod_name + '.csv'
        with open(file_name, 'a', encoding='utf-8-sig', newline='') as file:
            c = csv.writer(file)
            c.writerows(self.downloaded_data)
| true |
c318732a6d8b1b11a9e9d2922add451e0743c785 | Python | selmi-karim/hackerrank | /arrays/crush.py | UTF-8 | 715 | 2.78125 | 3 | [
"MIT"
] | permissive | # https://www.hackerrank.com/challenges/crush
#!/bin/python3
import math
import os
import random
import re
import sys
class intersection:
    """Value object describing a range [start, end] carrying a value.

    NOTE(review): lowercase class name breaks PEP 8 PascalCase; kept for
    compatibility with existing callers.
    """
    def __init__(self, start, end, value):
        self.start = start
        self.end = end
        self.value = value
    def display(self):
        # Debug helper: print each field on its own line.
        print('start: ', self.start)
        print('end: ', self.end)
        print('value: ', self.value)
if __name__ == '__main__':
    #fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # Read "n m" then m integer query rows from stdin (HackerRank format).
    nm = input().split()
    n = int(nm[0])
    m = int(nm[1])
    queries = []
    for _ in range(m):
        queries.append(list(map(int, input().rstrip().split())))
    # NOTE(review): arrayManipulation is not defined anywhere in this module —
    # running this block as-is raises NameError; the solution function was
    # presumably meant to be defined above.
    result = arrayManipulation(n, queries)
    print('result: ', result)
| true |
c743e4995a4c06685dbeda06fbba09449990eebc | Python | dal07065/CS10 | /lab3_kang_p.py | UTF-8 | 6,791 | 4.09375 | 4 | [] | no_license | # Lina Kang
# 1072568
# Lab 3
# Question 1.a.
def texas():
    """Report the hard-coded bird count for Texas."""
    bird_count = 5000
    print("Texas has", bird_count, "birds")
# Question 1.b.
def California():
    """Report the hard-coded bird count for California."""
    bird_count = 8000
    print("California has", bird_count, "birds")
# Question 1.c.
def main():
texas()
California()
if __name__ == "__main__":
main()
## Test Case 1.
##
##Texas has 5000 birds
##California has 8000 birds
# Question 2.a.
def show_interest(principal:float, rate:float, periods:int)->None:
    """Print the simple interest (principal * rate * periods) as $x,xxx.xx."""
    print(f"The simple interest will be ${principal * rate * periods:,.2f}")
# Question 2.b.
def main():
show_interest(10000.00, 0.01, 10)
if __name__ == "__main__":
main()
## Test Case 1.
##
##The simple interest will be $1,000.00
# Question 3.a.
# get user input for length and height
def getData()->(float, float):
length = float(input("Enter the length of the triangle: "))
height = float(input("Enter the perpendicular height of a triangle: "))
return length, height
# Question 3.b.
# calculate the triangle area based on received user input
def trigArea(length:float, height:float)->(float):
    """Return the area of a triangle from its base length and perpendicular height."""
    # area = 1/2 * base * height
    return 0.5 * length * height
# Question 3.c.
# output the data
def displayData(length:float, height:float, area:float)->None:
    """Print the triangle's dimensions and its computed area."""
    for label, value in (("The length:", length),
                         ("The height:", height),
                         ("The calculated area:", area)):
        print(label, value)
# Question 3.d.
def main():
length, height = getData()
area = trigArea(length, height)
displayData(length, height, area)
if __name__ == "__main__":
main()
## Test Case 1.
##
##Enter the length of the triangle: 5
##Enter the perpendicular height of a triangle: 7
##The length: 5.0
##The height: 7.0
##The calculated area: 17.5
##
## Test Case 2.
##
##Enter the length of the triangle: 3
##Enter the perpendicular height of a triangle: 6
##The length: 3.0
##The height: 6.0
##The calculated area: 9.0
##
## Test Case 3.
##
##Enter the length of the triangle: 14.5
##Enter the perpendicular height of a triangle: 19.2
##The length: 14.5
##The height: 19.2
##The calculated area: 139.2
##
## Test Case 4.
##
##Enter the length of the triangle: 5.67
##Enter the perpendicular height of a triangle: 8.94
##The length: 5.67
##The height: 8.94
##The calculated area: 25.3449
##
## Test Case 5.
##
##Enter the length of the triangle: 2.3
##Enter the perpendicular height of a triangle: 6.7
##The length: 2.3
##The height: 6.7
##The calculated area: 7.704999999999999
# Question 4
# gets the amount monthly sales from user input
def get_sales()->(float):
sales = float(input("Enter the monthly sales: "))
return sales
# gets the amount of advanced pay from user input
def get_advancedpay()->(float):
print("Enter the amount of advanced pay, or")
print("enter 0 if no advanced pay was given.")
advpay = float(input("Advanced pay: "))
return advpay
# calculates the commission rate based on the monthly sales
def determine_comm_rate(sales:float)->(float):
    """Map monthly sales to a tiered commission rate (10% to 18%)."""
    # (exclusive upper bound, rate) tiers, checked lowest first;
    # anything at or above the last bound earns the top rate.
    tiers = ((10000.00, 0.10),
             (14999.99, 0.12),
             (17999.99, 0.14),
             (21999.99, 0.16))
    for upper, rate in tiers:
        if sales < upper:
            return rate
    return 0.18
def main():
sales = get_sales()
advanced_pay = get_advancedpay()
comm_rate = determine_comm_rate(sales)
pay = sales * comm_rate - advanced_pay
print("the pay is $", format(pay, ',.2f'), sep = '')
if pay < 0:
print("The salesperson must reimburse")
print("The company.")
if __name__ == "__main__":
main()
## Test Case 1.
##
##Enter the monthly sales: 14550.00
##Enter the amount of advanced pay, or
##enter 0 if no advanced pay was given.
##Advanced pay: 1000.00
##the pay is $746.00
##
## Test Case 2.
##
##Enter the monthly sales: 9500
##Enter the amount of advanced pay, or
##enter 0 if no advanced pay was given.
##Advanced pay: 0
##the pay is $950.00
##
## Test Case 3.
##
##Enter the monthly sales: 12000.00
##Enter the amount of advanced pay, or
##enter 0 if no advanced pay was given.
##Advanced pay: 2000.00
##the pay is $-560.00
##The salesperson must reimburse
##The company.
##
## Test Case 4.
##
##Enter the monthly sales: 12345.00
##Enter the amount of advanced pay, or
##enter 0 if no advanced pay was given.
##Advanced pay: 5000.00
##the pay is $-3,518.60
##The salesperson must reimburse
##The company.
##
## Test Case 5.
##
##Enter the monthly sales: 2400
##Enter the amount of advanced pay, or
##enter 0 if no advanced pay was given.
##Advanced pay: 600
##the pay is $-360.00
##The salesperson must reimburse
##The company.
# Question 5.
def getInitials():
    """Read a full name from stdin and print each word's initial followed by a dot."""
    name = input("Enter your full name: ")
    name_list = name.split()
    initials = ''
    # for every item in the list, extract the first character.
    # Then accumulate all the initial characters into 'initials'
    for num in range(len(name_list)):
        temp = name_list[num]
        initials = initials + temp[0] + '.'
    print(initials)
def main():
getInitials()
if __name__ == "__main__":
main()
##Test Case 1.
##
##Enter your full name: James Tiberias Kirk
##J.T.K.
##
##Test Case 2.
##
##Enter your full name: Lina Kang
##L.K.
##
##Test Case 3.
##
##Enter your full name: Donald Trum[
##D.T.
##
##Test Case 4.
##
##Enter your full name: James
##J.
##
##Test Case 5.
##
##Enter your full name: Super Long Name Right Here
##S.L.N.R.H.
# Question 6.
def string_total(string)->(int):
    """Return the sum of the decimal digits in `string`.

    Raises ValueError if any character is not a digit (int() conversion fails).
    """
    # Built-in sum over a generator replaces the manual index loop and avoids
    # shadowing the `sum` builtin with a local variable.
    return sum(int(digit) for digit in string)
def main():
# Get a string of numbers as input from the user.
number_string = input('Enter a sequence of digits with nothing separating them: ')
# Call string_total method, and store the total.
total = string_total(number_string)
# Display the total.
print('The total of the digits in the string you entered is', total)
if __name__ == "__main__":
main()
##Test Case 1.
##
##Enter a sequence of digits with nothing separating them: 4563
##The total of the digits in the string you entered is 18
##
##Test Case 2.
##
##Enter a sequence of digits with nothing separating them: 1010
##The total of the digits in the string you entered is 2
##
##Test Case 3.
##
##Enter a sequence of digits with nothing separating them: 2
##The total of the digits in the string you entered is 2
##
##Test Case 4.
##
##Enter a sequence of digits with nothing separating them: 0
##The total of the digits in the string you entered is 0
##
##Test Case 5.
##
##Enter a sequence of digits with nothing separating them: 9485678302945
##The total of the digits in the string you entered is 70
| true |
d9712ad4dc6955a12493eed2830390c7a04aba67 | Python | qishibo/RL-robot-maze | /Agent.py | UTF-8 | 1,778 | 2.96875 | 3 | [] | no_license | import numpy as np
import pandas as pd
class Agent:
    # Tabular Q-learning agent.
    # learning_rate: step size; reward_decay: discount factor gamma;
    # epsilon: probability of taking a random (exploratory) action
    def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, epsilon=0.01):
        self.lr = learning_rate
        self.gamma = reward_decay
        self.actions = actions
        self.epsilon = epsilon
        # Q-table of scores: one row per visited state, one column per action
        self.q_table = pd.DataFrame(columns=self.actions)
    def update_q_table(self, s, a, r, sig):
        # One Q-learning backup for the transition (s, a) -> sig with reward r.
        # NOTE(review): only `sig` is ensured to exist in the table here; `s` is
        # assumed present (added by a prior action_select call) -- confirm callers.
        self.check_in_qtable(sig)
        # current Q-value for (state, action)
        q_value = self.q_table.loc[s, a]
        if sig != 'finished':
            q_target = r + self.gamma * self.q_table.loc[sig, :].max()
        else:
            # terminal state: target is just the immediate reward
            q_target = r
        # move the Q-value toward the target by the learning rate
        self.q_table.loc[s, a] += self.lr * (q_target - q_value)
    def action_select(self, observation):
        self.check_in_qtable(observation)
        # epsilon-greedy policy: with probability 1 - epsilon take the best action
        if np.random.uniform() > self.epsilon:
            state_action = self.q_table.loc[observation, :]
            # np.max(state_action) is the row's best score; choose randomly
            # among the actions tied for that maximum
            action = np.random.choice(state_action[state_action == np.max(state_action)].index)
        else:
            # exploration branch: pick a uniformly random action
            action = np.random.choice(self.actions)
        print("selected action: ", ['up', 'down', 'left', 'right'][action])
        return action
    def check_in_qtable(self, state):
        # Add a zero-initialised row for `state` if it is not yet in the table.
        if state not in self.q_table.index:
            self.q_table = self.q_table.append(
                pd.Series(
                    [0]*len(self.actions),
                    name=state,
                    index=self.q_table.columns
                )
            )
| true |
51fcb636036fd805aafa3a6c9469b20c7755939a | Python | thomasjzahn/geocoding-proxy | /ExternalResource.py | UTF-8 | 967 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
import requests
from requests.exceptions import ConnectionError, Timeout
class ExternalResource():
    """Minimal HTTPS GET client returning parsed JSON plus an error message."""

    def __init__(self):
        return None

    def make_request(self, url, headers=None, params=None, data=None, expected_status_code=200):
        """GET https://<url> and return a (response_data, errors) tuple.

        Exactly one member is non-None: the parsed JSON body on success, or a
        human-readable error string on an unexpected status / request failure.
        """
        response_data = None
        errors = None
        if headers is None:  # identity comparison is the Python idiom for None
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json"
            }
        # Make the request:
        try:
            endpoint = "https://{0}".format(url)
            response = requests.get(endpoint, headers=headers, params=params, data=data)
            if response.status_code == expected_status_code:
                response_data = response.json()
            else:
                errors = "Unexpected status code (expected {0}, received {1})".format(expected_status_code, response.status_code)
        except (ConnectionError, Timeout) as e:
            # Both failure modes produce the same message, so handle them together.
            errors = "Error making request ({0})".format(e)
        return (response_data, errors)
| true |
6b12e089373138166021c8a2b2e547c03e27ff26 | Python | karthikdash/Lab4Code | /overload.py | UTF-8 | 230 | 3.015625 | 3 | [] | no_license | # lab 4
# Created by Chirag Wadhwani(cw844) and Karthik D.(kd453)
# This program is used to overload the CPU by running multiple instances
add = 1.1233253457564756
# Busy-loop forever to keep one CPU core fully utilised (the float quickly
# overflows to inf, but the loop never terminates by design).
while True:
    add = add * add # perpetually multiply the number by itself
| true |
2db79b4a2f01ccb506aaabdeae5d99bba72a0de2 | Python | interestudy/pythonstudy | /neural/simple_neural_network/train.py | UTF-8 | 979 | 2.796875 | 3 | [] | no_license | # _*_ coding:utf-8 _*_
# @Author :ran
# @time :2019-01-09 07:13
# @File :train.py
# @Software :PyCharm
# Train the neural network on the MNIST data set.
import numpy as np
from neural.simple_neural_network import neural_network
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 0.05
n = neural_network.NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
training_data_file = open("mnist_train.csv", "r")
training_data_list = training_data_file.readlines()
training_data_file.close()
for record in training_data_list:
    # Column 0 is the label digit; the remaining columns are pixel values 0-255.
    all_values = record.split(',')
    # Rescale pixels into (0.01, 1.0] so no input is exactly zero.
    inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    # Build the target vector matching the output layer: 0.99 for the true
    # digit, 0.01 everywhere else.
    targets = np.zeros(output_nodes) + 0.01
    targets[int(all_values[0])] = 0.99
    n.train(inputs, targets)
    print("is training...")
# Save the weights for later testing (the trained weights are the core of the network).
np.save('who.npy', n.who)
np.save('wih.npy', n.wih)
| true |
a9d3872fadd94ee2809f459773bf8c0ccb64ecf2 | Python | DoctorSad/_Course | /Lesson_07/_0_lists_4.py | UTF-8 | 1,050 | 4.40625 | 4 | [] | no_license | """
Обход списка в цикле.
"""
from pprint import pprint
def main():
    colors = ["red", "yellow", "orange", "green", "blue", "violet"]
    # Iterate over the elements with a for loop
    for color in colors:
        print(color)
    # Iterate over the indices
    for i in range(len(colors)):
        print(colors[i])
    # Get both the index and the element using enumerate
    for idx, color in enumerate(colors):
        print(f"{idx + 1}. {color}")
    # Looping over nested lists
    matrix = [
        [1, 2, 3, 4, 5],
        [6, 7, 8, 9, 10],
        [11, 12, 13, 14, 15],
        [16, 17, 18, 19, 20],
    ]
    for i in matrix:
        for j in i:
            print(j)
    # pprint can be used to display data structures on screen
    pprint(matrix, width=25)  # width - the maximum number of characters per line
if __name__ == "__main__":
main()
| true |
95d178ae9a1945105ac81e43baa9ea53b62ef6c8 | Python | sfa119f/if2250-2020-k02-04-garas | /GUI/ModulPenjual/ShowPenjual.py | UTF-8 | 2,631 | 2.546875 | 3 | [] | no_license | from tkinter import *
import DefVar
from PIL import ImageTk,Image
from ModulPembeli import showProduct
from ModulFungsi.tes import *
def ShowPenjual(page):
    """Render one page (3x3 grid, 9 items) of the logged-in seller's products."""
    berandaframe = Frame(DefVar.root, bg=DefVar.white)
    berandaframe.place(x=200, y=50, height=550, width=600)
    searchframe = Frame(DefVar.root, bg=DefVar.redcolor)
    searchframe.place(x=200, y=0, height=50, width=600)
    judul = Label(searchframe,text="Produk yang dijual: ",fg=DefVar.white,bg=DefVar.redcolor,font="Helvetica 12 bold")
    judul.place(x=35, y=13)
    # Grid cursor: x_/y_ are pixel offsets, a counts columns, i indexes products.
    x_ = 0
    y_ = 0
    a = 0
    i = page*9-9
    List = AllJual(DefVar.username)
    while(i<len(List) and i<page*9):
        a = 0
        # Fill one row of up to three product cards.
        while(a<3 and i<len(List) and i<page*9):
            Produk = List[i]
            frame1 = Frame(berandaframe, bg=DefVar.white)
            frame1.place(x=x_, y=y_, width=200, height=160)
            # Produk[6] is the product image path (opened below).
            filename = "../" + Produk[6]
            img = Image.open(filename)
            wImg, hImg = img.size
            # Scale the thumbnail preserving aspect ratio: landscape images to
            # 130 px wide, portrait/square images to 85 px tall.
            if(wImg>hImg):
                hImg = hImg*130//wImg
                wImg = 130
            else:
                wImg = wImg*85//hImg
                hImg = 85
            img = img.resize((wImg, hImg), Image.ANTIALIAS)
            img = ImageTk.PhotoImage(img)
            panel = Label(frame1, image=img)
            panel.place(x=10,y=10)
            # Keep a reference on the widget so tkinter does not garbage-collect the image.
            panel.img=img
            #Nama
            nama = Label(frame1, text = Produk[1], bg=DefVar.white, font="Helvetica 10 bold")
            nama.place(x=10, y=100)
            #Harga
            hargajual = showProduct.makeRp(str(Produk[2]))
            harga = Label(frame1, text = hargajual, bg=DefVar.white, font="Helvetica 10")
            harga.place(x=10, y=120)
            #Stok
            stok = Label(frame1, text = "Stok: " + str(Produk[3]), bg=DefVar.white, font="Helvetica 8")
            stok.place(x=10, y=140)
            x_ += 200
            a += 1
            i += 1
        x_ = 0
        y_ += 180
    drawPageLabel(page)
    # Pagination buttons: Next while more items remain, Previous after page 1.
    if(len(List)>=page*9):
        nextButton = Button(DefVar.root, text="Next", font="Helvetica 8", fg=DefVar.text, bg=DefVar.white, command=lambda:[ShowPenjual(page+1), drawPageLabel(page+1)])
        nextButton.place(x=750, y=570)
    if(page!=1):
        nextButton = Button(DefVar.root, text="Previous", font="Helvetica 8", fg=DefVar.text, bg=DefVar.white, command=lambda:[ShowPenjual(page-1), drawPageLabel(page-1)])
        nextButton.place(x=220, y=570)
def drawPageLabel(page):
    # Render a "page N" indicator at a fixed position on the root window.
    pageLabel = Label(DefVar.root, text="page "+str(page), bg=DefVar.white, font="Helvetica 8")
pageLabel.place(x=480, y=570) | true |
89867d0b3649757001d6b36df03b7709ff37e6db | Python | HuCui2022/smth-smth-v2 | /mi_en_plot.py | UTF-8 | 942 | 2.8125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | import toml
import matplotlib.pyplot as plt
import numpy as np
def main():
    """Plot per-neuron mutual information vs entropy as grouped bars, save to PNG."""
    model_name = 'fix_clstm256'
    width = 0.15
    # Only the first `size` neurons are plotted.
    size = 10
    with open(f'mutual_info_{model_name}.toml', 'r') as f:
        mi_dict = toml.load(f)
    with open(f'entropy_{model_name}.toml', 'r') as f:
        entropy_dict = toml.load(f)
    mi_score = [float(value) for value in mi_dict.values()][:size]
    entropy_score = [float(value) for value in entropy_dict.values()][:size]
    lefts = np.array(range(len(mi_score)))
    # Two bar series side by side, the second offset by one bar width.
    plt.bar(lefts, mi_score, label=f'mutual info', width=width, align="center")
    plt.bar(lefts + width, entropy_score, label=f'entropy', width=width, align="center")
    plt.legend(bbox_to_anchor=(0, 0), loc='lower left')
    plt.tick_params(bottom=False, direction='in')
    # Centre each tick between the paired bars; label neurons starting from 1.
    plt.xticks(lefts + width / 2, lefts + 1)
    plt.xlabel('Neuron')
    plt.ylabel('Information')
    plt.savefig(f'both_{model_name}.png')
main()
| true |
19af44a66a21ddf4f4c4f7dd593cfbcc2dfb6ce0 | Python | AlmasTalkingDogs/Doge | /training/fourier.py | UTF-8 | 379 | 2.859375 | 3 | [
"MIT"
] | permissive | # Apply Fast Fourier Transform and frequency filtering to every set of
# time points
import numpy as np
def filtered_frequency_domain_data(signal, T=1.0/192.0):
    """Return the 7.5-30 Hz band-pass-filtered, peak-normalised magnitude spectrum.

    :param signal: 1-D numpy array of time-domain samples
    :param T: sample spacing in seconds (default 1/192 s, i.e. 192 Hz)
    :return: (f_signal, W) where W are the real-FFT bin frequencies in Hz and
             f_signal is the spectrum magnitude, zeroed outside 7.5-30 Hz and
             scaled so its maximum is 1 (left all-zero if nothing survives the
             band-pass, instead of dividing by zero).
    """
    # rfftfreq(n, T) gives frequencies matching the n//2 + 1 rfft bins; the old
    # fftfreq(n//2 + 1, T) produced wrongly spaced (and negative) frequencies,
    # so the 7.5-30 Hz mask was applied to the wrong bins.
    W = np.fft.rfftfreq(signal.size, T)
    # Magnitude of the complex spectrum; taking np.real() first would discard
    # the imaginary (phase-dependent) part of each bin's energy.
    f_signal = np.abs(np.fft.rfft(signal))
    f_signal[W < 7.5] = 0
    f_signal[W > 30] = 0
    peak = f_signal.max()
    if peak > 0:
        f_signal /= peak
    return f_signal, W
| true |
2ab417b1aeabd05893ccb118900766f92a9cefd7 | Python | Aasthaengg/IBMdataset | /Python_codes/p03592/s164769723.py | UTF-8 | 336 | 2.703125 | 3 | [] | no_license | N, M, K = map(int, input().split())
N, M = min(N, M), max(N, M)
# Quick win: K is an exact multiple of a full row (M cells) or column (N cells).
if K % M == 0 or K % N == 0:
    print('Yes')
    exit()
# Try every count i of chosen rows. The i rows cover M*i cells; the remaining
# k = K - M*i cells must come from whole columns, and (per the arithmetic
# below) each column then contributes n = N - 2*i cells, so we need an integer
# column count j = k / n with 0 <= j <= M.
for i in range(N + 1):
    k = K - M * i
    n = N - 2 * i
    if n <= 0 or k < 0:
        continue
    if k % n == 0:
        if 0 <= k // n <= M:
            print('Yes')
            exit()
# for-else: runs only when the loop finished without exit()-ing above.
else:
    print('No')
def double_list(lst):
    """Recursively build a new list whose elements are twice those of lst."""
    if lst == []:
        return []
    # Double the head, recurse on the rest.
    return [lst[0] * 2] + double_list(lst[1:])
double_list([1,2,3])
7b4fadce0cb724354be1806b68ef74b3753cf217 | Python | advecchia/hackerrank | /30-days-of-code/Day 6/solution.py | UTF-8 | 844 | 3.53125 | 4 | [
"MIT"
] | permissive | MINIMUM_T_NUMBER = 1
MAXIMUM_T_NUMBER = 10
SMALLER_STRING = 2
GREATER_STRING = 10000
def validate_test_case_number(test_case_number):
    """Raise if the test-case count is outside [MINIMUM_T_NUMBER, MAXIMUM_T_NUMBER].

    The original chained comparison `MIN > x > MAX` can never be true when
    MIN < MAX, so the validation silently accepted every value.
    """
    if not MINIMUM_T_NUMBER <= test_case_number <= MAXIMUM_T_NUMBER:
        raise Exception('Invalid test case number.')
def validate_string(input_string):
    """Raise if the string length is outside [SMALLER_STRING, GREATER_STRING].

    The original chained comparison `SMALLER > len(s) > GREATER` can never be
    true when SMALLER < GREATER, so the validation silently accepted every value.
    """
    if not SMALLER_STRING <= len(input_string) <= GREATER_STRING:
        raise Exception('Invalid string length.')
if __name__ == '__main__':
    # Test case number
    t_number = int(input())
    validate_test_case_number(t_number)
    for i in range(0, t_number):
        # Read test string
        s = str(input())
        validate_string(s)
        even = ''
        odd = ''
        # Split the characters by index parity (0-based): even indices first.
        for j in range(0, len(s)):
            if j % 2 == 0:
                even += s[j]
            else:
                odd += s[j]
        print('%s %s' % (even, odd))
| true |
class Stack(list):
    """A list-backed stack that tracks its minimum in O(1) via a shadow stack."""

    def __init__(self):
        self.mins = []

    def push(self, i):
        """Push i; record it on the min-stack when it is a new (or tied) minimum."""
        if not self.mins or i <= self.mins[-1]:
            self.mins.append(i)
        self.append(i)

    def pop(self):
        """Pop and return the top element, keeping the min-stack in sync."""
        top = super().pop()
        if top == self.mins[-1]:
            self.mins.pop()
        return top

    def min(self):
        """Return the current minimum without removing anything."""
        return self.mins[-1]
if __name__ == "__main__":
    # Smoke test: the tracked minimum follows pushes and pops.
    s = Stack()
    s.push(2)
    s.push(4)
    assert(s.min() == 2)
    s.push(1)
    assert(s.min() == 1)
    s.pop()
    assert(s.min() == 2)
    print("success")
| true |
e916bf6b68968e21d84a72b11c7cbf6c7acaf435 | Python | mateusvarelo/Codewars | /Scripts/codewars13.1.py | UTF-8 | 258 | 2.859375 | 3 | [
"MIT"
def duplicate_encode(word):
    """Return `word` with each character replaced by '(' if it occurs exactly
    once in the word (case-insensitively), or ')' if it occurs more than once.
    """
    normalized = word.upper()
    # Count every character in a single pass: O(n) overall, instead of calling
    # word.upper().count(...) per character, which was O(n^2).
    counts = {}
    for ch in normalized:
        counts[ch] = counts.get(ch, 0) + 1
    return ''.join('(' if counts[ch] < 2 else ')' for ch in normalized)

print(duplicate_encode("wI(GzSG@)FOdyRQwau(a"))
541842fc4baf54604cb476df1f6978c567d28ed8 | Python | sametormanli/flashcards | /script.py | UTF-8 | 5,599 | 3.140625 | 3 | [] | no_license | from io import StringIO
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--import_from')
parser.add_argument('--export_to')
args = parser.parse_args()
memory_file = StringIO()
questions = {}
def write_log(string):
    # Seek to the end (read() leaves the cursor at EOF) before appending,
    # so earlier log content is never overwritten.
    memory_file.read()
    memory_file.write(string)
def print_log(string):
    # Echo to the console and mirror the text into the in-memory session log.
    print(string)
    write_log(string)
def create_card(no):
    """Interactively read a unique term and definition for card #no and store it.

    Stores the entry as [definition, error_count] so it matches the format
    used by add_card/ask/import_cards/export_cards in this module (the old
    version stored a bare string, which breaks ask()'s [0]/[1] accesses).
    """
    print(f'The term for card #{no}:')
    while True:
        term = input()
        if term in questions:
            print(f'The term "{term}" already exists. Try again!')
        else:
            break
    print(f'The definition for card #{no}:')
    while True:
        definition = input()
        # Compare against stored definitions (element 0 of each pair); comparing
        # against questions.values() directly would never match a [def, stats] list.
        if definition in (value[0] for value in questions.values()):
            print(f'The definition "{definition}" already exists. Try again!')
        else:
            break
    # [definition, error_count]
    questions[term] = [definition, 0]
def ask(question):
    """Quiz the user on one term; log the exchange and count wrong answers."""
    print_log(f'Print the definition of "{question}":')
    answer = input()
    write_log(answer)
    if answer == questions[question][0]:
        print_log('Correct!')
    else:
        # questions[question] is [definition, error_count]
        questions[question][1] += 1
        # The answer may still be the correct definition of a different term.
        if answer in [val[0] for val in list(questions.values())]:
            key = list(questions.keys())[[val[0] for val in list(questions.values())].index(answer)]
            print_log(f'Wrong. The right answer is "{questions[question][0]}", '
                      f'but your definition is correct for "{key}".')
        else:
            print_log(f'Wrong. The right answer is "{questions[question][0]}".')
def add_card():
    """Interactively add a new card, rejecting duplicate terms and definitions."""
    print_log('The card:')
    while True:
        term = input()
        write_log(term)
        if term in questions:
            print_log(f'The card "{term}" already exists.')
        else:
            break
    print_log('The definition of the card:')
    while True:
        definition = input()
        write_log(definition)
        # Compare against stored definitions (element 0 of each [definition, stats] pair).
        if definition in (questions[key][0] for key in questions):
            print_log(f'The definition "{definition}" already exists.')
        else:
            break
    # [definition, error_count]
    questions[term] = [definition, 0]
    print_log(f'The pair ("{term}":"{definition}" has been added.')
def remove_card():
    """Prompt for a card name and delete it from `questions` if present."""
    print_log('Which card?')
    card = input()
    write_log(card)
    if card in questions:
        del questions[card]
        print_log('The card has been removed.')
    else:
        print_log(f'Can\'t remove "{card}": there is no such card.')
def import_cards(exp=None):
    """Load cards from a file with one `term definition stats` triple per line.

    :param exp: optional filename (e.g. from --import_from); when None the
                user is prompted interactively.
    """
    if exp is None:
        print_log('File name:')
        filename = input()
        write_log(filename)
    else:
        filename = exp
    try:
        with open(filename) as file:
            lines = 0
            for line in file:
                # NOTE(review): split() breaks on any whitespace, so terms or
                # definitions containing spaces will not round-trip -- confirm.
                term, definition, stats = line.split()
                questions[term] = [definition, int(stats)]
                lines += 1
            print_log(f'{lines} cards have been loaded.')
    except FileNotFoundError:
        print_log('File not found.')
def export_cards(exp=None):
    """Write every card as a `term definition stats` line to a file.

    :param exp: optional filename (e.g. from --export_to); when None the user
                is prompted interactively.
    """
    if exp is None:
        print_log('File name:')
        filename = input()
        write_log(filename)
    else:
        filename = exp
    with open(filename, 'w') as file:
        lines = 0
        for key, value in questions.items():
            file.write(f'{key} {value[0]} {value[1]}\n')
            lines += 1
        print_log(f'{lines} cards have been saved.')
def main():
    """Run the interactive flashcards menu loop until the user types 'exit'."""
    # Optional pre-load from the --import_from command line argument.
    if args.import_from:
        import_cards(exp=args.import_from)
    while True:
        print_log('\nSelect the action (add, remove, import, export, ask, exit, log, hardest card, reset stats):')
        entry = input()
        write_log(entry)
        if entry == 'add':
            add_card()
        elif entry == 'remove':
            remove_card()
        elif entry == 'import':
            import_cards()
        elif entry == 'export':
            export_cards()
        elif entry == 'ask':
            print_log('How many times to ask?')
            keys = tuple(questions.keys())
            times = int(input())
            write_log(str(times))
            # Cycle through the cards round-robin when times > number of cards.
            for i in range(times):
                question = keys[i % len(keys)]
                ask(question)
        elif entry == 'exit':
            # Optional auto-export from the --export_to command line argument.
            if args.export_to:
                export_cards(exp=args.export_to)
            print_log('Bye!')
            break
        elif entry == 'log':
            print_log('File name:')
            filename = input()
            write_log(filename)
            with open(filename, 'w') as log:
                # Writes the session log character by character (a single
                # write of getvalue() would also work).
                for line in memory_file.getvalue():
                    log.write(line)
            print_log('The log has been saved.')
        elif entry == 'hardest card':
            # max() raises ValueError on an empty dict; treat that as "no errors".
            try:
                maximum = max(questions[key][1] for key in questions)
            except ValueError:
                maximum = 0
            if maximum == 0:
                print_log('There are no cards with errors.')
            else:
                hardest = []
                for key, value in questions.items():
                    if value[1] == maximum:
                        hardest.append('"' + key + '"')
                if len(hardest) == 1:
                    print_log(f'The hardest card is {hardest[0]}. You have {maximum} errors answering it.')
                else:
                    print_log(f'The hardest cards are {", ".join(hardest)}. You have {maximum} errors answering it.')
        elif entry == 'reset stats':
            for key in questions:
                questions[key][1] = 0
            print_log('Card statistics have been reset.')
        else:
            print_log('Invalid entry.')
main()
| true |
92446b1f7d6bf4c0150d636fa5b17c1006d882b0 | Python | murawskikrzysiek/kurs_python | /notes_2c.py | UTF-8 | 5,862 | 3 | 3 | [] | no_license | import copy
# 1 ####################################################################################
class AutoSetterMetaclass(type):
    # Metaclass that auto-generates a set<name>() method for every
    # name-mangled "private" class attribute (_ClassName__name).
    def __new__(cls, name, bases, classdict):
        d = {}
        for attr in classdict:
            if attr.startswith("_"+name+"__"):
                # attr=attr freezes the current loop value in the closure
                # (avoids the classic late-binding pitfall).
                def setter(self, value, attr=attr):
                    return setattr(self, attr, value)
                # Strip the "_<ClassName>__" mangling prefix (len(name) + 3 chars).
                settername = "set" + attr[3+len(name):]
                setter.__name__ = settername
                d[settername] = setter
            d[attr] = classdict[attr]
        return super(AutoSetterMetaclass, cls).__new__(cls, name, bases, d)
class Nowa(metaclass=AutoSetterMetaclass):
__x = 1
def __init__(self):
pass
# 2 ####################################################################################
class Property2:
    """Hand-rolled (simplified) re-implementation of the built-in `property` descriptor."""
    def __init__(self, getter=None, setter=None, deleter=None, doc=None):
        self.getter = getter
        self.setter = setter
        self.deleter = deleter
        self.__doc__ = doc
    def __get__(self, obj, objtype=None):
        # NOTE(review): this branch is a no-op; a faithful property would raise
        # AttributeError when no getter is set, instead of failing below with
        # "'NoneType' object is not callable".
        if self.getter is None:
            pass
        return self.getter(obj)
    def __set__(self, obj, value):
        return self.setter(obj, value)
    def __delete__(self, obj):
        return self.deleter(obj)
class Dummy(object):
def __init__(self, val):
self.__x = val
def getx(self):
return self.__x
def setx(self, val):
self.__x = val * 10
print("x is read only")
x = Property2(getx, setx)
# 3 ####################################################################################
class Counter(object):
    # Decorator class: wraps a callable and counts how many times it is invoked.
    def __init__(self, f):
        self.f = f
        self._counter = 0
    def __call__(self, *args, **kwargs):
        # Bump and display the running call count, then delegate to the wrapped function.
        self._counter += 1
        print(self._counter)
        return self.f(*args, **kwargs)
@Counter
def test_f():
print("W funkcji")
# 4 ####################################################################################
class UpperCaseMetaclass(type):
def __new__(cls, name, bases, classdict):
d = {}
for attr in classdict:
if not attr.startswith("__") and not attr.endswith("__"):
d[attr.upper()] = classdict[attr]
else:
d[attr] = classdict[attr]
return super(UpperCaseMetaclass, cls).__new__(cls, name, bases, d)
class UpperTest(metaclass=UpperCaseMetaclass):
x = 2
ala = 21
def __init__(self):
pass
def dupa(self):
print("jestem dupa")
# __metaclass__ = UpperCaseMetaclass # Only in Py2
# 5 ####################################################################################
class RunTimer(object):
def __init__(self, f):
self.f = f
self.initial_defaults = self.f.__defaults__
def __call__(self, *args, **kwargs):
self.f.__defaults__ = copy.deepcopy(self.initial_defaults)
return self.f(*args, **kwargs)
@RunTimer
def test_func_5(l=[]):
l.append(1)
return l
# 6 ####################################################################################
class InterfaceCheckerMetaClass(type):
def __init__(cls, name, bases, classdict):
interfaces = classdict["__interfaces__"]
attrs = {}
for interface in interfaces:
attrs.update({k: v for k, v in interface.__dict__.items() if not k.startswith("__")})
for attr, attr_object in attrs.items():
if attr not in classdict and callable(attr_object):
raise NotImplementedError("{} not implemented".format(attr))
super(InterfaceCheckerMetaClass, cls).__init__(name, bases, classdict)
class BaseA:
def z_base_a(self):
pass
a = 1
class BaseB:
def z_base_b(self):
pass
class InterfaceClass(metaclass=InterfaceCheckerMetaClass):
__interfaces__ = [BaseA, BaseB]
def __init__(self):
pass
def z_base_a(self):
pass
def z_base_b(self):
pass
# 7 ####################################################################################
class SingleTonMetaClass(type):
instances = {}
def __call__(cls, *args, **kwargs):
print(cls.instances)
if cls not in cls.instances:
clsobj = super(SingleTonMetaClass, cls).__call__(*args, **kwargs)
cls.instances[cls] = clsobj
return cls.instances[cls]
class SingleTon:
instance = None
def __new__(cls, *args, **kwargs):
if cls.instance is None:
return super(SingleTon, cls).__new__(cls, *args, **kwargs)
else:
return SingleTon.instance
def __init__(self):
if SingleTon.instance is None:
SingleTon.instance = self
class Dziedziczaca(SingleTon):
def __init__(self):
super(SingleTon, self).__init__(self)
self.k = "AKUKU"
class Jakas(metaclass=SingleTonMetaClass):
def __init__(self, val):
self.val = val
class Jakas2(metaclass=SingleTonMetaClass):
def __init__(self, val):
self.val = val
if __name__ == '__main__':
# 7
# A = SingleTon()
# A.a = 4
# print(A.a)
# B = SingleTon()
# B.a = 5
# print(A.a)
# print(B.a)
# print(A)
# print(B)
# print(A is B)
# KK = Dziedziczaca()
# print(KK is A)
A = Jakas(7)
B = Jakas(8)
print(A.val)
print(B.val)
print(A is B)
C = Jakas2(10)
D = Jakas2(10)
print(C is A)
# 6
# A = InterfaceClass()
# 5
# print(test_func_5())
# print(test_func_5())
# print(test_func_5())
# 4
# U = UpperTest()
# print(dir(U))
# print(U.ALA)
# U.DUPA()
# 3
# test_f()
# test_f()
# print(test_f._counter)
# 2
# a = Dummy(1)
# print(a.x)
# a.x = 2
# print(a.x)
# 1
# N = Nowa()
# N.setx(2)
# print(N._Nowa__x) | true |
a339ff0a4e9930a48b0f63f5e17e1bfa7352ff06 | Python | pingjuiliao/crawler | /301_readers.py | UTF-8 | 1,679 | 2.96875 | 3 | [] | no_license |
## TXT
from urllib.request import urlopen
from bs4 import BeautifulSoup
textPage = urlopen("http://www.pythonscraping.com/pages/warandpeace/chapter1.txt")
print(textPage.read())
textPage2 = urlopen("http://www.pythonscraping.com/pages/warandpeace/chapter1-ru.txt")
print(textPage2.read(), 'utf-8')
html = urlopen("http://en.wikipedia.org/wiki/Python_(programming_language)")
bsObj = BeautifulSoup(html)
content = bsObj.find("div", {"id": "mw-content-text"}).get_text()
content = bytes(content, "UTF-8")
content = content.decode("UTF-8")
## CSV
from io import StringIO
import csv
data = urlopen("http://pythonscraping.com/files/MontyPythonAlbums.csv").read().decode('ascii', 'ignore')
dataFile = StringIO(data)
csvReader = csv.reader(dataFile)
## Alternative : csv.DictReader
print("HERE COMES THE CSV OUTPUT !!!!!!!!!!!!!!!!!!")
for row in csvReader:
# print(row)
print("The album \"" + row[0] + "\" was released in " + str(row[1]))
## PDF
from pdfminer.pdfinterp import PDFResourceManager, process_pdf
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from io import StringIO
from io import open
def readPDF(pdfFile) :
rsrcmgr = PDFResourceManager()
retstr = StringIO()
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, laparams= laparams)
process_pdf(rsrcmgr, device, pdfFile)
device.close()
content = retstr.getvalue()
retstr.close()
return content
print("HERE COMES THE PDF OUTPUT !!!!!!!!!!!!!!!!!!")
pdfFile = urlopen("http://pythonscraping.com/pages/warandpeace/chapter1.pdf")
outputString = readPDF(pdfFile)
print(outputString)
pdfFile.close()
| true |
0755ca3f5b14af3b62e1e9e2e401fcd930c0ceb4 | Python | janapellegrino/curso-python2 | /quarto_dia_2.py | UTF-8 | 319 | 3.421875 | 3 | [] | no_license | with open('frutas.txt', 'r') as arquivo:
    conteudo = arquivo.readlines()
# Strip the trailing newline from every fruit name.
aux = []
for x in conteudo:
    x = x.replace('\n', '')
    aux.append(x)
print(aux)
aux.append('Pessego')
# Write the list (plus the new fruit) title-cased to a second file.
with open('frutas2.txt', 'w') as arquivo:
    for item in aux:
        arquivo.write('Fruta: {}\n'.format(item.title()))
f28b47e1b07011ce9d0708331f68d7f16195c567 | Python | JohnStokes228/home_scraping_project | /HousingPriceScraper/HousingPriceScraper/functions/menus.py | UTF-8 | 14,331 | 3.1875 | 3 | [] | no_license | """
contains generic code for use in main menus. currently this is a function which turns dictionaries of functions
into a menu. I envision any further menu functions being stored here so don't expect it to run like a pipeline but
rather like a suite of individual menus.
TODO - refactor spider selection function as jesus christ that things fat
- incorporate spider selector in config manager options
"""
import re
import os
import json
from datetime import date, datetime
from collections import defaultdict
import pandas as pd
from HousingPriceScraper.HousingPriceScraper.functions.basic_functions import return_false, end_process, \
alphabet_list_length, flatten_list_of_lists
from HousingPriceScraper.HousingPriceScraper.functions.data_management import save_list_to_txt
def final_option(dict_of_options, back):
    """
    append the terminating entry to a menu's option dictionary
    :param dict_of_options: dict mapping readable labels to zero-argument callables
    :param back: boolean - True adds 'back' -> return_false, otherwise adds
    'end_process' -> end_process
    :return: the same dictionary, mutated in place with the final entry added
    """
    label, action = ('back', return_false) if back else ('end_process', end_process)
    dict_of_options[label] = action
    return dict_of_options
def basic_menu(dict_of_options, back=False):
    """
    basic text based user interface, allows user to select a function to run from a dictionary of options, by using a
    simple numeric code. User will see this dictionaries keys enumerated on screen to choose from.
    :param dict_of_options: dictionary with readable labels as keys and uncalled functions as values. It is important
    that these functions don't require parameters.
    :param back: boolean. choose between scrolling back to previous menu or ending the process entirely. defaults to
    ending process since that's how the main menu does it and unsure of where else function will be called
    :return: run the chosen function
    """
    choose = True
    # Append the terminating entry ('back' or 'end_process') before displaying.
    dict_of_options = final_option(dict_of_options, back)
    list_of_options = list(dict_of_options.keys())
    while choose:
        print('The following options are available:\n')
        for option in enumerate(list_of_options):
            print('\t{} - {}'.format(option[0], option[1]))
        pick = input('\nType the numeric code you wish to run\n\n')
        if pick in [str(i) for i in range((len(dict_of_options)))]:
            # The chosen callable's return value decides whether the menu loops
            # again (truthy) or exits (falsy, e.g. return_false for 'back').
            choose = dict_of_options[list_of_options[int(pick)]]()
        else:
            print('{} is not currently an option!\n'.format(pick))
def basic_menu_non_functional(list_of_options):
    """
    basic text based user interface, allows user to select multiple options from a list of available choices.
    :param list_of_options: list of available choices (mutated: 'back' is appended)
    :return: True when the user picks 'back', otherwise a list of chosen strings
    """
    choose = True
    list_of_options.append('back')
    while choose:
        print('The following options are available:\n')
        for option in enumerate(list_of_options):
            print('\t{} - {}'.format(option[0], option[1]))
        picks = input('\nType the numeric codes you wish to run\n\n').split(',')
        choice = []
        # 'back' is displayed with index len - 1. The old check compared against
        # str(len), which is never a displayed code, so picking the shown 'back'
        # code returned ['back'] as an ordinary choice instead of exiting.
        if str(len(list_of_options) - 1) in picks:
            return True
        for pick in picks:
            if pick in [str(i) for i in range((len(list_of_options)))]:
                choice.append(list_of_options[int(pick)])
            else:
                print('{} is not currently an option!\n'.format(pick))
        if len(choice) > 0:
            return choice
def select_spiders(spiders_dict):
    """
    select from spiders available. allows user to select all spiders, select all spiders within a
    project group, select some comma separated list of individual/groups of spiders, or by prefixing a
    given selection with "-", the user can remove a spider from his or her selection.

    Groups are addressed by bare number ("0"); individual spiders by a
    number+letter code ("0a" is the first spider of group 0).

    :param spiders_dict: dictionary who's keys are broad options and values are lists of spiders
    :return: list containing the spiders the user has selected to run, or False if the
        user backed out or ended up with an empty selection
    """
    print('Available spiders include:\n')
    enumerated_keys = list(enumerate(spiders_dict.keys()))
    for key_group in enumerated_keys:
        print('{} - {}'.format(key_group[0], key_group[1]))
        # NOTE(review): the alphabet list is sized by the length of the group
        # NAME (key_group[1] is a string), not the number of spiders in the
        # group; zip truncates to the shorter sequence, so a group with more
        # spiders than characters in its name would not list them all.
        # Likely intended: len(spiders_dict[key_group[1]]) — confirm.
        for spider in zip(alphabet_list_length(len(key_group[1])), spiders_dict[key_group[1]]):
            print('\t{}{} - {}'.format(key_group[0], spider[0], spider[1].name))
    # Two extra synthetic codes after the groups: "run all" and "back".
    print('{} - run all'.format(len(spiders_dict.keys())))
    print('{} - back'.format(len(spiders_dict.keys())+1))
    choices = input('\nfor multiple, comma separate. To remove, use "-" prefix\ni.e.: 0,-0a to run all of group 0 except the first\n').replace(' ', '').split(',')
    if str(len(spiders_dict.keys())+1) in choices:
        return False
    if str(len(spiders_dict.keys())) in choices:
        # "run all": start from every group's spider list (flattened below).
        chosen_spiders = list(spiders_dict.values())
    else:
        chosen_spiders = []
        for choice in choices:
            if choice.isdigit():
                # Bare number selects a whole group.
                if choice in [str(i[0]) for i in enumerated_keys]:
                    chosen_spiders.append(spiders_dict[enumerated_keys[int(choice)][1]])
                else:
                    print('{} is not an option!'.format(choice))
            elif '-' not in choice:
                # Number+letter selects a single spider within a group.
                numeric = re.findall(r'\d+', choice)
                if len(numeric) == 1:
                    alpha = choice.split(numeric[0])[1]
                    # Convert the letter suffix back to a 0-based list index.
                    alpha = len(alphabet_list_length(0, index=alpha))-1
                    try:
                        chosen_spiders.append(spiders_dict[enumerated_keys[int(numeric[0])][1]][alpha])
                    except IndexError:
                        print('{} is not an option!'.format(choice))
                else:
                    print('{} is not an option!'.format(choice))
    # Flatten group selections into a de-duplicated flat spider list.
    if any(isinstance(el, list) for el in chosen_spiders):
        chosen_spiders = flatten_list_of_lists(chosen_spiders, make_set=True)
    else:
        chosen_spiders = list(set(chosen_spiders))
    # "-"-prefixed codes remove a group or single spider from the selection.
    to_remove = [choice for choice in choices if '-' in choice]
    if len(to_remove) > 0:
        for removee in to_remove:
            if removee.replace('-', '').isdigit():
                # Remove an entire group.
                # NOTE(review): list.remove raises ValueError if a spider of
                # the removed group wasn't in the current selection — confirm
                # callers only remove from selected groups.
                if removee.replace('-', '') in [str(i[0]) for i in enumerated_keys]:
                    for spider in spiders_dict[enumerated_keys[int(removee.replace('-', ''))][1]]:
                        chosen_spiders.remove(spider)
                else:
                    print('{} is not an option!'.format(removee))
            else:
                # Remove a single spider addressed by number+letter.
                numeric = re.findall(r'\d+', removee)
                if len(numeric) == 1:
                    alpha = removee.split(numeric[0])[1]
                    alpha = len(alphabet_list_length(0, index=alpha)) - 1
                    try:
                        chosen_spiders.remove(spiders_dict[enumerated_keys[int(numeric[0])][1]][alpha])
                    except IndexError:
                        print('{} is not an option!'.format(removee))
            else:
                    print('{} is not an option!'.format(removee))
    if len(chosen_spiders) > 0:
        return chosen_spiders
    else:
        print("You haven't selected any spiders!")
        return False
def project_visibility_menu():
    """
    Menu that lets the user choose which project groups are visible in the
    run_scrapers menu.

    :return: True; as a side effect writes the chosen project names (one per
        row) to configs/visible_projects_to_scrape.txt
    """
    group_files = os.listdir('HousingPriceScraper/HousingPriceScraper/spiders/SpiderGroups')[:-1]
    projects = [filename.split('.')[0] for filename in group_files]
    print('Available projects are:\n')
    for idx, project_name in enumerate(projects):
        print('\t{} - {}'.format(idx, project_name))
    print('\t{} - back'.format(len(projects)))
    choices = input('\nType the options you wish to select.\nFor multiple, comma separate\n\n').split(',')
    # The code one past the last project is "back": bail out without writing.
    if str(len(projects)) in choices:
        return True
    choice_list = [projects[int(c)] for c in choices
                   if c.isdigit() and int(c) in range(len(projects))]
    print('You have selected to display the following spider groupings:\n\t{}\n'.format(choice_list))
    save_list_to_txt(choice_list, 'HousingPriceScraper/HousingPriceScraper/configs/visible_projects_to_scrape.txt')
    return True
def set_config():
    """
    Menu to set the url configs.

    Lets the user pick one or more named url-config JSON files, merges them
    (list values under shared keys are concatenated and de-duplicated), fills
    in any keys the selection didn't cover from defaults.json, and writes the
    result to configs/chosen_urls.json for the spiders' start_urls.

    :return: True once the config is written (or the user backs out)
    """
    descriptions_path = 'HousingPriceScraper/HousingPriceScraper/configs/input_url_config_descriptions.txt'
    # Use a context manager: the original opened this handle and never closed it.
    with open(descriptions_path, 'r') as available_configs:
        options = available_configs.readlines()
    options_dict = {}
    print('available configs include:\n')
    for option in enumerate(options):
        options_dict[option[0]] = option[1].split(':')[0]
        print('\t{} - {}'.format(option[0], option[1].replace('\n', '')))
    print('\t{} - back'.format(len(options)))
    chosen = input('\ncomma separate for multiple\n').split(',')
    if (str(len(options)) in chosen) or (chosen == ['']):
        return True
    configs = []
    for choice in chosen:
        # Guard non-numeric input: the original's bare int(choice) raised
        # ValueError on anything like "x" or "1a".
        if not choice.isdigit():
            print('{} is not currently an option!\n'.format(choice))
            continue
        if int(choice) in options_dict:
            with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/{}.json'.format(options_dict[int(choice)])) as f:
                configs.append(json.load(f))
    # Merge selected configs: lists under the same key are concatenated.
    final_config = defaultdict(list)
    for config in configs:
        for key, value in config.items():
            if key in final_config:
                final_config[key] += value
            else:
                final_config[key] = value
    # Flatten nested lists produced by the merge and drop duplicates.
    for key, value in final_config.items():
        if any(isinstance(val, list) for val in value):
            final_config[key] = flatten_list_of_lists(value, make_set=True)
    # Back-fill any keys the user's selection didn't provide from defaults.
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json') as default_urls_json:
        default_dict = json.load(default_urls_json)
    for key, value in default_dict.items():
        if key not in final_config.keys():
            final_config[key] = value
    with open('HousingPriceScraper/HousingPriceScraper/configs/chosen_urls.json', 'w') as fp:
        json.dump(final_config, fp, sort_keys=True, indent=4)
    return True
def append_recent_urls():
    """
    Append recently scraped urls onto the default urls config.

    :return: None; defaults.json on disk is updated in place
    """
    defaults_path = 'HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json'
    recents_path = 'HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json'
    with open(defaults_path) as default_urls_json:
        default_dict = json.load(default_urls_json)
    with open(recents_path) as recent_urls_json:
        recent_dict = json.load(recent_urls_json)
    # Extend existing url lists; create the key with an empty list if new.
    for key in recent_dict:
        default_dict.setdefault(key, []).extend(recent_dict[key])
    with open(defaults_path, 'w') as fp:
        json.dump(default_dict, fp, sort_keys=True, indent=4)
def replace_default_urls():
    """
    Overwrite entries in the default urls config with recently scraped urls.

    :return: None; defaults.json on disk is updated in place
    """
    defaults_path = 'HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json'
    recents_path = 'HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json'
    with open(defaults_path) as default_urls_json:
        default_dict = json.load(default_urls_json)
    with open(recents_path) as recent_urls_json:
        recent_dict = json.load(recent_urls_json)
    # dict.update performs the same key-by-key replacement as an explicit loop.
    default_dict.update(recent_dict)
    with open(defaults_path, 'w') as fp:
        json.dump(default_dict, fp, sort_keys=True, indent=4)
def create_new_config():
    """
    Save the recently scraped urls as a brand-new named config file and
    register it in the config-descriptions index.

    :return: None; writes <name>.json and appends a "name: description" row
        to input_url_config_descriptions.txt
    """
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json') as recent_urls_json:
        urls_dict = json.load(recent_urls_json)
    raw_name = input('Type a name for the new config file:\n')
    # Sanitize: spaces break filenames, ':' breaks the descriptions-file format.
    config_name = raw_name.replace(' ', '_').replace(':', '')
    config_desc = input('Type a brief description for the new config file:\n')
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/{}.json'.format(config_name), 'w') as fp:
        json.dump(urls_dict, fp, sort_keys=True, indent=4)
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_url_config_descriptions.txt', 'a') as input_descs:
        input_descs.write('\n{}: {}'.format(config_name, config_desc))
    print('\nSuccessfully saved recently scraped urls to new config: {}.json'.format(config_name))
def clear_recent_urls():
    """
    Bleach the recent-urls config so the next scrape starts fresh.

    :return: None; every key in recent_urls.json is reset to an empty list
    """
    recents_path = 'HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json'
    with open(recents_path) as recent_urls_json:
        recent_dict = json.load(recent_urls_json)
    # Keep the keys, drop all accumulated urls.
    emptied = {key: [] for key in recent_dict}
    with open(recents_path, 'w') as fp:
        json.dump(emptied, fp, sort_keys=True, indent=4)
def select_date_interval_menu():
    """
    Prompt the user for a start and end date and expand them into a list of
    dates covering the interval (inclusive, daily frequency).

    :return: list of date strings formatted as ddmmyyyy
    """
    while True:
        raw_start = input('\nInput desired start date with format dd-mm-yyyy:\n')
        try:
            start_date = datetime.strptime(raw_start, '%d-%m-%Y')
            break
        except ValueError:
            print('invalid start date selected')
    while True:
        # BUG FIX: this prompt previously said "start date" — it asks for the
        # END date of the interval.
        raw_end = input('\nInput desired end date with format dd-mm-yyyy,\nor hit enter to select todays date\n')
        if raw_end == '':
            end_date = date.today()
            break
        try:
            end_date = datetime.strptime(raw_end, '%d-%m-%Y')
            break
        except ValueError:
            print('invalid end date selected')
    # Note: if start_date is after end_date, pd.date_range yields no dates
    # and an empty list is returned.
    list_of_dates = pd.date_range(start_date, end_date, freq='d')
    return [day.strftime('%d%m%Y') for day in list_of_dates]
| true |