blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0fffd5437bcb94e1b7face790e2e857800a54b90 | Python | Lucas08Ben/Aprendizado-de-Maquina | /Lista 6 - Dados faltantes/ml_libs/transform.py | UTF-8 | 496 | 2.96875 | 3 | [] | no_license | import pandas as pd
import numpy as np
class Normalize:
    """Min-max scaler: maps the fitted data range linearly onto [0, 1]."""

    # Extrema learned by fit(); None until fit() has been called.
    X_min = None
    X_max = None

    def fit(self, X):
        """Record the minimum and maximum of X for later scaling."""
        self.X_min = X.min()
        self.X_max = X.max()

    def transform(self, X):
        """Scale X so the fitted minimum maps to 0 and the maximum to 1."""
        span = self.X_max - self.X_min
        return (X - self.X_min) / span
class Standardize:
    """Z-score scaler: centers on the fitted mean, divides by the std."""

    # Statistics learned by fit(); None until fit() has been called.
    X_avg = None
    X_std = None

    def fit(self, X):
        """Record the mean and standard deviation of X."""
        self.X_avg = X.mean()
        self.X_std = X.std()

    def transform(self, X):
        """Return X centered on the fitted mean, in units of the fitted std."""
        centered = X - self.X_avg
        return centered / self.X_std
| true |
b9830c64d8364d4156f2f0693f85dd51d54964cd | Python | hackersky109/SDN_FinalProject | /final.py | UTF-8 | 2,693 | 2.890625 | 3 | [] | no_license | """Custom topology example
Two directly connected switches plus a host for each switch:
host --- switch --- switch --- host
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from mininet.node import RemoteController
from mininet.cli import CLI
import os
class MyTopo( Topo ):
    """Single-switch topology: hosts h1/h2 and two servers all on s1."""

    def __init__( self ):
        """Build the topology: four DHCP-addressed hosts wired to one switch."""
        # Initialize the parent topology first.
        Topo.__init__( self )

        # Static-address variant kept from the original, disabled:
        # self.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
        # self.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
        # self.addHost('server1', ip='10.0.0.3', mac='00:00:00:00:00:03')
        # self.addHost('server2', ip='10.0.0.4', mac='00:00:00:00:00:04')

        # ip='0.0.0.0' leaves the real address to dhclient later on.
        host_names = ('h1', 'h2', 'server1', 'server2')
        for name in host_names:
            self.addHost(name, ip='0.0.0.0')

        # One switch, with every host attached to it.
        self.addSwitch('s1')
        for name in host_names:
            self.addLink('s1', name)
def perfTest() :
    "Create network and run simple performance test"
    # Build the custom single-switch topology and hand it to Mininet;
    # controller=None so the remote Ryu controller added below is the
    # only one.
    topo = MyTopo()
    net = Mininet(topo=topo, link=TCLink, controller=None)
    #net.addController("ryu", controller=RemoteController, ip="192.168.2.48")
    net.addController("ryu", controller=RemoteController, ip="127.0.0.1")
    net.start()
    '''
    print "-----Dumping host connections-----"
    dumpNodeConnections(net.hosts)
    print "-----Testing pingFull-----"
    net.pingFull()
    h1, h2, h16 = net.get('h1', 'h2', 'h16')
    print "-----Setting iperf server with h1-----"
    h1.cmdPrint('iperf -dds -u -i 1 &')
    print "-----Setting iperf server with h16-----"
    h16.cmdPrint('iperf -s -u -i 1 &')
    print "-----h2 connect to h1-----"
    h2.cmdPrint('iperf -c'+h1.IP()+' -u -t 10 -i 1 -b 100m')
    print "-----h2 connect to h16-----"
    h2.cmdPrint('iperf -c'+h16.IP()+' -u -t 10 -i 1 -b 100m')
    '''
    # Bridge the physical interface into the switch so the servers can
    # reach an external DHCP server, then request leases for both.
    # NOTE(review): assumes enp0s8 exists on the host and that dhclient
    # is available -- confirm on the target VM.
    server1, server2= net.get('server1', 'server2')
    os.popen('ovs-vsctl add-port s1 enp0s8')
    server1.cmdPrint('dhclient '+server1.defaultIntf().name)
    server2.cmdPrint('dhclient '+server2.defaultIntf().name)
    # Drop into the interactive mininet CLI; tear everything down on exit.
    CLI(net)
    net.stop()
# Run the interactive test when executed directly.
if __name__ == '__main__':
    setLogLevel('info')
    perfTest()
# Lets mininet's '--topo=mytopo' command-line flag find this topology.
topos = { 'mytopo': ( lambda: MyTopo() ) }
| true |
45d3f35dd8ce525568520b884c3f7dce7ecc2316 | Python | benhynes/COVID-ML-Prediction-Model | /models/FC_model.py | UTF-8 | 2,286 | 2.828125 | 3 | [] | no_license | from keras.layers import Input, Dense, BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
import numpy as np
import os
class FC_Model():
    """Small two-hidden-layer fully connected regression net (Keras)."""

    def __init__(self, input_shape, output_shape, lr = 0.0002):
        """Store hyper-parameters and build the compiled Keras model."""
        self.lr = lr
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.model = self.__get_model()

    def __get_model(self):
        """Assemble and compile Input -> Dense(12) -> Dense(12) -> Dense(out)."""
        inputs = Input(shape=self.input_shape)
        dense = Dense(12, activation='relu')(inputs)
        dense = Dense(12, activation='relu')(dense)
        outputs = Dense(self.output_shape)(dense)
        net = Model(inputs, outputs)
        net.compile(loss='mse', metrics=['mae'], optimizer=Adam(lr=self.lr))
        return net

    def predict(self, x):
        """Predict for one sample (1-D input) or a batch (2-D input)."""
        if len(x.shape) == 1:
            # Promote a lone sample to a batch of one.
            x = np.reshape(x, (1, self.input_shape))
        prediction = self.model.predict(x)
        if prediction.shape[0] == 1:
            # Collapse a batch-of-one back to a flat output vector.
            prediction = np.reshape(prediction, self.output_shape)
        return prediction

    def fit(self, x, y):
        """Run one keras fit pass over (x, y)."""
        self.model.fit(x, y)

    def train_on_batch(self, x_batch, y_batch):
        """Single gradient update on one minibatch; returns keras metrics."""
        return self.model.train_on_batch(x_batch, y_batch)

    def save_weights(self, path = "trained_models/FC.h5"):
        """Persist weights (always ensures the 'trained_models' directory)."""
        os.makedirs('trained_models', exist_ok=True)
        self.model.save_weights(path)

    def load_weights(self, path = "trained_models/FC.h5"):
        """Restore previously saved weights from *path*."""
        self.model.load_weights(path)
class FC_Data_Formatter():
    """Builds sliding-window training samples and robust-scaled features."""

    def __init__(self, input_shape, output_shape):
        self.input_shape = input_shape
        self.output_shape = output_shape

    def robust_normalize(self, x, x_median, q1, q3):
        """Center on the median and scale by the interquartile range."""
        return (x - x_median) / (q3 - q1)

    def robust_denormalize(self, x, x_median, q1, q3):
        """Invert robust_normalize."""
        return x * (q3 - q1) + x_median

    def sampling(self, x):
        """Draw one random window of length input_shape plus its next value."""
        start = np.random.randint(len(x) - self.input_shape - 1)
        stop = start + self.input_shape
        return x[start:stop], x[stop]

    def get_minibatch(self, dataset, batch_size):
        """Return (x, y) arrays of shapes (batch, input) and (batch, output)."""
        windows = []
        targets = []
        for _ in range(batch_size):
            window, target = self.sampling(dataset)
            windows.append(window)
            targets.append(target)
        x_batch = np.reshape(np.asarray(windows), (batch_size, self.input_shape))
        y_batch = np.reshape(np.asarray(targets), (batch_size, self.output_shape))
        return x_batch, y_batch
c23f11053efdda3ccb2c9df673a48711b00351d8 | Python | bompi88/aiprog | /src/algorithms/adversial_search/expectimax_c.py | UTF-8 | 1,579 | 2.84375 | 3 | [] | no_license | import ctypes
import src.clibs
class ExpectimaxC(object):
    """ctypes wrapper around the native expectimax shared library.

    Loads expectimax_lib.so from the src.clibs package directory and
    delegates 2048 move selection to the C `decision` routine.
    """

    def __init__(self, depth, heuristic=0):
        # Path of the compiled library inside the src.clibs package.
        self.path = src.clibs.__path__[0] + '/expectimax_lib.so'
        self.search = ctypes.CDLL(self.path)
        self.depth = depth
        self.heuristic = heuristic
        # Heuristic weights forwarded verbatim to the C evaluator.
        self.smoothness_constant = 0.2
        self.max_tile_constant = 0.9
        self.free_tiles_constant = 2.3
        self.max_placement_constant = 1.0
        self.monotonicity_constant = 1.9

    def decision(self, board):
        """Pick a move for *board* (expects a 16-cell `board.board` list).

        NOTE(review): the argument order/types must match the C
        `decision` signature exactly (depth, 16 ints, 5 doubles,
        heuristic id) -- confirm against expectimax_lib's source.
        """
        b = board.board
        result = self.search.decision(
            self.depth, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], b[8],
            b[9], b[10], b[11], b[12], b[13], b[14], b[15],
            ctypes.c_double(self.smoothness_constant),
            ctypes.c_double(self.max_tile_constant),
            ctypes.c_double(self.free_tiles_constant),
            ctypes.c_double(self.max_placement_constant),
            ctypes.c_double(self.monotonicity_constant),
            self.heuristic
        )
        # The C result is an index into the move order used by the library.
        mapping = ['left', 'up', 'right', 'down']
        move = board.possible_moves[mapping[result]]
        return move
def main():
    """Ad-hoc smoke test: run the C expectimax repeatedly on a fixed board."""
    from src.puzzles.play_2048.play_2048_state import Play2048State

    state = Play2048State()
    state.board = [0, 0, 0, 1, 2, 3, 4, 0, 3, 2, 1, 0, 1, 2, 3, 4]
    searcher = ExpectimaxC(3)
    expected = [0, 1]
    for _ in range(20):
        if searcher.decision(state) != expected:
            print('Something broke!')


if __name__ == '__main__':
    main()
| true |
45c55bc2dc0b37e698f78e8534602ca644b62836 | Python | marcusfreire0504/datascience_course | /03-machine-learning-tabular-crossection/06 - Clustering/01/solutions/solution_02.py | UTF-8 | 309 | 2.546875 | 3 | [
"MIT"
] | permissive | wcss = []
# Sweep k = 1..10, recording the within-cluster sum of squares (inertia)
# for each fit; the "elbow" of the resulting curve suggests a good k.
# NOTE(review): assumes KMeans (sklearn), plt (matplotlib), the data
# matrix X and the wcss list are defined earlier in the script -- confirm.
for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = 'random')
    kmeans.fit(X)
    print (i,kmeans.inertia_)
    wcss.append(kmeans.inertia_)
# Plot inertia against k to visualise the elbow.
plt.plot(range(1, 11), wcss)
plt.title('O Metodo Elbow')
plt.xlabel('Numero de Clusters')
plt.ylabel('WSS') # within-cluster sum of squares
078f936b053c268cbeaeb0095cc615a47454d1f0 | Python | brettasmi/spark-helpers | /spark_helpers.py | UTF-8 | 1,509 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
def spark_model_saver(sparkmodelCV, path):
    """Persist the best model found by a fitted CrossValidator.

    Parameters
        sparkmodelCV (spark.ml CrossValidator model): fitted cross-validation
            model exposing a ``bestModel`` attribute
        path (str): destination path for the saved model
    Returns: the string "Success"
    """
    best = sparkmodelCV.bestModel
    best.save(path)
    return "Success"
def param_writer(cv_info_dict, outfile):
    """Write each model's parameters and rmse to a text file.

    Parameters
        cv_info_dict (dict): CV summary with a "param_map" list of
            (params, rmse) pairs, as produced by get_CV_info
        outfile (str): path of the text file to create
    """
    chunks = []
    for params, rmse in cv_info_dict["param_map"]:
        chunks.append(f"params = {params} \nrmse = {rmse}\n\n")
    with open(outfile, "w") as handle:
        handle.write("".join(chunks))
def get_CV_info(cv_model):
    """Collect summary information from a fitted CrossValidator.

    Parameters
        cv_model (spark.ml.tuning CrossValidator): fit crossvalidator model
    Returns
        cv_info_dict (dict): dictionary with
            "best_model"  - the winning model,
            "avg_metrics" - average metric per candidate,
            "model_list"  - the candidate estimator param maps,
            "param_map"   - list of (param_map, avg_metric) pairs

    Fix: the old docstring documented a ``save_path`` parameter that the
    function never accepted; it has been removed.
    """
    cv_info_dict = {}
    cv_info_dict["best_model"] = cv_model.bestModel
    cv_info_dict["avg_metrics"] = cv_model.avgMetrics
    cv_info_dict["model_list"] = cv_model.getEstimatorParamMaps()
    # Pair each candidate's parameter map with its cross-validated metric.
    cv_info_dict["param_map"] = list(zip(cv_info_dict["model_list"],
                                         cv_info_dict["avg_metrics"]))
    return cv_info_dict
| true |
ac297a1ebd8cc8ba0b2cf9b74b1894a64f439391 | Python | ShitalTShelke/Assignments | /Nehali Assignment/star_pattern_recursion.py | UTF-8 | 158 | 3.4375 | 3 | [] | no_license | def star_fun(num):
if num==0:
return
else:
star_fun(num - 1)
print(num*'*')
# Interactive entry point: read the triangle height from stdin and draw it.
num=int(input("Enter row count:"))
star_fun(num)
| true |
f8ccdc8bb2d537697892a202d971f9971147c311 | Python | scr34m/nagios_check_summary | /check_summary.py | UTF-8 | 4,406 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
import os, time, argparse
# Nagios host state codes -> human-readable labels.
HOST_STATE_MAP = {"0": "UP", "1": "DOWN"}
# Nagios service state codes -> human-readable labels.
STATE_MAP = {"0": "OK", "1": "WARNING", "2": "CRITICAL", "3": "UNKNOWN"}
# Map a host state code onto the service state code of equal severity
# (a DOWN host is reported as CRITICAL).
HOST_TO_SERVICE_STATE_MAP = {"0": "0", "1": "2"}
def parse_status_file(filepath):
    """Parse a nagios status.dat file. Returns a
    dictionary where the primary keys are the hostnames. For each
    host all of the services are listed in the 'services' key; the other
    key elements are used for host details.

    Fixes: the file handle was never closed (now a ``with`` block) and
    ``dict.has_key`` (removed in Python 3) is replaced with ``in``.
    """
    summary = {}
    with open(filepath) as status_file:
        while 1:
            line = status_file.readline()
            if not line:
                break
            line = line.strip()
            if line.startswith("#"):
                # A comment line: ignore.
                pass
            elif line.find('{') != -1:
                # A "<type> {" block header.
                statustype = line[0:line.find('{')]
                if statustype.strip() == "hoststatus":
                    # host_name is expected to be the first attribute.
                    line = status_file.readline()
                    name, hostname = line.split("=", 1)
                    name = name.strip()
                    hostname = hostname.strip()
                    if name != "host_name":
                        continue
                    if hostname not in summary:
                        summary[hostname] = {}
                        summary[hostname]['services'] = {}
                    # Read the remaining key=value details until '}'.
                    while 1:
                        line = status_file.readline()
                        if not line:
                            break
                        elif line.find("=") != -1:
                            name, value = line.split("=", 1)
                            summary[hostname][name.strip()] = value.strip()
                        elif line.find("}") != -1:
                            break
                elif statustype.strip() == "servicestatus":
                    # host_name then service_description are expected first.
                    line = status_file.readline()
                    name, hostname = line.split("=", 1)
                    name = name.strip()
                    hostname = hostname.strip()
                    line = status_file.readline()
                    name, service_desc = line.split("=", 1)
                    name = name.strip()
                    service_desc = service_desc.strip()
                    if name != "service_description":
                        continue
                    summary[hostname]['services'][service_desc] = {}
                    # Read the remaining key=value details until '}'.
                    while 1:
                        line = status_file.readline()
                        if not line:
                            break
                        elif line.find("=") != -1:
                            name, value = line.split("=", 1)
                            summary[hostname]['services'][service_desc][name.strip()] = value.strip()
                        elif line.find("}") != -1:
                            break
    return summary
def pretty_print_status(path):
    """Summarise a nagios status file as one monitoring-style output line.

    Prints '<WORST-STATE> host[/service]: STATE, ...' covering every
    unacknowledged problem, or 'OK all fine' when there is none.

    NOTE(review): Python-2-only code -- dict.keys() is sorted in place
    and `print` is used as a statement.  `state_out` starts as the int -1
    and is max()-ed against state code *strings*, which relies on Python 2
    ordering ints before strings; once any problem is recorded it holds a
    string usable as a STATE_MAP key.
    """
    summary = parse_status_file(path)
    str_out = ""
    state_out = -1
    hosts = summary.keys()
    hosts.sort()
    for host in hosts:
        status = summary[host]
        host_state = HOST_STATE_MAP[status['current_state']]
        # A down, unacknowledged host is reported as a whole ...
        if host_state != "UP" and status['problem_has_been_acknowledged'] == "0":
            str_out += "%s: %s, " % (host, host_state)
            state_out = max(state_out, HOST_TO_SERVICE_STATE_MAP[status['current_state']])
        else:
            # ... otherwise each unacknowledged non-OK service is listed.
            services = summary[host]['services'].keys()
            services.sort()
            for service in services:
                status = summary[host]['services'][service]
                current_state = STATE_MAP[status['current_state']]
                if current_state != "OK" and ( status['problem_has_been_acknowledged'] == "0"):
                    str_out += "%s/%s: %s, " % (host, service, current_state)
                    state_out = max(state_out, status['current_state'])
    if str_out:
        # Strip the trailing ", " before printing.
        print STATE_MAP[state_out] + " " + str_out[:-2]
    else:
        print "OK all fine"
if __name__ == "__main__":
    # Optional -S flag overrides the default nagios status file location.
    parser = argparse.ArgumentParser()
    parser.add_argument("-S", help="Status file path default is /var/cache/nagios3/status.dat", type=str)
    args = parser.parse_args()
    path = "/var/cache/nagios3/status.dat"
    if args.S:
        path = args.S
    try:
        pretty_print_status(path)
    # Python-2-only except syntax; any failure degrades to a WARNING line.
    except Exception, e:
        print "WARNING", e
| true |
4d140e07bfa74adb18593990ecf1e7da0ef47fe5 | Python | dorseytc/groc | /reference/sunset.py | UTF-8 | 2,131 | 3.109375 | 3 | [] | no_license | #!/usr/bin/python3
import pygame
import groc
import math
import world
import time
def interpolateScalar(v1, v2, scale):
    """Blend v1 toward v2: scale 0 gives v1, scale 1 gives v2."""
    weighted_end = scale * v2
    weighted_start = (1 - scale) * v1
    return weighted_end + weighted_start


def interpolateColor(color1, color2, scale):
    """Blend two RGB color triples channel by channel; returns a tuple."""
    blended = [interpolateScalar(color1[i], color2[i], scale) for i in range(3)]
    return tuple(blended)
def main():
    """Animate a 100-step 'sunset' gradient on a pygame surface.

    Interpolates the background through colorList (black -> pale blue ->
    white), one step per second, with a few reference dots and a light
    level caption blitted on top.
    """
    pygame.init()
    pygame.font.init()
    gauge = pygame.display.set_mode([1800, 800])
    gaugeColor = world.World.BLACK
    # Named RGB colors used below.
    green = (0, 255, 0)
    blue = (0,0,255)
    paleblue = (0,0,128)
    black = (0,0,0)
    red = (128,0,0)
    deepred = (255,0,0)
    white = (255,255,255)
    gray = (159,159,159)
    #colorList = [black, red, blue, white]
    colorList = [black, paleblue, white]
    gauge.fill(gaugeColor)
    # Split the 100 animation steps evenly across the gradient segments.
    segments = len(colorList)-1
    chunk = 100/segments
    print("segments", segments, "chunk", chunk)
    x,y = 400,400
    # NOTE(review): hard-coded Ubuntu font path -- fails elsewhere; confirm.
    font = pygame.font.Font('/usr/share/fonts/truetype/ubuntu/UbuntuMono-R.ttf', 32)
    text = font.render('Light Level', True, green, blue)
    textRect = text.get_rect()
    textRect.center = (x//2, y//2)
    for i in range(100):
        # foo = which gradient segment; grad = position within it (0..1).
        foo = math.trunc(i//chunk)
        grad = (i % chunk)/chunk
        print("i", i, "foo", foo, "grad", grad)
        color = interpolateColor(colorList[foo], colorList[foo+1], grad)
        print("color", color)
        gauge.fill(color)
        # Fixed reference dots so the changing background is easy to judge.
        pygame.draw.circle(gauge, red, (300, 300), 7)
        pygame.draw.circle(gauge, deepred, (30, 30), 7)
        pygame.draw.circle(gauge, blue, (600, 600), 7)
        pygame.draw.circle(gauge, paleblue, (900, 600), 7)
        text = font.render('Light Level: ' + str(i/100), True, green, blue)
        gauge.blit(text, textRect)
        pygame.display.flip()
        time.sleep(1)
    # NOTE(review): events are polled only once, *after* the animation
    # finishes, so the window is unresponsive during the 100 seconds and
    # this loop handles at most the events queued so far -- confirm intent.
    running = True
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # Nudge the caption 30px away from the click position.
            x,y = event.pos
            if y < 60:
                y = y + 30
            else:
                y = y - 30
            if x < textRect.center[0]:
                x = x + 30
            else:
                x = x - 30
            textRect.center = (x, y)
    print ("Done")
# Run the demo when executed directly.
if __name__ == '__main__':
    main()
| true |
a0e2d67b7537b17a6791e343525bce9f42a56bce | Python | yemi33/Virtual-Zoltar | /zoltar5/fileProcessing.py | UTF-8 | 561 | 3.421875 | 3 | [] | no_license | '''
fileProcessing.py
A Python program that processes, analyzes, and visualizes earthquake data. SOLUTIONS.
author: Yemi Shin
CS 111, Fall 2018
date: 5 October 2018
'''
def processFile(f):
    '''
    Read the file named by f and return its contents as a list.
    PARAMETER:
    f - the name of the file
    RETURN VALUE:
    a list with one string per line of the file (newlines kept).
    '''
    # Iterating a file handle yields its lines; 'with' closes it for us.
    with open(f, "r") as source:
        return list(source)
| true |
30bf1f31e1100ec86e748e74a015b37775e8609f | Python | DajunFeng/UNSW_Study | /COMP9024/Chapter2/PredatoryCreditCard.py | UTF-8 | 1,423 | 3.359375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 05:57:20 2019
@author: Michael
"""
import CreditCard
class PredatoryCreditCard(CreditCard.CreditCard):
    """ An extension to CreditCard that compounds interest and fees.

    Fix: the file does ``import CreditCard``, so the bare name
    ``CreditCard`` is the *module* object and subclassing it raised
    TypeError; the base is now the class inside that module.
    NOTE(review): confirm the CreditCard module defines a class of the
    same name (textbook convention).
    """
    def __init__(self, customer, bank, acnt, limit, apr):
        """ Create a new predatory credit card instance
        The initial balance is zero
        customer the name of the customer (e.g. 'Michael Jordan')
        bank the name of the bank (e.g. 'Commonwealth')
        acnt the account identifier (e.g. '1234 5678 3462 1111')
        limit credit limit
        apr annual percentage rate
        """
        super().__init__(customer, bank, acnt, limit)
        self._apr = apr

    def charge(self, price):
        """ Charge given price to the card, assuming sufficient credit limit
        Return True if charge was processed
        Return False and assess $5 fee if charge is denied
        """
        success = super().charge(price)
        if not success:
            self._balance += 5  # flat penalty fee for a denied charge
        return success

    def process_month(self):
        """ Assess monthly interest on outstanding balance """
        if self._balance > 0:
            # if positive balance, convert APR to monthly multiplicative factor
            monthly_factor = pow(1 + self._apr, 1/12)
            self._balance *= monthly_factor
a298e7b001fb2d31fa5199840da37212932f740b | Python | idiom/mlib | /src/struct/__init__.py | UTF-8 | 1,540 | 2.609375 | 3 | [] | no_license | import sys
import struct as st
import ctypes
# This package shadows the stdlib 'struct' module by name.  If the
# 'import struct as st' above resolved to this very package, evict the
# cached entry and re-import so st becomes the real stdlib struct.
if st == sys.modules[__name__]:
    del sys.modules['struct']
    st = __import__('struct')
# Little-endian integer readers (PEP 8: proper defs instead of
# lambda-assignments, which also gives them real names and docstrings).
def uqword(d, off=0):
    """Read a little-endian unsigned 64-bit integer from d at offset off."""
    return st.unpack_from('<Q', d, off)[0]


def udword(d, off=0):
    """Read a little-endian unsigned 32-bit integer from d at offset off."""
    return st.unpack_from('<I', d, off)[0]


def uword(d, off=0):
    """Read a little-endian unsigned 16-bit integer from d at offset off."""
    return st.unpack_from('<H', d, off)[0]


def ubyte(d, off=0):
    """Read a single unsigned byte from d at offset off."""
    return st.unpack_from('<B', d, off)[0]
class Structure(ctypes.Structure):
    """ctypes.Structure with convenience (de)serialisation helpers."""

    # Field names listed here are omitted from as_dict().
    _blacklist_ = []

    @classmethod
    def sizeof(cls):
        """Return the packed size of the structure in bytes."""
        return ctypes.sizeof(cls)

    @classmethod
    def parse(cls, data):
        """Build an instance from a raw bytes-like buffer."""
        return cls.from_buffer_copy(data)

    @classmethod
    def new(cls):
        """Build a zero-filled instance.

        Fix: from_buffer_copy needs a bytes-like object on Python 3, so
        the zero filler is b"\\x00" rather than the text string "\\x00".
        """
        return cls.parse(b"\x00" * cls.sizeof())

    @classmethod
    def from_cstruct(cls, code):
        """Derive field definitions from C struct source (needs pycparser)."""
        try:
            import pycparser
            from .cparse import parse_cstruct
            return parse_cstruct(code, cls)
        except ImportError:
            raise Exception('i need pycparser for it')

    def pack(self):
        """Serialise the structure back to bytes.

        Fix: the original used the Python-2-only ``buffer`` builtin,
        which is a NameError on Python 3; bytes(bytearray(self)) reads
        the underlying buffer on both versions.
        """
        return bytes(bytearray(self))

    def as_dict(self):
        """Recursively convert the structure's fields to a plain dict."""
        ret = {}
        for field, _ in self._fields_:
            if field in self._blacklist_:
                continue
            value = getattr(self, field)
            if isinstance(value, Structure):
                ret[field] = value.as_dict()
            elif hasattr(value, "value"):
                ret[field] = value.value
            elif hasattr(value, "__getitem__"):
                ret[field] = value[:]
            else:
                ret[field] = value
        return ret
bb8486d10572a62b7b1c22ddf9bcf316cf0b0c6e | Python | ericmoritz/travisircbot | /priv/arduino.py | UTF-8 | 163 | 2.796875 | 3 | [
"BSD-2-Clause"
] | permissive | import serial
import sys
s = serial.Serial(sys.argv[1], 9600)
while True:
char = sys.stdin.read(1)
if not char:
break
s.write(char)
| true |
b2bed42175ea065ef627a97df72fb8aff5caa467 | Python | daisy0x00/LeetCode_Python | /LeetCode532/LeetCode532.py | UTF-8 | 1,258 | 3.828125 | 4 | [] | no_license | #coding:utf-8
# 解题思路:先对数组排序,然后使用滑动窗口遍历数组,因为数组排序之后差是可以连续移动的两个指针得到的,这种方法同样要对相同的数字进行排除
#coding:utf-8
# 解题思路:先对数组排序,然后使用滑动窗口遍历数组,因为数组排序之后差是可以连续移动的两个指针得到的,这种方法同样要对相同的数字进行排除
class Solution():
    """LeetCode 532: count unique k-diff pairs in an array."""

    def findPairs(self, nums, k):
        """Return the number of unique pairs (a, b) in nums with b - a == k.

        :param nums: List[int]
        :param k: int
        :return: int

        Two-pointer sweep over a sorted copy.  Bug fix: after the
        ``diff > k`` branch the left pointer could catch up with the
        right one, so an element was compared with itself and (for
        k == 0) wrongly counted as a pair -- e.g. [1, 3, 1, 5, 4] with
        k = 0 returned 2 instead of 1.  The pointers are now kept
        strictly apart.
        """
        sortedNums = sorted(nums)
        count = 0
        left = 0
        right = 1
        while right < len(sortedNums):
            firNum = sortedNums[left]
            secNum = sortedNums[right]
            if secNum - firNum < k:
                right += 1
            elif secNum - firNum > k:
                left += 1
                if left == right:  # never compare an element with itself
                    right += 1
            else:
                count += 1
                # Skip duplicates of both endpoints so each pair counts once.
                while left < len(sortedNums) and sortedNums[left] == firNum:
                    left += 1
                while right < len(sortedNums) and sortedNums[right] == secNum:
                    right += 1
                if right == left:
                    right += 1
        return count
def main():
    """Manual check: print the pair counts for the two sample inputs."""
    solver = Solution()
    print(solver.findPairs([3, 1, 4, 1, 5], 2))
    print(solver.findPairs([1, 3, 1, 5, 4], 0))


if __name__ == '__main__':
    main()
| true |
f336d7d763c325f1ebc1fef6902832b027ebb8ad | Python | atirek-ak/mario | /2_code.py | UTF-8 | 516 | 2.71875 | 3 | [] | no_license | import numpy as np
new_w = np.array([1, 0, -1])[:,np.newaxis]
neta = 0.1
w = np.array([1, 0, 0])[:,np.newaxis]
from copy import deepcopy
t = np.array([-1, -1, 1, -1, 1, 1])[:,np.newaxis]
ind = np.where(t != 0)
X = np.array([[1, 1, 1], [-1, -1, 1], [2, 2, 1], [-2, -2, 1], [-1, 1, 1], [1, -1 ,1]])
while(len(ind[0])>0):
w = deepcopy(new_w)
y = X@w
o = np.where(y > 0, 1, -1)
z = t-o
ind = np.where(z != 0)
new_w = w + neta*(X[ind[0]].T@z[ind[0]])
print("->>>>\n",ind[0],"\n", new_w)
| true |
bed8b0f2e5bf46eb568f41716275f196a477af7d | Python | emremutlu16/string_smilarity | /string_smilarity.py | UTF-8 | 2,099 | 3.640625 | 4 | [] | no_license | def prefix_finder(string_to_process):
prefixes = list()
prefixes.append(string_to_process)
letter_list = list(string_to_process)
for w in range(len(letter_list)):
letter_list.pop()
if letter_list:
prefixes.append("".join(letter_list))
return prefixes
def suffix_finder(string_to_process):
suffixes = list()
suffixes.append(string_to_process)
letter_list = list(string_to_process)[::-1]
for w in range(len(letter_list)):
letter_list.pop()
if letter_list:
suffixes.append("".join(letter_list[::-1]))
return suffixes
def similarity_score_calculator(prefix_list, suffix_list):
similarity_score = 0
for suffix in suffix_list:
if suffix[0] != prefix_list[0][0]:
continue
elif suffix in prefix_list:
similarity_score += len(suffix)
else:
check_word = suffix[:-1]
for w in range(len(check_word)):
if check_word in prefix_list:
similarity_score += len(check_word)
break
elif check_word:
# check_word = suffix[:-1] yaptıktan sonra check_word
# boş gelmesi ihtimaline karşı
check_word = check_word[:-1]
# suffix in sonundan bir harf çıkartıp kontrol edildiğinde
# prefix listesinde çıkmıyorsa harf bitene kadar suffix
# sonundan harf çıkartıp kontrole devam etmek için
return similarity_score
if __name__ == '__main__':
test_case_number = int(input("How many test case do you need?: "))
strings_to_process = []
print("Please enter your test cases:")
for i in range(test_case_number):
strings_to_process.append(input())
for elem in strings_to_process:
prefix_list_of_string = prefix_finder(elem)
suffix_list_of_string = suffix_finder(elem)
print(similarity_score_calculator(prefix_list_of_string,
suffix_list_of_string))
| true |
7eb520409ed2d57e6856f6464963dffa70b44720 | Python | SINOVATEblockchain/trezor-firmware | /tests/click_tests/recovery.py | UTF-8 | 1,680 | 2.671875 | 3 | [] | no_license | from .. import buttons
def enter_word(debug, word):
word = word[:4]
for coords in buttons.type_word(word):
debug.click(coords)
return debug.click(buttons.CONFIRM_WORD, wait=True)
def select_number_of_words(debug, num_of_words=20):
# confirm recovery
layout = debug.wait_layout()
assert layout.text.startswith("Recovery mode")
layout = debug.click(buttons.OK, wait=True)
# select number of words
assert "Select number of words" in layout.text
layout = debug.click(buttons.OK, wait=True)
assert layout.text == "WordSelector"
# click the number
word_option_offset = 6
word_options = (12, 18, 20, 24, 33)
index = word_option_offset + word_options.index(
num_of_words
) # raises if num of words is invalid
coords = buttons.grid34(index % 3, index // 3)
layout = debug.click(coords, wait=True)
assert "Enter any share" in layout.text
def enter_share(debug, share: str):
layout = debug.click(buttons.OK, wait=True)
assert layout.text == "Slip39Keyboard"
for word in share.split(" "):
layout = enter_word(debug, word)
return layout
def enter_shares(debug, shares: list):
layout = debug.read_layout()
expected_text = "Enter any share"
remaining = len(shares)
for share in shares:
assert expected_text in layout.text
layout = enter_share(debug, share)
remaining -= 1
expected_text = "RecoveryHomescreen {} more".format(remaining)
assert "You have successfully recovered your wallet" in layout.text
def finalize(debug):
layout = debug.click(buttons.OK, wait=True)
assert layout.text == "Homescreen"
| true |
21e36c6626b803a0f1f5b5c22d279e2b8d25e0d0 | Python | 00mjk/QHAL | /test/test_utils.py | UTF-8 | 1,681 | 2.65625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | import numpy as np
import unittest
from qhal.hal._utils import angle_binary_representation
class UtilsTest(unittest.TestCase):
    """basic tests for HAL util functions.
    """
    def test_angle_binary_conversion(self):
        """Test the conversion of angles to 16-bit representation."""
        # (expected, calculated) pairs.  The original used a dict keyed
        # by the expected value, but several expected values repeat
        # (0, 8192, 32768, 57344), so duplicate keys silently overwrote
        # earlier entries and those cases were never exercised.  A list
        # keeps every case.
        test_cases = [
            (0, angle_binary_representation(0)),
            (8192, angle_binary_representation(np.pi/4)),
            (10923, angle_binary_representation(np.pi/3)),
            (16384, angle_binary_representation(np.pi/2)),
            (21845, angle_binary_representation(2 * np.pi/3)),
            (24576, angle_binary_representation(3 * np.pi/4)),
            (32768, angle_binary_representation(np.pi)),
            (40960, angle_binary_representation(5 * np.pi/4)),
            (43691, angle_binary_representation(4 * np.pi/3)),
            (49152, angle_binary_representation(3 * np.pi/2)),
            (54613, angle_binary_representation(5 * np.pi/3)),
            (57344, angle_binary_representation(7 * np.pi/4)),
            (0, angle_binary_representation(2 * np.pi)),
            (8192, angle_binary_representation(2 * np.pi + np.pi/4)),
            (32768, angle_binary_representation(2 * np.pi + np.pi)),
            (57344, angle_binary_representation(2 * np.pi + 7 * np.pi/4)),
            (0, angle_binary_representation(-2 * np.pi)),
            (8192, angle_binary_representation(-7 * np.pi/4)),
            (32768, angle_binary_representation(-np.pi)),
            (57344, angle_binary_representation(-np.pi/4)),
        ]
        for expected, calculated in test_cases:
            self.assertEqual(expected, calculated)
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| true |
a5bab5fbca465c0ea1f70161c2a28b73d3be3d5c | Python | noah-goodrich/leo-python | /leosdk/aws/cfg.py | UTF-8 | 1,438 | 3.15625 | 3 | [
"MIT"
] | permissive | import os
class Cfg:
    """Environment-aware accessor over a leo_config module.

    Instantiates the config class named after the PYTHON_ENV variable
    (default 'dev') and exposes typed lookups over its attributes.
    """

    def __init__(self, leo_config):
        self.cfg = self.__by_environment(leo_config)

    def value(self, key: str) -> str:
        """Return the attribute as a stripped string, or None if unset/blank."""
        attr = getattr(self.cfg, key, None)
        if self.__is_valid_str(attr):
            return str(attr).strip()
        return None

    def int_value(self, key: str) -> int:
        """Return the attribute if it is int-convertible, else None."""
        attr = getattr(self.cfg, key, None)
        if self.__is_valid_int(attr):
            return attr
        return None

    def value_or_else(self, key: str, or_else: str) -> str:
        """Like value(), but fall back to str(or_else) when missing."""
        found = self.value(key)
        return found if found is not None else str(or_else)

    def int_value_or_else(self, key: str, or_else: int) -> int:
        """Like int_value(), but fall back to or_else when missing."""
        found = self.int_value(key)
        return found if found is not None else or_else

    @staticmethod
    def __by_environment(leo_config):
        # Pick and instantiate the config class matching the environment.
        env = os.getenv('PYTHON_ENV', 'dev')
        env_class = getattr(leo_config, env, None)
        if env_class is None:
            raise AssertionError("'%s' class is missing in leo_config.py" % env)
        return env_class()

    @staticmethod
    def __is_valid_str(val):
        return val is not None and len(str(val).strip()) > 0

    @staticmethod
    def __is_valid_int(val):
        return val is not None and Cfg.__is_int(val)

    @staticmethod
    def __is_int(val):
        try:
            int(val)
        except ValueError:
            return False
        return True
| true |
81a3917390cf3d2ee8285bfefd1ad21a399e38fe | Python | ZeGmX/Projet_jeu | /rockets.py | UTF-8 | 4,790 | 2.828125 | 3 | [] | no_license | import math
import engine
import shapes
import bad_guys
import game
class Rocket(engine.GameObject):
    """The player-controlled craft: thrust, rotation, landing and lives.

    Physics and collision checks live in move()/can_land(); position and
    screen transitions are handled in isoob().  Depends on the global
    game/engine/shapes state.
    """

    # Velocity added along the heading per thrust press.
    SPEEDBOOST = 0.6
    # Per-frame friction decrement applied to the velocity components.
    SPEEDDECREASESTEP = 0.02
    # Per-frame downward acceleration.
    GRAVITYSTEP = 0.02
    NBLIVES = 3
    skin = 'bird' # rocket or bird
    radius = 20 # 20 for 'bird', 30 for 'rocket'
    # Guard counter used in isoob() to work around first-load glitches.
    debuginit = 0

    def __init__(self):
        self.speed = [0, 0] # v_x, v_y
        self.angle = 90
        self.lives = Rocket.NBLIVES
        self.radius = Rocket.radius
        self.landed = False
        # Frames left before reverting from the "powered" skin.
        self.countdown = 0
        self.bulletproof = False
        super().__init__(0, 0, 0, 0, Rocket.skin, 'black')

    @staticmethod
    def init_rockets():
        # Create the single global rocket on game startup.
        assert game.Game.rocket is None, "rocket already initialized"
        print("Initializing the rocket...")
        game.Game.rocket = Rocket()

    def heading(self):
        # Current orientation in degrees (90 == upright).
        return self.angle

    def rocket_up(self):
        """when the up arrow is pressed"""
        # Thrust along the current heading.
        self.speed[0] -= Rocket.SPEEDBOOST * math.cos(math.radians(180 - self.angle))
        self.speed[1] -= Rocket.SPEEDBOOST * math.sin(math.radians(180 - self.angle))
        # Show the exhaust skin for the next 20 frames.
        self.shape = Rocket.skin + "_powered"
        self.countdown = 20
        self.landed = False

    def rocket_left(self):
        # Rotate 30 degrees counter-clockwise; any rotation ends a landing.
        self.angle += 30
        self.landed = False

    def rocket_right(self):
        # Rotate 30 degrees clockwise; any rotation ends a landing.
        self.angle -= 30
        self.landed = False

    def move(self):
        """Per-frame update: physics, skin countdown, landing and crashes."""
        game.Stats.show_fps()
        if not (game.Game.pause or game.Game.freeze_spawn):
            game.gameplay(self)
            # Integrate position (screen y grows downward, hence -=).
            self.x += self.speed[0]
            self.y -= self.speed[1]
            self.speed[1] += Rocket.GRAVITYSTEP
            if self.speed[1] < 0: # Natural slowing down - friction
                self.speed[1] += Rocket.SPEEDDECREASESTEP / 2
            if self.speed[0] < 0:
                self.speed[0] += Rocket.SPEEDDECREASESTEP # less friction horizontally
            elif self.speed[0] > 0:
                self.speed[0] -= Rocket.SPEEDDECREASESTEP
            if self.countdown > 0: # display back the unpowered skin
                self.countdown -= 1
            else:
                self.shape = Rocket.skin
            if not self.landed:
                if self.can_land():
                    # Landing only succeeds when slow and upright.
                    if abs(self.speed[1]) > 0.8 or self.angle % 360 != 90:
                        self.losealife()
                    else:
                        self.land()
                elif shapes.collide_gnd(self) or shapes.collide_door(self):
                    self.losealife()
            else:
                # Already landed: keep the craft pinned in place.
                self.land()

    def can_land(self):
        """Checks if there is a platform just under the rocket"""
        for landingpad in game.Game.platforms[game.Game.posi][game.Game.posj]:
            x1, x2 = landingpad[0][0], landingpad[1][0]
            y = landingpad[0][1]
            """first line : the rocket is at the right position vertically
            second and third : the rocket is at the right position horizontally"""
            if self.y > y >= self.y - self.radius and self.x - self.radius >= min(x1, x2) and \
               self.x + self.radius <= max(x1, x2):
                return True
        return False

    def land(self):
        # Stop all motion and mark the craft as settled.
        self.speed[1] = self.speed[0] = 0
        self.landed = True

    def losealife(self):
        """Handle a crash: end the game at 0 lives, else respawn at origin."""
        if self.lives == 0:
            game.banner('Game over')
            engine.exit_engine()
            game.Stats.display_stats()
        else:
            self.lives -= 1
            # Reset motion, orientation and position to the spawn state.
            self.speed[0] = self.speed[1] = 0
            self.angle = 90
            self.x = 0
            self.y = 0
            self.skin = Rocket.skin
            game.Game.freeze_spawn = True
            game.banner("Life lost, {} remaining".format(self.lives))
            # Despawn the boss if dying in its room before it is beaten.
            if game.Game.posi == 2 and game.Game.posj == 3 and not bad_guys.Boss.bossbeaten:
                engine.del_obj(game.Game.boss)
            # Return to the start room.
            game.Game.posi = 0
            game.Game.posj = 4
            game.load()

    def isoob(self):
        """out of bond management"""
        # Crossing a screen edge moves to the adjacent room and wraps
        # the position to the opposite edge.
        if super().isoob():
            if self.y < -300:
                game.Game.posi += 1
                self.y = 280
            elif self.y > 300:
                game.Game.posi -= 1
                self.y = -280
            elif self.x < -300:
                game.Game.posj -= 1
                self.x = 280
            elif self.x > 300:
                game.Game.posj += 1
                self.x = -280
            game.load()
        elif Rocket.debuginit < 2: # Weird problems when first loading
            # Re-register doors and keys for the first couple of frames.
            for door in shapes.Door.ldoor[game.Game.posi][game.Game.posj]:
                engine.add_obj(door)
            for key in shapes.Key.lkey[game.Game.posi][game.Game.posj]:
                engine.add_obj(key)
            Rocket.debuginit += 1
        return False
| true |
c91f9bdb32ddf8f8913062f85019e5a685d30af0 | Python | tomasbm07/IPRP---FCTUC | /6/Exercicios complementares/1_6.py | UTF-8 | 973 | 3.96875 | 4 | [] | no_license | linhas = int(input('Digite o numero de linhas da matriz: '))
colunas = int(input('Digite o numero de colunas da matriz: '))
print('--Digite os valores separados por um espaço--')
print('Digite os valores de uma matriz {} x {}'.format(linhas, colunas))
matrix = [(input('Linha {}: '.format(i + 1)).split()) for i in range(linhas)]
# matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
def transposta(matrix):
print('\n---Matriz introduzida---')
represent(matrix)
# convert(matrix)
# new_matrix = [[0 for elem in linha] for linha in matrix]
new_matrix = [[0 for i in range(linhas)] for i in range(colunas)]
for i in range(len(matrix)):
for j in range(len(matrix[i])):
new_matrix[j][i] = (matrix[i][j])
# print(new_matrix)
print('\n---Matriz transposta---')
represent(new_matrix)
def represent(matrix):
for row in matrix:
for elem in row:
print(elem, end=' ')
print()
transposta(matrix)
| true |
6362748a4d98a2165fc88c76c49122a48e0a3d6c | Python | QuentinTedeschi/HeadMovement | /Dataload.py | UTF-8 | 271 | 3.015625 | 3 | [] | no_license | import numpy as np
d = np.load('test.npy')
print(d)
def differential (data):
diff = []
prev = data[1]
for second in data[1:]:
deriv = second - prev
prev = second
diff.append(deriv)
return diff
diff = differential(d)
print(diff) | true |
6c60ad63f4c82f5f147d6df3d4ccf5b3df6c987e | Python | AP-MI-2021/lab-1-PrigoanaClaudiu | /main.py | UTF-8 | 1,137 | 4.25 | 4 | [] | no_license | '''
Returneaza true daca n este prim si false daca nu.
'''
def is_prime(n):
'''
Returneaza true daca n este prim si false daca nu este prim.
:param n: n, ok, i
:return: true sau false
'''
if n < 2:
return False
else:
ok=True
for i in range (2,n//2+1):
if n%i == 0:
ok=False
if ok:
return True
else:
return False
'''
Returneaza produsul numerelor din lista lst.
'''
def get_product(lst):
y=1
x=len(lst)
for i in range(x):
y=lst[i]*y
return y
'''
Returneaza CMMDC a doua numere x si y folosind primul algoritm.
'''
def get_cmmdc_v1(x, y):
r=x%y
while r>0 :
x=y
y=r
r=x%y
return y
'''
Returneaza CMMDC a doua numere x si y folosind al doilea algoritm.
'''
def get_cmmdc_v2(x, y):
while x!=y :
if x>y :
x=x-y
else :
y=y-x
return x
def main():
n = int(input("Dati numarul:"))
print(is_prime(n))
lst = [2, 5, 8, 2, 3, 9]
print(get_product(lst))
x=int(input("Dati primul numar:"))
y=int(input("Dati al doilea numar:"))
print(get_cmmdc_v1(x,y))
print(get_cmmdc_v2(x,y))
if __name__ == '__main__':
main() | true |
b8fdb123a04cd338ff98efc0b360327f33b552ca | Python | vfabrics/DeepStock | /word2vec.py | UTF-8 | 1,117 | 2.703125 | 3 | [] | no_license | #-*- coding: utf-8 -*-
from krwordrank.word import KRWordRank
from krwordrank.hangle import normalize
import pickle
def load_data(filename):
data_list = []
try:
with open(filename, 'rb') as fp:
data = pickle.load(fp)
data_list.append(data)
print("read : %s"%(filename))
return data_list[0]
except:
print("error : %s"%(filename))
def get_data_set(y, m, d):
for year in range(2017, y, -1):
for month in range(6, m, -1):
for day in range(31, d, -1):
contents_list = []
hashValue_list = []
contents_filename = "contents/%d-%d-%d.pickle"%(year, month, day)
hashValue_filename = "hashValue/%d-%d-%d_label.pickle"%(year, month, day)
contents_list.append(load_data(contents_filename))
hashValue_list.append(load_data(hashValue_filename))
return contents_list, hashValue_list
def get_texts_scores(docs):
docs = [doc for doc in docs if len(doc) == 2]
return docs
texts_list, labels_list = get_data_set(2016, 5, 10) | true |
7274fa848b8a6c3289d79a324d9f54e35ba56625 | Python | amc6630/MechWarfare | /MechUI/displayTerminal.py | UTF-8 | 1,932 | 3.40625 | 3 | [] | no_license | #! /usr/bin/python
#This will define a class that implements
# a surface that acts like a terminal, with a maximum number of lines present.
# in pygame.
import pygame
class SurfaceTerminal:
def __init__(self, in_pos, in_color=(255,255,255),in_size=20,in_font_size=20):
#initialize members
self.display_position = in_pos
self.display_list = []
self.display_size = 0
self.max_line_width = 0
self.display_max_size = in_size
self.default_color = in_color
self.font_size = in_font_size
self.disp_font = pygame.font.Font(None,self.font_size)
def blitTerminal(self, in_backing):
temp_pos = self.display_position
for thing in self.display_list:
surf = self.disp_font.render(thing,0,self.default_color)
write_rect = surf.get_rect()
write_rect.top = temp_pos[0]
write_rect.left = temp_pos[1]
in_backing.blit(surf,write_rect)
#update the position
temp_pos = (temp_pos[0]+self.disp_font.get_linesize(),temp_pos[1])
def setPos(self,in_pos):
self.display_position = in_pos
def getBoundingRect(self):
return pygame.Rect(self.display_position,(self.max_line_width,self.display_size*self.disp_font.get_linesize()))
def clearTerminal(self):
self.display_list.clear()
sefl.display_size = 0
def appendLine(self, in_line):
if(self.display_size >= self.display_max_size):
self.display_list.pop(0)
self.display_list.append(in_line)
else:
self.display_size += 1
self.display_list.append(in_line)
#update the max width.
if (self.disp_font.size(in_line) > self.max_line_width):
self.max_line_width = self.disp_font.size(in_line)[0]
#def setLineLimit(in_max):
| true |
b9fc9f1efb15677a81411a1f3942f85d95366bc9 | Python | Almondo4/ML-IDS-Framework | /Static/AdaBoost.py | UTF-8 | 1,879 | 2.546875 | 3 | [] | no_license | import pandas as pd
import numpy as np
DataTrain = pd.read_pickle("../Data/Static_Training.csv")
DataTest = pd.read_pickle("../Data/Static_Testing.csv")
featureMatrixTR = DataTrain.iloc[:,:-1].values
labelVectorTR = DataTrain.iloc[:,-1].values
featureMatrix = DataTest.iloc[:,:-1].values
labelVector = DataTest.iloc[:,-1].values
# # 3 Scaling the dataSet
# from sklearn.preprocessing import StandardScaler
# sc = StandardScaler()
# featureMatrixTR = sc.fit_transform(featureMatrixTR)
# featureMatrix = sc.fit_transform(featureMatrix)
# from tensorflow.keras.utils import to_categorical
# labelVector = to_categorical(labelVector)
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
labelVectorTR = labelencoder.fit_transform(labelVectorTR)
labelVector = labelencoder.fit_transform(labelVector)
# # Feature Extraction
# from sklearn.feature_selection import SelectKBest
# from sklearn.feature_selection import f_classif
#
# selector = SelectKBest(f_classif, k=100)
# selected_features = selector.fit_transform(featureMatrix, labelVector)
#
# print((-selector.scores_).argsort()[:])
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import confusion_matrix as CM
clf = AdaBoostClassifier(n_estimators=150,algorithm="SAMME.R",)
clf.fit(featureMatrixTR,labelVectorTR)
y_pred2 = clf.predict(featureMatrix)
print("Performance:",sum(y_pred2==labelVector)/len(labelVector))
print("Confusion Matrix:\n",CM(labelVector,y_pred2))
## TESTING
# predictions = cb.predict(featureMatrix)
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.metrics import roc_auc_score
# XGB_predictions_Classes =model.predict_classes(test)
#
cm = confusion_matrix(labelVector, y_pred2)
print(classification_report(labelVector, y_pred2,digits=4))
auc = roc_auc_score(labelVector, y_pred2)
print('ROC AUC: %f' % auc)
| true |
e28f7a520a4182cf002ec552a85659bf4b6714d4 | Python | Aasthaengg/IBMdataset | /Python_codes/p00003/s534893219.py | UTF-8 | 241 | 3.4375 | 3 | [] | no_license | import sys
n = int(sys.stdin.readline())
for i in range(n):
sides = [int(k) for k in sys.stdin.readline().split()]
sides.sort()
if sides[0] ** 2 + sides[1] ** 2 == sides[2] ** 2:
print("YES")
else:
print("NO") | true |
f713a29342f212e5cfeee2d462f325924e93cad7 | Python | Matheuspds/Prediction_Volume | /predicao-com-serie/Ajustando-Dados-Reais-Teste.py | UTF-8 | 918 | 2.515625 | 3 | [] | no_license |
# coding: utf-8
# In[52]:
import pandas as pd
import numpy as np
from datetime import time
path = "../dataset/"
freq = "20min"
# In[54]:
df_test = pd.read_csv("resultado_real_teste.csv", parse_dates=['time'])
df_test = df_test.rename(columns = {'date_time':'time','tollgate':'tollgate_id','is_etc':'has_etc','veh_type':'vehicle_type','model':'vehicle_model'})
df_test.head()
# In[55]:
del df_test['time_window']
# In[57]:
df_test['time_window'] = df_test['time']
# In[59]:
del df_test['time']
# In[63]:
df_test.head()
df_test.to_csv('result/resultado_real.csv', index=False)
# In[49]:
def df_filter(df_volume):
df_volume["time"] = df_volume["time_start"]
df_volume["time_window"] = df_volume["time"]
df_volume = df_volume[["tollgate_id", "time_window", "direction", "volume"]]
return df_volume
# In[50]:
df_test_final = df_filter(df_test)
# In[51]:
df_test_final
| true |
64e428507c5017c260ea9520802173f92d472bfa | Python | 01090841589/solved_problem | /D2/D2 20190806회문.py | UTF-8 | 678 | 3.140625 | 3 | [] | no_license | T = int(input())
for test_case in range(1, T+1):
NM = list(map(int,input().split()))
words = []
for a in range(NM[0]):
words.append(input())
for i in range(NM[0]):
word_row = ''
for j in range(NM[0]):
word_row += words[j][i]
words.append(word_row)
for word in words:
for i in range(len(word)):
for j in range(i, len(word)):
if word[i] == word[j] and i != j:
comp_word = word[i:j+1]
if comp_word == comp_word[::-1]:
if len(comp_word) == NM[1]:
print('#{} {}'.format(test_case,comp_word)) | true |
6eda7614bcd3fc123263dc74551757df6d78aee3 | Python | sherafatian-amirabbas/DistributedSystem | /Task1/Source/DHTServer.py | UTF-8 | 9,733 | 2.71875 | 3 | [] | no_license | from DHTMessage import DHTMessage, DHTMessageType
from DHTSocket import DHTSocketAddress, DHTSocket
import time
class DHTServer():
def __init__(self, host = None, port = None, onRequestHandler = None):
self.Host = host
self.Port = port
self.OnRequestHandler = onRequestHandler
self.DHTNodeDataValue = None # will keep the node data (DHTNodeDataValue class)
# list of Data the node knows about (List of DHTNodeDataValue classes)
self.DHTNodeDataValueList = None
self.PrevDHTServerSocketAddress = None # A reference to previous DHTSocketAddress class
self.SuccessorDHTServerSocketAddress = None # the Successor as a DHTSocketAddress class
self.NextSuccessorDHTServerSocketAddress = None # the next successor as a DHTSocketAddress class
# list of shortcuts (List of None Classes)
self.FingerDHTServerSocketAddresses = []
self.__dhtSocket = None
self.__worker = None
self.__interval = 1000
self.__timeout = 2000
def IsDataValueIncluded(self, dataValue):
result = False
for value in self.DHTNodeDataValueList:
if(value == dataValue):
result = True
break
return result
def Initialize(self, dhtSocket, worker):
self.__dhtSocket = dhtSocket;
self.__worker = worker;
def GetDHTSocketAddress(self):
return DHTSocketAddress(self.Host, self.Port)
def Upload(self, dhtNodeDataValue, dhtNodeDataValueList, prevDHTServerSocketAddress, successorDHTServerSocketAddress, nextSuccessorDHTServerSocketAddress, fingerDHTServerSocketAddresses):
self.DHTNodeDataValue = dhtNodeDataValue
self.DHTNodeDataValueList = dhtNodeDataValueList
self.PrevDHTServerSocketAddress = prevDHTServerSocketAddress
self.SuccessorDHTServerSocketAddress = successorDHTServerSocketAddress
self.NextSuccessorDHTServerSocketAddress = nextSuccessorDHTServerSocketAddress
self.FingerDHTServerSocketAddresses = fingerDHTServerSocketAddresses
def OnRequest(self, dataBytes):
dataStr = dataBytes.decode("utf-8")
msg = DHTMessage.deserialize(dataStr)
if self.OnRequestHandler != None:
self.OnRequestHandler(msg)
return str(self.handleMessage(msg))
# ----------------------------------------------------------------------------------- private methods
def updateReferences(self, param):
# TODO: references to Successor/NextSuccessor/PrevDHTServerSocketAddress/finger
pass
def checkSuccessorInInterval(self):
# TODO: Open the socket to ping and wait for the response
# TODO: if it's not getting back in timeout duration, "updateReferences" (with param) is called
#try:
# TODO: DHTSocket.OpenASocketAndSendTheRequestWithTimeout(self.__timeout)
#except timeoutexception:
# self.updateReferences()
pass
def onTimeStart(self):
checkSuccessorInInterval()
pass
def setTheTimer(self, onTimeStart):
# TODO: defining the timer and using "interval"
#new Timer(self.__interval)
#{
# OnStart += () => {
# onTimeStart()
# }
#}
pass
def getInfoDataAsString(self):
return self.DHTNodeDataValue.ToString()
def getValueFromTheCurrentRequest(self, socketAddresses_tuple, socketAddress):
val = ''
for address_tuple in socketAddresses_tuple:
if address_tuple[0].IsEqualTo(socketAddress):
val += str(address_tuple[1])
break
return val
def GetShortcutsDataValueAsString(self, socketAddresses_tuple):
shortcuts = ''
if self.FingerDHTServerSocketAddresses:
for fingerSocketAddress in self.FingerDHTServerSocketAddresses:
res = ''
res += self.getValueFromTheCurrentRequest(socketAddresses_tuple, fingerSocketAddress)
if res == None or res == '':
shortcuts += DHTSocket.OpenASocketAndSendTheRequest(fingerSocketAddress.Host, fingerSocketAddress.Port, DHTMessage(DHTMessageType.DataAsString)) + ','
else:
shortcuts += res + ','
if shortcuts == '':
shortcuts += ','
return shortcuts
def GetSuccessorDataValueAsString(self, socketAddresses_tuple):
successorDataAsString = ""
successorDataAsString += self.getValueFromTheCurrentRequest(socketAddresses_tuple, self.SuccessorDHTServerSocketAddress)
if successorDataAsString == "":
successorDataAsString = DHTSocket.OpenASocketAndSendTheRequest(self.SuccessorDHTServerSocketAddress.Host, self.SuccessorDHTServerSocketAddress.Port, DHTMessage(DHTMessageType.DataAsString))
return successorDataAsString
def GetNextSuccessorDataValueAsString(self, socketAddresses_tuple):
nextSuccessorDataAsString = ""
nextSuccessorDataAsString += self.getValueFromTheCurrentRequest(socketAddresses_tuple, self.NextSuccessorDHTServerSocketAddress)
if nextSuccessorDataAsString == "":
nextSuccessorDataAsString = DHTSocket.OpenASocketAndSendTheRequest(self.NextSuccessorDHTServerSocketAddress.Host, self.NextSuccessorDHTServerSocketAddress.Port, DHTMessage(DHTMessageType.DataAsString))
return nextSuccessorDataAsString
def DataValueAsStringCommandHandler(self, dhtMessage):
return self.getInfoDataAsString()
# ---------------------------------------------------------------------------------------- Commands
def handleMessage(self, dhtMessage):
result = None
if dhtMessage.Type == DHTMessageType.Ping:
result = self.Ping(dhtMessage)
if dhtMessage.Type == DHTMessageType.Lookup:
result = self.Lookup(dhtMessage)
if dhtMessage.Type == DHTMessageType.Join:
result = self.Join(dhtMessage)
if dhtMessage.Type == DHTMessageType.List:
result = self.ListCommandHandler(dhtMessage)
if dhtMessage.Type == DHTMessageType.DataAsString:
result = self.DataValueAsStringCommandHandler(dhtMessage)
if dhtMessage.Type == DHTMessageType.Shortcut:
result = self.Shortcut(dhtMessage)
return result
def Ping(self, dhtMessage):
return "hey.. I'm alive.."
def Lookup(self, dhtMessage):
pass
def Join(self, dhtMessage):
addressofJoinedNode = DHTSocketAddress(dhtMessage.Argument, int(dhtMessage.Tag))
datavalueofJoinedNode=int(dhtMessage.Nodevalue)
serverDataAsString = self.getInfoDataAsString() #server node value as string
serverAddress = self.GetDHTSocketAddress() # host and port number
if dhtMessage.OriginDHTSocketAddress.IsEqualTo(serverAddress):
dhtMessage.OriginArgument = serverDataAsString
dhtMessage.Tag = serverAddress.Host + "|" + str(serverAddress.Port) + "|" + serverDataAsString + "_"
else:
dhtMessage.Tag += serverAddress.Host + "|" + str(serverAddress.Port) + "|" + serverDataAsString + "_"
def Shortcut(self, dhtMessage):
result = ""
address = DHTSocketAddress(dhtMessage.Argument, int(dhtMessage.Tag))
isExisted = False
for add in self.FingerDHTServerSocketAddresses:
if add.IsEqualTo(address):
isExisted = True
break
if isExisted:
return "the node has already listed as shortcut, try another one..."
else:
self.FingerDHTServerSocketAddresses.append(address)
return "shortcut is established now"
def ListCommandHandler(self, dhtMessage):
serverDataAsString = self.getInfoDataAsString()
serverAddress = self.GetDHTSocketAddress()
if dhtMessage.OriginDHTSocketAddress.IsEqualTo(serverAddress):
dhtMessage.OriginArgument = serverDataAsString
dhtMessage.Tag = serverAddress.Host + "|" + str(serverAddress.Port) + "|" + serverDataAsString
else:
dhtMessage.Tag += "_" + serverAddress.Host + "|" + str(serverAddress.Port) + "|" + serverDataAsString
socketAddresses_tuple = []
traversedServers = dhtMessage.Tag.split("_")
for ts in traversedServers:
inf = ts.split("|")
socketAddresses_tuple.append((DHTSocketAddress(inf[0], int(inf[1])), int(inf[2])))
shortcuts = self.GetShortcutsDataValueAsString(socketAddresses_tuple)
successorDataAsString = self.GetSuccessorDataValueAsString(socketAddresses_tuple)
nextSuccessorDataAsString = self.GetNextSuccessorDataValueAsString(socketAddresses_tuple)
dataAsStr = '%s:%s S-%s, NS-%s' % (serverDataAsString, shortcuts, successorDataAsString, nextSuccessorDataAsString) + "\n"
isTheLastOne = dhtMessage.OriginDHTSocketAddress.IsEqualTo(self.SuccessorDHTServerSocketAddress)
if isTheLastOne == False:
dataAsStr += DHTSocket.OpenASocketAndSendTheRequest(self.SuccessorDHTServerSocketAddress.Host, self.SuccessorDHTServerSocketAddress.Port, dhtMessage)
return dataAsStr
# ---------------------------------------------------------------------------------------- Commands
def GetKey(host, port, dataValue):
return host + ":" + str(port) + ":" + str(dataValue)
DHTServer.GetKey = staticmethod(DHTServer.GetKey) | true |
0e587143ec8cb5deca99b75408c104fdeb2656dd | Python | Dhrumil-Zion/Competitive-Programming-Basics | /LeetCode/Arrays/Minimum Operations_to_Make_the_Array_Increasing.py | UTF-8 | 244 | 2.515625 | 3 | [] | no_license | class Solution:
def minOperations(self, nums: List[int]) -> int:
pre,ans=0,0
for i in nums:
if pre < i:
pre=i
else:
pre+=1
ans+=pre-i
return ans | true |
b344393f1ddda6fa6ee997886649d1f8863d56cf | Python | 1871023/stm32_multi_MPU6050 | /visualize/python_serial.py | UTF-8 | 3,397 | 2.734375 | 3 | [] | no_license | import settings
import serial
import re
ser = serial.Serial(settings.SERIAL_PORT, 250000)
print("connected to: " + ser.portstr)
line_temp = ""
def read_all_filtered_out():
global ser
line_temp = ""
attributes = []
data = {}
while len(attributes) < 6:
for line in ser.read():
# print(chr(line)+"("+str(line)+")",end='')
# print(chr(line),end='')
if line == 0xa :
temp_attributes = line_temp.split(":")
attributes = []
for attribute in temp_attributes:
for sub_attribute in attribute.split("#"):
attributes.append(sub_attribute)
if len(attributes) < 6 :
print(line_temp)
#clear if no collect char from a 1st char or not collect all data
line_temp = ""
break
if attributes[0] != "Index" and attributes[2] != "DEL" \
and attributes[4] != "FIL":
#clear if no collect char from a 1st char or not collect all data
line_temp = ""
break
pass
angle = attributes[5].split(',')
if len(angle) < 3 :
break
data = {
"result": 0,
"id": attributes[1],
"delta_time":attributes[3],
"angle_x":angle[0],
"angle_y":angle[1],
"angle_z":angle[2]
}
# print(attributes)
print(data)
print()
line_temp = ""
else :
line_temp+=chr(line)
return data
def terminate():
global ser
ser.close()
if __name__ == '__main__':
while True:
for line in ser.read():
# print(chr(line)+"("+str(line)+")",end='')
# print(chr(line),end='')
if line == 0xa :
temp_attributes = line_temp.split(":")
attributes = []
for attribute in temp_attributes:
for sub_attribute in attribute.split("#"):
attributes.append(sub_attribute)
if len(attributes) < 6 :
print(line_temp)
#clear if no collect char from a 1st char or not collect all data
line_temp = ""
break
if attributes[0] != "Index" and attributes[2] != "DEL" \
and attributes[4] != "FIL":
#clear if no collect char from a 1st char or not collect all data
line_temp = ""
break
pass
angle = attributes[5].split(',')
if len(angle) < 3 :
break
data = {
"result": 0,
"id": attributes[1],
"delta_time":attributes[3],
"angle_x":angle[0],
"angle_y":angle[1],
"angle_z":angle[2]
}
# print(attributes)
print(data)
print()
line_temp = ""
else :
line_temp+=chr(line)
ser.close() | true |
a58add5009cf0a389e2faae975de552202a74a96 | Python | robertonscjr/failed-trading-bot | /stuff/hermes/testing/improved-pacman/caesar/__init__.py | UTF-8 | 7,274 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | from poloniex import Poloniex
from poloniex import PoloniexError
from strategy.pacman import Pacman
from utils import conf
from utils import poloniex
from utils.logger import Log, configure_logging
import time
import os
class Caesar(object):
def __init__(self, client):
caesar_log = Log("caesar", "logs/caesar.log")
debug_log = Log("debug", "logs/debug.log")
configure_logging()
running_pairs = {}
minimums = {}
balance_ratio = 5
balance_percentage = 5
while(True):
# Sleep, sweetie, sleep...
# time.sleep(5)
# Handle Poloniex errors
try:
ticker = poloniex.get_ticker(client)
my_orders = poloniex.get_open_orders(client)
order_book = poloniex.get_order_book(client)
balances = poloniex.get_balances(client)
except PoloniexError as e:
caesar_log.log("(%s) - Exception: %s : %s" % ('CAESARBOT', e,
self._time()))
continue
# Operation loop to each pair
for pair in ticker:
# STEP 1: CHECK IF PAIR IS NOT RUNNING
is_running = (pair in running_pairs)
if is_running:
if not running_pairs[pair].isAlive():
del running_pairs[pair]
continue
# STEP 2: UPDATE PARAMETERS AND SPECIAL CONDITIONS
# Parameters
first_coin = pair.split("_")[0]
second_coin = pair.split("_")[1]
lowest_ask = poloniex.lowest_ask(ticker, pair)
highest_bid = poloniex.highest_bid(ticker, pair)
first_balance = poloniex.get_balance(balances, first_coin)
second_balance = poloniex.get_balance(balances, second_coin)
open_orders = my_orders[pair]
balance_avaliable = balance_ratio - len(running_pairs)
# balance = ((first_balance * balance_percentage)
# / balance_avaliable)
# Special conditions
have_cases_enough = highest_bid * 1000 > 1.0
exist_open_orders = len(open_orders) > 0
small_amount = second_balance * highest_bid <= 0.0001
possible_error = (exist_open_orders or second_balance != 0.0)
# SPECIAL CONDITION 1: Check if have cases enough
if not have_cases_enough or first_coin != 'BTC':
continue
# STEP 3: CHECK IF EXISTS ERROR
if possible_error:
# Cancel all existing orders
if exist_open_orders:
poloniex.cancel_orders(client, open_orders)
# STEP 3.1: COME BACK TO THE PAIR
if second_balance != 0.0 and not small_amount:
caesar_log.log("(%s) - Possible error, back to pair: %s" %
(pair, self._time()))
run = Pacman(client, pair, 0.00011, True, caesar_log)
running_pairs[pair] = run
run.start()
else:
# STEP 4: CHECK IF CAN ENTER IN PAIR
if float(first_balance) >= 0.00011:
chart_data = client.returnChartData(pair, 300)
ma_1 = poloniex.ema(chart_data, 3)
ma_2 = poloniex.sma(chart_data, 7)
ma_3 = poloniex.ema(chart_data, 21)
ma = (ma_1 > ma_2 and ma_1 > ma_3)
# lo_ma = poloniex.ema(chart_data, 7)
# hi_ma = poloniex.ema(chart_data, 21)
# candle_open, candle_close = poloniex.candle_info(
# client, pair)
# 1st CONDITION: Hole
hole = (lowest_ask/highest_bid - 1) * 100
# 3rd CONDITION: Close greater than open
# candle_ratio = (candle_close / candle_open - 1.0) * 100.0
# candle_ratio = (poloniex.last(ticker, pair) / candle_open - 1.0) * 100.0
# 4rt CONDITION: Order amount is greater than balance
valid_balance = (poloniex.get_first_order_amount(
order_book, pair, 'asks') >= 0.00011)
# debug_log.log("(%s) - Candle ratio: %.2f / Hole: %.2f : %s" % \
# (pair,
# candle_ratio,
# hole,
# self._time()))
debug_log.log("(%s) - EMA(3): %.8f / SMA(7): %.8f / EMA(21): %.8f / Hole: %.2f: %s" % \
(pair,
ma_1,
ma_2,
ma_3,
hole,
self._time()))
# STEP 5: VERIFY CONDITIONS AND, IF YES, ENTER IN PAIR
if (hole < 0.2 and ma and valid_balance):
#caesar_log.log("(%s) - Candle ratio: %.2f / Hole: %.2f : %s" % \
# (pair,
# candle_ratio,
# hole,
# self._time()))
caesar_log.log("(%s) - EMA(3): %.8f / SMA(7): %.8f / EMA(21): %.8f : %s" % \
(pair,
ma_1,
ma_2,
ma_3,
self._time()))
debug_log.log("(%s) - Enter in pair: %s" % (pair,
self._time()))
caesar_log.log("(%s) - Enter in pair: %s" % (pair,
self._time()))
run = Pacman(client, pair, 0.00011, False, caesar_log)
running_pairs[pair] = run
run.start()
def _time(self):
return time.asctime(time.localtime(time.time()))
if __name__ == "__main__":
client = Poloniex(conf.api_key, conf.secret)
try:
caesar = Caesar(client)
except KeyboardInterrupt:
os._exit(1)
| true |
7436f15ae784cd9e8e1197f4805696d3ee1bdbd7 | Python | emilmueller/Spider | /card.py | UTF-8 | 2,764 | 3.234375 | 3 | [] | no_license | from tkinter import *
from PIL import Image, ImageTk
import random
class Card:
suitChars = ['H','D','C','S']
rankChars = [None,'A','2','3','4','5','6','7','8','9','T','J','Q','K']
posX=-1
posY=-1
suit = -1
rank = -1
showCard = False
image = None
backImage = None
imageWidth = 110 #Pixels
imageHeight = 0
@staticmethod
def makeFullStack(num, shuffle=True):
res = []
for n in range(num):
for s in range(4):
for r in range(1,14):
c = Card(s,r)
res.append(c)
if shuffle:
random.shuffle(res)
return res
@staticmethod
def getCardWidth() -> int:
return int(Card.imageWidth)
@staticmethod
def getCardHeight() -> int:
tempC = Card(0,1)
return int(tempC.getImageHeight())
def __init__(self,suit,rank):
self.suit = suit
self.rank = rank
self.showCard = False
fn = self.getRankChar()+""+self.getSuitChar()
self.image = Image.open("cardsSVG\\"+fn+".png")
imageSize = self.imageWidth/self.image.size[0]
self.image=self.image.resize((int(self.image.size[0]*imageSize),int(self.image.size[1]*imageSize)),Image.ANTIALIAS)
self.imageHeight = self.image.size[1]
self.backImage = Image.open("cardsSVG\\1B.png")
self.backImage=self.backImage.resize((int(self.backImage.size[0]*imageSize),int(self.backImage.size[1]*imageSize)),Image.ANTIALIAS)
def getSuit(self):
return self.suit
def getSuitChar(self):
return self.suitChars[self.suit]
def getRank(self):
return self.rank
def getRankChar(self):
return self.rankChars[self.rank]
def __str__(self):
res = self.getRankChar()+""+self.getSuitChar()
if self.showCard:
res = res +"*"
return res
def __repr__(self):
res = self.getRankChar() + "" + self.getSuitChar()
if self.showCard:
res = res + "*"
return res
def turn(self):
self.showCard = not self.showCard
def isShowCard(self):
return self.showCard
def moveTo(self,x,y):
self.posX=x
self.posY=y
def getImg(self):
if self.showCard:
return ImageTk.PhotoImage(self.image)
else:
return ImageTk.PhotoImage(self.backImage)
def getImageWidth(self):
return self.imageWidth
def getImageHeight(self):
return self.imageHeight
def suits(self,c):
if self.getSuit()==c.getSuit():
return True
else:
return False
def getPosition(self):
return (self.posX, self.posY) | true |
9713faab8938e5256b37ea32e5ec826dd8d7afbf | Python | 1009652/project3-4 | /dispenser-receipt.py | UTF-8 | 2,674 | 3 | 3 | [] | no_license | import serial
import time
from datetime import datetime
from datetime import date
arduino = serial.Serial(port='COM8', baudrate=9600, timeout=1)
def printReceipt(currentTransaction, currentAccount, currentCard, currentAmount):
sendChoice(2)
#Get time
now = datetime.now()
currentTime = now.strftime("%H:%M")
#Get date
today = date.today()
currentDay = today.strftime("%d-%m-%Y")
temp = 0
while True:
readVal = arduino.readline()[:-2]
data = str(readVal, 'UTF-8')
if data == "" or int(data) <= int(temp):
data = temp
else:
temp = data
data = int(data)
print("data-bon: ", data)
if(data == 0):
output = "d," + currentDay
writeOut(output)
elif (data == 1):
output = "t," + currentTime
writeOut(output)
elif (data == 2):
output = "o," + str(currentTransaction)
writeOut(output)
elif (data == 3):
output = "a," + str(currentAccount)
writeOut(output)
elif (data == 4):
output = "c," + str(currentCard)
writeOut(output)
elif (data == 5):
output = "h," + str(currentAmount)
writeOut(output)
elif (data == 6):
break;
def sentNotes(notes50, notes20, notes10):
sendChoice(1)
temp = 0
while True:
readVal = arduino.readline()[:-2]
data = str(readVal, 'UTF-8')
#print(data)
if data == "" or int(data) <= int(temp):
data = temp
else:
temp = data
data = int(data)
print("data: ", data)
if data == 0:
output = "f," + str(notes50)
writeOut(output)
elif data == 1:
output = "s," + str(notes20)
writeOut(output)
elif data == 2:
output = "t," + str(notes10)
writeOut(output)
break
def writeOut(x):
print("x: ", x)
output = x
outputBytes = str.encode(output)
arduino.write(outputBytes)
time.sleep(1)
def sendChoice(choice):
temp = 9
if choice == 1:
output = 'm'
elif choice == 2:
output = 'r'
while(True):
readVal = arduino.readline()[:-2]
data = str(readVal, 'UTF-8')
print("data: ", data)
if data == "":
data = temp
else:
temp = data
data = int(data)
#print("data: ", data)
print("output: ", output)
writeOut(output)
if data != -1:
break
sentNotes(1, 0, 0)
printReceipt(2, 1306, 10, 220)
| true |
b7ea87bb76ec0d12c77a2384a702f84a815fd85d | Python | Xynoclafe/leetcode | /easy/mergeTwoSortedLists.py | UTF-8 | 1,221 | 3.40625 | 3 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
node1 = l1
node2 = l2
if(node1 is None):
return node2
if(node2 is None):
return node1
if(node1.val < node2.val):
head = node1
node1 = node1.next
else:
head = node2
node2 = node2.next
node = head
while(True):
if(node1 == None and node2 == None):
break
if(node1 == None):
node.next = node2
node = node.next
node2 = node2.next
elif(node2 == None):
node.next = node1
node = node.next
node1 = node1.next
elif(node1.val < node2.val):
node.next = node1
node = node.next
node1 = node1.next
else:
node.next = node2
node = node.next
node2 = node2.next
return head
| true |
5425e480633f6c97432712386e47a29a04e6f90b | Python | HugoSilva177/StockData | /web_scraping/fundamentus/tests/test_dao/test_connect_db/test_da_conexao_mongo.py | UTF-8 | 629 | 2.59375 | 3 | [] | no_license | import pytest
from web_scraping.fundamentus.src.connect_db.DAConexaoMongo import DAConexaoMongo
class TestDAConexaoMongo:
@pytest.fixture(params=["info_empresa",
"cotacao_empresa",
"balanco_empresa"])
def da_conexao_mongo(self, request):
return DAConexaoMongo(nome_banco="test_fundamentus", nome_colecao=request.param)
def test_deve_retornar_conexao_com_colecao_no_mongo_db(self, da_conexao_mongo):
retorno_conexao = da_conexao_mongo.get_colecao_mongo()
assert str(type(retorno_conexao)) == "<class 'pymongo.collection.Collection'>"
| true |
b31517ba580abf092c5e44b7473f9e6a59e07280 | Python | AikenH/Aikens_programming_notes | /CODE_Leetcode/2.两数相加.py | UTF-8 | 946 | 3.484375 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=2 lang=python
#
# [2] 两数相加
#
# @lc code=start
# Definition for singly-linked list.
# TODO: try to figure out why the memory use is too much
# TODO: find out out way to solve this problem
# TODO: Familiar with the idea and writing of recursion
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
return self.search(l1,l2,0)
def search(self,r,v,i):
if not r and not v and not i: return None
temp = (r.val if r else 0) + (v.val if v else 0) + i
node = ListNode(temp%10)
node.next = self.search(r.next if r else 0, v.next if v else 0, temp//10)
return node
# if __name__ == "__main__":
# print(Solution.addTwoNumbers(123,234))
# @lc code=end
| true |
8e76c298e35e953a8222e98b49a854923dd1684a | Python | hzwh6910/DNA_features | /ANF.py | UTF-8 | 465 | 2.703125 | 3 | [] | no_license |
def ANF(fastas, **kw):
AA = 'ACGT'
encodings = []
header = ['#', 'label']
for i in range(1, len(fastas[0][1]) + 1):
header.append('ANF.' + str(i))
encodings.append(header)
for i in fastas:
name, sequence, label = i[0], i[1], i[2]
code = [name, label]
for j in range(len(sequence)):
code.append(sequence[0: j + 1].count(sequence[j]) / (j + 1))
encodings.append(code)
return encodings | true |
5b1bac8f0301815dfdc22ca3af498ea52660f8c6 | Python | MichWozPol/Sleep_Disorders_Assessment | /tools/prepare_data_in_csv.py | UTF-8 | 2,129 | 3.265625 | 3 | [] | no_license | import csv
def read_csv():
votes = []
i = 15
last_id = 0
with open('C:\\Users\\micha\\Downloads\\1_wd.csv', 'r') as f:
reader = csv.reader(f)
for u_id, a, b in reader:
if int(u_id) > last_id:
last_id = int(u_id)
last_user_id_plus_1 = last_id + 1
for x in range(i, last_user_id_plus_1):
dict_15 = {14: "'trudnoĹ›ci w pracy'", 15: "'kłopoty finansowe'", 16: "'problemy rodzinne'", 17: "'problemy uczuciowe'",
18: "'smutek i poczucie niskiej wartości'", 19: "'problemy na uczelni'", 20: "'inne'"}
dict_17 = {22: "'regularne godziny wstawania'",
23: "'unikanie źródeł światła niebieskiego minimum godzinę przed spaniem'",
24: "'ostatnia kawa 8h przed pójściem spać '", 25: "'obniżenie temperatury w sypialni przed snem'",
26: "'wypicie szklanki wody przed snem'", 27: "'łóżko jedynie jako miejsce snu i aktywności seksualnej'"}
dict_24 = {34: "'chrapanie'", 35: "'mówienie przez sen'", 36: "'nocne kurcze mięśni'", 37: "'lunatykowanie'",
38: "'nie posiadam takiej osoby'"}
with open('C:\\Users\\micha\\Downloads\\1_wd.csv', 'r') as file:
reader = csv.reader(file)
buf_list = [x]
for user_id, question_id, answer in reader:
#print(f'{user_id}, {question_id}, {answer}')
if int(user_id) == x:
buf_list.append(f"'{answer}'")
is_in_buf_lis(dict_15, buf_list)
is_in_buf_lis(dict_17, buf_list)
is_in_buf_lis(dict_24, buf_list)
votes.append(buf_list)
with open('C:\\Users\\micha\\Downloads\\2_wd.csv', 'w', newline='') as file_out:
writer = csv.writer(file_out)
writer.writerows(votes)
def is_in_buf_lis(dict_n, buf_list):
for key in dict_n:
if dict_n[key] not in buf_list:
buf_list.insert(key, "?")
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
read_csv()
| true |
c18535113bd69391dea1b876e37e80da181f1144 | Python | maclermo/gameoflife | /main.py | UTF-8 | 4,884 | 3.296875 | 3 | [] | no_license | import sys
import pygame
import numpy as np
from random import choice
from math import floor
# Color palette as (R, G, B) tuples: the seven rainbow colors (ROYGBIV)
# plus black and white, for drawing with pygame.
R = (255, 0, 0)        # red
O = (255, 127, 0)      # orange
Y = (255, 255, 0)      # yellow
G = (0, 255, 0)        # green
B = (0, 0, 255)        # blue
I = (75, 0, 130)       # indigo
V = (148, 0, 211)      # violet
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
class GameOfLife:
    """Conway's Game of Life on a toroidal (wrap-around) square grid.

    ``grid`` holds the live (1) / dead (0) state; ``buffer_grid`` is a
    snapshot of the previous generation so every rule reads consistent
    neighbour counts while ``grid`` is being rewritten.
    """

    def __init__(self, size=15, fps=60, play=False):
        self.size = size          # side length of the square grid
        self.fps = fps            # target frames per second for the UI loop
        self.play = play          # False = paused; next() is then a no-op
        self.grid = np.zeros((size, size), dtype=int)
        self.buffer_grid = np.zeros((size, size), dtype=int)

    def findNeighbours(self, x, y):
        """Count live neighbours of (x, y) in the snapshot, wrapping at edges."""
        n = self.size
        total = 0
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx or dy:  # skip the cell itself
                    total += self.buffer_grid[(x + dx) % n, (y + dy) % n]
        return total

    def next(self):
        """Advance one generation; does nothing while paused."""
        if not self.play:
            return
        self.buffer_grid = np.copy(self.grid)
        for x in range(self.size):
            for y in range(self.size):
                self.ruleOne(x, y)
                self.ruleTwo(x, y)
                self.ruleThree(x, y)
                self.ruleFour(x, y)

    def ruleOne(self, x, y):
        # Underpopulation: a live cell with fewer than two live neighbours dies.
        if self.grid[x, y] and self.findNeighbours(x, y) < 2:
            self.grid[x, y] = 0

    def ruleTwo(self, x, y):
        # Survival: a live cell with two or three live neighbours lives on.
        if self.grid[x, y] and self.findNeighbours(x, y) in (2, 3):
            self.grid[x, y] = 1

    def ruleThree(self, x, y):
        # Overpopulation: a live cell with more than three live neighbours dies.
        if self.grid[x, y] and self.findNeighbours(x, y) > 3:
            self.grid[x, y] = 0

    def ruleFour(self, x, y):
        # Reproduction: a dead cell with exactly three live neighbours is born.
        if not self.grid[x, y] and self.findNeighbours(x, y) == 3:
            self.grid[x, y] = 1
def main():
    """Interactive pygame front-end for GameOfLife.

    Controls: ENTER starts/pauses the simulation, SPACE toggles the rainbow
    ("party") draw mode, and a mouse click toggles the clicked cell.
    """
    gol = GameOfLife(size=30, fps=30)
    pygame.init()
    fpsClock = pygame.time.Clock()
    # Window is size*size pixels: `size` cells per side, each `size` px wide.
    screen = pygame.display.set_mode((gol.size ** 2, gol.size ** 2))
    pygame.display.set_caption("Conway's Game Of Life - Press ENTER to start/pause simulation (stopped)")
    game_mode = 0  # 0 = black cells, 1 = random rainbow colours per frame
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    # Toggle the rainbow draw mode.
                    if game_mode:
                        game_mode = 0
                    else:
                        game_mode = 1
                if event.key == pygame.K_RETURN:
                    # Toggle play/pause and reflect the state in the caption.
                    if gol.play:
                        gol.play = False
                        pygame.display.set_caption(
                            "Conway's Game Of Life - Press ENTER to start/pause simulation (stopped)"
                        )
                    else:
                        gol.play = True
                        pygame.display.set_caption(
                            "Conway's Game Of Life - Press ENTER to start/pause simulation (running)"
                        )
            if event.type == pygame.MOUSEBUTTONUP:
                # Map the pixel position to a cell index and toggle it.
                pos_x, pos_y = pygame.mouse.get_pos()
                x = floor(pos_x / gol.size)
                y = floor(pos_y / gol.size)
                if gol.grid[x, y]:
                    gol.grid[x, y] = 0
                else:
                    gol.grid[x, y] = 1
        gol.next()
        # Redraw every cell each frame (no separate screen clear needed).
        for x in range(0, gol.size):
            for y in range(0, gol.size):
                if gol.grid[x, y]:
                    if game_mode:
                        pygame.draw.rect(
                            screen, choice([R, O, Y, G, B, I, V]), (x * gol.size, y * gol.size, gol.size, gol.size), 0
                        )
                    else:
                        pygame.draw.rect(screen, BLACK, (x * gol.size, y * gol.size, gol.size, gol.size), 0)
                else:
                    pygame.draw.rect(screen, WHITE, (x * gol.size, y * gol.size, gol.size, gol.size), 0)
        pygame.display.update()
        fpsClock.tick(gol.fps)
# Script entry point: launch the interactive UI loop.
if __name__ == "__main__":
    main()
| true |
def spiral_by_nested_boxes(matrix_size):
    """Print spiral coordinates of a `matrix_size` x `matrix_size` matrix.

    This version works by drawing TRBL (top-right-bottom-left) boxes,
    outermost ring first, until the entire matrix has been printed.

    Fix: use floor division (``//``) so the ``range`` bounds stay integers
    under Python 3 — ``matrix_size / 2`` yields a float there and ``range``
    raises ``TypeError``.  (Under Python 2 int ``/`` already floored, so
    behaviour is unchanged.)
    """
    # Loop and create square nested boxes
    for box_number in range(0, matrix_size // 2):
        top = left = box_number
        bottom = right = matrix_size - box_number - 1
        for x in range(left, right):        # draw top line going >
            print(x, top)
        for y in range(top, bottom):        # draw right line going \/
            print(right, y)
        for x in range(right, left, -1):    # draw bottom line going <
            print(x, bottom)
        for y in range(bottom, top, -1):    # draw left line going /\
            print(left, y)
    # Odd-width matrices: the single centre cell is not part of any ring.
    if matrix_size % 2 != 0:
        print(matrix_size // 2, matrix_size // 2)
def spiral_by_points(matrix_size):
    """Print spiral coordinates of an n x n matrix, one point per step.

    Visits every cell in clockwise spiral order, turning at each wall of a
    shrinking bounding box (east -> south -> west -> north).
    """
    lo_x = lo_y = 0                   # current box starts at top left
    hi_x = hi_y = matrix_size - 1     # ... and can reach bottom right
    x = y = 0                         # begin at top left
    dx, dy = +1, 0                    # ... heading east
    # Visit each point, print it, then step and turn at the walls.
    for _ in range(matrix_size ** 2):
        print(x, y)
        x, y = x + dx, y + dy
        if dx == +1 and x == hi_x:
            # East wall reached: turn south and shrink the box from the top.
            dx, dy = 0, +1
            lo_y += 1
        elif dy == +1 and y == hi_y:
            # South wall reached: turn west and shrink the box from the right.
            dx, dy = -1, 0
            hi_x -= 1
        elif dx == -1 and x == lo_x:
            # West wall reached: turn north and shrink the box from the bottom.
            dx, dy = 0, -1
            hi_y -= 1
        elif dy == -1 and y == lo_y:
            # North wall reached: turn east and shrink the box from the left.
            dx, dy = +1, 0
            lo_x += 1
| true |
# Interactive weight converter: pounds <-> kilograms (0.45 kg per lb).
weight = float(input("Weight: "))
unit = str(input("(L)bs or (K)g: ")).lower()
if unit == 'l':
    # Input was pounds -> report kilograms.
    converted = weight * 0.45
    print(f'You are {converted} kilograms')
elif unit == 'k':
    # Input was kilograms -> report pounds.
    converted = weight / 0.45
    print(f'You are {converted} pounds')
| true |
8fe220790a9d20b0f2d2bbedd38d0938953037d3 | Python | stephencross/currentweather | /currentweather.py | UTF-8 | 3,421 | 2.6875 | 3 | [] | no_license | #!/usr/bin/python
import sys
import os
# Resolve the Waveshare demo's lib/ and pic/ folders, which live one level
# above this script's directory, and make lib/ importable for waveshare_epd.
libdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'lib')
picdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'pic')
if os.path.exists(libdir):
    sys.path.append(libdir)
import logging
from waveshare_epd import epd2in13b_V3
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
import requests
from datetime import datetime
logging.basicConfig(level=logging.INFO)
logging.info("E-Ink weather API")
## Refresh rate in seconds
weather_refresh = 30 * 60;
# openweathermap API www.openweathermap.org
# Retrieves current weather by zipcode
baseurl = 'http://api.openweathermap.org/data/2.5/weather'
apikey = 'Your-apikey'
# NOTE(review): `zip` shadows the builtin of the same name; harmless in this
# script but worth renaming if the module grows.
zip = '02809'
units = 'imperial'
apicall = baseurl + '?zip=' + zip + '&units=' + units + '&appid=' + apikey
def get_weather():
    """Fetch current conditions from OpenWeatherMap into module globals.

    On success sets desc/temp/feels_like/low/high/degree from the JSON
    response and returns True; on an HTTP error or an unexpected response
    shape logs at debug level and returns False.
    """
    global desc
    global temp
    global feels_like
    global low
    global high
    global degree
    logging.info("Getting current weather.")
    response = requests.get(apicall)
    if response.status_code == 200:
        try:
            jsonResponse = response.json()
            desc = jsonResponse["weather"][0]["description"]
            temp = jsonResponse["main"]["temp"]
            feels_like = jsonResponse["main"]["feels_like"]
            low = jsonResponse["main"]["temp_min"]
            high = jsonResponse["main"]["temp_max"]
            degree = "\N{DEGREE SIGN}"
            return True
        # Fix: the original bare `except:` swallowed everything, including
        # SystemExit/KeyboardInterrupt.  Catch only what .json() and the
        # key/index lookups can actually raise.
        except (ValueError, KeyError, IndexError, TypeError):
            logging.debug("Weather JSON parsing error.")
            return False
    else:
        logging.debug("Weather API call error.")
        return False
# Main display loop: fetch weather every `weather_refresh` seconds and
# render it on the 2.13" tri-colour e-paper panel.
try:
    epd = epd2in13b_V3.EPD()
    font20 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 20)
    while True:
        if get_weather():
            now = datetime.now()
            dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
            logging.info("Init and Clear " + dt_string)
            epd.init()
            epd.Clear()
            # Display weather data
            logging.info("Display weather data.")
            HBlackimage = Image.new('1', (epd.height, epd.width), 255)  # 298*126
            HRYimage = Image.new('1', (epd.height, epd.width), 255)  # 298*126  ryimage: red or yellow image
            drawblack = ImageDraw.Draw(HBlackimage)
            drawry = ImageDraw.Draw(HRYimage)
            drawblack.text((10, 0), dt_string, font = font20, fill = 0)
            drawblack.text((10, 20), 'Bristol', font = font20, fill = 0)
            drawblack.text((10, 40), desc.capitalize(), font = font20, fill = 0)
            # NOTE(review): the y=80 line repeats the (low/high) text already
            # included at y=60 — confirm whether both lines are intended.
            drawblack.text((10, 60), str(temp) + degree + " Feels Like " + str(feels_like) + degree + " (" + str(low) + degree + "/" + str(high) + degree + ")", font = font20, fill = 0)
            drawblack.text((10, 80), "(" + str(low) + degree + "/" + str(high) + degree + ")", font = font20, fill = 0)
            epd.display(epd.getbuffer(HBlackimage), epd.getbuffer(HRYimage))
            time.sleep(2)
            logging.info("Goto Sleep...")
            epd.sleep()
            time.sleep(weather_refresh)
        else:
            # A failed fetch ends the loop via the KeyboardInterrupt handler.
            raise KeyboardInterrupt
except IOError as e:
    logging.info(e)
except KeyboardInterrupt:
    logging.info("ctrl + c:")
    epd2in13b_V3.epdconfig.module_exit()
    exit()
42f676d2131513b236201f74cf013a37894bd1ff | Python | jingchaoyy/IoT_HeatIsland | /multistep_lstm/run.py | UTF-8 | 15,705 | 2.625 | 3 | [] | no_license | """
Created on 8/27/20
@author: Jingchao Yang
"""
import matplotlib.pyplot as plt
import pandas as pd
import time
import math
from sklearn.metrics import mean_squared_error
from statistics import mean
import torch
from torch.utils.data import TensorDataset, DataLoader
from multistep_lstm import multistep_lstm_pytorch
from sklearn import preprocessing
import numpy as np
from numpy import isnan
import seaborn as sns
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
def fill_missing(values):
    """Impute NaN cells in place with the value from the same hour one day earlier.

    Rows are scanned top to bottom, so a freshly imputed value can itself
    serve as the donor for a gap exactly 24 rows further down.  For the
    first 24 rows the index ``row - 24`` is negative, so numpy wraps to the
    end of the array.

    :param values: 2-D numpy array of hourly observations (rows = hours).
    :return: None; *values* is modified in place.
    """
    hours_per_day = 24
    n_rows, n_cols = values.shape
    for r in range(n_rows):
        for c in range(n_cols):
            if isnan(values[r, c]):
                values[r, c] = values[r - hours_per_day, c]
def min_max_scaler(df):
    """Rescale every column of *df* to the [0, 1] range (column-wise min-max).

    Equivalent to sklearn's ``MinMaxScaler`` but implemented with plain
    pandas: the original index/columns are preserved without rebuilding the
    frame, and the local variable no longer shadows the function's own name.
    Matches sklearn's handling of constant columns (zero range is treated
    as 1, so a constant column maps to 0.0) instead of dividing by zero.

    :param df: pandas DataFrame of numeric columns.
    :return: new DataFrame, same shape/index/columns, each column in [0, 1].
    """
    col_min = df.min()
    col_max = df.max()
    # Constant columns would give a 0 denominator; sklearn substitutes 1.
    span = (col_max - col_min).replace(0, 1)
    return (df - col_min) / span
'''pytorch'''
# Use the first GPU when available; all tensors/models are moved via .to(device).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# multi_variate_mode: also feed co-located external weather attributes.
multi_variate_mode = True
# load_model: skip training and restore a previously saved checkpoint instead.
load_model = True
'''data'''
# Sensor list (geohash IDs) and the hourly temperature matrix for those sensors.
geohash_df = pd.read_csv(r'D:\IoT_HeatIsland\exp_data_bak\merged\nodes_missing_5percent.csv',
                         usecols=['Geohash'])
iot_sensors = geohash_df.values.reshape(-1)
iot_df = pd.read_csv(r'D:\IoT_HeatIsland\exp_data_bak\merged\tempMatrix_LA_2019_20.csv',
                     usecols=['datetime'] + iot_sensors.tolist(), index_col=['datetime'])
# External covariates to load in multivariate mode.
ext_name = ['humidity', 'windSpeed']
# ext_name = ['humidity', 'windSpeed', 'dewPoint']
ext_data_scaled = []
if multi_variate_mode:
    ext_data_path = r'D:\IoT_HeatIsland\exp_data_bak\WU_preprocessed_LA\processed\byAttributes'
    for ext in ext_name:
        ext_df = pd.read_csv(ext_data_path + f'\{ext}.csv', index_col=['datetime'])
        # Repeat day-ago imputation until no NaNs remain (gaps > 24h need
        # multiple passes).
        while ext_df.isnull().values.any():
            fill_missing(ext_df.values)
        print(f'NaN value in {ext} df?', ext_df.isnull().values.any())
        ext_data_scaled.append(min_max_scaler(ext_df))
    # Mapping of each IoT sensor to its co-located weather station.
    iot_wu_match_df = pd.read_csv(r'D:\IoT_HeatIsland\exp_data_bak\merged\iot_wu_colocate.csv', index_col=0)
while iot_df.isnull().values.any():
    fill_missing(iot_df.values)
print('NaN value in IoT df?', iot_df.isnull().values.any())
'''all stations'''
selected_vars = iot_sensors
dataset = iot_df
print('selected sensors', dataset.columns)
dataset = dataset.values
# Clamp negative temperature readings to zero before normalization.
dataset[dataset < 0] = 0
print('size', dataset.shape)
# find max and min values for normalization
norm_min = dataset.min()
norm_max = dataset.max()
print('dataset min, max', norm_min, norm_max)
# normalize the data
dataset = (dataset - norm_min) / (norm_max - norm_min)
print('normalized dataset min, max', dataset.min(), dataset.max())
# separate train and test stations (70/30 random split over stations, not time)
train_stations = set(np.random.choice(selected_vars, int(len(selected_vars) * 0.7), replace=False))
test_stations = set(selected_vars) - train_stations
# NOTE(review): indexing a DataFrame with a set works in older pandas but is
# deprecated/removed in newer releases — convert to list if upgrading.
train_data_raw = iot_df[train_stations]
test_data_raw = iot_df[test_stations]
print(train_data_raw.shape)
print(test_data_raw.shape)
print(train_data_raw.columns)
# Sliding-window parameters: 24 input hours predicting 1 output step.
train_window = 24
output_size = 1
if not multi_variate_mode:
    train_data = multistep_lstm_pytorch.Dataset(train_data_raw,
                                                (norm_min, norm_max),
                                                train_window, output_size)
    test_data = multistep_lstm_pytorch.Dataset(test_data_raw,
                                               (norm_min, norm_max),
                                               train_window,
                                               output_size,
                                               test_station=True)
else:
    # Multivariate datasets additionally carry the scaled external
    # attributes matched to each sensor via iot_wu_match_df.
    train_data = multistep_lstm_pytorch.Dataset_multivariate(train_data_raw,
                                                             (norm_min, norm_max),
                                                             train_window,
                                                             output_size,
                                                             ext_data_scaled,
                                                             ext_name,
                                                             iot_wu_match_df)
    test_data = multistep_lstm_pytorch.Dataset_multivariate(test_data_raw,
                                                            (norm_min, norm_max),
                                                            train_window,
                                                            output_size,
                                                            ext_data_scaled,
                                                            ext_name,
                                                            iot_wu_match_df,
                                                            test_station=True)
print('Number of stations in training data: ', len(train_data))
print('Number of stations in testing data: ', len(test_data))
print("Training input and output for each station: %s, %s" % (train_data[0][0].shape, train_data[0][1].shape))
print("Validation input and output for each station: %s, %s" % (train_data[0][2].shape, train_data[0][3].shape))
print("Testing input and output for each station: %s, %s" % (test_data[0][0].shape, test_data[0][1].shape))
'''initialize the model'''
num_epochs = 15
epoch_interval = 1
# https://towardsdatascience.com/choosing-the-right-hyperparameters-for-a-simple-lstm-using-keras-f8e9ed76f046
# hidden_size = int((2/3)*(train_window*len(ext_data_scaled)+1))
hidden_size = 6
# initial_model returns the loss function, the LSTM model and its optimizer.
loss_func, model, optimizer = multistep_lstm_pytorch.initial_model(input_size=train_data[0][0].shape[-1],
                                                                   hidden_size=hidden_size,
                                                                   output_size=output_size,
                                                                   learning_rate=0.001)
# Either train from scratch (tracking per-station train/val losses with a
# simple early-stopping scheme) or restore a saved checkpoint.
if not load_model:
    train_loss, test_loss, mean_loss_train, mean_test_loss = [], [], [], []
    min_val_loss, mean_min_val_loss = np.Inf, np.Inf
    n_epochs_stop = 3  # stop after this many stations without val improvement
    epochs_no_improve = 0
    early_stop = False
    start = time.time()
    # train the model
    for epoch in range(num_epochs):
        running_loss_train = []
        running_loss_val = []
        loss2 = 0
        for idx in range(len(train_data)):
            # Fresh loaders per station; [:, :, 0, :] selects the first feature slice.
            train_loader = DataLoader(TensorDataset(train_data[idx][0][:, :, 0, :].to(device),
                                                    train_data[idx][1][:, :, 0, :].to(device)),
                                      shuffle=True, batch_size=1000, drop_last=True)
            val_loader = DataLoader(TensorDataset(train_data[idx][2][:, :, 0, :].to(device),
                                                  train_data[idx][3][:, :, 0, :].to(device)),
                                    shuffle=True, batch_size=400, drop_last=True)
            loss1 = multistep_lstm_pytorch.train_LSTM(train_loader, model, loss_func, optimizer,
                                                      epoch)  # calculate train_loss
            loss2 = multistep_lstm_pytorch.test_LSTM(val_loader, model, loss_func, optimizer, epoch)  # calculate test_loss
            running_loss_train.append(sum(loss1))
            running_loss_val.append(sum(loss2))
            train_loss.extend(loss1)
            test_loss.extend(loss2)
            if mean(loss2) < min_val_loss:
                # Save the model
                # torch.save(model)
                epochs_no_improve = 0
                min_val_loss = mean(loss2)
            else:
                epochs_no_improve += 1
            if epoch > 5 and epochs_no_improve == n_epochs_stop:
                print('Early stopping!')
                early_stop = True
                break
            else:
                continue
        mean_loss_train.append(mean(running_loss_train))
        mean_test_loss.append(mean(running_loss_val))
        if epoch % epoch_interval == 0:
            print(
                "Epoch: %d, train_loss: %1.5f, val_loss: %1.5f" % (epoch, mean(running_loss_train), mean(running_loss_val)))
        # Epoch-level early stop: stop once the mean validation loss worsens.
        if mean(running_loss_val) < mean_min_val_loss:
            mean_min_val_loss = mean(running_loss_val)
        else:
            print('Early stopping!')
            early_stop = True
        if early_stop:
            print("Stopped")
            break
    end = time.time()
    print(end - start)
    print(model)
    # Loss curves: per-batch and per-epoch means.
    plt.plot(train_loss)
    plt.plot(test_loss)
    plt.show()
    plt.plot(mean_loss_train)
    plt.plot(mean_test_loss)
    plt.show()
    # save trained model
    modelName = int(time.time())
    torch.save(model.state_dict(), r'D:\1_GitHub\IoT_HeatIsland\multistep_lstm\saved_models'
                                   f'\\ep{num_epochs}_neu{hidden_size}_pred{output_size}_{modelName}.pt')
    print('model saved')
else:
    # Restore a previously trained checkpoint instead of training.
    model_path = r'E:\IoT_HeatIsland_Data\data\LA\exp_data\result_multi_point_prediction' \
                 r'\fillmiss_humidity_windSpeed_6neurons_epoch6_24_1\ep15_neu6_pred1_1600364827.pt'
    model.load_state_dict(torch.load(model_path))
# Predict the training dataset of training stations and testing dataset of testing stations
# Each dict maps station id -> (prediction, ground truth), de-normalized back
# to the original temperature scale.
train_pred_orig_dict = dict()
for idx in range(len(train_data)):
    station = train_data.keys[idx]
    with torch.no_grad():
        train_pred = model(train_data[idx][0][:, :, 0, :].to(device))
        train_pred_trans = train_pred * (norm_max - norm_min) + norm_min
        train_orig = train_data[idx][1][:, :, 0, :].reshape(train_pred.shape).to(device)
        train_orig_trans = train_orig * (norm_max - norm_min) + norm_min
        train_pred_orig_dict[station] = (train_pred_trans, train_orig_trans)
test_pred_orig_dict = dict()
for idx in range(len(test_data)):
    station = test_data.keys[idx]
    with torch.no_grad():
        test_pred = model(test_data[idx][0][:, :, 0, :].to(device))
        test_pred_trans = test_pred * (norm_max - norm_min) + norm_min
        test_orig = test_data[idx][1][:, :, 0, :].reshape(test_pred.shape).to(device)
        test_orig_trans = test_orig * (norm_max - norm_min) + norm_min
        test_pred_orig_dict[station] = (test_pred_trans, test_orig_trans)
print(list(test_pred_orig_dict.keys())[0])
# plot baseline and predictions for the first test station
d = {'ori': test_pred_orig_dict[list(test_pred_orig_dict.keys())[0]][1][:, 0].data.tolist(),
     'pred': test_pred_orig_dict[list(test_pred_orig_dict.keys())[0]][0][:, 0].data.tolist()}
pred_df = pd.DataFrame(data=d)
pred_df.to_csv(r'D:\1_GitHub\IoT_HeatIsland\multistep_lstm\saved_models\pred.csv')
pred_df.plot()
plt.xlabel('time (hour)')
plt.ylabel('temperature (F)')
plt.show()
# getting r2 score for mode evaluation
model_score = r2_score(pred_df.pred, pred_df.ori)
print("R^2 Score: ", model_score)
# calculate root mean squared error (and MAE) per station, in Celsius:
# predictions are Fahrenheit, hence the (x - 32) * 5/9 conversion below.
trainScores_stations, trainScores_stations_mae = dict(), dict()
testScores_stations, testScores_stations_mae = dict(), dict()
# for key in train_data.keys:
#     trainScores_stations[key] = math.sqrt(mean_squared_error(train_pred_orig_dict[key][0].data.tolist(),
#                                                              train_pred_orig_dict[key][1].data.tolist()))
#     testScores_stations_mae[key] = mean_absolute_error(train_pred_orig_dict[key][0].data.tolist(),
#                                                        train_pred_orig_dict[key][1].data.tolist())
#
# for key in test_data.keys:
#     testScores_stations[key] = math.sqrt(mean_squared_error(test_pred_orig_dict[key][0].data.tolist(),
#                                                             test_pred_orig_dict[key][1].data.tolist()))
#     testScores_stations_mae[key] = mean_absolute_error(test_pred_orig_dict[key][0].data.tolist(),
#                                                        test_pred_orig_dict[key][1].data.tolist())
for key in train_data.keys:
    trainScores_stations[key] = math.sqrt(mean_squared_error((train_pred_orig_dict[key][0].data.cpu().numpy() - 32) * 5.0/9.0,
                                                             (train_pred_orig_dict[key][1].data.cpu().numpy() - 32) * 5.0/9.0))
    trainScores_stations_mae[key] = mean_absolute_error((train_pred_orig_dict[key][0].data.cpu().numpy() - 32) * 5.0/9.0,
                                                        (train_pred_orig_dict[key][1].data.cpu().numpy() - 32) * 5.0/9.0)
for key in test_data.keys:
    testScores_stations[key] = math.sqrt(mean_squared_error((test_pred_orig_dict[key][0].data.cpu().numpy() - 32) * 5.0/9.0,
                                                            (test_pred_orig_dict[key][1].data.cpu().numpy() - 32) * 5.0/9.0))
    testScores_stations_mae[key] = mean_absolute_error((test_pred_orig_dict[key][0].data.cpu().numpy() - 32) * 5.0/9.0,
                                                       (test_pred_orig_dict[key][1].data.cpu().numpy() - 32) * 5.0/9.0)
print('max train RMSE', max(trainScores_stations.values()))
print('min train RMSE', min(trainScores_stations.values()))
score_df = pd.DataFrame(trainScores_stations.values())
score_df.to_csv(r'D:\1_GitHub\IoT_HeatIsland\multistep_lstm\saved_models\trainScores.csv')
print('max test RMSE', max(testScores_stations.values()))
print('min test RMSE', min(testScores_stations.values()))
score_df = pd.DataFrame(testScores_stations.values())
score_df.to_csv(r'D:\1_GitHub\IoT_HeatIsland\multistep_lstm\saved_models\testScores.csv')
print('max train MAE', max(trainScores_stations_mae.values()))
print('min train MAE', min(trainScores_stations_mae.values()))
print('max test MAE', max(testScores_stations_mae.values()))
print('min test MAE', min(testScores_stations_mae.values()))
# using 3-sigma for selecting high loss stations: any station whose RMSE
# exceeds mean + 3*std is flagged as anomalous.
trainScores_stations_df = pd.DataFrame.from_dict(trainScores_stations, orient='index', columns=['value'])
sigma_3 = (3 * trainScores_stations_df.std() + trainScores_stations_df.mean()).values[0]
anomaly_iot_train = trainScores_stations_df.loc[trainScores_stations_df['value'] >= sigma_3]
print('High loss stations (train):', anomaly_iot_train)
testScores_stations_df = pd.DataFrame.from_dict(testScores_stations, orient='index', columns=['value'])
sigma_3 = (3 * testScores_stations_df.std() + testScores_stations_df.mean()).values[0]
anomaly_iot_test = testScores_stations_df.loc[testScores_stations_df['value'] >= sigma_3]
print('High loss stations (test):', anomaly_iot_test)
'''keras'''
# aggr_df = pd.read_csv('/Users/jc/Documents/GitHub/Fresh-Air-LA/data/aggr_la_aq_preprocessed.csv', index_col=False)
# print(aggr_df.head())
#
# vars = list(set(aggr_df.columns[1:]) - set(['datetime']))
#
# sensors = pd.read_csv('/Users/jc/Documents/GitHub/Fresh-Air-LA/data/sensors_la_preprocessed.csv',
# index_col=False, dtype=str)
# print(sensors.head())
#
# selected_vars = [var for var in vars if var.split('_')[1] == 'PM2.5']
# print(selected_vars)
#
# # plot the timeseries to have a general view
# selected_df = aggr_df[selected_vars]
# selected_df.index = aggr_df['datetime']
# if selected_df.shape[1] > 5:
# for i in range(0, selected_df.shape[1], 5):
# selected_df_plot = selected_df[selected_df.columns[i:(i+5)]]
# selected_df_plot.plot(subplots=True)
# plt.show()
# variable = '060371201_PM2.5'
# start = time.time()
#
# multistep_lstm_keras.encoder_decoder_LSTM_univariate(variable)
# # multistep_lstm_keras.encoder_decoder_LSTM_multivariate(variable)
#
# end = time.time()
# print(end - start)
| true |
3a83bc7ec7f791a405da5d2a3c4715b82bc74a09 | Python | haruyasu/improve_python | /interview_searching_and_sorting.py | UTF-8 | 1,220 | 4.09375 | 4 | [] | no_license | # Binary Search
def binary_search(arr, ele):
    """Return True if *ele* occurs in the sorted sequence *arr*.

    Iterative halving of the [lo, hi] search window; O(log n).
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] == ele:
            return True
        if ele < arr[mid]:
            hi = mid - 1    # target is in the left half
        else:
            lo = mid + 1    # target is in the right half
    return False
# Demo: 6 is not in the list, so this prints False.
arr = [1, 2, 3, 4, 5]
print(binary_search(arr, 6))
def rec_binary_search(arr, ele):
    """Return True if *ele* occurs in the sorted sequence *arr* (recursive).

    Recurses on the half-slice that could still contain *ele*; note each
    call copies its slice, so this is O(n) space per level.
    """
    if not arr:
        return False
    mid = len(arr) // 2
    pivot = arr[mid]
    if pivot == ele:
        return True
    if ele < pivot:
        return rec_binary_search(arr[:mid], ele)
    return rec_binary_search(arr[mid + 1:], ele)
# Demo input for the recursive variant (call left commented out).
arr = [1, 2, 3, 4, 5]
# print(rec_binary_search(arr, 5))
####
# Bubble Sort
def bubble_sort(arr):
    """Sort *arr* in place in ascending order using bubble sort (O(n^2)).

    Each outer pass bubbles the largest remaining element to the end of the
    unsorted prefix, so the prefix shrinks by one each pass.

    Fix: removed the leftover per-iteration debug ``print`` calls, which
    made every call emit O(n^2) lines of noise; swap uses tuple assignment.
    """
    for unsorted_len in range(len(arr) - 1, 0, -1):
        for k in range(unsorted_len):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]
# Demo: sort in place and show the result -> [1, 2, 3, 5].
arr = [5, 3, 2, 1]
bubble_sort(arr)
print(arr)
class Node:
    """A singly linked list node: payload plus a pointer to the next node."""

    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    """Singly linked list with head insertion and three middle-element finders."""

    def __init__(self):
        self.head = None
        self.num_of_nodes = 0

    def insert_a_node(self, data):
        """Prepend *data* at the head in O(1)."""
        node = Node(data)
        # Works for the empty list too: node.next simply becomes None.
        node.next = self.head
        self.head = node
        self.num_of_nodes += 1

    def traverse(self):
        """Print the list head-to-tail as 'a->b->'."""
        parts = []
        cursor = self.head
        while cursor:
            parts.append(str(cursor.data) + "->")
            cursor = cursor.next
        print("".join(parts))

    def find_middle_element_by_using_mid_positon(self):
        """Walk num_of_nodes // 2 links from the head and return that node's data."""
        steps = self.num_of_nodes // 2
        cursor = self.head
        for _ in range(steps):
            cursor = cursor.next
        return cursor.data

    def using_two_pointers(self):
        """Slow/fast pointer scan: slow sits at the middle when fast runs off the end."""
        slow = fast = self.head
        while fast and fast.next:
            fast = fast.next.next
            slow = slow.next
        return slow.data

    def find_mid_element_by_tracking_odd_positions(self):
        """Advance a trailing pointer on every odd position count; it ends at the middle."""
        position = 1
        cursor = self.head
        mid = self.head
        while cursor:
            position += 1
            cursor = cursor.next
            if position % 2 == 1:
                mid = mid.next
        return mid.data
# Demo: build a 5-element list (head insertion reverses insertion order)
# and show that all three middle-finding strategies agree.
if __name__ == '__main__':
    ll = LinkedList()
    ll.insert_a_node(2)
    ll.insert_a_node("df")
    ll.insert_a_node("asd")
    ll.insert_a_node(34)
    ll.insert_a_node("w")
    # ll.insert_a_node("sd")
    ll.traverse()
    print(ll.find_middle_element_by_using_mid_positon())
    print(ll.using_two_pointers())
    print(ll.find_mid_element_by_tracking_odd_positions())
| true |
e06039f32692300d2c770cd1b842210cf0ac7573 | Python | heyuan7676/Gibbs_sampler | /blocked-sampler.py | UTF-8 | 13,210 | 2.625 | 3 | [] | no_license | import os
import sys
import pandas as pd
pd.set_option('display.precision', 13)
import numpy as np
from itertools import chain
from collections import Counter
from numpy import random
import time
import bisect
from copy import copy
class literature:
    """Corpus container for the blocked Gibbs sampler.

    Holds the train/test `document` objects, the shared vocabulary index,
    and the global word-topic count matrices: Nwk (topics shared by both
    corpora) and Nwk_0 / Nwk_1 (corpus-specific counts).  NOTE: this module
    is Python 2 code (print statements elsewhere); alpha/beta/lmda are read
    from module-level globals, not passed in.
    """
    def __init__(self, filename_train, filename_test, K):
        ## all topics
        self.K = K
        ## all documents: training + test
        self.documents_train, self.documents_test = list(), list()
        ## train
        # NOTE(review): files are opened without being closed.
        datafile = open(filename_train)
        for l in datafile:
            self.documents_train.append(document(l,K))
        ## test
        datafile = open(filename_test)
        for l in datafile:
            self.documents_test.append(document(l,K))
        self.D = len(self.documents_train) + len(self.documents_test)
        ## all words (deduplicated union over both corpora)
        all_words = list(chain(*[d.words for d in (self.documents_train + self.documents_test)]))
        all_words = list(set(all_words))
        ## length of all words
        V = len(all_words)
        self.V = V
        ## words index in all documents
        self.words_index = dict(zip(all_words, range(V)))
        ## fill in index for all documents (replace word strings with ints)
        for d in self.documents_train:
            d.word_index(self.words_index)
        for d in self.documents_test:
            d.word_index(self.words_index)
        ## initialize Nwk for training data
        self.cal_Nwk()
    def cal_Nwk(self):
        ## Nwk: aggregate per-document word-topic counts over training docs
        V = self.V
        K = self.K
        self.Nwk, self.Nwk_0, self.Nwk_1 = np.zeros([V,K]), np.zeros([V,K]), np.zeros([V,K])    ### V x K
        ## Nwk(s): V x K. ordered lists of ordered lists
        for d in self.documents_train:
            d.cal_nwk(self.V, self.K)
            self.Nwk = self.Nwk + d.nwk
            self.Nwk_0 = self.Nwk_0 + d.nwk_0
            self.Nwk_1 = self.Nwk_1 + d.nwk_1
        # Per-topic totals (column sums), 1 x K each.
        self.Nk = np.sum(self.Nwk, axis = 0)
        self.Nk_0 = np.sum(self.Nwk_0, axis = 0)
        self.Nk_1 = np.sum(self.Nwk_1, axis = 0)
    def gibbs_sampler(self, datatype):
        # One Gibbs sweep over every document of the requested split.
        if datatype == 'train':
            for d in self.documents_train:
                d.update_z_x_train(self.Nk, self.Nk_0, self.Nk_1, self.Nwk, self.Nwk_0, self.Nwk_1, self.V, self.K)
        elif datatype == 'test':
            for d in self.documents_test:
                d.update_z_x_test(self.Nk, self.Nk_0, self.Nk_1, self.Nwk, self.Nwk_0, self.Nwk_1, self.V, self.K)
    def MAP_estimate(self, datatype):
        # MAP estimates of theta (per document) and, for train, phi matrices.
        if datatype == 'train':
            for d in self.documents_train:
                d.estimate_theta(self.K)
            ### equation 6 - 7
            self.phi = np.divide((self.Nwk + beta), (self.Nk + self.V * beta))    ### V x K
            self.phi_0 = np.divide((self.Nwk_0 + beta), (self.Nk_0 + self.V * beta))
            self.phi_1 = np.divide((self.Nwk_1 + beta), (self.Nk_1 + self.V * beta))
            self.phi_sum = np.sum(self.phi, axis = 0)
            self.phi_0_sum = np.sum(self.phi_0, axis = 0)
            self.phi_1_sum = np.sum(self.phi_1, axis = 0)
        elif datatype == 'test':
            ### theta for each document
            for d in self.documents_test:
                d.estimate_theta(self.K)
    def cal_llk(self, datatype):
        ### equation 8: log-likelihood of the requested split under the
        ### lambda-mixture of shared and corpus-specific topic-word dists.
        llk = 0.0
        if datatype == 'train':
            documents = self.documents_train
        else:
            documents = self.documents_test
        for d in documents:
            each_d = 0.0
            if d.corpus == 1:
                phi_c = self.phi_1
            else:
                phi_c = self.phi_0
            for w in d.words:
                current_v = w
                most_inner = (1-lmda)*self.phi[current_v,] + lmda*(phi_c[current_v,])
                unit = np.multiply(d.theta, most_inner)    ## 1 x K
                sum_z = sum(unit)
                log_sum_z = np.log(sum_z)
                each_d += log_sum_z
            llk += each_d
        return llk
class document:
    """A single document for the blocked Gibbs sampler.

    Input line format: "<corpus-id> <word> <word> ...".  Per-word latent
    state: z[t] is the topic assignment, x[t] chooses between the shared
    topic-word distribution (x=0) and the corpus-specific one (x=1).
    NOTE: Python 2 code — print statements and integer `/` (idx / K below)
    are relied upon; alpha/beta/lmda are module-level globals.
    """
    def __init__(self, l, K):
        line = l.rstrip().split(' ')
        self.corpus = int(line[0])
        self.words = line[1:]
        ### randomly assign z and x when creating the object
        self.v = len(self.words) # total number of words in document d
        random.seed(0)
        self.z = random.choice(range(K), size = self.v) # z: 1 x v. the index of topics for all words.
        self.x = random.choice([0,1], size = self.v) # x: 1 x v. which phi the word is drawn from.
        self.ndk = np.zeros(K)
        self.cal_ndk(K)
    def word_index(self, words_index):
        # Replace word strings with their integer vocabulary indices.
        for t in range(self.v):
            self.words[t] = words_index[self.words[t]]
    def cal_nwk(self, V, K):
        ### append assignments (z) to words. nwk: V x K. ordered lists of ordered lists
        self.nwk, self.nwk_0, self.nwk_1 = np.zeros([V,K]), np.zeros([V,K]), np.zeros([V,K])
        for t in range(self.v):
            w_idx = self.words[t]
            w_z = self.z[t]
            w_x = self.x[t]
            if w_x == 0:
                self.nwk[w_idx, w_z] += 1
            elif w_x == 1 and self.corpus == 0:
                self.nwk_0[w_idx, w_z] += 1
            elif w_x == 1 and self.corpus == 1:
                self.nwk_1[w_idx, w_z] += 1
    def cal_ndk(self, K):
        ### number of words assigned in each topic
        ndk = Counter(self.z)
        for k in range(K):
            self.ndk[k] = ndk[k] ## 1 x K
    def update_z_x_train(self, Nk, Nk_0, Nk_1, Nwk, Nwk_0, Nwk_1, V, K):
        ### update z and x word by word; the global count matrices passed in
        ### are mutated in place so later words see the updated counts.
        Nd = self.v - 1
        for t in range(self.v):
            current_k = self.z[t] # current topic / k
            current_v = self.words[t] # current word location
            current_x = self.x[t] # current x
            ### update z and x
            ## for numerator
            self.ndk[current_k] -= 1
            ## first term: doens't change for x = 0/1
            first_term = np.divide((self.ndk + alpha), (Nd + K * alpha)) ## 1 x K
            ## second term
            if self.corpus == 0:
                temp_Nwk = Nwk_0
                temp_Nk = Nk_0
            elif self.corpus == 1:
                temp_Nwk = Nwk_1
                temp_Nk = Nk_1
            # delete the current assignment (leave-one-out counts)
            if current_x == 0:
                Nk[current_k] -= 1
                Nwk[current_v, current_k] -= 1
            elif current_x == 1:
                temp_Nk[current_k] -= 1
                temp_Nwk[current_v, current_k] -= 1
            ## x = 0: shared topic-word distribution
            second_term = np.divide((Nwk[current_v,] + beta), (Nk + V * beta)) ## 1 x K
            prop_0 = (1-lmda) * np.multiply(first_term, second_term) ## 1 x K
            ## x = 1: corpus-specific distribution
            second_term = np.divide((temp_Nwk[current_v,] + beta), (temp_Nk + V * beta)) ## 1 x K
            prop_1 = lmda * np.multiply(first_term, second_term) ## 1 x K
            # add back the deletion
            if current_x == 0:
                Nk[current_k] += 1
                Nwk[current_v, current_k] += 1
            elif current_x == 1:
                temp_Nk[current_k] += 1
                temp_Nwk[current_v, current_k] += 1
            ## sample x, z jointly from the 2K-way categorical (blocked step)
            pdf = list(prop_0) + list(prop_1)
            pdf = pdf / np.sum(pdf)
            if (pdf<0).any():
                print current_v
            assert (pdf>=0).all()
            random_zx = np.random.random_sample()
            idx = bisect.bisect_left(np.cumsum(pdf), random_zx)
            # Decode the flat index: topic = idx mod K, x = idx div K (py2 int /).
            self.z[t] = idx % K
            self.x[t] = idx / K
            ## update Nwk and Nwk_c, and ndk for the new assignment
            if current_x == 0 and self.x[t] == 1:
                Nk[current_k] -= 1
                Nwk[current_v, current_k] -= 1
                temp_Nwk[current_v, self.z[t]] += 1
                temp_Nk[self.z[t]] += 1
            elif current_x == 0 and self.x[t] == 0:
                Nk[current_k] -= 1
                Nwk[current_v, current_k] -= 1
                Nwk[current_v, self.z[t]] += 1
                Nk[self.z[t]] += 1
            elif current_x == 1 and self.x[t] == 0:
                temp_Nk[current_k] -= 1
                temp_Nwk[current_v, current_k] -= 1
                Nwk[current_v, self.z[t]] += 1
                Nk[self.z[t]] += 1
            elif current_x == 1 and self.x[t] == 1:
                temp_Nk[current_k] -= 1
                temp_Nwk[current_v, current_k] -= 1
                temp_Nwk[current_v, self.z[t]] += 1
                temp_Nk[self.z[t]] += 1
            self.ndk[self.z[t]] += 1
    def update_z_x_test(self, Nk, Nk_0, Nk_1, Nwk, Nwk_0, Nwk_1, V, K):
        ### update z and x word by word; test documents do NOT contribute to
        ### (or modify) the global count matrices — counts are read-only here.
        Nd = self.v - 1
        for t in range(self.v):
            current_k = self.z[t] # current topic / k
            current_v = self.words[t] # current word location
            current_x = self.x[t] # current x
            ### update z and x
            ## for numerator
            self.ndk[current_k] -= 1
            ## first term: doesn't change for x = 0 and x = 1
            first_term = np.divide((self.ndk + alpha), (Nd + K * alpha)) ## 1 x K
            ## second term
            # x = 0
            second_term = np.divide((Nwk[current_v,] + beta), (Nk + V * beta)) ## 1 x K
            prop_0 = (1-lmda) * np.multiply(first_term, second_term) ## 1 x K
            ## x = 1
            # choose c
            if self.corpus == 0:
                temp_Nwk = Nwk_0
                temp_Nk = Nk_0
            elif self.corpus == 1:
                temp_Nwk = Nwk_1
                temp_Nk = Nk_1
            ## calcualte
            second_term = np.divide((temp_Nwk[current_v,] + beta), (temp_Nk + V * beta)) ## 1 x K
            prop_1 = lmda * np.multiply(first_term, second_term) ## 1 x K
            ## sample x, z
            pdf = list(prop_0) + list(prop_1)
            pdf = pdf / np.sum(pdf)
            if (pdf<0).any():
                print current_v, prop_0, prop_1, self.ndk, Nwk[current_v,], temp_Nwk[current_v,]
            assert (pdf>=0).all()
            random_zx = np.random.random_sample()
            idx = bisect.bisect_left(np.cumsum(pdf), random_zx)
            self.z[t] = idx % K
            self.x[t] = idx / K
            ### update ndk
            self.ndk[self.z[t]] += 1
    def estimate_theta(self, K):
        ### equation 5: MAP estimate of the document-topic distribution
        self.theta = np.divide((self.ndk + alpha), (self.v + K * alpha)) ### D x K
def phi_addindex(phi, words_index):
    """Attach vocabulary words as the index of a phi (topic-word) matrix.

    phi         : 2-D array-like whose rows align with vocabulary positions.
    words_index : dict mapping word -> row position in `phi`.
    Returns a pandas DataFrame of `phi` indexed by the words.
    """
    # Invert word->position once (O(V)) instead of scanning the values list
    # for every row; the original was O(V^2) and additionally relied on the
    # Python-2-only list behaviour of dict.keys()/dict.values().
    position_to_word = dict(zip(words_index.values(), words_index.keys()))
    words = [position_to_word[t] for t in range(len(phi))]
    phi_df = pd.DataFrame(phi)
    phi_df['words'] = words
    return phi_df.set_index(['words'])
def main(train='input-train.txt', test='input-test.txt', output='output.txt', K=10, lmda=.5, alpha=.1, beta=.01, iter_max=1100, burn_in=1000):
    """Run the cross-corpus LDA sampler for iter_max sweeps and dump results.

    Writes <output>-trainll / -testll (log-likelihood traces), -t (seconds per
    sweep), -theta (post-burn-in averaged document mixtures) and -phi/-phi0/-phi1
    (averaged topic-word matrices with word indices).  Python 2 code (xrange).
    """
    filename_train = os.path.join(train)
    filename_test = os.path.join(test)
    data = literature(filename_train, filename_test, K)
    theta = np.zeros([len(data.documents_train), data.K])
    phi, phi_0, phi_1 = np.zeros([data.V, data.K]), np.zeros([data.V, data.K]), np.zeros([data.V, data.K])
    llk_train, llk_test = [], []
    t = []
    for it in range(iter_max):
        START = time.time()
        ### train
        data.gibbs_sampler('train') ## (a)
        data.MAP_estimate('train') ## (b)
        ### test
        data.gibbs_sampler('test')
        data.MAP_estimate('test')
        # Only accumulate estimates after the burn-in period.
        if it > burn_in: ## (c)
            phi += data.phi
            phi_0 += data.phi_0
            phi_1 += data.phi_1
            for d in xrange(len(data.documents_train)):
                theta[d] = data.documents_train[d].theta
        ### llk
        llk_train.append(data.cal_llk('train'))
        llk_test.append(data.cal_llk('test'))
        t.append((time.time() - START))
    pd.DataFrame(llk_train).to_csv('%s-trainll' % output, sep=' ', index=False , header=False)
    pd.DataFrame(llk_test).to_csv('%s-testll' % output, sep=' ', index=False, header=False)
    pd.DataFrame(t).to_csv('%s-t' % output, sep=' ', index=False, header=False)
    # Averages are over the (iter_max - burn_in) accumulated sweeps.
    pd.DataFrame(theta / (iter_max - burn_in)).to_csv('%s-theta' % output, sep=' ', index=False, header=False)
    phi = phi_addindex(phi / (iter_max - burn_in), data.words_index)
    phi.to_csv('%s-phi' % output, sep=' ', index=True, header=False)
    phi0 = phi_addindex(phi_0 / (iter_max - burn_in), data.words_index)
    phi0.to_csv('%s-phi0' % output, sep=' ', index=True, header=False)
    phi1 = phi_addindex(phi_1 / (iter_max - burn_in), data.words_index)
    phi1.to_csv('%s-phi1' % output, sep=' ', index=True, header=False)
if __name__ == '__main__':
    # CLI: train test output K lmda alpha beta iter_max burn_in (positional).
    para = sys.argv[1:]
    train, test, output = para[:3]
    K = int(para[3])
    lmda, alpha, beta = [float(x) for x in para[4:7]]
    iter_max, burn_in = [int(x) for x in para[7:]]
    main(train, test, output, K, lmda, alpha, beta, iter_max, burn_in)
| true |
23b0e52951ded69bbb8fe3158fdcef7596a741b6 | Python | SpirinEgor/HSE.optimization | /assignment_1/hw1_optimization.py | UTF-8 | 2,502 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | from typing import Callable, Tuple, Dict
import numpy
from assignment_1.optimize import BrentNumericalRecipes, OptimizeResult
# Требуется реализовать метод: который будет находить минимум функции на отрезке [a,b]
def optimize(
    oracle: Callable[[float], Tuple[float, float]],
    a: float,
    b: float,
    eps: float = 1e-8,
    optimizer_params: Dict = None,
) -> numpy.ndarray:
    """Find the minimum of a function on [a, b] with Brent's method.

    ``oracle(x)`` returns a (value, derivative) pair; ``eps`` is the target
    accuracy.  Extra constructor options for the optimizer can be supplied via
    ``optimizer_params``.
    """
    params = {} if optimizer_params is None else optimizer_params
    brent = BrentNumericalRecipes(**params)
    result: OptimizeResult = brent.get_optimize_function()(oracle, a, b, eps)
    return numpy.array(result.x_min)
# Задание состоит из 2-х частей — реализовать любой алгоритм оптимизации по выбору
# Провести анализ работы алгоритма на нескольких функция, построить графики сходимости вида:
# кол-во итераций vs log(точность); время работы vs log(точность)
# Изучить, как метод будет работать на неунимодальных функций и привести примеры, подтверждающие поведение
# (например, что будет сходится в ближайший локальный минимум)
# Критерий оценки:
# 4-5 баллов — решение работает и дает правильный ответ,
# код реализации не вызывает вопрос + ipynb отчет с исследованием работы метода
# Оценка по дальнейшим результатам: будет 4-5 тестовых функций.
# На каждой будет для всех сданных решений строится распределение времени работы
# Далее по квантилям распределения: 10: 95%, 9: 85%, 8: 75%, 7: 50% — по каждому заданию независимо,
# далее среднее по всем
# Дополнительно требование на 8+ баллов: минимальное требование обогнать бейзлайн-решение
# (скрыт от вас, простая наивная реализация одного из методов с лекции)
| true |
c613a65aa9fe770dd9b9082495dd99ab122fb3bb | Python | donmajkelo/kurs_Python | /60_5.py | UTF-8 | 641 | 3.90625 | 4 | [] | no_license |
# imie= ["Mike", "Zdzich", "Rysio"]
#
# def przywitajSie(imie):
# for i in range(10):
# print(f"Hello {imie}")
#
# for i in range(len(imie)):
# przywitajSie(imie[i])
# #
# def suma(liczba1, liczba2, liczba3, liczba4):
# wynik=liczba1+liczba2+liczba3+liczba4
# print(wynik)
#
# suma(2,34,23,42)
def szymon(liczba1, liczba2=0, liczba3=0, liczba4=0):
    """Print the sum of up to four numbers (returns None)."""
    print(liczba1 + liczba2 + liczba3 + liczba4)
# suma(2,34,23,42)
def bartek(liczba1, liczba2=0, liczba3=0, liczba4=0):
    """Return the sum of up to four numbers; omitted ones default to 0."""
    total = liczba1
    total = total + liczba2
    total = total + liczba3
    total = total + liczba4
    return total
# Demo: bartek() returns the sum, szymon() only prints it.
x=bartek(3,18)
y=szymon(3,8)  # NOTE(review): szymon() returns None, so y is always None
print(x)
print(y)
8aa1e483f0aad0d5c214810ed4cac72ab6f64618 | Python | EthanKoland/CrackingCodesWithPython | /EncryptText.py | UTF-8 | 600 | 2.546875 | 3 | [] | no_license | import transpotionHack, transportionCipher, os, sys
def main():
    """Encrypt frankenstein.txt with a transposition cipher, write the result,
    then decipher it again as a sanity check (FrankCheck.txt)."""
    filename = "frankenstein.txt"
    key = 10
    with open(filename, "r") as text:
        s = text.read()
    enc = transportionCipher.encrypt(key, s)
    # Warn (and wait for Enter) before clobbering an existing output file.
    if(os.path.exists("frankensteinEcrypted.txt")):
        input("THe file will be over written")
    with open("frankensteinEcrypted.txt", "w") as output:
        output.write(enc)
    # Round-trip check: decipher with the same key and save for comparison.
    dec = transpotionHack.decipher(10, enc)
    with open("FrankCheck.txt", "w") as check:
        check.write(dec)
    print(enc)
if (__name__ == "__main__"):
    main()
2644b30be4f6dd3430b6f25cf9c112e705ce60ee | Python | homutovan/Diffraction | /converter.py | UTF-8 | 262 | 2.609375 | 3 | [] | no_license | import json
def converter(path):
    """Rewrite the JSON file at `path` in place: ASCII-escaped, 2-space indent."""
    with open(path, encoding='utf-8') as source:
        payload = json.load(source)
    serialised = json.dumps(payload, ensure_ascii=True, indent=2)
    with open(path, 'w', encoding='utf-8') as sink:
        sink.write(serialised)
# Entry point: normalise the fixtures file in place when the module is run.
path = 'fixtures.json'
converter(path)
7ecff60b26980769146d550392d893761103c22b | Python | martinphellwig/ghretrac | /igecas/models.py | UTF-8 | 5,966 | 2.765625 | 3 | [] | no_license | """
IGECAS models
"""
from django.db import models
from django.core.exceptions import ValidationError
from . import coercion
# pylint: disable=locally-disabled, too-few-public-methods, no-member
class _Abstract(models.Model):
    """
    All models should inherit from here, this adds convenient timestamps.
    """
    class Meta:
        """
        Make sure that django knows this is a meta field.
        """
        abstract = True
    # Set once when the row is first created.
    dts_insert = models.DateTimeField(auto_now_add=True)
    # Refreshed automatically on every save().
    dts_update = models.DateTimeField(auto_now=True)
    # Soft-delete marker; NULL means the row is live.  Hidden from forms.
    dts_delete = models.DateTimeField(null=True, blank=True, editable=False)
class Person(_Abstract):
    """
    This table serves as a connector to an external table that refers to an
    individual, the reason for its existence is so that the datapoints can be
    used out of the context of the individual and thus ensuring anonymity.
    """
    # Opaque external identifier; no personal data is stored here.
    identifier = models.CharField(max_length=128, unique=True)
    def __str__(self):
        return self.identifier
class Origin(_Abstract):
    """
    Where does the prototype come from, e.g. Sequence, Internal, Customer
    """
    value = models.CharField(max_length=32, unique=True)
    def __str__(self):
        return self.value
class Prototype(_Abstract):
    """
    What kind of prototype is this, e.g.: SNV, IDV, CUS, Derived, Survey
    """
    value = models.CharField(max_length=32, unique=True)
    origin = models.ForeignKey(Origin, related_name='prototypes')
    def __str__(self):
        return self.value +' (' + str(self.origin) + ') '
class Coercion(_Abstract):
    """
    All datapoints are stored as strings, this list functions that describe how
    a datapoint is coerced from a string to the datatype and back again.
    """
    # Must match a callable name in the `coercion` module (see Data.clean()).
    value = models.CharField(max_length=64, unique=True)
    def __str__(self):
        return self.value
class DataType(_Abstract):
    """
    DataType defines what kind of data the datapoint is, for example;
    name = rs12913832
    description = BLUEEYE
    prototype = FK 1 (SNP)
    coercion = Enum Coercion
    """
    identifier = models.CharField(max_length=32)
    description = models.TextField(null=True, blank=True)
    prototype = models.ForeignKey(Prototype, related_name='datatypes')
    coercion = models.ForeignKey(Coercion, related_name='datatypes')
    def __str__(self):
        return self.identifier + ' (' + str(self.prototype) + ')'
class TypeValue(_Abstract):
    """
    What possible values can DataType have, this also has the confidence and
    prevalence factor defined for each value.
    value = 'GG'
    datatype = FK 1 (rs12913832)
    confidence = None (or a digit between 0.00 and 100)
    prevalence = None (or a digit between 0.00 and 100)
    """
    value = models.CharField(max_length=64)
    datatype = models.ForeignKey(DataType, related_name='typevalues')
    confidence = models.DecimalField(max_digits=5, decimal_places=2, null=True)
    prevalence = models.DecimalField(max_digits=5, decimal_places=2, null=True)
    def clean(self, *args, **kwargs):
        # Bug fix: `confidence` is nullable (null=True), so the old
        # unconditional comparison broke for None values (TypeError on
        # Python 3; spurious ValidationError on Python 2, where None < 0).
        # Only range-check a value that is actually present.
        if self.confidence is not None and not 0 <= self.confidence <= 100:
            text = "Confidence level '%s' is not between (and including) 0-100."
            raise ValidationError(text % self.confidence)
        # NOTE(review): per the docstring `prevalence` carries the same 0-100
        # contract but was never validated here — confirm before tightening.
        super(TypeValue, self).clean(*args, **kwargs)
    def save(self, *args, **kwargs):
        # Force full model validation (including clean()) on every save.
        self.full_clean()
        super(TypeValue, self).save(*args, **kwargs)
    def __str__(self):
        tmp = [str(self.datatype), self.value]
        return ':'.join(tmp)
class ReferenceType(_Abstract):
    """
    What kind of reference is this, e.g., dbSNP, PubMed
    """
    value = models.CharField(max_length=64)
    def __str__(self):
        return self.value
class Reference(_Abstract):
    """
    References contains PubMed number or other publications.
    reference = 8172690
    reference_type = FK 1 (PubMed)
    datatype = FK 1 (rs12913832)
    """
    reference = models.CharField(max_length=128)
    reference_type = models.ForeignKey(ReferenceType, related_name='references')
    # Many-to-many: one publication can back several datatypes and vice versa.
    datatype = models.ManyToManyField(DataType, related_name='references')
    def __str__(self):
        # NOTE(review): str() of a queryset — verbose but intentional-looking.
        return str(self.reference_type) + ':' + self.reference +\
               str(self.datatype.all())
class Data(_Abstract):
    """
    Data is a single bit of data from a person, for example;
    identifier : FK 1 (Martin P. Hellwig)
    datatype : FK 1 (rs12913832)
    value : CG
    It is likely that at some point this table will no longer be in the RDBMS,
    but is actually in an external KeyValue store, as we will have hundreds of
    thousand data values for millions of people.
    """
    class Meta(_Abstract.Meta):
        """Override admin plural name"""
        verbose_name_plural = "Data"
    person = models.ForeignKey(Person, related_name='data')
    datatype = models.ForeignKey(DataType, related_name='data')
    value = models.TextField()
    def clean(self, *args, **kwargs):
        # Look up the coercer class named by the datatype's Coercion row.
        coercer = getattr(coercion, self.datatype.coercion.value)
        if coercer.multi_item:
            # Enumerated datatype: the normalised value must be one of the
            # declared TypeValue rows.
            type_values = self.datatype.typevalues.all()
            test = coercer(self.value).into_string()
            values = [item.value for item in type_values]
            if test not in values:
                text = "Value '%s' must be one of: %s"
                raise ValidationError(text % (self.value, str(values)))
            self.value = test
        else:
            # Free-form datatype: round-trip through the coercer to normalise,
            # converting coercion failures into validation errors.
            try:
                self.value = coercer(self.value).into_string()
            except ValueError as error_instance:
                raise ValidationError(error_instance)
        super(Data, self).clean(*args, **kwargs)
    def save(self, *args, **kwargs):
        self.full_clean()
        super(Data, self).save(*args, **kwargs)
    def __str__(self):
        tmp = [str(self.person), str(self.datatype), str(self.value),
               str(self.dts_insert)]
        return ':'.join(tmp)
| true |
be1ba77085c9330139dd7b5a285a8718017017c5 | Python | kosticlab/athlete | /python_scripts/preprocess_rnaseq_metadata.py | UTF-8 | 700 | 2.578125 | 3 | [] | no_license | import sys
# Usage: preprocess_rnaseq_metadata.py <metadata.tsv> <mapping_key.tsv> <output.tsv>
# Rewrites the sample identifier (first column) of the metadata table using the
# old-id -> new-id mapping file, appending the "_rutx" suffix to renamed ids.

# Load the identifier mapping (old_id<TAB>new_id per line).
with open(sys.argv[2], 'r') as mappingkey_f:
    mappings = {}
    for line in mappingkey_f:
        key_value = line.rstrip().split('\t')
        mappings[key_value[0]] = key_value[1]

# Read the metadata table; the first row is the header.
with open(sys.argv[1], 'r') as metadata_f:
    data_arr = [line.rstrip().split('\t') for line in metadata_f]

# Rename ids found in the mapping.  `in` replaces the Python-2-only has_key().
converted_data_arr = []
for row in data_arr[1:]:
    if row[0] in mappings:
        row[0] = mappings[row[0]] + "_rutx"
    converted_data_arr.append(row)

# Write the unchanged header followed by the converted rows.
with open(sys.argv[3], 'w') as output_f:
    output_f.write('\t'.join(data_arr[0]) + '\n')
    for row in converted_data_arr:
        output_f.write('\t'.join(row) + '\n')
| true |
f84ff36d6599f37128ffb3e1108e619578b9a0ec | Python | wdmx666/sleep_proj | /bak/recognition_pre_fog_simple_pipe/my_test/test_bak/test17.py | UTF-8 | 3,158 | 2.71875 | 3 | [] | no_license | from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.svm import LinearSVC
class TextStats(BaseEstimator, TransformerMixin):
    """Stateless transformer mapping each document to simple count features
    (total length and number of '.' characters) for a DictVectorizer."""
    def fit(self, x, y=None):
        # Nothing to learn; return self so it composes inside a Pipeline.
        return self
    def transform(self, posts):
        features = []
        for text in posts:
            features.append({'length': len(text),
                             'num_sentences': text.count('.')})
        return features
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
    """Extract the subject & body from a usenet post in a single pass.
    Takes a sequence of strings and produces an (n_posts, 2) object array:
    column 0 holds the `Subject:` header value, column 1 the cleaned body.
    """
    def fit(self, x, y=None):
        return self
    def transform(self, posts):
        # construct object dtype array with two columns
        # first column = 'subject' and second column = 'body'
        features = np.empty(shape=(len(posts), 2), dtype=object)
        for i, text in enumerate(posts):
            # Headers end at the first blank line of the raw post.
            headers, _, bod = text.partition('\n\n')
            bod = strip_newsgroup_footer(bod)
            bod = strip_newsgroup_quoting(bod)
            features[i, 1] = bod
            # First 'Subject:' header line wins; '' if none is present.
            prefix = 'Subject:'
            sub = ''
            for line in headers.split('\n'):
                if line.startswith(prefix):
                    sub = line[len(prefix):]
                    break
            features[i, 0] = sub
        return features
# Full model: split posts into (subject, body), vectorise each part
# separately, weight the feature groups, then classify with a linear SVM.
pipeline = Pipeline([
    # Extract the subject & body
    ('subjectbody', SubjectBodyExtractor()),
    # Use ColumnTransformer to combine the features from subject and body
    ('union', ColumnTransformer(
        [('subject', TfidfVectorizer(min_df=50), 0),
        ('body_bow', Pipeline([('tfidf', TfidfVectorizer()), ('best', TruncatedSVD(n_components=50)),]), 1),
        ('body_stats', Pipeline([('stats', TextStats()),('vect', DictVectorizer()),]), 1),],
        transformer_weights={ 'subject': 0.8,'body_bow': 0.5,'body_stats': 1.0,})),
    # Use a SVC classifier on the combined features
    ('svc', LinearSVC()),
    ])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
                           subset='train',
                           categories=categories,
                           )
# Bug fix: fetch_20newsgroups only accepts subset='train'/'test'/'all';
# the previous 'my_test' value raised a ValueError before anything ran.
test = fetch_20newsgroups(random_state=1,
                          subset='test',
                          categories=categories,
                          )
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
# classification_report expects (y_true, y_pred) in that order.
print(classification_report(test.target, y))
f2a32c22e23ec18a89fe814abc30bd28e74f72fb | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2759/60796/249507.py | UTF-8 | 377 | 2.953125 | 3 | [] | no_license | n=int(input())
# Reads the remaining stdin after `n` (already parsed on the line above):
# each of the n test cases is one line "m upper a b".  For every case, count
# the integers in [m, upper] divisible by a or by b and print one count per
# line.  (The previous version crashed with a NameError on `ls[i]` before the
# loop and only ever handled two hard-coded cases.)
result = []
for _ in range(n):
    low, high, a, b = (int(tok) for tok in input().split())
    total = 0
    for value in range(low, high + 1):
        if value % a == 0 or value % b == 0:
            total = total + 1
    result.append(total)
for total in result:
    print(total)
| true |
80b6e0baf38e85f1853dcece32b08086f547aa9b | Python | ethanbonin/Traveling_Sales_Problem | /TSP Code/TSP.py | UTF-8 | 3,541 | 3.46875 | 3 | [] | no_license | import sys
import random
from math import sqrt
from itertools import permutations
from timeit import default_timer as timer
import parseFile
def rand(value):
    """Generate `value` random 2-D points in [0, 100) x [0, 100).

    The points are also dumped to points.txt (one point per line) so the raw
    input can be inspected before a route is computed.  Returns the list.
    """
    points = []
    for _ in range(value):
        x = random.random() * 100
        y = random.random() * 100
        points.append([x, y])
    # Bug fix: file() is Python-2 only; open() in a context manager also
    # guarantees the handle is closed if a write fails.
    with open("points.txt", "w") as target:
        for p in points:
            target.write('{}'.format(p))
            target.write('\n')
    return points
#Calculates the distances between points
def calculateDistance(current, point):
    """Euclidean distance between two [x, y] points."""
    dx = current[0] - point[0]
    dy = current[1] - point[1]
    return sqrt(dx ** 2 + dy ** 2)
#Returns true is comparing is less than current distances
def compare(comparing, current):
    """Return True when `comparing` is strictly smaller than `current`."""
    return comparing < current
def OptimalRouteNearestNeighbor(opt):
    """Write the computed route to newRoute.txt, one point per line."""
    # Bug fix: file() is Python-2 only; the context manager also ensures the
    # handle is closed even if a write fails.
    with open("newRoute.txt", "w") as target:
        for p in opt:
            target.write('{}'.format(p))
            target.write('\n')
def OptimalRouteBrute(opt):
    """Write an exhaustive-search result to newRoute.txt.

    `opt` is the tuple returned by ExhaustiveRoute: opt[0] is the ordered list
    of points, opt[1] the total tour distance (written on the first line).
    """
    # Bug fix: file() is Python-2 only; use open() with a context manager.
    with open("newRoute.txt", "w") as target:
        target.write("Total Distance: %s" % opt[1])
        target.write('\n')
        for p in opt[0]:
            target.write('{}'.format(p))
            target.write('\n')
def NearestNeighbor(point):
    """Greedy nearest-neighbour tour starting from point[0].

    NOTE: consumes `point` — the input list is emptied as points are visited
    (same behaviour as the original implementation).
    """
    route = [point[0]]
    point.remove(point[0])
    while point:
        current = route[-1]
        # min() keeps the earliest candidate on ties, matching the original
        # strict-improvement scan.
        nearest = min(point, key=lambda p: calculateDistance(current, p))
        route.append(nearest)
        point.remove(nearest)
    return route
def PermuationOfPoints(points):
    """Return every ordering of `points` as a list of tuples."""
    return list(permutations(points))
def totalPath(route):
    """Pick the cheapest entry from a {distance: route} mapping.

    Returns (route, distance) for the smallest distance key.  Raises
    ValueError on an empty mapping (the original crashed with an
    UnboundLocalError in that case).
    """
    best = min(route)  # smallest distance key in a single O(n) pass
    return (route[best], best)
def ExhaustiveRoute(P):
    """Brute-force TSP: try every ordering of P, return (best_route, distance).

    The tour is closed (last point connects back to the first).  O(n!) time —
    only viable for a handful of points.
    """
    route = {}
    # Bug fix: iterate over the parameter P; the original read the module-level
    # `points` global and ignored its own argument.
    for p in PermuationOfPoints(P):
        distance = 0
        for i in range(len(p) - 1):
            distance += calculateDistance(p[i], p[i + 1])
        # Close the tour back to the start (originally spelled p[len([])-1]).
        distance += calculateDistance(p[0], p[-1])
        route[distance] = p
    return totalPath(route)
# Interactive driver (Python 2: print statements, and input() evaluates the
# typed expression, which is how `choice == 1` works for a typed "1").
print "Welcome to the TSP. Would you like to your own file?[1]\nOr\nRandomly Generate a file?[2]"
choice = input()
if choice == 1:
    # Load points from a user-supplied file via the parseFile helper module.
    parse = parseFile.ParseFile()
    points = parse.read()
    value = input("Would you like to use Nearest Neighbor(0) or Exhaustive Search(1): ")
else:
    print "Choice 2 Chosen"
    points = input("How many points would you like?: ")
    points = rand(points)
    value = input("Would you like to use Nearest Neighbor(0) or Exhaustive Search(1): ")
# Time whichever solver was chosen and dump the route to newRoute.txt.
if value == 0:
    startTimer = timer()
    opt = NearestNeighbor(points)
    endtimer = timer()
    OptimalRouteNearestNeighbor(opt)
if value == 1:
    startTimer = timer()
    opt = ExhaustiveRoute(points)
    endtimer = timer()
    OptimalRouteBrute(opt)
# NOTE(review): an answer other than 0/1 leaves endtimer/startTimer unbound
# and crashes on the next line — confirm whether input should be validated.
time = repr(endtimer - startTimer)
print("\n\ntime = " + time)
| true |
b38a1429b3a0bcb6a7915b86e61748858530f060 | Python | LKrysik/databricks-data-engineering | /data-engineering-1.1.0/Python/solutions/plus/includes/test/python/test_operations.py | UTF-8 | 2,008 | 2.546875 | 3 | [] | no_license | # Databricks notebook source
# MAGIC
# MAGIC %md
# MAGIC # Unit Tests for Operations
# COMMAND ----------
import pytest
from pyspark.sql import SparkSession
from pyspark.sql.types import *
# COMMAND ----------
from pyspark import sql
"""
For local testing it is necessary to instantiate the Spark Session in order to have
Delta Libraries installed prior to import in the next cell
"""
# Local 8-core Spark session shared by the fixture and tests below.
spark = sql.SparkSession.builder.master("local[8]").getOrCreate()
# COMMAND ----------
from main.python.operations import transform_raw
# COMMAND ----------
@pytest.fixture(scope="session")
def spark_session(request):
    """Fixture for creating a spark context."""
    # Reuses the module-level `spark` built above; the finalizer stops it
    # exactly once at the end of the pytest session.
    request.addfinalizer(lambda: spark.stop())
    return spark
# COMMAND ----------
def test_transform_raw(spark_session: SparkSession):
    """transform_raw must wrap raw JSON strings in the ingestion schema:
    (datasource, ingesttime, value, p_ingestdate), with only `value` nullable."""
    # Six raw heart-rate readings, each a single-column JSON string row.
    testDF = spark_session.createDataFrame(
        [
            (
                '{"device_id":0,"heartrate":52.8139067501,"name":"Deborah Powell","time":1.5778368E9}',
            ),
            (
                '{"device_id":0,"heartrate":53.9078900098,"name":"Deborah Powell","time":1.5778404E9}',
            ),
            (
                '{"device_id":0,"heartrate":52.7129593616,"name":"Deborah Powell","time":1.577844E9}',
            ),
            (
                '{"device_id":0,"heartrate":52.2880422685,"name":"Deborah Powell","time":1.5778476E9}',
            ),
            (
                '{"device_id":0,"heartrate":52.5156095386,"name":"Deborah Powell","time":1.5778512E9}',
            ),
            (
                '{"device_id":0,"heartrate":53.6280743846,"name":"Deborah Powell","time":1.5778548E9}',
            ),
        ],
        schema="value STRING",
    )
    transformedDF = transform_raw(testDF)
    # Only the schema (names, types, nullability, order) is asserted here.
    assert transformedDF.schema == StructType(
        [
            StructField("datasource", StringType(), False),
            StructField("ingesttime", TimestampType(), False),
            StructField("value", StringType(), True),
            StructField("p_ingestdate", DateType(), False),
        ]
    )
| true |
cf461cbad6559c64e1e9486a14dab09030e23d7f | Python | rfgrammer/Tkinter | /8 - Using classes.py | UTF-8 | 575 | 3.421875 | 3 | [] | no_license | from tkinter import *
class CharlesButton:
    """Small Tk widget: a frame holding a 'Print Message' and a 'Quit' button."""
    def __init__(self, master):
        frame = Frame(master)
        frame.pack()
        # Left button prints to stdout; right button ends the mainloop.
        self.print_button = Button(frame, text="Print Message", command=self.print_message)
        self.print_button.pack(side=LEFT)
        self.quit_button = Button(frame, text="Quit", command=frame.quit)
        self.quit_button.pack(side=LEFT)
    def print_message(self):
        print("Wow, this actually worked!")
def main():
    """Build the root window with a CharlesButton and run the Tk event loop."""
    root = Tk()
    b = CharlesButton(root)
    root.mainloop()
if __name__ == "__main__":
    main()
| true |
8c6b9e90d1647fecf61670bcbc68b78e23ea4cb5 | Python | sevenhe716/LeetCode | /Mock/m1231_google.py | UTF-8 | 512 | 3.15625 | 3 | [] | no_license | # Time: O(n)
# Space: O(1)
# Ideas:
#
class Solution:
    """Increment a big integer stored as a list of decimal digits."""

    def plusOne(self, digits: 'List[int]') -> 'List[int]':
        """Add one to `digits` in place (most significant digit first) and
        return the same list, growing by one digit on a full carry-out."""
        for pos in reversed(range(len(digits))):
            if digits[pos] < 9:
                # No carry past this digit: bump it and stop.
                digits[pos] += 1
                return digits
            # A 9 rolls over to 0 and the carry keeps propagating left.
            digits[pos] = 0
        # Carry ran off the front (all digits were 9, or the list was empty).
        digits.insert(0, 1)
        return digits
| true |
74a5dd44abca97abcd09b2d173e720f730c3fca7 | Python | cp3-three/scrapy | /universityRank.py | UTF-8 | 1,177 | 3.203125 | 3 | [] | no_license | # codeing=utf-8
# author:jone yang
# date:2020/7/11
# describe:利用requests,bs4.beautifulSoup库进行一个定向网络爬虫,并整理出大学排名
import requests
from bs4 import BeautifulSoup
def getHTMLText(url):
    """Fetch `url`, decode the body with the apparent encoding, and return it.

    On any request/decoding failure a notice is printed and '' is returned.
    """
    # NOTE(review): no timeout is set, so a stalled server blocks this call —
    # consider requests.get(url, timeout=...).
    try:
        html = requests.get(url)
        html.encoding = html.apparent_encoding
        html.raise_for_status()
        return html.text
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed.
        print("爬取失败")
        return ''
def fillUnivList(ulist,urlText,num=20):
    """Parse the ranking page HTML and append [rank, name, score] rows
    for the top `num` universities onto `ulist` (mutated in place)."""
    if(urlText==None):
        return False
    soup=BeautifulSoup(urlText,"html.parser")
    for i in range(1,num+1):
        # NOTE(review): the rank is passed as an int (`string=i`); BeautifulSoup
        # string matching normally expects text — confirm this locates the <td>,
        # otherwise string=str(i) may be what was intended.
        start=soup.find('td',string=i)
        # The rank cell's following siblings hold the row's other columns.
        infoList=list(start.next_siblings)
        rank=i
        univName=infoList[1].string
        score=infoList[7].string
        ulist.append([rank,univName,score])
def printUnivList(nlist):
    """Print ranking rows in three columns, each centred to width 10 with
    U+3000 (ideographic space) so CJK text stays aligned."""
    template = "{0:{3}^10}\t{1:{3}^10}\t{2:{3}^10}"
    pad = chr(12288)
    for row in nlist:
        print(template.format(row[0], row[1], row[2], pad))
def main():
    """Fetch the 2020 ranking page, parse the top 20 rows, and print a table."""
    url='http://www.zuihaodaxue.cn/zuihaodaxuepaiming2020.html'
    ulist=[['名次','学校','总分'],]  # header row: rank / university / total score
    html=getHTMLText(url)
    fillUnivList(ulist,html)
    printUnivList(ulist)
if __name__=='__main__':
    main()
d1bbdff3fa6ab7ceab8ab33123a58c089abe84b5 | Python | Kim-Taeyeon/Python | /Socket/Server.py | UTF-8 | 429 | 2.65625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
# 多线程并发:服务器
import socket
import SocketServer  # Python 2 stdlib; provides BaseRequestHandler / ThreadingTCPServer used below
class Mysocket(SocketServer.BaseRequestHandler):
    """Per-connection handler: echo every received chunk back upper-cased."""
    def handle(self):
        print "Got a connect from", self.client_address
        while True:
            # NOTE(review): recv() returns '' once the peer disconnects; this
            # loop never breaks on that, so it will spin — confirm intended.
            data = self.request.recv(1024)
            print "recv", data
            self.request.send(data.upper())
if __name__ == "__main__" :
    # Listen on all interfaces; each connection gets its own thread.
    host = "0.0.0.0"
    port = 9001
    s = SocketServer.ThreadingTCPServer((host, port), Mysocket) # threaded concurrent server (多线程并发)
    # Bug fix: the server was constructed but never started, so the process
    # exited immediately without accepting a single connection.
    s.serve_forever()
| true |
1246df8047e76309d8517e5ffdc2a2516c57d19e | Python | wotmd/cryptopals | /set4/c27_Recover_the_key_from_CBC_with_IV_equal_Key.py | UTF-8 | 2,416 | 3.3125 | 3 | [] | no_license | from c10_Implement_CBC_mode import AES_cbc_decrypt, AES_cbc_encrypt
from Crypto.Util.strxor import strxor
import os
def Check_ASCII(plaintext):
    """Returns true if all the characters of plaintext are ASCII compliant (ie are in the ASCII table)."""
    for code in plaintext:
        if code >= 128:
            return False
    return True
def decrypt_and_check_admin(iv, key, ciphertext):
    """Decrypt `ciphertext` (AES-CBC) and report whether it grants admin.

    Raises Exception(message, plaintext) when the decrypted text is not pure
    ASCII; otherwise returns True iff b';admin=true;' occurs in it.
    """
    plaintext = AES_cbc_decrypt(ciphertext, key, iv)
    if Check_ASCII(plaintext):
        return b';admin=true;' in plaintext
    raise Exception("The message is not valid", plaintext)
def get_key_from_insecure_cbc(iv, key):
    """Recovers the key from the lazy encryption oracle using the key also as iv.

    With iv == key, a forced ciphertext "C1 | 0 | C1" decrypts so that
    P1 xor P3 equals the iv, i.e. the key (the classic challenge-27 attack).
    """
    block_length = 16  # AES block size; find_block_length(AES_cbc_decrypt) could detect it
    # Create three different blocks of plaintext plus a tail and run them
    # through the oracle.
    # NOTE(review): the "ciphertext" is produced with AES_cbc_decrypt, as in
    # the original code — confirm the oracle is meant to be this direction.
    p_1 = 'A' * block_length
    p_2 = 'B' * block_length
    p_3 = 'C' * block_length
    p_4 = ";admin=true;" + "\x00"*4
    ciphertext = AES_cbc_decrypt(p_1 + p_2 + p_3 + p_4, key, iv)
    # Force the ciphertext to be "C_1, 0, C_1"
    forced_ciphertext = ciphertext[:block_length]
    forced_ciphertext += b'\x00' * block_length
    forced_ciphertext += ciphertext[:block_length]
    # The forced message is non-ASCII, so the lazy oracle raises and leaks
    # the full plaintext in the exception.
    try:
        decrypt_and_check_admin(iv, key, forced_ciphertext)
    except Exception as e:
        forced_plaintext = e.args[1]
        print(e)
        # P_1 xor P_3 of the leaked plaintext equals the iv, i.e. the key.
        return strxor(forced_plaintext[:block_length], forced_plaintext[-block_length:])
    # Bug fix: this raise was previously unreachable (it sat after a return
    # that itself hit a NameError whenever no exception had been raised).
    raise Exception("Was not able to hack the key")
def main():
    """Demo: encrypting with iv == key lets an attacker recover the key."""
    KEY = os.urandom(16)
    IV = KEY
    hacked_key = get_key_from_insecure_cbc(IV, KEY)
    # Check that the key was recovered correctly
    if KEY == hacked_key:
        print("success")
if __name__ == '__main__':
    main()
f5dffd2bb7c098ef89952a3193d72b44307df0d6 | Python | humanfans/ncep-displayer | /myTools.py | UTF-8 | 5,074 | 2.75 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding: utf-8
from __future__ import unicode_literals
import os
import sys
import random
import json
import codecs
import logging as log
import logging.handlers as handlers
# Author: Laisky
# Version: 1.1
# Date: 2013-06-18
def IntStr( num, dec=None ):
    """
    return a str(int), in dec character

    The integer is left-padded with zeros up to `dec` characters; when `dec`
    is falsy or not larger than the plain representation, it is returned
    unpadded.  (For negatives the zeros precede the sign, as before.)
    """
    text = str(int(num))
    if not dec or dec <= len(text):
        return text
    return "0" * (dec - len(text)) + text
def PathSwitch( path ):
    """Best-effort normalise `path` to unicode, then re-encode it for the
    local platform (GBK on Windows, UTF-8 elsewhere).

    Python 2 only: relies on the `unicode` builtin and str.decode().
    """
    if not isinstance( path, unicode ):
        try: path = path.decode( 'gbk' )
        except: pass
        try: path = path.decode( 'utf-8' )
        except: pass
    if sys.platform == 'win32':
        return path.encode( 'gbk' )
    else: return path.encode( 'utf-8' )
def RandomChar( length ):
    """Return a random alphanumeric string of exactly `length` characters.

    Bug fix: the loop previously used range(1, length) and therefore produced
    one character fewer than requested.
    """
    alphabet = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    return ''.join(random.choice(alphabet) for _ in range(length))
def Utf( var ):
    """Best-effort conversion of `var` to UTF-8 bytes (Python 2 semantics:
    try a GBK decode, then a UTF-8 decode, silently keeping `var` unchanged
    when a step fails)."""
    try: var = var.decode('gbk')
    except: pass
    try: var = var.decode('utf-8')
    except: pass
    return var.encode('utf-8')
def LoadConfig( file_path, multi_arg=False, coding="GBK", split="\t", comment="#" ):
    """Parse a key/value config file into a dict.

    Each line is stripped, anything from `comment` on is dropped, and the
    remainder must hold exactly one key and one value separated by `split`.
    With multi_arg=True repeated keys accumulate their values into lists.
    """
    result = {}
    for raw_line in codecs.open(file_path, "r", coding).readlines():
        line = raw_line.strip()
        if comment in line:
            line = line[: line.index(comment)]
        if not line:
            continue
        key, value = [field for field in line.split(split) if field]
        if not multi_arg:
            result[key] = value
        elif key not in result:
            result[key] = [value]
        elif not isinstance(result[key], list):
            result[key] = [result[key], value]
        else:
            result[key].append(value)
    return result
def MemManager( doc_path, max_mem=2.0 ):
    """
    manage the doc's memory: keep the newest files in `doc_path` and delete
    older ones once the running total exceeds `max_mem`.
    A max_mem of 0 disables the cleanup entirely.
    ----------
    doc_path - str: the doc's path
    max_mem - float: megabytes to retain
    Python 2 only (uses iterator .next()).
    """
    if max_mem == 0: return None
    the_files = os.walk(doc_path).next()[2]
    file_dic = {}
    for each_file in the_files:
        file_path = os.path.join(doc_path, each_file)
        file_date = os.stat(file_path).st_ctime
        # NOTE(review): files sharing the same ctime overwrite each other in
        # this dict, so one of them silently escapes accounting — confirm.
        file_dic.update({file_date: file_path})
    # Newest first; deletion starts once the cumulative size passes max_mem.
    time_sort = file_dic.keys()
    time_sort.sort(reverse=True)
    file_size = 0
    for each_time in time_sort:
        file_path = file_dic[each_time]
        file_size += os.stat(file_path).st_size / 1024. / 1024.
        if file_size > max_mem:
            try:
                file_path = PathSwitch( file_path )
                os.remove(file_path)
            except: pass
class MyException( Exception ):
    """Exception carrying a numeric code (`num`) plus a free-text message."""
    def __init__( self, num, text='none' ):
        Exception.__init__( self )
        self.num = num
        self.text = text
    def __repr__( self ):
        return repr(self.num) + ': ' + self.text
    def __str__( self ):
        # Same rendering as repr().
        return self.__repr__()
    def __int__( self ):
        # Fall back to 0 whenever the code is not convertible to int.
        try:
            return int(self.num)
        except:
            return 0
class logging():
    """Convenience wrapper around the stdlib logging module that writes three
    rotating files under `path`: <log_name>.info.txt, .error.txt and a
    separate .debug.txt fed through a shared 'debug' logger.

    NOTE(review): the class shadows the stdlib `logging` name, `001` is a
    Python-2-only octal literal, and repeated instantiation keeps adding
    handlers to the same named loggers — confirm these are acceptable.
    """
    def __init__(
            self, path, log_name, log=log, handlers=handlers,
            disabled=False
        ):
        """
        create 3 log file:
            $log_name + .debug.txt
            $log_name + .info.txt
            $log_name + .error.txt
        """
        self.disabled = disabled
        if not os.path.exists(path):
            raise MyException( 001, 'logging path: %s' % path )
        logger = log.getLogger(log_name)
        logger.setLevel(log.DEBUG)
        # The debug stream uses its own (globally shared) logger name.
        debug_logger = log.getLogger('debug')
        debug_logger.setLevel(log.DEBUG)
        log_formatter = log.Formatter(
            '%(name)s - %(asctime)s - %(levelname)s - %(message)s'
        )
        debug_formatter = log.Formatter('%(asctime)s\n%(message)s')
        # INFO file: 2 MiB cap, one backup.
        info_handler = handlers.RotatingFileHandler(
            os.path.join(path, log_name) + '.info.txt', \
            mode='a', \
            maxBytes=2097152, \
            backupCount=1
        )
        info_handler.setLevel(log.INFO)
        info_handler.setFormatter(log_formatter)
        # ERROR file: 5 MiB cap, one backup.
        error_handler = handlers.RotatingFileHandler(
            os.path.join(path, log_name) + '.error.txt', \
            mode='a', \
            maxBytes=5242880, \
            backupCount=1
        )
        error_handler.setLevel(log.ERROR)
        error_handler.setFormatter(log_formatter)
        # DEBUG file: 5 MiB cap, two backups.
        debug_handler = handlers.RotatingFileHandler(
            os.path.join(path, log_name) + '.debug.txt', \
            mode='a', \
            maxBytes=5242880, \
            backupCount=2
        )
        debug_handler.setLevel(log.DEBUG)
        debug_handler.setFormatter(debug_formatter)
        logger.addHandler(info_handler)
        logger.addHandler(error_handler)
        debug_logger.addHandler(debug_handler)
        self.info_logger = logger
        self.debug_logger = debug_logger
    def debug( self, text ):
        # Pretty-prints the payload as JSON before logging.
        if self.disabled: return None
        text = json.dumps(text, sort_keys=True, indent=2)
        self.debug_logger.debug(text)
    def info( self, text ):
        if self.disabled: return None
        self.info_logger.info(text)
    def error( self, text ):
        if self.disabled: return None
        self.info_logger.error(text)
| true |
db8a331fbcfcf05105ca5d0020c6ca4b9f3da78d | Python | etangreal/compiler | /test/tests/10bench/fact.py | UTF-8 | 149 | 3.328125 | 3 | [] | no_license | class Fact:
    def __init__(self):
        # Stateless; nothing to initialise.
        pass
def fact(self, n):
if n == 0:
return 1
return self.fact(n-1) * n;
print Fact().fact(10)  # smoke test; Python 2 print statement
| true |
53afa8e277c735b76cf2c0b171e027f0d18b5ff7 | Python | adamnfish/PythonGit | /git/__init__.py | UTF-8 | 2,286 | 2.65625 | 3 | [
"Unlicense"
] | permissive | import subprocess
import os
from version import VERSION
class Repository(object):
    """
    Instances of this class refer to a git repository on this machine.
    """
    path = ""
    git = ""
    def __init__(self, path, git="git"):
        """
        Sets up this repository.

        path -- directory of the working copy (stored as an absolute path)
        git  -- git executable to invoke; must end with "git"
        """
        self.path = os.path.abspath(path)
        if git.endswith("git"):
            self.git = git
        else:
            raise ValueError("Please provide a valid Git command")
    def cmd(self, command, *args):
        """
        Runs a git command on this repository.

        Returns an (stdout, stderr) bytes tuple, which is also stored on
        self.out / self.err for later inspection.
        """
        arg_list = [self.git, command] + list(args)
        process = subprocess.Popen(arg_list,
                                   cwd=self.path,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,)
        (out, err) = process.communicate()
        self.out = out
        self.err = err
        return (out, err)
    # shortcuts
    def init(self, *args):
        """
        Helper for `git init`.
        """
        return self.cmd('init', *args)
    def add(self, filename, *args):
        """
        Helper for `git add`.
        """
        return self.cmd('add', filename, *args)
    def commit(self, message, author, *args):
        """
        Helper for `git commit`.
        """
        # Bug fix: message and author are now passed correctly.  Previously
        # '-m ' + message prepended a space to the commit message and
        # '--author=' dropped the author argument entirely.
        return self.cmd('commit', '-m', message, '--author=' + author, *args)
    def status(self, *args):
        """
        Helper for `git status`.
        """
        return self.cmd('status', *args)
    def checkout(self, checkout, *args):
        """
        Helper for `git checkout`.
        """
        return self.cmd('checkout', checkout, *args)
    def pull(self, remote, branch, *args):
        """
        Helper for `git pull`.
        """
        return self.cmd('pull', remote, branch, *args)
    def push(self, remote, branch, *args):
        """
        Helper for `git push`.
        """
        return self.cmd('push', remote, branch, *args)
    def fetch(self, remote, *args):
        """
        Helper for `git fetch`.
        """
        return self.cmd('fetch', remote, *args)
    def merge(self, ref, *args):
        """
        Helper for `git merge`.
        """
        return self.cmd('merge', ref, *args)
| true |
66858c07ada89061322b79ebf6898778d2bce2b8 | Python | aricaldoni/band-name-generator | /main.py | UTF-8 | 390 | 4.03125 | 4 | [] | no_license | #Welcome message
# Greet the user (typo "authomatic" -> "automatic" fixed in the output text).
print("Welcome to the automatic Band Name generator.")
# Ask the user for the city that they grew up in
city = input("What city did you grow up in?: \n")
# Ask the user for the name of a pet
pet = input("What is the name of your pet: \n")
# Combine the city and pet names into the suggested band name.
print(f"The suggested name for your Band is {city} {pet}")
| true |
9d09e01c1aeb226db63cfd96b10c29f436c8ab56 | Python | dgpllc/leetcode-python | /learnpythonthehardway/insert-into-a-cycle-sorted-list.py | UTF-8 | 2,750 | 4.375 | 4 | [] | no_license | # Given a node from a cyclic linked list which is sorted in ascending order, write a function to insert a value into
# the list such that it remains a cyclic sorted list. The given node can be a reference to any single node in the
# list, and may not be necessarily the smallest value in the cyclic list.
#
# If there are multiple suitable places for insertion, you may choose any place to insert the new value. After the
# insertion, the cyclic list should remain sorted.
#
# If the list is empty (i.e., given node is null), you should create a new single cyclic list and return the
# reference to that single node. Otherwise, you should return the original given node.
#
# The following example may help you understand the problem better:
#
#
#
#
#
# In the figure above, there is a cyclic sorted list of three elements. You are given a reference to the node with
# value 3, and we need to insert 2 into the list.
#
#
#
#
#
# The new node should insert between node 1 and node 3. After the insertion, the list should look like this,
# and we should still return node 3.
#
# Seen this question in a real interview before?
# Definition for a Node.
class Node(object):
    """A node of a singly linked (here: circular) list."""
    def __init__(self, val, next):
        # `next` shadows the builtin, but the parameter name is part of the
        # constructor's public signature, so it is kept.
        self.val, self.next = val, next
class Solution(object):
    def insert(self, head, insertVal):
        """Insert insertVal into a sorted circular list; return the given head.

        :type head: Node (any node of the cycle, not necessarily the minimum)
        :type insertVal: int
        :rtype: Node
        """
        # Empty list: create a one-node cycle and return it.
        if not head:
            n = Node(insertVal, None)
            n.next = n # point the node at itself to form the cycle
            return n
        cur = head
        while True:
            # Case 1: insertVal fits between two adjacent ascending values.
            if cur.val <= insertVal <= cur.next.val:
                cur.next = Node(insertVal, cur.next)
                return head
            # Case 2: cur is the "seam" (max -> min wrap-around) and the new
            # value is either a new maximum (>= cur.val) or a new minimum
            # (<= cur.next.val), so it belongs right at the seam.
            elif cur.val > cur.next.val and (insertVal <= cur.next.val or insertVal >= cur.val):
                cur.next = Node(insertVal, cur.next)
                return head
            # Case 3: we walked the whole cycle without finding a slot (all
            # node values are equal) -- insert anywhere; here, after cur.
            elif cur.next is head:
                cur.next = Node(insertVal, cur.next)
                return head
            cur = cur.next
        return head  # unreachable: the while-True loop always returns above
if __name__ == '__main__':
    # Smoke test: build the cycle 1 -> 3 -> 4 -> (back to 1) and insert values.
    a = Node(1, None)
    b = Node(3, None)
    c = Node(4, None)
    a.next, b.next, c.next = b, c, a
    Solution().insert(c, 2)
    Solution().insert(c, 0)
    # Degenerate case: every node carries the same value.
    a, b, c = Node(1, None), Node(1, None), Node(1, None)
    a.next, b.next, c.next = b, c, a
    Solution().insert(c, 0)
| true |
2c2e5d1d48033a3f03aae257847b80f5c98fac85 | Python | esinghroy/python-learning-cohort | /malte/dotnet/ch04_oop/models/danish_car.py | UTF-8 | 185 | 2.96875 | 3 | [] | no_license | from models.car import Car
class DanishCar(Car):
    """A Car whose user-facing messages are printed in Danish.

    NOTE(review): Car is the project-local base class from models.car;
    presumably it declares drive()/refuel() as the interface overridden
    here -- confirm against models/car.py.
    """
    def drive(self):
        """Print the Danish driving message ('it just drives!')."""
        print("Det køre bare!!!!")
    def refuel(self):
        """Print the Danish refuelling prompt ('want a sausage with that?')."""
        print(f"Danish Car: Skal du ha' en pølse med?")
56b34db54ce4c2381b0ff243f7e50d3bfab1dca6 | Python | suryaansh2002/Cryptoss_2020 | /Easy2.py | UTF-8 | 503 | 2.8125 | 3 | [] | no_license | s=input()
# First input line (read into s on the previous line) holds two integers.
d=int(s.split()[0])  # NOTE(review): parsed but never used -- possibly the
                     # count of values on the next line; verify the statement.
m=int(s.split()[1])  # threshold / unit size used below
s2=input()
l1=s2.split()
l2=[]
for i in l1:
    l2.append(int(i))
count=0
rem=0
# Walk the values counting whole units of size m. A leftover remainder is
# either merged into the next value (when their sum still reaches m) or
# counted as one extra unit on its own.
# NOTE(review): the original problem statement is unknown; values < m only
# set rem and are otherwise skipped -- confirm that is intended.
for i in range(len(l2)):
    if l2[i]<m:
        rem=l2[i]
        #count+=1
        continue
    elif l2[i]>=m:
        count+= l2[i]//m
        rem=l2[i]%m
        if i<len(l2)-1:
            if l2[i+1]+rem>=m:
                l2[i+1]=l2[i+1]+rem
            else:
                count+=1
        else:
            # Last value: a non-zero remainder still counts as one unit.
            if rem>0:
                count+=1
print(count)
aa89f2680bf330a60e1707d16b37f677b682b2b7 | Python | louishopgood/trading_bot | /PDR_get_data_Yahoo_graph_prices_calc_MAs_returns.py | UTF-8 | 5,162 | 3.5 | 4 | [] | no_license | # =============================================================================
# Import OHLCV data using Pandas Datareader
# =============================================================================
# This script tolerates individual API-connection failures so that one bad
# ticker does not abort the whole data import.
# Import necessary libraries
import pandas as pd
import pandas_datareader.data as pdr
import datetime
from matplotlib import pyplot as plt
# Download historical data for NIFTY constituent stocks
tickers = ["MSFT","AMZN","AAPL","CSCO","IBM","FB"]
stock_cp = pd.DataFrame() # dataframe to store close price of each ticker
attempt = 0 # pass-through counter
extracted_succesfully = [] # tickers whose close price was successfully extracted
failed_list = []
# Retry loop: at most 6 passes (attempt 0..5); stops early once every ticker
# has been fetched. Bounding it prevents an endless loop when an API keeps
# failing.
while len(tickers) != 0 and attempt <= 5:
    # Keep only the tickers that have not been extracted yet.
    tickers = [j for j in tickers if j not in extracted_succesfully]
    for i in range(len(tickers)):
        # Try to open the API connection for the current ticker.
        try:
            # OHLCV history for roughly the last 10000 days from Yahoo Finance.
            temp = pdr.get_data_yahoo(tickers[i],datetime.date.today()-datetime.timedelta(10000),datetime.date.today())
            # .dropna eliminates any rows (by default) that contain a NaN or None
            temp.dropna(inplace = True)
            # Store the Adjusted Close column under the ticker's name.
            stock_cp[tickers[i]] = temp["Adj Close"]
            # Remember success so the next pass skips this ticker.
            extracted_succesfully.append(tickers[i])
        # Fix: catch Exception instead of a bare `except:` so Ctrl-C
        # (KeyboardInterrupt) and SystemExit can still stop the script.
        except Exception:
            print(tickers[i]," :failed to fetch data...retrying")
            failed_list.append(tickers[i]+": pass-through "+str(attempt))
            continue
    print(failed_list)
    print(stock_cp)
    attempt+=1
# Back-fill NaNs so dates before a stock's listing carry its first price
# (making that period's daily return zero) instead of being dropped.
stock_cp.fillna(method = "bfill", inplace = True, axis = 0)
plt.figure(figsize = (12, 8))
ax1 = plt.subplot(2,3,1)
stock_cp["AAPL"].plot()
plt.title("AAPL")
ax2 = plt.subplot(2,3,2)
stock_cp["AMZN"].plot()
plt.title("AMZN")
ax3 = plt.subplot(2,3,3)
stock_cp["MSFT"].plot()
plt.title("MSFT")
ax4 = plt.subplot(2,3,4)
stock_cp["CSCO"].plot()
plt.title("CSCO")
ax5 = plt.subplot(2,3,5)
stock_cp["IBM"].plot()
plt.title("IBM")
ax6 = plt.subplot(2,3,6)
stock_cp["FB"].plot()
plt.title("FB")
plt.tight_layout()
# Mean, Median, Standard Deviation, Daily Return.
# NOTE(review): the next three statements compute and discard their results;
# they are kept as exploratory/REPL-style code.
stock_cp.mean()
stock_cp.median()
stock_cp.std()
daily_return = stock_cp.pct_change()
(((stock_cp/stock_cp.shift(1))-1)) == daily_return # equivalent formulation of daily returns (result discarded)
# .shift() shifts the whole dataframe down by the given number of rows.
print("Mean Daily Return: ")
print(daily_return.mean())
print("Standard Deviation of Returns: ")
print(daily_return.std())
# 20-day rolling mean / std of returns (results discarded; exploratory).
# min_periods=20 leaves the first 19 rows as NaN rather than showing
# partial-window averages.
daily_return.rolling(window = 20, min_periods = 20).mean()
daily_return.rolling(window = 20, min_periods = 20).std()
# Exponential moving statistics weight recent dates more heavily.
daily_return.ewm(span = 20, min_periods = 20).mean()
daily_return.ewm(span = 20, min_periods = 20).std()
c496f1f32e8876233ec2cf7ebc38021eecfaa6b8 | Python | 24foxtrot/code-improv | /quiz_3.py | UTF-8 | 921 | 3.28125 | 3 | [] | no_license | import webbrowser
class Movie():
    """Container for one movie's display data, plus a trailer launcher."""

    def __init__(self, movie_title, movie_poster_image, movie_storyline, movie_trailer_url):
        """Store the title, poster-image URL, storyline and trailer URL.

        movie_title       -- the movie title
        movie_poster_image -- URL of the poster image
        movie_storyline   -- short storyline text
        movie_trailer_url -- YouTube trailer URL for this movie
        """
        self.title = movie_title
        self.poster_image_url = movie_poster_image
        self.storyline = movie_storyline
        self.trailer_url = movie_trailer_url

    def show_trailer(self):
        """Open this movie's trailer URL in the default web browser."""
        webbrowser.open(self.trailer_url)
| true |
e582f8554490ca9e01026407c749d23e30869613 | Python | GregMasterBr/Python-Exercicios-CeV-Gustavo-Guanarabara | /exerc082.py | UTF-8 | 652 | 3.984375 | 4 | [] | no_license | print('-==-'*50)
# Exercise 82: read numbers until a negative value is entered, keeping every
# accepted value plus separate even/odd lists, then report all three.
# (The Portuguese banner text, typos included, is user-facing output and is
# left untouched.)
print(' EXERCÍCIO 82 - LISTAS LER VARIOS NÚMEROS NÚMEROS E SEPARAAR EM PARE E IMPAR')
print('-==-'*50)
numeros = []  # every accepted value, in input order
par = []      # even values
impar = []    # odd values
while True:
    n = int(input(f'Digite um número: '))
    # A negative number is the stop sentinel and is not stored.
    if n < 0:
        break
    else:
        numeros.append(n)
        if n%2==0:
            par.append(n)
        else:
            impar.append(n)
print(f'Quantidade de números digitados: {len(numeros)}')
print(f'Ordem Original: {numeros}')
if len(par)>0:
    print(f'PARES: {par}')
else:
    print('NÃO FOI INFORMADO NÚMERO PAR')
if len(impar)>0:
    print(f'IMPARES: {impar}')
else:
    print('NÃO FOI INFORMADO NÚMERO ÍMPAR')
| true |
ae7caaa173efa5d09ab394ff9880a17ea448de5e | Python | 21eleven/leetcode-solutions | /python/0227_basic_calculator_ii/loops.py | UTF-8 | 1,689 | 4.0625 | 4 | [] | no_license | """
227. Basic Calculator II
Medium
Implement a basic calculator to evaluate a simple expression string.
The expression string contains only non-negative integers, +, -, *, / operators and empty spaces . The integer division should truncate toward zero.
Example 1:
Input: "3+2*2"
Output: 7
Example 2:
Input: " 3/2 "
Output: 1
Example 3:
Input: " 3+5 / 2 "
Output: 5
Note:
You may assume that the given expression is always valid.
Do not use the eval built-in library function.
"""
class Solution:
    def calculate(self, s: str) -> int:
        """Evaluate an expression of non-negative ints and + - * / (truncating).

        :param s: expression containing digits, '+', '-', '*', '/' and spaces.
        :returns: the evaluated integer value.

        Single O(n) pass replacing the original O(n^2) tokenize-and-splice
        approach: additive terms are pushed onto a stack ('-' pushes the
        negated number), while '*' and '/' combine immediately with the top
        of the stack, so operator precedence falls out naturally. The answer
        is the sum of the stack.
        """
        stack = []
        num = 0
        op = '+'  # pending operator, applied when the current number ends
        last = len(s) - 1
        for i, ch in enumerate(s):
            if ch.isdigit():
                num = num * 10 + int(ch)
            # Flush on every operator and once at the end of the string;
            # interior spaces are simply skipped.
            if ch in '+-*/' or i == last:
                if op == '+':
                    stack.append(num)
                elif op == '-':
                    stack.append(-num)
                elif op == '*':
                    stack.append(stack.pop() * num)
                else:  # op == '/'
                    # Integer division truncated toward zero. The dividend can
                    # be negative here (a '-' term was stored negated), and
                    # Python's // floors, so truncate via the absolute value.
                    top = stack.pop()
                    q = abs(top) // num
                    stack.append(q if top >= 0 else -q)
                op = ch
                num = 0
        return sum(stack)
| true |
213e4376c905f33a3c937511eca8a24bfbad024e | Python | vivekkkumar/boto_aws_automation | /testDeployment.py | UTF-8 | 4,057 | 2.640625 | 3 | [] | no_license | from src.ec2.VPC import vpc
from src.ec2.EC2 import ec2
from src.clientLocator import EC2Client
def main():
    """Provision a demo AWS network and two EC2 instances with boto3 wrappers.

    Creates a VPC with an internet gateway, a public subnet (routed through
    the IGW, auto-assigning public IPs) and a private subnet, then launches
    one EC2 instance in each behind freshly created security groups.

    NOTE(review): vpc/ec2/EC2Client are project-local wrappers (src.ec2.*);
    the dict keys used below follow the raw boto3 response shapes they are
    presumed to return -- confirm against those wrappers.
    """
    ec2_client = EC2Client().get_client()
    myvpc = vpc(ec2_client)
    response = myvpc.create_vpc()
    # using inbuilt str magic method
    print ('VPC created' + str(response))
    vpc_name = 'Boto3-VPC'
    # check documentation for actual value in the dictionary
    vpc_id = response['Vpc']['VpcId']
    myvpc.add_name_tag(vpc_id, vpc_name)
    igw_obj = myvpc.create_internet_gateway()
    # response shape follows the boto3 create_internet_gateway documentation
    igw_id = igw_obj['InternetGateway']['InternetGatewayId']
    myvpc.attach_igw_to_vpc(vpc_id, igw_id)
    public_subnet_response = myvpc.create_subnet(vpc_id, '10.0.1.0/24')
    public_subnet_id = public_subnet_response['Subnet']['SubnetId']
    print ("subnet created for vpc {} with id {}".format(vpc_id, str(public_subnet_response)))
    myvpc.add_name_tag(public_subnet_id, 'Boto3-Public-Subnet')
    ''' In order for a subnet to be communicated from outside, an Internet Gateway is needed along with a routing table'''
    route_table_response = myvpc.create_public_route_table(vpc_id)
    rtbl_id = route_table_response['RouteTable']['RouteTableId']
    myvpc.create_igw_route_on_public_route_table(rtbl_id, igw_id)
    # Associate public subnet with the route table
    myvpc.associate_subnet_with_route_table(public_subnet_id, rtbl_id)
    # Allow auto assign public ip address for subnet
    myvpc.allow_auto_assign_ip_address_for_subnet(public_subnet_id)
    # create a private subnet
    private_subnet_response = myvpc.create_subnet(vpc_id, cidr_block='10.0.2.0/24')
    private_subnet_id = private_subnet_response['Subnet']['SubnetId']
    # not associating or attaching to route table since this is a private subnet
    print ("created private subnet {} for vpc{}".format(private_subnet_id, vpc_id))
    # add name tag to the subnet
    myvpc.add_name_tag(private_subnet_id, 'Boto3-Private-Subnet')
    # EC2 instance
    ec2obj = ec2(ec2_client)
    # Create a key pair
    key_pair_name = 'Boto3-key-pair'
    ec2_key_pair_resp = ec2obj.create_key_pair(key_pair_name)
    print ("Created Key pair with name {} and key pair response {}".format(key_pair_name, str(ec2_key_pair_resp)))
    # Create a security group
    public_security_group = 'Boto-public-sg'
    pub_sg_response = ec2obj.create_security_group(public_security_group, "Public Security group for public subnet", vpc_id)
    pub_sg_response_id = pub_sg_response['GroupId']
    # add public access to security group
    ec2obj.add_inbound_rule_sg(pub_sg_response_id)
    print ('Added public access rule to security group {}'.format(public_security_group))
    # user_data runs at first boot: installs and starts Apache with a demo page
    user_data = """#!/bin/bash
                yum update -y
                yum install httpd24 -y
                service httpd start
                chkconfig httpd on
                echo "<html><body><h1> Hello from Boto3 </h1></body></html>" > /var/www/html/index.html"""
    # launch public EC2 instance
    ec2obj.launch_ec2_instance('ami-0d2692b6acea72ee6', key_pair_name, 1, 1, pub_sg_response_id, public_subnet_id , user_data)
    print ("Launching public ec2 instance using AMI ami-0d2692b6acea72ee6")
    # adding another security group
    private_security_group_name = "Boto3-private-sg"
    private_security_group_description = 'Private security group for priveta subnet'
    private_security_group_response = ec2obj.create_security_group(private_security_group_name, private_security_group_description, vpc_id)
    private_security_group_id = private_security_group_response['GroupId']
    # NOTE(review): this adds the same public inbound rule to the *private*
    # security group -- confirm that is intended for a private subnet.
    ec2obj.add_inbound_rule_sg(private_security_group_id)
    # launch a private EC2 instance (empty user_data)
    ec2obj.launch_ec2_instance('ami-0d2692b6acea72ee6', key_pair_name,1, 1, private_security_group_id, private_subnet_id, "")
if __name__ == '__main__':
    main()
b5f55de8ea97ca35342cdbe27724b7652a32477c | Python | roddyvitali/python-bases | /97-deco.py | UTF-8 | 432 | 3.890625 | 4 | [] | no_license | def deco(func):
def action(a, b, veces):
for i in range(veces):
a = a*b
func(a)
return action
@deco
def multiply(parametro):
    """Print the result produced by the deco wrapper.

    Because of @deco, callers actually invoke action(a, b, veces); this
    body only ever receives the final product as *parametro*.
    """
    print("El resultado es {}".format(parametro))
# Read the repeat count and both operands, then run the decorated function.
if __name__ == '__main__':
    times = int(input("Veces que debe repetirse la operación: "))
    num1 = int(input("Primer número: "))
    num2 = int(input("Segundo número: "))
    multiply(num1, num2, times)
7f5585deee33216fad50b5e7d0dd7d84d4d9e414 | Python | jiruiz/Repaso-Recuperatorio-PAR | /parcial_juan_ignacio_ruiz.py | UTF-8 | 3,714 | 3.5625 | 4 | [] | no_license | import csv
import os
def pedirNombre():
    """Prompt for the name of the file to create and return it with ".csv".

    The original wrapped this in a while/try for ValueError, but neither
    input() nor the f-string can raise ValueError here, so the retry loop
    was dead code; it has been removed without changing behaviour.
    """
    nombre = input("Se creará un archivo \n\tingrese nombre del archivo que desea crear: ")
    return f"{nombre}.csv"
def pedirArchivo():
    """Prompt for the name of the file to recover and return it with ".csv".

    As with pedirNombre(), the original while/try-for-ValueError could never
    trigger (str()/f-string do not raise ValueError), so the dead retry loop
    has been removed; behaviour is unchanged.
    """
    nombre = input("\tingrese nombre del archivo que desea recuperar: ")
    return f"{nombre}.csv"
def cargardatos(archivo,campos):
    """Interactively collect employee records and append them to *archivo*.

    archivo -- CSV path; a header row is written only when the file does
               not exist yet.
    campos  -- field names; one input() prompt is issued per field and the
               loop repeats while the user answers exactly "si".
    """
    guardar = "si"
    filasCarga = []
    while guardar == "si":
        empleado = {}
        for campo in campos:
            empleado[campo] = input(f"Ingrese {campo} del Empleado: ")
        filasCarga.append(empleado)
        guardar = input("Desea seguir agregando empleados? Si/No")
    try:
        # Bug fix: the old code called pedirNombre() here, prompting the user
        # for a file name whose result was never used -- the data is written
        # to the *archivo* argument, so the stray prompt has been removed.
        hayAlgunArchivo = os.path.isfile(archivo)
        with open(archivo, 'a', newline='') as file:
            archivoAGrabar = csv.DictWriter(file, fieldnames=campos)
            if not hayAlgunArchivo:
                archivoAGrabar.writeheader()
            archivoAGrabar.writerows(filasCarga)
            print("Empleado Cargado Exitosamente!")
            return
    except IOError:
        print("no se reconoce el archivo.")
def recupero(archivo):
    """Report, for employees whose legajo matches the searched value, how many
    vacation days were taken (rows in *archivo*) and how many remain.

    NOTE(review): *archivo* is read with delimiter=";" while the employees
    file (chosen via pedirArchivo) uses the default ","; confirm both files
    really use those formats. Matching is by substring (`in`), not equality.
    """
    archivo2 = pedirArchivo()
    try:  # attempt to open and cross-reference both CSV files
        with open(archivo, 'r', newline='') as file:
            with open(archivo2, "r", newline='') as file2:
                fileCSV = csv.reader(file, delimiter=";")
                file2CSV = csv.reader(file2)
                # Skip the header line of each file (next() on the raw file
                # object advances past the first line before the readers run).
                itemEmpleados = next(file2, None)
                itemLegajos = next(file, None)
                busqueda = input("legajo a buscar: ")
                contador = 0
                diasAdeudados = 0
                diasTotalesDeVacaciones = 0
                numeroLegajo = 0
                for linea in fileCSV:  # every row of the "days taken" file
                    if busqueda in linea[0]:  # substring match on the legajo column
                        contador += 1  # one matching row counts as one day taken
                for vuelta in file2CSV:  # every employee row
                    numeroLegajo = int(vuelta[0])
                    diasTotalesDeVacaciones = int(vuelta[3])  # total vacation days owed
                    if busqueda in vuelta[0]:
                        diasAdeudados = diasTotalesDeVacaciones - contador
                        print(f"Legajo N°: {numeroLegajo}: {vuelta[1]}")
                        print(f'\tse tomó: {contador} dias, debe {diasAdeudados}')
                #print(f"el consumo total general es de {totalGeneral}")
    except IOError:
        print("Hubo un error al abrir el archivo.")
    except ValueError:
        print("debe ingresar un entero")
def main():
    """Console menu loop: create/append employee data or query vacation days.

    NOTE(review): the branches are independent `if` statements, so the final
    `else` is bound only to the `== "3"` test; after options "1" or "2" run,
    "elija una opcion valida" is also printed. An if/elif chain was probably
    intended -- confirm before changing behaviour.
    """
    LEGAJOS= "legajo.csv"  # NOTE(review): defined but never used here
    CAMPOS = ['Legajo','Apellido','Nombre','Total Vacaciones']
    CAMPOSLEGAJOS = ['Legajo','Fecha']  # NOTE(review): defined but never used here
    while True:
        print("\tElija una opcion:\n\t 1.Cargar datos de Empleados \n\t 2.Consulta dias de VacacionesPendientes\n\t 3.Salir")
        opcion = input("")
        if opcion == "1":
            archivo = pedirNombre()
            cargardatos(archivo, CAMPOS)
        if opcion == "2":
            archivo = input("ingrese nombre del archivo a recuperar")
            try:
                recupero(f"{archivo}.csv")
            except IOError:
                print("error de lectura I/O")
            # except:
            #     print("otro tipo de error")
        if opcion == "3":
            exit()
        else:
            print("elija una opcion valida")
main()
| true |
7c5d11dd6c07c368db415d282d393e4d7dd0455b | Python | hoainho/python-learining | /Lession1.py | UTF-8 | 125 | 3.4375 | 3 | [] | no_license | # name = 'Anh Da Đen';
name = 'AAA'
# True: every cased character in the name is upper-case.
name2 = name.isupper()
print(name2)
# List
n = 30
a = list(range(10))
print(a)
| true |
4e4a4f6a28aa5f7d44eae198bd7e2ecf6ab06942 | Python | adeemm/Apples2Oranges | /network.py | UTF-8 | 6,298 | 3.328125 | 3 | [] | no_license | import numpy as np
class Network(object):
    """A small fully-connected neural network trained with SGD/backprop.

    Weights and biases are NumPy arrays; activations use the sigmoid
    function and training reports progress to a GUI (`app`, `ui`).
    """
    def __init__(self, sizes):
        """Initialize from a list of layer sizes (neurons per layer) and
        draw random (standard normal) weights and biases."""
        self.sizes = sizes
        self.num_layers = len(sizes)
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
        # for plotting data
        self.losses = []
        self.acc1s = []
        self.acc2s = []
    def feedforward(self, x):
        """Feed feature matrix *x* through the network.

        Returns (outputs, activations): the pre-activation values and the
        sigmoid activations at every layer (activations[0] is the input).
        """
        activation = x
        activations = [x]
        outputs = []
        for bias, weight in zip(self.biases, self.weights):
            output = np.dot(weight, activation) + bias
            outputs.append(output)
            activation = self.sigmoid(output)
            activations.append(activation)
        return outputs, activations
    def backpropagate(self, x, y):
        """Return (loss, bias gradients, weight gradients) for one sample."""
        # initialize partial derivatives (gradients) of biases and weights
        delta_bias = [np.zeros(bias.shape) for bias in self.biases]
        delta_weight = [np.zeros(weight.shape) for weight in self.weights]
        # get outputs of neural net and calculate loss / cost
        outputs, activations = self.feedforward(x)
        loss = self.loss(activations[-1], y)
        # derivative of the cross-entropy loss w.r.t. the output activation
        delta_loss = activations[-1] - y
        delta = delta_loss
        delta_bias[-1] = delta
        delta_weight[-1] = np.dot(delta, activations[-2].T)
        # propagate the error backwards through the hidden layers using
        # reverse / negative indexing
        for l in range(2, self.num_layers):
            output = outputs[-l]
            delta_activation = self.sigmoid_prime(output)
            delta = np.dot(self.weights[-l + 1].T, delta) * delta_activation
            delta_bias[-l] = delta
            delta_weight[-l] = np.dot(delta, activations[-l - 1].T)
        return loss, delta_bias, delta_weight
    def train(self, x, y, x_validation, y_validation, learning_rate, epochs, batch_size, app, ui):
        """Train with mini-batch stochastic gradient descent.

        x, y                        -- training features / one-hot labels
        x_validation, y_validation  -- held-out data for per-epoch accuracy
        app, ui                     -- GUI handles used for progress output
        """
        batches = x.shape[0] / batch_size
        self.losses = []
        self.acc1s = []
        self.acc2s = []
        for e in range(epochs):
            batch_gen = self.batch(x, y, batch_size)
            for b in range(int(batches)):
                batch = batch_gen.__next__()
                delta_bias = [np.zeros(bias.shape) for bias in self.biases]
                delta_weight = [np.zeros(weight.shape) for weight in self.weights]
                # accumulate the change (delta) for weights and biases
                for batch_x, batch_y in batch:
                    loss, delta2_bias, delta2_weight = self.backpropagate(batch_x, batch_y)
                    delta_bias = [db + d2b for db, d2b in zip(delta_bias, delta2_bias)]
                    delta_weight = [dw + d2w for dw, d2w in zip(delta_weight, delta2_weight)]
                # update weights / biases by multiplying the gradients with the ratio of learning rate to batch size
                self.weights = [weight - (learning_rate / batch_size) * dw for weight, dw in zip(self.weights, delta_weight)]
                self.biases = [bias - (learning_rate / batch_size) * db for bias, db in zip(self.biases, delta_bias)]
            # output info to gui; `loss` here is the last sample's loss from
            # the final batch of the epoch, not an epoch average
            output = "<strong>Epoch {}</strong> - Loss: {:f}".format(e, loss)
            ui.netOutput.append(output)
            app.processEvents()
            print("Epoch {} - Loss: {:f}".format(e, loss))
            acc1 = self.validate(x, y)
            output = "- Train Accuracy: {:f}".format(acc1)
            ui.netOutput.append(output)
            acc2 = self.validate(x_validation, y_validation)
            output = "- Valid Accuracy: {:f}\n".format(acc2)
            ui.netOutput.append(output)
            # update gui (training occurs on main thread and hangs gui updates)
            app.processEvents()
            # epoch results used for visualization
            self.losses.append(loss)
            self.acc1s.append(acc1)
            self.acc2s.append(acc2)
    def validate(self, x, y):
        """Return classification accuracy (percent) of the network on (x, y)."""
        count = 0
        for x2, y2 in zip(x, y):
            outputs, activations = self.feedforward(x2)
            # predicted label = output neuron with the strongest activation;
            # compare against the one-hot label's argmax
            if np.argmax(activations[-1]) == np.argmax(y2):
                count += 1
        accuracy = self.accuracy(count, x.shape[0])
        print(" - Accuracy: {:f}".format(accuracy))
        return accuracy
    def predict(self, x):
        """Return "apple" or "orange" for an unlabeled input image.

        Falls through (returns None) if argmax is neither 0 nor 1, which can
        only happen with more than two output neurons.
        """
        outputs, activations = self.feedforward(x)
        prediction = np.argmax(activations[-1])
        # convert activation index to the category's string name
        if prediction == 0:
            return "apple"
        elif prediction == 1:
            return "orange"
    @staticmethod
    def batch(x, y, batch_size):
        """Generator yielding (feature, label) pair iterators in batches.

        Each yielded value is a zip object, so it can be consumed only once.
        """
        for i in range(0, x.shape[0], batch_size):
            batch = zip(x[i:i + batch_size], y[i:i + batch_size])
            yield batch
    @staticmethod
    def sigmoid(x):
        """Logistic activation: maps x into (0, 1) elementwise."""
        return 1.0 / (1.0 + np.exp(-x))
    @staticmethod
    def sigmoid_prime(x):
        """Derivative of the sigmoid, evaluated at x."""
        s = Network.sigmoid(x)
        return s * (1 - s)
    @staticmethod
    def loss(prediction, target):
        """Sigmoid cross-entropy between prediction and the one-hot target;
        nan_to_num guards the log() at exact 0/1 activations."""
        return np.sum(np.nan_to_num(-target * np.log(prediction) - (1 - target) * np.log(1 - prediction)))
    @staticmethod
    def accuracy(correct, total):
        """Return correct/total as a percentage."""
        return (float(correct) / total) * 100
| true |
a35df9d16aad9329d6ea6fe0b9b815731856c19f | Python | kfair/16-311-Lab-3 | /Lab-9/path_planning.py | UTF-8 | 7,287 | 3 | 3 | [] | no_license | import math
from shapely.geometry import LineString
import copy
# Measurements in inches
l1 = 3.75  # upper-arm link length
l2 = 2.5   # forearm link length
start = (0, 0) #(t1, t2) degrees
A = (3.75, 2.5) #(x, y) inches
B = (-3.75, 2.5) #(x, y) inches
# Obstacle rectangle edges plus workspace boundary, as line segments the
# arm's links must not cross.
obstacle = [LineString([(-2.5, 8.5), (2.5, 8.5)]),
            LineString([(2.5, 8.5), (2.5, 5)]),
            LineString([(2.5, 5), (-2.5, 5)]),
            LineString([(-2.5, 5), (-2.5, 8.5)]),
            # Workspace boundaries:
            LineString([(-7.1, 8.1), (7.1, 8.1)]),
            LineString([(7.1, 8.1), (7.1, -0.1)]),
            LineString([(7.1, -0.1), (-7.1, -0.1)]),
            LineString([(-7.1, -0.1), (-7.1, 8.1)])
            ]
# Configuration-space grid: one cell per whole degree of (theta1, theta2);
# theta1 spans 0..179, theta2 spans -180..179 (shifted by t2_to_i).
t1range, t2range = 180, 360;
cspace = [[0 for y in range(0, t2range)] for x in range(0, t1range)]
def t1_to_i(t):
    """Map a theta1 value in degrees (0..179) to its cspace row index."""
    return round(t)
def t2_to_i(t):
    """Map a theta2 value in degrees (-180..179) to its cspace column index."""
    return round(t + 180)
def i_to_t1(i):
    """Inverse of t1_to_i: row index back to theta1 degrees."""
    return i
def i_to_t2(i):
    """Inverse of t2_to_i: column index back to theta2 degrees."""
    return i - 180
# Rasterize the configuration space: for every whole-degree joint pair, mark
# the cell 1 when either link would cross an obstacle/boundary segment.
# NOTE: despite the variable name, `valid` is set to 1 on a COLLISION, so in
# cspace 0 means free and 1 means blocked.
for t1 in range(0, 180):
    for t2 in range(-180, 180):
        t1r = math.radians(t1)
        t2r = math.radians(t2)
        # Forward kinematics: (x1, y1) is the elbow, (x2, y2) the end effector.
        x2 = l1 * math.cos(t1r) + l2 * math.cos(t1r + t2r)
        y2 = l1 * math.sin(t1r) + l2 * math.sin(t1r + t2r)
        x1 = l1 * math.cos(t1r)
        y1 = l1 * math.sin(t1r)
        ls1 = LineString([(0, 0), (x1, y1)])
        ls2 = LineString([(x1, y1), (x2, y2)])
        # If either linestring intersects a line in the rectangle, we know the
        # pose is in collision.
        valid = 0
        for ol in obstacle:
            if ol.crosses(ls1) or ol.crosses(ls2):
                valid = 1
        cspace[t1_to_i(t1)][t2_to_i(t2)] = valid
def inv_kinematics(p):
    """Return both (theta1, theta2) solutions, in radians, placing the end
    effector of the 2-link arm (link lengths l1, l2) at p = (x, y)."""
    x, y = p[0], p[1]
    r_sq = x * x + y * y
    # Law of cosines gives the elbow angle; the mirror solution negates it.
    elbow = math.acos((r_sq - l1 * l1 - l2 * l2) / (2 * l1 * l2))
    base_angle = math.atan2(y, x)
    reach = math.sqrt(r_sq)
    shoulder_a = base_angle - math.asin(l2 * math.sin(elbow) / reach)
    shoulder_b = base_angle - math.asin(l2 * math.sin(-elbow) / reach)
    return [(shoulder_a, elbow), (shoulder_b, -elbow)]
# points: [(t11, t21), (t12, t22)] -- the two inverse-kinematics solutions.
# start:  (st1, st2) -- the current joint angles.
def pick_better_config(points, start):
    """Return whichever configuration in *points* is valid, preferring the
    one closest (L1 distance) to *start* when both or neither are valid.

    NOTE(review): t11/t12 are compared against 0..180 and used to index the
    degree-based cspace grid, while inv_kinematics returns radians; also,
    cspace stores 1 for a collision yet this checks `== 1` as "valid".
    Both look suspect -- confirm intended units/polarity with the callers
    before changing them; only the unambiguous return-tuple typo is fixed.
    """
    [(t11, t21), (t12, t22)] = points
    (st1, st2) = start
    firstValid = 0 <= t11 and t11 < 180 and cspace[t1_to_i(t11)][t2_to_i(t21)] == 1
    secondValid = 0 <= t12 and t12 < 180 and cspace[t1_to_i(t12)][t2_to_i(t22)] == 1
    if firstValid and not secondValid:
        return (t11, t21)
    elif not firstValid and secondValid:
        # Bug fix: this branch used to return (t21, t22), pairing the second
        # solution's theta2 with itself; the second configuration is (t12, t22).
        return (t12, t22)
    else:
        # We'll use the L1 metric wrt the configuration space for closest point.
        d1 = abs(t11 - st1) + abs(t21 - st2)
        d2 = abs(t12 - st1) + abs(t22 - st2)
        if d1 < d2:
            return (t11, t21)
        else:
            return (t12, t22)
# Debug dump of both IK solutions for point B.
print(inv_kinematics(B))
# Choose a configuration for each waypoint (A -> B -> A), seeding each choice
# with the previously chosen configuration so the arm moves as little as possible.
(At1, At2) = pick_better_config(inv_kinematics(A), start)
(Bt1, Bt2) = pick_better_config(inv_kinematics(B), (At1, At2))
(Ct1, Ct2) = pick_better_config(inv_kinematics(A), (Bt1, Bt2))
# Convert the chosen joint angles from radians to degrees for the grid planner.
At1 = math.degrees(At1)
At2 = math.degrees(At2)
Bt1 = math.degrees(Bt1)
Bt2 = math.degrees(Bt2)
Ct1 = math.degrees(Ct1)
Ct2 = math.degrees(Ct2)
def wavefront(startAngles, endAngles):
    """Plan a path in configuration space from startAngles to endAngles.

    Runs a 4-connected wavefront expansion over a copy of cspace (goal cell
    seeded with 2, free cells take distance v+1) until the start cell is
    reached, then walks the distance gradient back down, recording a waypoint
    (theta1, theta2) in degrees at every direction change plus both endpoints.
    Cells already non-zero (obstacles, value 1) block the expansion.
    Returns the list of waypoints, start first.
    """
    (st1, st2) = startAngles
    (et1, et2) = endAngles
    # Get the starting indices
    st1i = t1_to_i(st1)
    st2i = t2_to_i(st2)
    startOnGrid = i_to_t1(st1i) == st1 and i_to_t2(st2i) == st2
    et1i = t1_to_i(et1)
    et2i = t2_to_i(et2)
    endOnGrid = i_to_t1(et1i) == et1 and i_to_t2(et2i) == et2
    # Make a copy of the space so repeated calls start from a clean grid.
    cspace2 = copy.deepcopy(cspace)
    cspace2[et1i][et2i] = 2
    # Expand the wavefront until the start cell receives a distance value.
    while cspace2[st1i][st2i] == 0:
        for t1 in range(0, len(cspace2)):
            for t2 in range(0, len(cspace2[t1])):
                v = cspace2[t1][t2]
                if v >= 2:
                    # 4 point connectivity.
                    if t1 + 1 < t1range and cspace2[t1 + 1][t2] == 0:
                        cspace2[t1 + 1][t2] = v + 1
                    if t1 - 1 >= 0 and cspace2[t1 - 1][t2] == 0:
                        cspace2[t1 - 1][t2] = v + 1
                    if t2 + 1 < t2range and cspace2[t1][t2 + 1] == 0:
                        cspace2[t1][t2 + 1] = v + 1
                    if t2 - 1 >= 0 and cspace2[t1][t2 - 1] == 0:
                        cspace2[t1][t2 - 1] = v + 1
    # Trace back from the start cell, stepping to a neighbour whose distance
    # is exactly one less each iteration.
    direction = None
    (t1, t2) = (st1i, st2i)
    positions = [startAngles]
    if not startOnGrid:
        positions.append((i_to_t1(st1i), i_to_t2(st2i)))
    v = cspace2[t1][t2];
    while v > 2:
        v = v-1
        # If continuing in the current direction does not reach a cell of
        # value v, pick a new direction and record the turn as a waypoint.
        if not((direction=='t1+1' and t1+1<t1range and cspace2[t1+1][t2]==v) or\
               (direction=='t1-1' and t1-1>=0 and cspace2[t1-1][t2]==v) or \
               (direction=='t2+1' and t2+1<t2range and cspace2[t1][t2+1]==v) or \
               (direction=='t2-1' and t2-1>=0 and cspace2[t1][t2-1]==v)):
            # Change directions.
            if t1+1<t1range and cspace2[t1+1][t2] == v:
                if direction is not None:
                    positions.append((i_to_t1(t1), i_to_t2(t2)))
                direction = 't1+1'
                t1 = t1+1
            elif t1-1>=0 and cspace2[t1-1][t2] == v:
                if direction is not None:
                    positions.append((i_to_t1(t1), i_to_t2(t2)))
                direction = 't1-1'
                t1 = t1-1
            elif t2+1<t2range and cspace2[t1][t2+1] == v:
                if direction is not None:
                    positions.append((i_to_t1(t1), i_to_t2(t2)))
                direction = 't2+1'
                t2 = t2+1
            elif t2-1>=0 and cspace2[t1][t2-1] == v:
                if direction is not None:
                    positions.append((i_to_t1(t1), i_to_t2(t2)))
                direction = 't2-1'
                t2 = t2-1
            else:
                # No neighbour has the expected distance; abort the trace.
                print("Error changing direction")
                print(t1, t2)
                break
        else:
            # Keep moving in the same direction.
            if direction == 't1+1':
                t1 = t1 + 1
            elif direction == 't1-1':
                t1 = t1 - 1
            elif direction == 't2+1':
                t2 = t2 + 1
            elif direction == 't2-1':
                t2 = t2 - 1
            else:
                print("Error going in same direction")
                print(direction)
                break
    positions.append((i_to_t1(et1i), i_to_t2(et2i)))
    if not endOnGrid:
        positions.append(endAngles)
    return positions
def _emit_move_block(moves, trailer_lines):
    """Print a RobotC `move(a, b);` line for every waypoint in *moves*,
    snapping joint angles within 0.01 degrees of zero to exactly 0, then
    print each string in *trailer_lines* (wait/annotation output).

    Replaces four copy-pasted loops; the printed output is identical.
    """
    for a, b in moves:
        if abs(a) < 0.01:
            a = 0
        if abs(b) < 0.01:
            b = 0
        print("move(" + str(a) + ", " + str(b) + ");")
    for line in trailer_lines:
        print(line)

moves1 = wavefront(start, (At1, At2))
moves2 = wavefront((At1, At2), (Bt1, Bt2))
moves3 = wavefront((Bt1, Bt2), (Ct1, Ct2))
# Moves 4 gets us back to (0, 0)
moves4 = wavefront((Ct1, Ct2), (0, 0))
_emit_move_block(moves1, ["wait1Msec(3000);", "// At point A"])
_emit_move_block(moves2, ["wait1Msec(3000);", "// At point B"])
_emit_move_block(moves3, ["wait1Msec(3000);", "// At point A"])
_emit_move_block(moves4, [])
| true |
390af846e70b14e74e31a44d128feb263d257411 | Python | ValerioB88/generators | /utils_generator.py | UTF-8 | 5,875 | 2.875 | 3 | [] | no_license | from enum import Enum
import numpy as np
def get_background_color(background_type):
    """Map a BackGroundColorType member to an 8-bit greyscale pixel value.

    WHITE -> 254, BLACK -> 0, GREY -> 170, RANDOM -> a value drawn
    uniformly from [0, 254) via np.random.randint (high bound exclusive).

    Raises ValueError for unknown members; the original fell through and
    crashed with UnboundLocalError instead.
    """
    if background_type == BackGroundColorType.WHITE:
        background_color = 254
    elif background_type == BackGroundColorType.BLACK:
        background_color = 0
    elif background_type == BackGroundColorType.GREY:
        background_color = 170
    elif background_type == BackGroundColorType.RANDOM:
        background_color = np.random.randint(0, 254)
    else:
        raise ValueError(f"Unsupported background type: {background_type!r}")
    return background_color
def get_range_translation(translation_type, size_object_y, size_canvas, size_object_x, jitter=0):
    """Return the pixel range of legal object-centre translations.

    Args:
        translation_type: a TranslationType member selecting the placement
            region (LEFT/RIGHT halves, WHOLE canvas, small areas, single
            pixels, a horizontal line, or the leftmost position).
        size_object_y, size_object_x: object height/width in pixels, used
            to keep the object fully inside the region.
        size_canvas: (width, height) of the canvas.
        jitter: extra margin applied by WHOLE/LEFTMOST to leave room for
            random jitter added later.

    Returns:
        (minX, maxX, minY, maxY) with half-open ranges [minX, maxX),
        [minY, maxY).

    Raises:
        AssertionError: for TranslationType members with no range here
            (e.g. CUSTOM, MULTI).
    """
    # translation here is always [minX, maxX), [minY, maxY)
    # sy = size_object_y / 10
    # sx = size_object_x / 10
    sx = 0
    sy = 0
    if translation_type == TranslationType.LEFT:
        minX = int(size_object_x / 2 + sx)
        maxX = int(size_canvas[0] / 2 - 0) # ((size_object_x / 2) if middle_empty else 0))
        minY = int(size_object_y / 2 + sy)
        maxY = int(size_canvas[1] - size_object_y / 2 - sy)
    elif translation_type == TranslationType.RIGHT:
        # we use that magic pixel to make the values exactly the same when right or whole canvas
        minX = int(size_canvas[0] / 2 + 0) # ((size_object_x / 2) if middle_empty else 0) - 1)
        maxX = int(size_canvas[0] - size_object_x / 2 - sx)
        minY = int(size_object_y / 2 + sy)
        maxY = int(size_canvas[1] - size_object_y / 2 - sy)
    elif translation_type == TranslationType.WHOLE:
        minX = int(size_object_x / 2 + sx) + jitter
        maxX = int(size_canvas[0] - size_object_x / 2 - sx) - jitter
        # np.sum(x_grid < np.array(size_canvas)[0] / 2) == np.sum(x_grid > np.array(size_canvas)[0] / 2)
        minY = int(size_object_y / 2 + sy) + jitter
        maxY = int(size_canvas[1] - size_object_y / 2 - sy) - jitter
    #
    # NOTE(review): the branches below index size_canvas[1] for X and
    # size_canvas[0] for Y, the opposite of the branches above -- this only
    # matters for non-square canvases; confirm it is intentional.
    elif translation_type == TranslationType.SMALL_AREA_RIGHT:
        minX = int(size_canvas[1] / 2 + (size_canvas[1] / 2) * (1 / 3))
        maxX = int(size_canvas[1] / 2 + (size_canvas[1] / 2) * (2 / 3)) + 1
        minY = int(0 + (size_canvas[0] / 2) * (1 / 3))
        maxY = int(0 + (size_canvas[0] / 2) * (2 / 3)) + 1
    elif translation_type == TranslationType.VERY_SMALL_AREA_RIGHT:
        minX = int(size_canvas[1] / 2 + (size_canvas[1] / 2) * (4 / 10))
        maxX = int(size_canvas[1] / 2 + (size_canvas[1] / 2) * (6 / 10)) + 1
        minY = int(0 + (size_canvas[0] / 2) * (4 / 10))
        maxY = int(0 + (size_canvas[0] / 2) * (6 / 10)) + 1
    elif translation_type == TranslationType.ONE_PIXEL:
        minX = int(size_canvas[1] * 0.74)
        maxX = int(size_canvas[1] * 0.74) + 1
        minY = int(size_canvas[0] * 0.25)
        maxY = int(size_canvas[0] * 0.25) + 1
    elif translation_type == TranslationType.CENTER_ONE_PIXEL:
        minX = size_canvas[1] // 2
        maxX = size_canvas[1] // 2 + 1
        minY = size_canvas[0] // 2
        maxY = size_canvas[0] // 2 + 1
    elif translation_type == TranslationType.HLINE:
        # idx =[i for i in range(20) if size_canvas[0] // 2 + (size_object_x // 2 * i) < size_canvas[0] - (size_object_x // 2 + jitter) + 1][-1]
        # minX = size_canvas[0] // 2 - (size_object_x // 2 * idx)
        # maxX = size_canvas[0] // 2 + (size_object_x // 2 * idx) + 1
        minX = size_object_x // 2
        maxX = size_canvas[0] - size_object_x // 2 + 1
        minY = size_canvas[1] // 2
        maxY = size_canvas[1] // 2 + 1
    elif translation_type == TranslationType.LEFTMOST: # add 10 pixels for possible jitter
        minX = size_object_x // 2 + jitter
        maxX = size_object_x // 2 + 1 + jitter
        minY = size_canvas[1] // 2
        maxY = size_canvas[1] // 2 + 1
    else:
        assert False, 'TranslationType not recognised'
    return minX, maxX, minY, maxY
def split_canvas_in_n_grids(num_classes, size_canvas, buffer=0):
    """Partition a canvas into num_classes equal square grid cells.

    Args:
        num_classes: number of cells; must be a perfect square.
        size_canvas: (width, height) of the canvas in pixels.
        buffer: space left on the borders, usually half the object size.

    Returns:
        A list of (minX, maxX, minY, maxY) int tuples, one per cell,
        ordered row by row.

    Raises:
        ValueError: if num_classes is not a perfect square. (The original
            used `assert False`, which silently disappears under
            `python -O`.)
    """
    num_grids_each_side = np.sqrt(num_classes)
    if num_grids_each_side % 1 != 0:
        raise ValueError('You need a square number, but sqrt({}) is {}'.format(num_classes, num_grids_each_side))
    num_grids_each_side = int(num_grids_each_side)
    # NOTE(review): the cell size is derived from the canvas width only;
    # for a non-square canvas the Y cells still span `grid_size` pixels.
    grid_size = (size_canvas[0] - buffer * 2) // num_grids_each_side
    minX_side = np.linspace(buffer, size_canvas[0] - buffer - grid_size, num_grids_each_side)
    minY_side = np.linspace(buffer, size_canvas[1] - buffer - grid_size, num_grids_each_side)
    maxX_side = minX_side + grid_size
    maxY_side = minY_side + grid_size
    minX_m, minY_m = np.meshgrid(minX_side, minY_side)
    maxX_m, maxY_m = np.meshgrid(maxX_side, maxY_side)
    minX, minY = minX_m.flatten(), minY_m.flatten()
    maxX, maxY = maxX_m.flatten(), maxY_m.flatten()
    return [(int(minX[i]), int(maxX[i]), int(minY[i]), int(maxY[i]))
            for i in range(num_classes)]
def get_translation_values(translation_type, length_face, size_canvas, width_face, grid_size):
    """Build the meshgrid of (x, y) translation coordinates for one type.

    Samples the legal translation range returned by get_range_translation
    every `grid_size` pixels and returns the X and Y coordinate arrays.
    """
    x_min, x_max, y_min, y_max = get_range_translation(
        translation_type, length_face, size_canvas, width_face)
    xs = np.arange(x_min, x_max, grid_size)
    ys = np.arange(y_min, y_max, grid_size)
    grid_x, grid_y = np.meshgrid(xs, ys)
    return grid_x, grid_y
def random_colour(N, range_col=(0, 254)):
    """Return N random colours as an (N, 3) integer array.

    Each of the three channels is drawn without replacement from
    [range_col[0], range_col[1]), so N must not exceed the range size.

    NOTE(review): reshaping the (3, N) channel stack row-major interleaves
    channel values across rows for N > 1 -- kept as-is to preserve the
    original behaviour.
    """
    lo, hi = range_col
    channels = [np.random.choice(np.arange(lo, hi), N, replace=False)
                for _ in range(3)]
    return np.array(channels).reshape((-1, 3))
def numpy_tuple_to_builtin_tuple(nptuple):
    """Convert a tuple of numpy scalars to a tuple of plain Python values."""
    return tuple(element.item() for element in nptuple)
class TranslationType(Enum):
    """Placement region for an object on the canvas.

    The pixel ranges for each member are produced by
    get_range_translation; CUSTOM and MULTI have no range there and are
    handled elsewhere.
    """
    LEFT = 0
    RIGHT = 1
    WHOLE = 2
    CUSTOM = 3
    SMALL_AREA_RIGHT = 4
    VERY_SMALL_AREA_RIGHT = 5
    ONE_PIXEL = 6
    MULTI = 7
    CENTER_ONE_PIXEL = 8
    # NOTE(review): value 9 is skipped -- presumably a removed member.
    HLINE = 10
    LEFTMOST = 11
class BackGroundColorType(Enum):
    """Greyscale background fill, mapped to intensities by
    get_background_color (WHITE=254, BLACK=0, GREY=170, RANDOM=uniform)."""
    WHITE = 0
    BLACK = 1
    GREY = 2
    RANDOM = 3
1baeb5efdae8662c550e503f398bd588c79f0d96 | Python | anilguptafuas/Automotive-Sensor-for-Object-Recognition-using-RedPitaya-and-Raspberry-Pi | /codes/SvmClassifier.py | UTF-8 | 952 | 2.671875 | 3 | [] | no_license | import numpy as np
from numpy import genfromtxt
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn import metrics
from joblib import dump, load
# Load the per-class feature matrices produced by the feature-extraction
# step (one row per radar sample).
data1 = np.array(genfromtxt('FeaturedData/WallFeatures.csv',delimiter=','))
data2 = np.array(genfromtxt('FeaturedData/HumanFeatures.csv',delimiter=','))
data3 = np.array(genfromtxt('FeaturedData/CarFeatures.csv',delimiter=','))
x = np.concatenate((data1,data2,data3))
# Class labels: wall = -1, human = 0, car = 1 (one label per feature row).
label1 = np.repeat(-1,data1.shape[0])
label2 = np.repeat(0,data2.shape[0])
label3 = np.repeat(1,data3.shape[0])
y = np.concatenate((label1,label2,label3))
# Hold out 20% of the samples for evaluation.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
clf = svm.SVC(kernel='linear')
clf.fit(x_train, y_train)
# Persist the trained model, then reload it to verify the round trip
# before evaluating on the held-out set.
dump(clf,'SvmClassifierModel.joblib')
clf_loaded = load('SvmClassifierModel.joblib')
y_pred = clf_loaded.predict(x_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
84fe1bb0f344ebb8cb71d8bfaed4df5e36e56f31 | Python | vipulhld001/Bag_Detection | /badvsnobag.py | UTF-8 | 1,974 | 2.703125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
# Root folder holding one sub-directory per class.
# NOTE(review): backslashes in a plain string literal are fragile for
# Windows paths; prefer a raw string r"D:\DeepLear\Sendex".
DATADIR ="D:\DeepLear\Sendex"
CATEGORIES = ["bag","nbag"]
# Load a single sample image (the `break`s stop after the first file of
# the first category) to sanity-check reading and resizing.
for category in CATEGORIES:
    path = os.path.join(DATADIR, category) #my path to data images
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)
        IMG_SIZE = 80
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
        #plt.imshow(img_array, cmap="gray")
        #plt.show()
        break
    break
#print(img_array) #showing original data for sample
#Shape training to reduce the size of data images
#plt.imshow(new_array, cmap="gray")
#plt.show()
# Accumulates [resized_grayscale_image, class_index] pairs.
training_data = []
def create_training_data():
    """Fill the module-level `training_data` list with one
    [80x80 grayscale image, class index] pair per readable image under
    DATADIR/<category>. Relies on the module globals DATADIR, CATEGORIES
    and IMG_SIZE."""
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category) # my path to data images
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array,class_num])
            except Exception as e:
                # Unreadable or corrupt images are silently skipped.
                pass
            #plt.imshow(img_array, cmap="gray")
            #plt.show()
            #break
        #break
create_training_data()
print(len(training_data))
#Doing Shuffle so that I can provide diffent data at diff time
import random
random.shuffle(training_data)
# Print the first few labels to eyeball the shuffle.
for sample in training_data[:10]:
    print(sample[1])
# Split the shuffled pairs into feature and label arrays.
X = []
y = []
for features, lable in training_data:
    X.append(features)
    y.append(lable)
# Shape for a CNN: (num_samples, 80, 80, 1 channel).
X = np.array(X).reshape(-1, 80, 80,1)
#np.save('features.npy',X) #saving
#X=np.load('features.npy') #loading
# Persist the arrays for the training script.
import pickle
pickle_out = open("X.pickle","wb")
pickle.dump(X,pickle_out)
pickle_out.close()
pickle_out = open("y.pickle", "wb")
pickle.dump(y, pickle_out)
pickle_out.close()
#to load data
# NOTE(review): pickle_in is never closed; use a `with` block.
pickle_in =open("X.pickle","rb")
X = pickle.load(pickle_in)
X[1]
b4f98c486ef3d727d64ad05cd6ad3b8841ab742c | Python | patrickbuess/srmi | /scraper/moduleScraperPart2.py | UTF-8 | 16,751 | 2.59375 | 3 | [] | no_license | from bs4 import BeautifulSoup
import random
import requests
from fake_useragent import UserAgent
import datetime
import traceback
from scraper.functionScraper import *
from scraper.classListingObject import *
from scraper.classHelpClasses import *
# ####### SECOND PART SCRAPER ######
def scraper2():
    """Second-stage scraper: fetch unchecked listing URLs through random
    free proxies, parse the listing pages with BeautifulSoup and persist
    details/distances/attributes/description rows to the database.

    Loops until `getUrlsID()` returns no unchecked URLs. Each pass finds a
    working proxy, samples 10 unchecked URLs, marks them in-progress,
    scrapes each one, and unmarks them again if an error occurs.
    """
    uncheckedUrls = True
    # INITIALISE DATABASE CONNECTION
    urlsToScrape = UrlList(DBOperations("kezenihi_srmidb3"))
    while uncheckedUrls is True:
        try:
            # SET RANDOM PROXY AND FAKE USER AGENT
            proxies = get_proxies()
            # GET FAKE USERAGENT VIA FAKE_USERAGENT PACKAGE
            ua = UserAgent()
            headers = ua.random
            # TEST PROXY
            url = 'https://httpbin.org/ip'
            proxyWorks = False
            print("GET PROXY")
            while proxyWorks is False:
                global proxy
                print("Request")
                proxy = random.choice(proxies)
                try:
                    print("TRY")
                    response = requests.get(url, proxies={"http": proxy, "https": proxy}, timeout=10)
                    proxyWorks = True
                    print(response.json())
                except:
                    # Most free proxies will often get connection errors. You will have retry the entire request using another proxy to work.
                    # We will just skip retries as its beyond the scope of this tutorial and we are only downloading a single url
                    print("Skipping. Connnection error")
            proxies = {
                "http": proxy,
                "https": proxy,
            }
            # GET UNCHECKED URLS
            urlsList = urlsToScrape.getUrlsID()
            # NOTE(review): random.sample raises ValueError when fewer than
            # 10 unchecked URLs remain -- confirm the outer except is meant
            # to absorb that case.
            urlsList = random.sample(urlsList, 10)
            # STORE URLS IN PROGRESS, SO THEY CAN BE UNCHECKED AT THE END
            urlsIDs = [item[0] for item in urlsList]
            # CHECK IF THERE ARE UNSCRAPED URLS AVAILABLE
            if (len(urlsList) == 0):
                uncheckedUrls = False
            else:
                urlsToScrape.markInProgress(1, urlsIDs)
            try:
                # SCRAPE
                url = urlsList[0]
                for url in urlsList:
                    checkedURL = False
                    print("--> Check URL: "+url[1])
                    # CREATE LISTING OBJECT
                    listing = listingObject(DBOperations("kezenihi_srmidb3"))
                    listing.listingID = url[0]
                    listing.address = url[2]
                    listing.postalCode = url[3]
                    # c = requests.get(url[1], proxies=proxies, headers={'user-agent': headers}).content
                    c = None
                    try:
                        c = requests.get(url[1], proxies=proxies, headers={'user-agent': headers}, timeout=10).content
                    except Exception as e:
                        print("An error occured")
                        print(e)
                    if (c is not None):
                        print("Url checked successfully")
                        checkedURL = True
                        soup = BeautifulSoup(c, 'html.parser')
                        descrCheck = False
                        try:
                            listing.description = soup.select('#div_Description')[0].text.strip().replace("\n", "")
                            descrCheck = True
                        except:
                            print("No description available")
                        # CONVERT ADDRESS VIA GOOGLE GEOLOCATION API
                        print("RUN GEOLOCATION API")
                        # SECURITY(review): hard-coded Google API key committed
                        # to source -- move it to an environment variable and
                        # revoke this one.
                        api_key = "AIzaSyDLpLwScHEHrVpIQOVjSj0RaCOwrkuViNI"
                        query = (listing.address+", "+str(listing.postalCode)+", Switzerland").replace(" ","+")
                        try:
                            googleurl = 'https://maps.googleapis.com/maps/api/geocode/json?address=' + query + '&lang=de&key=' + api_key
                            result = requests.get(googleurl)
                            data = result.json()
                            location = data['results'][0]
                            listing.latitude = location['geometry']['location']['lat']
                            listing.longitude = location['geometry']['location']['lng']
                        except:
                            print("Google API was not successful")
                        # GET ATTRIBUTES GRID INFOS
                        listingAvailable = False
                        try:
                            infos = get_list_content_pairs(soup)
                            listingAvailable = True
                        except:
                            print("Listing is not available anymore.")
                        if (listingAvailable is True):
                            # Map each <dt>/<dd> pair onto listing fields;
                            # "On request" values are treated as missing.
                            # NOTE(review): the loop variable `type` shadows
                            # the builtin of the same name.
                            for info in infos:
                                type = info.find('dt').string.strip()
                                if (type == "Property type"):
                                    listing.category = info.find('dd').string.strip()
                                if (type == "Rent per month"):
                                    result = info.find('dd').string.strip()
                                    if(result != "On request"):
                                        substitutions = {"CHF ": "", ",": ""}
                                        listing.price = replaceMultiple(result, substitutions)
                                if (type == "Rent per day"):
                                    result = info.find('dd').string.strip()
                                    if(result != "On request"):
                                        substitutions = {"CHF ": "", ",": ""}
                                        listing.pricePerDay = replaceMultiple(result, substitutions)
                                if (type == "Rent per week"):
                                    result = info.find('dd').string.strip()
                                    if(result != "On request"):
                                        substitutions = {"CHF ": "", ",": ""}
                                        listing.pricePerWeek = replaceMultiple(result, substitutions)
                                if (type == "Annual rent per m²"):
                                    result = info.find('dd').string.strip()
                                    if(result != "On request"):
                                        substitutions = {"CHF ": "", ",": ""}
                                        listing.pricePerYear = replaceMultiple(result, substitutions)
                                if (type == "Rent per month (without charges)"):
                                    result = info.find('dd').string.strip()
                                    if(result != "On request"):
                                        substitutions = {"CHF ": "", ",": ""}
                                        listing.primaryCosts = replaceMultiple(result, substitutions)
                                if (type == "Supplementary charges"):
                                    result = info.find('dd').string.strip()
                                    if(result != "On request"):
                                        substitutions = {"CHF ": "", ",": ""}
                                        listing.additionalCosts = replaceMultiple(result, substitutions)
                                if (type == "Living space"):
                                    result = info.find('dd').string.strip().replace(" m²", "")
                                    if(result != "On request"):
                                        listing.size = result
                                if (type == "Floor space"):
                                    result = info.find('dd').string.strip().replace(" m²", "")
                                    if(result != "On request"):
                                        listing.floorSpace = result
                                if (type == "Property area"):
                                    result = info.find('dd').string.strip().replace(" m²", "")
                                    if(result != "On request"):
                                        listing.propertyArea = result
                                if (type == "Rooms"):
                                    listing.rooms = info.find('dd').string.strip().replace("½", ".5")
                                if (type == "Floor"):
                                    result = info.find('dd').string.strip().replace(". floor", "")
                                    if (result == "Ground floor"):
                                        listing.floor = 0
                                    elif (result == "Basement"):
                                        listing.floor = -1
                                    else:
                                        listing.floor = result
                                if (type == "Available"):
                                    listing.available = info.find('dd').string.strip()
                                if (type == "Year of construction"):
                                    listing.construction = info.find('dd').string.strip()
                                if (type == "Lift"):
                                    listing.elevator = 1
                                if (type == "Balcony/ies"):
                                    listing.balconies = 1
                                if (type == "Motorway"):
                                    listing.motorway = info.find('dd').string.strip().replace(" m", "")
                                if (type == "Shops"):
                                    listing.shops = info.find('dd').string.strip().replace(" m", "")
                                if (type == "Public transport stop"):
                                    listing.publicTransport = info.find('dd').string.strip().replace(" m", "")
                                if (type == "Kindergarten"):
                                    listing.kindergarten = info.find('dd').string.strip().replace(" m", "")
                                if (type == "Primary school"):
                                    listing.primarySchool = info.find('dd').string.strip().replace(" m", "")
                                if (type == "Secondary school"):
                                    listing.secondarySchool = info.find('dd').string.strip().replace(" m", "")
                                if (type == "Minergie certified"):
                                    listing.minergie = 1
                                if (type == "Pets allowed"):
                                    listing.pets = 1
                                if (type == "Child-friendly"):
                                    listing.childFriendly = 1
                                if (type == "Cable TV"):
                                    listing.cableTV = 1
                                if (type == "New building"):
                                    listing.newBuilding = 1
                                if (type == "Wheelchair accessible"):
                                    listing.wheelchair = 1
                                if (type == "Outdoor parking"):
                                    listing.parkingOutdoor = 1
                                if (type == "Indoor parking"):
                                    listing.parkingIndoor = 1
                                if (type == "Veranda"):
                                    listing.veranda = 1
                                if (type == "Swimming pool"):
                                    listing.pool = 1
                        # CREATE LIST WITH ONLY FILLED OUT VALUES, MAKES IT EASIER TO SEND PER SQL
                        insertDetails = []
                        insertDetailsKeys = []
                        for k, v in vars(listing).items():
                            if (k in ["listingID", "category", "postalCode", "address", "latitude", "longitude", "price", "pricePerDay", "pricePerWeek", "pricePerYear", "primaryCosts", "additionalCosts", "size", "floorSpace", "propertyArea", "rooms", "floor", "available", "construction"]):
                                if (v is not None):
                                    insertDetailsKeys.append(k)
                                    insertDetails.append(v)
                        insertDetailsKeys = str(tuple(insertDetailsKeys)).replace("'", "")
                        insertDistances = []
                        insertDistancesKeys = []
                        for k, v in vars(listing).items():
                            if (k in ["listingID", "motorway", "shops", "publicTransport", "kindergarten", "primarySchool", "secondarySchool"]):
                                if (v is not None):
                                    insertDistancesKeys.append(k)
                                    insertDistances.append(v)
                        # NOTE(review): bare expression below has no effect.
                        len(insertDistancesKeys)
                        if (len(insertDistancesKeys) > 1):
                            insertDistancesKeys = str(tuple(insertDistancesKeys)).replace("'", "")
                        # INSERT listingDetails
                        listing.insertInfos(table="listingDetails",
                                            columns=str(insertDetailsKeys),
                                            listings=[tuple(insertDetails)])
                        # INSERT listingDistances
                        if (len(insertDistancesKeys) > 1):
                            listing.insertInfos(table="listingDistances",
                                                columns=str(insertDistancesKeys),
                                                listings=[tuple(insertDistances)])
                        # CHECK IF ATTRIBUTES ARE AVAILABLE
                        attributesAvailable = sum([listing.elevator, listing.balconies, listing.minergie, listing.pets, listing.childFriendly, listing.cableTV, listing.newBuilding, listing.wheelchair, listing.parkingOutdoor, listing.parkingIndoor, listing.veranda, listing.pool])
                        if (attributesAvailable > 0):
                            # INSERT listingAttributes
                            listing.insertInfos(table="listingAttributes",
                                                columns="(listingID, elevator, balconies, minergie, pets, childFriendly, cableTV, newBuilding, wheelchair, parkingIndoor, parkingOutdoor, veranda, pool)",
                                                listings=[(listing.listingID, listing.elevator, listing.balconies, listing.minergie, listing.pets, listing.childFriendly, listing.cableTV, listing.newBuilding, listing.wheelchair, listing.parkingOutdoor, listing.parkingIndoor, listing.veranda, listing.pool)])
                        # INSERT listingDescription
                        if(descrCheck is True):
                            listing.insertInfos(table="listingDescription",
                                                columns="(listingID, description)",
                                                listings=[(listing.listingID, listing.description)])
                        # UPDATE LISTING URL
                        if (checkedURL is True):
                            d = datetime.datetime.today()
                            urlsToScrape.updateUrl(date=d.strftime('%Y-%m-%d'), id=listing.listingID)
            except Exception as e:
                print("THIS URL HAD AN ERROR")
                print(traceback.format_exc())
                print(e)
                urlsToScrape.markInProgress(0, urlsIDs)
            print("\n\n")
        except Exception as e:
            print("SCRAPING ERROR \n\n")
            print(traceback.format_exc())
            print(e)
| true |
422c106557b96738219aea33bdf6879f20f07199 | Python | mathman93/RoombaCI-Clemson | /Python_Files/Roomba_DHTurn_Test.py | UTF-8 | 4,817 | 2.9375 | 3 | [] | no_license | ''' Roomba_DHTurn_Test.py
Purpose: Test DHTurn() function using IMU magnetometer data
IMPORTANT: Must be run using Python 3 (python3)
Last Modified: 6/6/2019
'''
## Import libraries ##
import serial
import time
import RPi.GPIO as GPIO
import RoombaCI_lib # Make sure this file is in the same directory
from RoombaCI_lib import DHTurn
## Variables and Constants ##
global Xbee # Specifies connection to Xbee
Xbee = serial.Serial('/dev/ttyUSB0', 115200) # Baud rate should be 57600
# LED pin numbers
yled = 5
rled = 6
gled = 13
# Timing Counter Parameters
data_timer = 0.2
reset_timer = 10
# DH_Turn Parameters
epsilon = 1.0 # (Ideally) smallest resolution of magnetometer
data_counter = 0 # Data number counter
## Functions and Definitions ##
''' Displays current date and time to the screen
'''
def DisplayDateTime():
    """Print the current UTC date and time as Month day, Year, HH:MM:SS."""
    stamp = time.strftime("%B %d, %Y, %H:%M:%S", time.gmtime())
    print("Program run: ", stamp)
## -- Code Starts Here -- ##
# Setup Code #
GPIO.setmode(GPIO.BCM) # Use BCM pin numbering for GPIO
DisplayDateTime() # Display current date and time
# LED Pin setup
GPIO.setup(yled, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(rled, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(gled, GPIO.OUT, initial=GPIO.LOW)
# Wake Up Roomba Sequence
GPIO.output(gled, GPIO.HIGH) # Turn on green LED to say we are alive
print(" Starting ROOMBA... ")
Roomba = RoombaCI_lib.Create_2("/dev/ttyS0", 115200)
Roomba.ddPin = 23
GPIO.setup(Roomba.ddPin, GPIO.OUT, initial=GPIO.LOW)
Roomba.WakeUp(131) # Start up Roomba in Safe Mode
# 131 = Safe Mode; 132 = Full Mode (Be ready to catch it!)
Roomba.BlinkCleanLight() # Blink the Clean light on Roomba
if Roomba.Available() > 0: # If anything is in the Roomba receive buffer
    x = Roomba.DirectRead(Roomba.Available()) # Clear out Roomba boot-up info
    #print(x) # Include for debugging
print(" ROOMBA Setup Complete")
GPIO.output(yled, GPIO.HIGH) # Indicate within setup sequence
# Initialize IMU
print(" Starting IMU...")
imu = RoombaCI_lib.LSM9DS1_I2C() # Initialize IMU
time.sleep(0.1)
# Clear out first reading from all sensors
x = imu.magnetic
x = imu.acceleration
x = imu.gyro
# Calibrate IMU (Roomba must spin for the magnetometer calibration
# and stand still for the gyro calibration)
print(" Calibrating IMU...")
Roomba.Move(0,75) # Start Roomba spinning
imu.CalibrateMag() # Calculate magnetometer offset values
Roomba.Move(0,0) # Stop Roomba spinning
time.sleep(0.5)
imu.CalibrateGyro() # Calculate gyroscope offset values
# Display offset values
print("mx_offset = {:f}; my_offset = {:f}; mz_offset = {:f}"\
	.format(imu.m_offset[0], imu.m_offset[1], imu.m_offset[2]))
print("gx_offset = {:f}; gy_offset = {:f}; gz_offset = {:f}"\
	.format(imu.g_offset[0], imu.g_offset[1], imu.g_offset[2]))
print(" IMU Setup Complete")
time.sleep(3) # Gives time to read offset values before continuing
GPIO.output(yled, GPIO.LOW) # Indicate setup sequence is complete
if Xbee.inWaiting() > 0: # If anything is in the Xbee receive buffer
    x = Xbee.read(Xbee.inWaiting()).decode() # Clear out Xbee input buffer
    #print(x) # Include for debugging
GPIO.output(yled, GPIO.LOW) # Indicate setup sequence complete
# Main Code #
angle = imu.CalculateHeading() # Get initial heading information
forward = 0
desired_heading = 0
data_base = time.time()
reset_base = time.time()
while True:
    try:
        # Update heading of Roomba
        angle = imu.CalculateHeading()
        spin = DHTurn(angle, desired_heading, epsilon) # Value needed to turn to desired heading point
        Roomba.Move(forward, spin) # Move Roomba to desired heading point
        if spin == 0:
            GPIO.output(yled, GPIO.LOW) # Indicate Roomba is not turning
        else:
            GPIO.output(yled, GPIO.HIGH) # Indicate Roomba is turning
        # Every reset_timer (10 s), rotate the target heading by 90 degrees,
        # wrapping at 360.
        if (time.time() - reset_base) > reset_timer:
            desired_heading += 90
            if desired_heading >= 360:
                desired_heading -= 360
            reset_base += reset_timer
        # Print heading data to the monitor every data_timer (0.2) seconds
        if (time.time() - data_base) > data_timer: # After data_timer has elapsed
            [mx,my,mz] = imu.magnetic # Read magnetometer component values
            angle = imu.CalculateHeading() # Calculate heading
            # Note: angle may not correspond to mx, my, mz
            #[ax,ay,az] = imu.acceleration # Read accelerometer component values
            #[gx,gy,gz] = imu.gyro # Read gyroscope component values
            print("{0:.4f}, {1:.4f}, {2:.5f}, {3:.5f}, {4:.5f}".format(angle,desired_heading,mx,my,mz))
            data_base += data_timer
    except KeyboardInterrupt:
        print('') # print new line
        break # exit while loop
## -- Ending Code Starts Here -- ##
# Make sure this code runs to end the program cleanly
Roomba.Move(0,0) # Stop Roomba movement
#Roomba.PlaySMB()
GPIO.output(gled, GPIO.LOW) # Turn off green LED
GPIO.output(yled, GPIO.LOW) # Turn off yellow LED
Roomba.ShutDown() # Shutdown Roomba serial connection
Xbee.close()
GPIO.cleanup() # Reset GPIO pins for next program
| true |
5e994fbcda9fc7f6dad0b1f9f6c3978138abcd2e | Python | h3nok/MLIntro | /Notebooks/interop/test_executable.py | UTF-8 | 785 | 2.625 | 3 | [] | no_license | from unittest import TestCase
from interop.executable import Executable
class TestExecutable(TestCase):
    """Smoke tests for interop.executable.Executable.

    The large blocks of commented-out pipe_output experiments from earlier
    iterations have been removed; see version control if they are needed.
    """

    def test_run(self):
        """Running a simple shell command to completion should not raise."""
        exe = Executable('echo', shell_cmd=True)
        exe.run(args=['test3', 'test4'], block=True)

    def test_pipe_output(self):
        """Placeholder -- TODO: add assertions for pipe_output()."""
        pass
c0024fda9f2bcaa663c03acba1aa82ff28e9c725 | Python | mikejune/Chew | /server/food/utils.py | UTF-8 | 1,694 | 2.765625 | 3 | [] | no_license | import math
import requests
import os
from food.models import Image
def get_distance_from_long_lat_in_miles(lat1, long1, lat2, long2):
    """Great-circle distance in miles between two (latitude, longitude)
    points, using the spherical law of cosines with an Earth radius of
    3960 miles.

    BUG FIX: floating-point rounding can push the computed cosine
    fractionally outside [-1, 1] for identical (or antipodal) points,
    which made the original raise ValueError (math domain error) from
    math.acos. The value is now clamped before acos().
    """
    degrees_to_radians = math.pi/180.0
    phi1 = (90.0 - lat1)*degrees_to_radians
    phi2 = (90.0 - lat2)*degrees_to_radians
    theta1 = long1*degrees_to_radians
    theta2 = long2*degrees_to_radians
    cos_arc = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
               math.cos(phi1)*math.cos(phi2))
    # Clamp into acos()'s domain to guard against rounding error.
    cos_arc = max(-1.0, min(1.0, cos_arc))
    return math.acos(cos_arc) * 3960
def prepare_food(request, foods):
    """Attach preview images and (optionally) distances to food objects.

    The caller's position is taken either from a "coords" query parameter
    ("lat,lng") or, failing that, by geocoding a "location" query
    parameter through the Google Geocoding API. When a position is known,
    each food gets a `distance` attribute and the list is returned sorted
    by that distance; otherwise the original order is kept.
    """
    location_lat = None
    location_lng = None
    coords = request.query_params.get('coords')
    location = request.query_params.get('location')
    if coords is not None:
        coords = coords.split(',')
        location_lat, location_lng = coords[0], coords[1]
    if coords is None and location is not None:
        google_r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?', params={'address': location, 'key': os.getenv('GOOGLE_MAPS_API_KEY')})
        # BUG FIX: the original compared "status_code is 200", which only
        # works because CPython caches small ints; use equality.
        if google_r.status_code == 200 and google_r.json():
            google_location = google_r.json()['results'][0]['geometry']['location']
            location_lat, location_lng = google_location['lat'], google_location['lng']
    for food in foods:
        # Best-voted image, or the placeholder image (id=1) when the food
        # has no images at all.
        food.preview_image = Image.objects.filter(food=food).order_by('votes').first() or Image.objects.get(id=1)
        if location_lat is not None and location_lng is not None:
            food.distance = get_distance_from_long_lat_in_miles(float(food.restaurant.address.latitude), float(food.restaurant.address.longitude), float(location_lat), float(location_lng))
    if location_lat is not None and location_lng is not None:
        foods = sorted(foods, key=lambda food: food.distance)
    return foods
| true |
919e1f8a4b021d75496f3bcff369261a09362a65 | Python | pavponn/optimization-methods | /lab1/src/grad/grad_descent.py | UTF-8 | 1,961 | 2.984375 | 3 | [
"MIT"
] | permissive | from typing import Callable, List, Optional
import numpy as np
import lab1.src.grad.grad_step_strategy as st
import lab1.src.grad.stop_criteria as sc
DEFAULT_EPSILON = 1e-9
DEFAULT_MAX_ITERATIONS = 1e5
def gradient_descent(f: Callable[[np.ndarray], float],
                     f_grad: Callable[[np.ndarray], np.ndarray],
                     start: np.ndarray,
                     step_strategy: st.StepStrategy,
                     stop_criteria: sc.StopCriteria,
                     eps_strategy: float = DEFAULT_EPSILON,
                     eps_stop_criteria: float = DEFAULT_EPSILON,
                     max_iterations_strategy=DEFAULT_MAX_ITERATIONS,
                     max_iterations_criteria=DEFAULT_MAX_ITERATIONS,
                     trajectory: Optional[List] = None):
    """Minimise f by gradient descent.

    Args:
        f: objective function.
        f_grad: gradient of f.
        start: initial point.
        step_strategy: how the step size is chosen each iteration.
        stop_criteria: how convergence is detected.
        eps_strategy, eps_stop_criteria: tolerances for the step strategy
            and the stop criteria respectively.
        max_iterations_strategy, max_iterations_criteria: iteration caps
            for the two components; the latter also caps this loop.
        trajectory: optional list; every visited point is appended to it.

    Returns:
        (x, iterations): the final point and number of iterations run.
    """
    strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy, max_iterations_strategy)
    criteria = sc.get_stop_criteria(stop_criteria, f, f_grad, eps_stop_criteria, max_iterations_criteria)
    cur_x = start
    iters = 0
    if trajectory is not None:
        trajectory.append(cur_x)
    while True:
        iters += 1
        cur_grad = f_grad(cur_x)
        step = strategy.next_step(cur_x)
        next_x = cur_x - step * cur_grad
        if criteria.should_stop(cur_x, next_x):
            return cur_x, iters
        cur_x = next_x
        if trajectory is not None:
            trajectory.append(cur_x)
        # DEFAULT_MAX_ITERATIONS is the float 1e5; the int/float equality
        # comparison still triggers exactly at iteration 100000.
        if iters == max_iterations_criteria:
            return cur_x, iters
if __name__ == '__main__':
    # Smoke test: minimise f(x, y) = x^2 + y^2 starting from (3, 4);
    # the printed result should be close to the origin.
    def foo(p):
        return p[0] ** 2 + p[1] ** 2
    def foo_grad(p):
        x, y = p[0], p[1]
        return np.array([2 * x, 2 * y])
    res, _ = gradient_descent(foo,
                              foo_grad,
                              start=np.array([3, 4]),
                              step_strategy=st.StepStrategy.DIVIDE_STEP,
                              stop_criteria=sc.StopCriteria.BY_GRAD)
    print(res)
| true |
eb32aae1c7d9cc2bd1c0019dc4c24d73e3b3fee1 | Python | piotrpatrzylas/Repl.it | /POP1 Part-time/Session 1 Problem 13: Leap year.py | UTF-8 | 614 | 4.5 | 4 | [] | no_license | """
Given the year number. You need to check if this year is a leap year. If it is, print LEAP, otherwise print COMMON.
The rules in Gregorian calendar are as follows:
a year is a leap year if its number is exactly divisible by 4 and is not exactly divisible by 100
a year is always a leap year if its number is exactly divisible by 400
Warning. The words LEAP and COMMON should be printed all caps.
For example, on input
2000
output must be
LEAP
"""
# Gregorian leap-year rules: a year is a leap year when it is divisible
# by 400, or divisible by 4 but not by 100.
year = int(input())
is_leap = year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)
print("LEAP" if is_leap else "COMMON")
| true |
43e22fd03e7b52ea60a83ca5bed9b82afb285ac0 | Python | Aasthaengg/IBMdataset | /Python_codes/p04001/s514570777.py | UTF-8 | 189 | 2.75 | 3 | [] | no_license | s=list(input())
n=len(s)
ans=0
for i in range(2**(n-1)):
ball=s[0]
for j in range(n-1):
if (i>>j)&1:
ball+="+"+s[j+1]
else:
ball+=s[j+1]
ans+=eval(ball)
print(ans) | true |
ede028b7a1b63728c52988333c47411b702b57f0 | Python | Newcomer03/Crypto | /One_Time_Pad/OTP.py | UTF-8 | 1,835 | 3.6875 | 4 | [] | no_license | """
Q1: Encryption and Decryption using One Time Pad cipher.
"""
import math
import random
import secrets
import string
alphabet = list("abcdefghijklmnopqrstuvwxyz")
def read_input():
    """
    Read 1_input.txt and return its lines with surrounding
    whitespace stripped.
    """
    tx = []
    with open("1_input.txt", "r") as file:
        data = file.readlines()
        for line in data:
            tx.append(line.strip())
    return tx
def genKey(n):
    """Return a random lowercase one-time-pad key of length n.

    SECURITY FIX: a one-time pad is a cryptographic key, so the key
    material is drawn with the `secrets` module; the original used the
    non-cryptographic `random` module (and a redundant math.floor around
    randint).
    """
    return "".join(secrets.choice(string.ascii_lowercase) for _ in range(n))
def encrypt(st, key):
    """Encrypt `st` with the equal-length pad `key`.

    Each output letter is (plain + key) mod 26 over the lowercase
    alphabet; both inputs are lower-cased before indexing.
    """
    cipher = ""
    for plain_ch, key_ch in zip(st, key):
        shift = (alphabet.index(plain_ch.lower()) + alphabet.index(key_ch.lower())) % 26
        cipher += alphabet[shift]
    return cipher
def write_output(x, y, key):
    """
    Write the cipher report (text/key/encrypted/decrypted) to
    1_output.txt and echo the same report to stdout.
    Note: the "Decrypted" lines print `x` (the original plaintext),
    not the output of decrypt().
    """
    f = open("1_output.txt", "w")
    print('\n#====== OTP Cipher ======#', file=f)
    print('Text: ' + x, file=f)
    print('Key: ', key, file=f)
    print('Encrypted: ' + y, file=f)
    print('Decrypted: ' + x, file=f)
    print('#----------------------------#\n',file=f)
    f.close()
    print('\n#====== OTP Cipher ======#')
    print('Text: ' + x)
    print('Key: ', key)
    print('Encrypted: ' + y)
    print('Decrypted: ' + x)
    print('#----------------------------#\n')
def decrypt(st, key):
    """Invert encrypt: each output letter is (cipher - key) mod 26."""
    plain = ""
    for cipher_ch, key_ch in zip(st, key):
        # Python's % returns a non-negative result for a positive modulus,
        # so the original's explicit negative-wrap branch is unnecessary.
        shift = (alphabet.index(cipher_ch.lower()) - alphabet.index(key_ch.lower())) % 26
        plain += alphabet[shift]
    return plain
#_________ Main
# Encrypt the first line of 1_input.txt with a pad of matching length,
# then round-trip it and write the report.
inp = read_input()[0]
key = genKey(len(inp))
ecpt = encrypt(inp,key)
# NOTE(review): dcpt is computed but never used -- write_output prints the
# original plaintext as the "Decrypted" line.
dcpt = decrypt(ecpt,key)
write_output(inp, ecpt, key)
c58ac75d2e492047134cbdb256b99a0b0d16add6 | Python | JacobWhite-dev/bespoke-fractals | /visualiser.py | UTF-8 | 10,415 | 3.578125 | 4 | [] | no_license | """
Visualiser module for visualising latent spaces using dimensionality
reduction.
Created on Wed Jan 29 2020
@author: uqjwhi35
"""
import math
import numpy as np
import pandas as pd
import sklearn.manifold
import metrics
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class Visualiser():
'''
Class for the visualising latent spaces using dimensionality reduction.
To do this data, labels and a reducer must be provided.
Data:
Data are the points in the latent space we want to visualise. They should
be given as a pandas DataFrame with each point occupying its own row, and
each column representing a specific dimension in the latent space, e.g.
____ | dim0 | dim1 | dim2 | ... | dimN
pt 0 | 0.11 | 1.34 | 1.24 | ... | 4.55
pt 1 | 5.67 | 4.55 | 0.00 | ... | 6.77
... | ... | ... | ... | ... | ...
pt N | 2.56 | 1.11 | 6.33 | ... | 1.23
The actual headers provided for each column are not important, but it
never hurts to label your data.
Labels:
Labels are what we will use to colour each point in our visualisation.
They normally are used to distinguish between input classes, e.g. digits
in the MNIST digits dataset. A separate sub-plot will be created for each
type of label supplied. Labels should also be provided in the form of
a pandas DataFrame, with each point occupying its own row, and each column
representing the value of a given label, e.g.
____ | height | weight | sex | ... | GPA
pt 0 | 0.11 | 1.34 | M | ... | 4.55
pt 1 | 5.67 | 4.55 | F | ... | 6.77
... | ... | ... | ... | ... | ...
pt N | 2.56 | 1.11 | F | ... | 1.23
Note that unlike in the data DataFrame, the column headers do matter
as they are used to generate the titles for any plots. Note also that the
order of rows should match that of the data DataFrame. Finally, labels may
be numerical or strings (as in the sex column above).
Reducer:
The reducer is the object that will be used to perform the dimensionality
reduction on our data. This can be any sklearn pipeline, UMAP object or
many more! If it has a fit_transform method that takes in data and spits
out data, then it will work. Useful techniques can be found in the
sklearn.manifold module. My recommended reducers can be generated using the
helper functions in this module.
Simple Use:
Once you have initialised your Visualiser with data, labels and a reducer,
calling the visualise method will reduce your data and plot it in the
correct number of dimensions (1, 2 or 3) for you. It's as easy as that!
Less Simple Use (But Still Pretty Simple):
This class also has some other methods that can be called, and the comments
on each describe what they do.
'''
def __init__(self, data, labels, reducer):
self._labels = None
self._data = None
self._result = None
self.set_data(data)
self.set_labels(labels)
self.set_reducer(reducer)
def set_data(self, data):
# Check if data are dataframe
if not isinstance(data, pd.DataFrame):
print("Data have not been provided as a pandas DataFrame",
"and will be cast to one. Errors may occur.")
data = pd.DataFrame(data = data)
self._data = data
if not self.data_consistent():
print("The data and labels DataFrames are different sizes.")
    def get_data(self):
        """Return the stored data DataFrame."""
        return self._data
def set_labels(self, labels):
# Check if labels are dataframe
if not isinstance(labels, pd.DataFrame):
# Convert to dataframe with default titles
print("Labels have not been provided as a pandas DataFrame",
"and will be cast to one. Default label names will be",
"generated and errors may occur.")
labels = pd.DataFrame(data = labels)
self._labels = labels
if not self.data_consistent():
print("The data and labels DataFrames are different sizes.")
    def get_labels(self):
        """Return the stored labels DataFrame."""
        return self._labels
def set_reducer(self, reducer):
# Check that the reducer has the required methods
fit = getattr(reducer, "fit", None)
if not callable(fit):
print("Reducer does not possess a fit method")
fit_transform = getattr(reducer, "fit_transform", None)
if not callable(fit_transform):
print("Reducer does not possess a fit_transform method")
transform = getattr(reducer, "transform", None)
if not callable(transform):
print("Reducer does not possess a transform method")
self._reducer = reducer
    def get_reducer(self):
        """Return the stored reducer object."""
        return self._reducer
    def set_result(self, result):
        """Overwrite the stored reduction result (normally set by transform)."""
        self._result = result
    def get_result(self):
        """Return the reduced data, or None if no reduction has run yet."""
        return self._result
def data_consistent(self):
if self._labels is None or self._data is None:
return True
# Check that labels and data have same number of points
numLabels, _ = self._labels.shape
numDataPts, _ = self._data.shape
return numLabels == numDataPts
    def fit(self):
        """Fit the reducer on the stored data without producing a result."""
        self._reducer.fit(self._data)
    def transform(self):
        """Reduce the stored data with the already-fitted reducer and cache it."""
        self._result = self._reducer.transform(self._data)
    def fit_transform(self):
        """Fit the reducer on the stored data and cache the reduced result."""
        self._result = self._reducer.fit_transform(self._data)
# def process_labels(self):
# is_numeric = True
# try:
# labels = labels.astype(float)
# except:
# is_numeric = False
# if is_numeric:
# #print("Numeric")
# max_label = np.amax(labels)
# min_label = np.amin(labels)
# delta_labels = max_label - min_label + 1
# c = self._labels.iloc[:, index].astype(np.float32)
# boundaries = np.arange(delta_labels + 1) - 0.5
# ticks = np.arange(delta_labels)
# else:
# #print("Non-numeric")
# unique = labels.unique()
# c = np.array([int((unique == label)[0]) for label in labels])
# boundaries = np.arange(unique.size + 1) - 0.5
# ticks = np.arange(unique.size + 1)
def __plot_result_1d(self, fig, rows, cols, index):
points = np.size(self._result)
ax = fig.add_subplot(rows, cols, index + 1)
ax.scatter(self._result, np.zeros((points, )), c= self._labels[index, :], cmap = 'Spectral', s = 5)
def __plot_result_2d(self, fig, rows, cols, index):
# Get labels for plot
labels = self._labels.iloc[:, index]
is_numeric = True
try:
labels = labels.astype(float)
except:
is_numeric = False
if is_numeric:
print("Numeric")
max_label = np.amax(labels)
min_label = np.amin(labels)
delta_labels = max_label - min_label + 1
c = self._labels.iloc[:, index].astype(np.float32)
boundaries = np.arange(delta_labels + 1) - 0.5
ticks = np.arange(delta_labels)
else:
print("Non-numeric")
unique = labels.unique()
c = np.array([int((unique == label)[0]) for label in labels])
boundaries = np.arange(unique.size + 1) - 0.5
ticks = np.arange(unique.size + 1)
colourBarOn = True
ax = fig.add_subplot(rows, cols, index + 1)
im = ax.scatter(self._result[:, 0], self._result[:, 1], c= c, cmap = 'Spectral', s = 5)
if colourBarOn:
cbar = fig.colorbar(im, boundaries = boundaries, ax = ax)
cbar.set_ticks(ticks)
if not is_numeric:
cbar.set_ticklabels(unique)
ax.set_title(self._labels.columns[index])
plt.gca().set_aspect('equal', 'datalim')
def __plot_result_3d(self, fig, rows, cols, index):
labels = self._labels.iloc[:, index]
# Handle non-numeric and numeric labels
#if issubclass(labels.dtype.type, int):
is_numeric = True
try:
labels = labels.astype(float)
except:
is_numeric = False
if is_numeric:
print("Numeric")
max_label = np.amax(labels)
min_label = np.amin(labels)
delta_labels = max_label - min_label + 1
c = self._labels.iloc[:, index].astype(np.float32)
boundaries = np.arange(delta_labels + 1) - 0.5
ticks = np.arange(delta_labels)
else:
print("Non-numeric")
unique = labels.unique()
c = np.array([int((unique == label)[0]) for label in labels])
boundaries = np.arange(unique.size + 1) - 0.5
ticks = np.arange(unique.size + 1)
colourBarOn = True
#colourBarOn = True if delta_labels <= 30 else False
ax = fig.add_subplot(rows, cols, index + 1, projection='3d')
im = ax.scatter(self._result[:, 0], self._result[:, 1], self._result[:, 2], c = c, cmap = 'Spectral', s = 5)
if colourBarOn:
cbar = fig.colorbar(im, boundaries = boundaries, ax = ax)
cbar.set_ticks(ticks)
if not is_numeric:
cbar.set_ticklabels(unique)
ax.set_title(self._labels.columns[index])
    def __invalid_dimensions(self, *argv, **kwargs):
        """Fallback plotter for result dimensions other than 1/2/3; accepts and
        ignores the usual plotting arguments."""
        print("Data cannot be plotted.")
    def plot_result(self):
        """Plot the cached reduction, one subplot per label column.

        Chooses a 1-D/2-D/3-D plotter from the result's dimensionality and
        lays subplots out on a near-square grid. Returns the figure, or None
        (after a message) when no reduction has been computed yet.
        """
        # Check dimension of result and plot accordingly
        # Make sure to account for subplots based on dimensions of labels
        if self._result is None:
            print("No result has been calculated.")
            return
        # One subplot per label column.
        numPlots = 1 if self._labels.ndim == 1 else self._labels.shape[1]
        rows = math.ceil(math.sqrt(numPlots))
        cols = math.ceil(numPlots / rows)
        dim = self._result.shape[1]
        # Dispatch on dimensionality; anything other than 1/2/3 just prints.
        plot_funcs = {1: self.__plot_result_1d, 2: self.__plot_result_2d, 3: self.__plot_result_3d}
        plot_func = plot_funcs.get(dim, self.__invalid_dimensions)
        fig = plt.figure()
        for i in range(numPlots):
            plot_func(fig, rows, cols, i)
        plt.show()
        return fig
    def visualise(self):
        """Convenience entry point: reduce the stored data and plot it."""
        self.fit_transform()
        self.plot_result()
| true |
26ada62107634f8b13b095f983811c34c52e0d4f | Python | Romalimon03/practicas-lb-pc | /practica8/envio_correo.py | UTF-8 | 3,914 | 2.84375 | 3 | [] | no_license | # Romario Guadalupe Limón Hernández
# Librerias necesarias
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib, ssl
import json
import argparse
import sys
# Build the command-line interface; the flags below choose which routine runs.
# NOTE(review): --var has no type=, so a user-supplied value arrives as a str
# while the default stays int 0 — confirm correo_resultado handles both.
parser = argparse.ArgumentParser(description="Envio de correos con diferentes funcionalidades")
parser.add_argument("--env", "-e", help="Enviar mensaje", action="store_true")
parser.add_argument("--res", "-r", help="Correr el script (informa si hay un error)", action="store_true")
parser.add_argument("--var", "-v", help="Variable para opcional", default=0)
parser.add_argument("--usr", "-u", help="Usuario o remitente", type=str)
parser.add_argument("--des", "-d", help="Correo del destinatario", type=str, default="enviocorreos0prueba@gmail.com")
parser.add_argument("--pws", "-p", help="Password del usuario o remitente", type=str)
def envio_correo():
    # Requires a "datos.json" credentials file next to the script, e.g.:
    """Send a test HTML e-mail using credentials read from datos.json.

    Expected datos.json format:
    {
        "usr":"correo@algo.obligatorio
        "pws":"contraseña_ultra_duper_secreta"
    }
    """
    with open("datos.json") as f:
        data = json.load(f)
    # Build the message envelope.
    msg_enviar = MIMEMultipart("alternative")
    msg_enviar["From"] = data["usr"]
    receipents = ["enviocorreos0prueba@gmail.com"]
    msg_enviar["To"] = ", ".join(receipents)
    msg_enviar["Subject"] = "Mensaje de prueba"
    # HTML-formatted message body.
    html_msg = f"""
    <html>
        <body>
            Hola como te mando este mensaje <i>{data["usr"]}</i><br>
            Si te llego significa que si jalo todo el procedimiento
        </body>
    </html>
    """
    # Wrap the body declaring its MIME subtype as html.
    msg_html = MIMEText(html_msg, "html")
    # Attach the HTML part to the message.
    msg_enviar.attach(msg_html)
    # ************** Sending the e-mail *************
    # Default SSL context for the encrypted connection.
    context = ssl.create_default_context()
    # Connect to Gmail's SMTPS endpoint, log in and send.
    with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
        server.login(data["usr"], data["pws"])
        print("se inicio session")
        server.sendmail(msg_enviar["From"], msg_enviar["To"], msg_enviar.as_string())
        print("Se envio el mensaje")
def correo_resultado(x, usuario, password, destinatario):
    """Return 1/x; if the division raises a handled error, e-mail an error
    report instead (and implicitly return None).

    :param x: divisor for the test computation.
    :param usuario: sender address, also used to log in to Gmail.
    :param password: sender's password.
    :param destinatario: recipient of the error report.
    """
    try:
        return 1/x
    except (ValueError, TypeError, IndexError, ZeroDivisionError) as error:
        # type(error).__name__ reproduces exactly the strings the original
        # sys.exc_info() if/elif chain produced ("de ValueError", ...).
        # The old "INDEFINIDO" fallback was unreachable: only these four
        # exception types are ever caught.
        mensaje = "de " + type(error).__name__
        # Build the error-report e-mail.
        msg_enviar = MIMEMultipart("alternative")
        msg_enviar["From"] = usuario
        msg_enviar["To"] = destinatario
        msg_enviar["Subject"] = "Error en la ejecucion"
        html = f"""
        <html>
            <body>
                Hola mi estimado <i>{usuario}</i><br>
                La divicion fue <b>Fallida</b><br>
                El error fue <b>{mensaje}</b>
            </body>
        </html>
        """
        html_msg = MIMEText(html, "html")
        msg_enviar.attach(html_msg)
        # Send the report over SMTPS.
        context=ssl.create_default_context()
        with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
            server.login(usuario, password)
            print("Se inicio session")
            server.sendmail(msg_enviar["From"], msg_enviar["To"], msg_enviar.as_string())
            print("Se envio el correo")
if __name__=='__main__':
    # Dispatch on the CLI flags: -e sends the test mail, -r runs the
    # division-with-error-report demo, anything else points at the help.
    args = parser.parse_args()
    if args.env:
        envio_correo()
    elif args.res:
        correo_resultado(args.var, args.usr, args.pws, args.des)
    else:
        print("Intenta con -h")
| true |
767a2a9d4d5aa1558806c98ee710556b53f10d54 | Python | MUSKANJASSAL/PythonTraining2019 | /Session32A.py | UTF-8 | 725 | 3.359375 | 3 | [] | no_license | from sklearn.cluster import KMeans
# Representation of Data
# Each row is one 2-D sample; the two value ranges form two natural clusters.
data = [
    [100, 110],
    [120, 150],
    [150, 200],
    [180, 220],
    [1000, 800],
    [1200, 1200],
    [1500, 1400],
    [2000, 1800]
]
# Lets do not keep any labels for our data
# and k means clustering should do the labeling for us
# labels = [0, 0, 0, 0, 1, 1, 1, 1]
# Number of clusters for KMeans (note: variable name is a typo of "cluster").
clusture = 2
# Model Creation
model = KMeans(n_clusters=clusture)
# Model Training
# We are not mentioning labels :)
# Unsupervised: fit() only needs the samples.
model.fit(data)
# predictedClass = model.predict(data)
# print(predictedClass)
# Predict the cluster of a single unseen sample (predict expects a 2-D array).
sampleInput = [800, 1000]
predictedClass = model.predict([sampleInput])
print(predictedClass) | true |
95ab62e93f9257945f059e4a66ddc96846e26d8c | Python | dhineshns/reinvent_the_wheel | /algorithms/arrays.py | UTF-8 | 164 | 3.078125 | 3 | [] | no_license | list_apples = ["malgudi", "kansas", "kashmir"];
i = 0;
while (i<len(list_apples)):
print (list_apples[i]);
i=i+1;
for item in list_apples:
print (item); | true |
fe6e5e8bff5e1f8ecb440891a7df64861dca57b8 | Python | krishns18/Business_Analytics_Projects | /Open_Domain_QA_Chatbot/checkStemmer.py | UTF-8 | 1,372 | 3.203125 | 3 | [] | no_license | #!/usr/bin/python3
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
# Dependencies.
from DataLoader import DataLoader
from Stemmer import Stemmer
# Compare the project Stemmer against NLTK's PorterStemmer on real text.
dm = DataLoader()
stemmer = Stemmer()
porter_stemmer = PorterStemmer()
#print(stemmer.process('caresses'))
#print(stemmer.process('cats'))
#print(stemmer.process('happy'))
#print(stemmer.process('relational'))
#print(stemmer.process('digitizer'))
#print(stemmer.process('vietnamization'))
#print(stemmer.process('operator'))
#print(stemmer.process('callousness'))
#print(stemmer.process('electrical'))
#print(stemmer.process('revival'))
#print(stemmer.process('bowdlerize'))
#print(stemmer.process('adoption'))
#print(stemmer.process('cease'))
#print(stemmer.process('rate'))
#print(stemmer.process('controll'))
# Checking the performance of Porter Stemmer created.
paragraphs = dm.get_paragraphs('Marvel_Comics')
total_words = 0
matching_words = 0
for para in paragraphs:
    words = word_tokenize(para)
    for item in words:
        # Only consider tokens longer than two characters.
        if len(item) > 2:
            total_words += 1
            # Our stemmer lowercases its input; NLTK's sees the raw token.
            word = stemmer.process(item.lower())
            if word == porter_stemmer.stem(item):
                matching_words += 1
print("Total word count is: {}".format(total_words))
print("Total matching word count is: {}".format(matching_words))
# NOTE(review): divides by total_words — raises ZeroDivisionError if the
# corpus yields no tokens longer than two characters; confirm that is
# impossible for this dataset.
print("Accuracy is: {}".format((matching_words/total_words)* 100))
| true |
7d6bb22f1eb5b742d6c883b3d1142ffe099cf253 | Python | nikhil7d/Cybrillia-Answers | /answer1.py | UTF-8 | 278 | 2.96875 | 3 | [] | no_license | t = int(input())
from collections import Counter  # bug fix: Counter was used without being imported

# For each test case: read n (element count) and k, then the n elements.
# Answer: sum of the k largest per-distinct-value totals (value * multiplicity).
for _ in range(t):
    n, k = map(int, input().split())
    a = list(map(int, input().split()))
    counts = Counter(a)
    # Total contributed by each distinct value, largest first.
    totals = sorted((value * count for value, count in counts.items()), reverse=True)
    print(sum(totals[0:k]))
877aa11e9d5ef92ba0023434bc6caec13d22c606 | Python | Aasthaengg/IBMdataset | /Python_codes/p04046/s568419484.py | UTF-8 | 875 | 2.53125 | 3 | [] | no_license | h, w, a, b = map( int, input().split() )
# Precompute factorials, modular inverses and inverse factorials up to
# h + w + 1 so nCr mod MOD can be evaluated in O(1).
SIZE = h+w+2
MOD = 10**9 + 7
# NOTE(review): MOD_Farmer is never used in the visible code.
MOD_Farmer = 10**9+5
fact = [0] * SIZE
inv = [0] * SIZE
fact_inv = [0] * SIZE
# prepare --------------------------------
inv[0] = 0
inv[1] = 1
fact[0] = fact[1] = 1
fact_inv[0] = fact_inv[1] = 1
for i in range( 2, SIZE ):
    # Standard recurrence for modular inverses under a prime modulus.
    inv[i] = MOD - (MOD//i)*inv[MOD%i]%MOD
    fact[i] = fact[i-1] * i % MOD
    fact_inv[i] = fact_inv[i-1] * inv[i] % MOD
    # fact_inv[i] = fact[i] **( 10**4 ) \
    #             * fact[i] **( 10**5 ) \
    #             * fact[i] ** 5 % MOD % MOD
# ----------------------------------------
def comb( n, r ):
    """Return nCr modulo MOD using the precomputed factorial tables.

    Bug fix: the original only checked n >= 0 and r >= 0, so a call with
    r > n indexed fact_inv with a negative index and silently returned a
    wrong non-zero value instead of 0. It also returned float 0.0 in the
    invalid case, which would poison the integer modular arithmetic of the
    caller.
    """
    if 0 <= r <= n:
        return fact[n] * fact_inv[n-r]%MOD * fact_inv[r]%MOD
    return 0
# ----------------------------------------
#print( fact_inv )
# Sum, over the row i where the path crosses out of the forbidden band, of
# (ways to reach that crossing) * (ways to finish), all mod MOD.
ans = 0
for i in range( h-a ):
    ans += comb(i+b-1, b-1) * comb(h-i-1+w-b-1,w-b-1)
    ans %= MOD
print( ans )
0aef2c143c6ff6915ec96d19e5c4a05e3c5a7bbb | Python | jerryfeng007/pythonCodes | /basic/list.py | UTF-8 | 7,919 | 4.28125 | 4 | [] | no_license | import copy
print('-------------------------------------------列表的定义---------------------------------------------------')
# Approach 1: a literal.
list1 = [1, 2, 3] # more efficient than approach 2
# Approach 2: the list() constructor.
# Converts a string into a list of its characters.
list2 = list('abcde')
print(list2)
print('-------------------------------------------列表转化为字符串--------------------------------------------------')
s = ''.join(list2)
print(s)
print('-------------------------------------------列表的索引--------------------------------------------------')
list2 = [1, 2, 3, 4, 5]
print(list2[0])
print(list2[-1])
print(list2[:])
print(list2[::])
print(list2[::2])
print(list2[::-1]) # reversed copy
print(list2[4:1:-1])
print(list2[-1:-4:-1])
print(list2[4:1:1]) # empty: nothing lies in that direction
print('-------------------------------------------列表的修改--------------------------------------------------')
list2[0] = 'a'
print(list2)
print('-------------------------------------------列表的添加--------------------------------------------------')
# append
list3 = [1, 2]
list3.append(3)
list3.append([11, 22, 33])
print(list3)
# extend
list3.extend('tom') # appends 't', 'o', 'm' one by one
print(list3)
# insert
list3.insert(1, 'kk') # inserts 'kk' at index 1
print(list3)
print('-------------------------------------------列表的删除--------------------------------------------------')
# pop: removes the last element
list3.pop()
print(list3)
list3.pop(5)
print(list3)
# remove
list3.remove([11, 22, 33])
print(list3)
# clear
l4 = [1, 3, 5]
l4.clear() # empties the list in place
print(l4)
# del
del list3[4]
print(list3)
# del list3 # would delete the list object itself
print('-------------------------------------------列表的拼接--------------------------------------------------')
list4 = [1, 2, 3]
list2 = [4, 5]
list3 = list4 + list2
print(list3)
print('-------------------------------------------列表的方法--------------------------------------------------')
# max min sum
list5 = [1, 2, 3, 4, 5]
print(max(list5))
print(min(list5))
print(sum(list5))
# index
# Unlike strings, lists only have index(), not find().
list3 = [1, 2, 3, 4, 5, 1, 2, 1, 3, 1]
print(list3.index(5)) # returns the index; raises if the value is absent
# count
print(list3.count(1))
# in not in
print(6 in list3)
print('-------------------------------------------列表的排序--------------------------------------------------')
# sort
list4 = [1, 2, 9, 4, 0]
list4.sort() # ascending by default
print(list4) # the original list is modified in place
list4 = [1, 2, 9, 4, 0]
list4.sort(reverse=True) # descending
print(list4)
# sorted
list4 = [1, 2, 9, 4, 0]
print(sorted(list4)) # does not modify the original list
print(list4)
# reverse: in-place reversal
list5 = [1, 2, 'a', 'b']
list5.reverse()
print(list5)
print('-------------------------------------------列表的遍历--------------------------------------------------')
# while
list6 = [1, 2, 3, 4]
i = 0
while i < len(list1):
    print(list1[i])
    i += 1
# for over the elements
for i in list1:
    print(i)
# for over the indices
for i in range(len(list1)):
    print(list1[i])
print('-------------------------------------------深浅拷贝--------------------------------------------------')
# Deep copy: copies everything, top-level and nested objects alike, so no
# change to the original propagates. Shallow copy: copies only the top-level
# object, so mutating nested objects in the original shows through.
'''
深拷贝:拷贝所有对象,包括顶级对象、嵌套对象,所有原始对象的改变不会造成深拷贝里任何子元素的改变
浅拷贝:只拷贝顶级对象,不拷贝嵌套对象,所以原始数据改变,嵌套对象会改变
'''
a = [1, 2, 3, [4, 5]]
b = copy.deepcopy(a)
c = copy.copy(a)
print(a, b, c) # all equal in value
print(id(a), id(b), id(c)) # but copies, so the ids all differ
a.append(7) # affects neither the shallow nor the deep copy
a[3].append(6) # affects the shallow copy, not the deep copy
print(a)
print(b)
print(c)
print('-------------------------------------------如何理解列表的可变--------------------------------------------------')
# Scenario 1
l8 = [1, 3, 4]
print(id(l8))
# To change l8 into [1, 3, 4, 5], simply append;
# the id stays the same because the list mutates in place.
l8.append(5)
print(l8)
print(id(l8))
# Scenario 2
l1 = [1, 2, 3]
l2 = [1, 2, 3] # l1 and l2 have different ids
print(id(l1), id(l2))
# Scenario 3
l3 = l4 = [5, 1, 'a', 3, 4] # l3 and l4 share one id
print(id(l3), id(l4))
l3.extend('ab')
print(l4) # l4 changes too: same object as l3
# Scenario 4
l5 = [1, 2, 3]
l6 = l5
l7 = l5.copy()
print(id(l5), id(l6), id(l7)) # l5 and l6 share an id; l7's differs
l5.append(4)
print(l6) # l6 changed too: same object as l5
print(l7) # l7 unchanged: different object
# Scenario 5
l = [1, 2]
print(id(l))
l.append(3)
print(id(l)) # id unchanged
l = l + [4]
print(id(l)) # id changed
l += [5]
print(id(l)) # id same as above
# Conclusion -----------------------------------------------------
# append does not change the list's id,
# += does not change the list's id either,
# + creates a new list with a new id.
print('-------------------------------------------列表嵌套元组--------------------------------------------------')
l12 = [1, 2, (3, 4, 5), 6]
print(id(l12))
l12[2] = (0, 1)
print(id(l12)) # id unchanged after the assignment
print('-------------------------------------------列表嵌套列表--------------------------------------------------')
# Expected result: [['语文', '数学', '英语'], ['语文', '数学', '英语'], ['语文', '数学', '英语']]
# (one copy of the course list per student)
student = ['张三', '李四', '王五']
kecheng = ['语文', '数学', '英语']
l = []
for s in student:
    temp = []
    for k in kecheng:
        temp.append(k)
    l.append(temp)
print(l)
print('----------------------------------列表生成式(列表推导式)-------------------------------------')
# Build a new list from an existing one ----------------------------------------------------
# Plain loop version.
ll = []
for x in [1, 2, 3]:
    ll.append(x*2)
print(ll)
# Equivalent --- using an anonymous function.
l = [(lambda x: x * 2)(x) for x in [1, 2, 3]]
print(l)
# Equivalent --- a list comprehension.
l = [x*2 for x in [1, 2, 3]] # square brackets
print(l)
# The following are all list comprehensions.
l = [x * 2 for x in [1, 2, 3] if x % 2 != 0] # if only, no else
print(l)
l = [i * 2 if i > 2 else i * 3 for i in [1, 2, 3]] # with if and else
print(l)
l = [x * j for x in [1, 2] for j in [2, 3]] # nested loops
print(l)
l = [x * j for x in [1, 2] for j in [2, 3] if j < 3]
print(l)
print('------------------------列表生成式、生成器------------------------------------------------------')
# List comprehension:
# all elements are materialised immediately, so it uses more memory.
l1 = [x for x in [1, 2, 3]]
print(l1)
l2 = [x for x in range(100000)]
print(l2)
# Generator ------- differs from a comprehension: parentheses are used.
# Note the code below creates a generator object, not a list.
# A generator yields data on demand without storing it all,
# computing each value when requested (costing extra time instead),
# so compared with a comprehension it uses no storage for the data.
l3 = (x for x in range(100000))
print(l3) # l3 is a generator object
# Iterating retrieves the values.
for i in l3:
    print(i)
print('----------------------------------enumerate方法的使用,取索引和元素-------------------------------------')
# Works on strings, lists and tuples.
l = [1, 2, 3, 4]
for index, item in enumerate(l):
    print(index, item)
# Exercise:
# given a list and a target number, find the positions of that number;
# note the list may contain the target multiple times.
# Approach 1
def jjj(l, num):
    """Approach 1: scan positions and collect every index whose element equals num."""
    hits = []
    for position in range(len(l)):
        if l[position] == num:
            hits.append(position)
    return hits
ll = jjj([1, 2, 3, 2, 2, 2, 2], 2)
print(ll)
# Approach 2
def mmm(l, num):
    """Approach 2: collect the indices of num in l using enumerate."""
    return [index for index, item in enumerate(l) if item == num]
lll = mmm([1, 2, 3, 2, 2, 2, 2], 2)
print(lll)
| true |
6983bea9b9c9d891aa54f5493e4fc29f5f53ffa7 | Python | Brad-Kent/CSSE1001_Assignment-1 | /a1.py | UTF-8 | 4,695 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env python3
"""
Assignment 1
CSSE1001/7030
Semester 2, 2018
"""
from a1_support import is_word_english
import a1_support as a1
__author__ = "Brad Kent, s45355194"
def procedual_menu():
    """Run the interactive menu loop: encrypt, decrypt, auto-decrypt or quit.

    Reads a command and, unless the user quits, the text and shift offset,
    then prints the transformed text. Loops until 'q' is entered.
    """
    print("Welcome")
    while True:
        # Display Menu Options
        display_user_options()
        # Get User Menu Option
        user_option = input(">")
        # Check to see if input is a valid command
        if not is_user_option_valid(user_option):
            print("Invalid Command \n")
            continue
        # Execute user specified sub-program
        if user_option == "q":
            break
        # if not 'Quit', execute crypto program
        user_input = get_user_input(user_option)
        text = user_input[0]
        if not is_user_input_valid(user_input[1]):
            # TODO: Check documentation to see what to do
            print("Bit shift range is 1-25")
            continue
        # Safe to convert: is_user_input_valid accepted only digit strings.
        offset = int(user_input[1])
        text_mutated = ""
        if user_option == 'e':
            text_mutated = encrypt(text, offset)
        elif user_option == 'd':
            text_mutated = decrypt(text, offset)
        else:
            # Only 'a' remains after the checks above.
            text_mutated = auto_decrypt(text, offset)
        print(text_mutated)
def display_user_options():
    """Print the main menu of commands to stdout."""
    menu = (
        "Please choose an option [e/d/a/q]:",
        "  e) Encrypt some text",
        "  d) Decrypt some text",
        "  a) Automatically decrypt English text",
        "  q) Quit",
    )
    print("\n".join(menu))
def is_user_option_valid(user_input):
    """Return True when user_input is one of the menu commands e/d/a/q."""
    return user_input in ("e", "d", "a", "q")
def get_user_input(user_option):
    """Prompt for the text to process and the shift offset.

    :param user_option: one of 'e', 'd', 'a' (selects the prompt wording).
    :return: [text, offset] where offset is still the raw input string.
    """
    options = {"e": "text to encrypt", "d": "text to decrypt", "a": "encrypted text"}
    action = options[user_option]
    text = input("Please enter some {}".format(action))
    # This needs to be protected with int wrapping
    offset = input("Please enter a shift offset (1-25):")
    return [text, offset]
def is_user_input_valid(offset):
    """Return True when `offset` is a string holding an integer in 1..25.

    Bug fix: the original rejected digit strings (the isdigit check was
    inverted) and then compared the raw string against ints, which raises
    TypeError on Python 3 for any non-digit input.
    """
    if not offset.isdigit():
        return False
    return 1 <= int(offset) <= 25
def format_text(text, offset):
    """Caesar-shift the upper-case letters of `text` by `offset` positions.

    Characters outside 'A'..'Z' (including lower-case letters) pass through
    unchanged, matching the original behaviour. `offset` may be negative;
    results wrap around the 26-letter alphabet. The single add/subtract of
    26 only covers |offset| <= 25, which the menu validation guarantees.

    Bug fix: removed the stray debug prints ("FUCJ", ">", "<") that the
    original emitted on every character, polluting the program's output.
    """
    # Range: ord('A') -- ord('Z')
    formated_text = ""
    for char in text:
        # If the character is not an upper-case letter, copy it as-is.
        if char < 'A' or char > 'Z':
            formated_text += char
            continue
        offset_char = ord(char) + offset
        # Wrap back into the 'A'..'Z' range.
        if offset_char > ord('Z'):
            offset_char -= 26
        elif offset_char < ord('A'):
            offset_char += 26
        formated_text += chr(offset_char)
    return formated_text
# Assignment: 4 Functions
def main():
    """Program entry point: run the interactive menu."""
    procedual_menu()
def encrypt(text, offset):
    """
    Encrypts by replacing each letter with the letter some fixed number of positions down the alphabet. Returns the encrypted text.
    :param text: The text to be encrypted
    :param offset: Shift text offset amounts of times
    :return: Encrypted Text
    """
    # Delegates to format_text with a positive (forward) shift.
    return format_text(text, offset)
def decrypt(text, offset):
    """
    Decrypts text that was encrypted by the encrypt function above. Returns the decrypted text.
    :param text: Encrypted Text
    :param offset: Letter Offset
    :return: Decrypted Text
    """
    # Delegates to format_text with the shift negated (backward shift).
    return format_text(text, -offset)
def auto_decrypt(text, offset):
    """Stub: automatic decryption is not implemented yet; always returns "auto".

    Intended approach (per the notes below): try every offset in range and
    accept the one whose decryption yields English words, checking each
    whitespace-separated word via is_word_english.
    """
    # Test Text with offset entire range
    # if text >> Offset is in words(), then is valid offset
    ## Many text inputs are multi valued, so\ have to check each <> per white space
    # Split string into List. or could keep state of index, then splice string for each word then push to func:: Lambda in function argument
    return "auto"
##################################################
# !! Do not change (or add to) the code below !! #
#
# This code will run the main function if you use
# Run -> Run Module (F5)
# Because of this, a "stub" definition has been
# supplied for main above so that you won't get a
# NameError when you are writing and testing your
# other functions. When you are ready please
# change the definition of main above.
###################################################
# Script entry point (see the note above about Run Module).
if __name__ == '__main__':
    main()
| true |