blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
6f603c6bfeb168089e47e62c9a64d4b6bd87b29a | Python | moosahmed/log_monitor_debug | /log_parsing/video_log/video_parser.py | UTF-8 | 1,736 | 3.203125 | 3 | [] | no_license | import re
from datetime import datetime, timedelta
import operator
def parser(file):
    """Scan a video log and collect 'start'/'stop' timestamps per video/user.

    Each matching line contributes one datetime; only events from the last
    7 days are kept.  Returns a pair of dicts ``(start, stop)``, each mapping
    video id -> {user id -> [datetime, ...]}.
    """
    event_re = re.compile(r'(.*) *(userid.*) *(start|stop) *(\d{2}/\d{2}/\d{4}-\d{2}:\d{2}:\d{2})')
    starts = {}
    stops = {}
    # Route each event to the dict matching its action keyword.
    buckets = {'start': starts, 'stop': stops}
    for entry in file:
        hit = event_re.search(entry)
        if hit is None:
            continue
        video_id, user_id, action, stamp = hit.group(1), hit.group(2), hit.group(3), hit.group(4)
        when = datetime.strptime(stamp, '%m/%d/%Y-%H:%M:%S')  # Read time as datetime_obj
        # Use only the dates within the last 7 days
        if datetime.today() - when <= timedelta(7):
            buckets[action].setdefault(video_id, {}).setdefault(user_id, []).append(when)
    return starts, stops
def calc_duration(start_dic, stop_dic):
    """Accumulate total watch time per video and report the top 3.

    `start_dic`/`stop_dic` have the shape produced by `parser`:
    {video_id: {user_id: [datetime, ...]}}.  Prints and returns the dict of
    the three videos with the largest total duration.
    """
    duration = {}
    for video_id in start_dic:
        for user_id in start_dic[video_id]:
            # .get() guards against sessions that were started but never
            # stopped -- the original indexed stop_dic directly and raised
            # KeyError for such users/videos.
            stop_times = stop_dic.get(video_id, {}).get(user_id, [])
            # Zip start and stop times for the same user for a particular
            # video; zip() silently drops unmatched events.
            for start_time, stop_time in zip(start_dic[video_id][user_id], stop_times):
                td = stop_time - start_time
                # Add all durations for the same video
                duration[video_id] = duration.get(video_id, timedelta()) + td
    # Top 3 by accumulated watch time (the old comment said "top 2" while the
    # code sliced [:3]; the slice is what was intended).
    top3 = dict(sorted(duration.items(), key=operator.itemgetter(1), reverse=True)[:3])
    print(top3)
    return top3
if __name__ == '__main__':
    # Context manager guarantees the log file is closed even if parsing
    # raises (the original only closed it on the happy path).
    with open('./video.log', 'r') as file:
        start, stop = parser(file)
    calc_duration(start, stop)
| true |
91292538e9aabdebac00bed5430fb52acbb478f6 | Python | snowdj/course | /lectures/basics/object_oriented_programming/modules/_checks.py | UTF-8 | 3,235 | 2.65625 | 3 | [
"MIT"
] | permissive | """ This module contains all the checks related to economy and agent class.
"""
# standard library
import numpy as np
def _check_prices(p1, p2):
    """Shared sanity checks for a price pair (p1, p2): float, positive, finite."""
    assert isinstance(p1, float) and isinstance(p2, float)
    # np.asarray is required: `[p1, p2] > 0` compares a list with an int and
    # raises TypeError on Python 3 (the original code did exactly that).
    assert np.all(np.asarray([p1, p2]) > 0)
    assert np.all(np.isfinite([p1, p2]))


def _check_positive(seq):
    """Assert every element of a sequence/array is strictly positive."""
    assert np.all(np.asarray(seq) > 0)


def integrity_checks(str_, *args):
    """ Check integrity of interface and computed results for the methods of
    the agent and economy class.

    `str_` names the method being checked; `args` are that method's inputs
    or outputs.  Raises AssertionError on any violation or unknown `str_`.
    """
    ''' AgentCls'''
    if str_ == 'set_type':
        type_, = args
        assert type_ in ['random', 'rational']

    elif str_ == 'set_endowment':
        y, = args
        assert isinstance(y, float)
        assert y >= 0

    elif str_ == 'set_preference_parameter':
        alpha, = args
        assert isinstance(alpha, float)
        assert 0.0 < alpha < 1.0

    elif str_ == 'choose':
        p1, p2 = args
        _check_prices(p1, p2)

    elif str_ == 'get_individual_demand':
        rslt, = args
        assert isinstance(rslt, list)
        _check_positive(rslt)

    elif str_ == 'spending':
        x, p1, p2 = args
        _check_positive(x)
        _check_prices(p1, p2)

    # The random and rational variants shared identical (duplicated) checks;
    # they are merged here for consistency.
    elif str_ in ('_choose_random_in', '_choose_rational_in'):
        y, p1, p2 = args
        assert isinstance(y, float)
        assert y > 0
        _check_prices(p1, p2)

    elif str_ in ('_choose_random_out', '_choose_rational_out'):
        x, = args
        assert isinstance(x, list)
        _check_positive(x)

    elif str_ == '_criterion':
        x, = args
        assert isinstance(x, np.ndarray)
        assert np.all(np.isfinite(x))
        assert x.ndim == 1

    elif str_ == '_constraint':
        x, p1, p2 = args
        assert np.all(np.isfinite(x))
        _check_prices(p1, p2)

    elif str_ == '__init__':
        agent_objs, = args
        assert isinstance(agent_objs, list)

    elif str_ == 'get_aggregate_demand_in':
        p1, p2 = args
        _check_prices(p1, p2)

    elif str_ == 'get_aggregate_demand_out':
        rslt, = args
        assert isinstance(rslt, dict)
        # Check the demand *values*; `dict > 0` raised TypeError on Python 3.
        _check_positive(list(rslt.values()))

    else:
        raise AssertionError
| true |
d39ac4812233cb04f89f3a8f49204a954b04302d | Python | JayRich/Server-with-HTTP-Protocols | /server.py | UTF-8 | 3,590 | 3.34375 | 3 | [] | no_license | """
Jason Richardson
"""
import socket

host = '127.0.0.1'  # address of local host
port = 8080  # port the server will be accepting requests

# initialize socket
make_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
make_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
make_socket.bind((host, port))  # bind the host to port
make_socket.listen(1)  # tell the server to start listening for requests
while True:
    conn, addr = make_socket.accept()  # if connection established accept
    request = conn.recv(1024).decode('utf-8')  # request stored as a string for parsing
    parse_request = request.split(' ')  # parse request using spaces in the string
    if len(parse_request) < 2:  # malformed/empty request: nothing to serve
        conn.close()
        continue
    requesting_file = parse_request[1]  # the second token is the requested path
    myfile = requesting_file.lstrip('/')  # strip the leading slash to get the file name
    # if files are found handle the 200 response and send the data
    if myfile in ('index.html', 'sunset.jpg', 'sunrise.jpg'):
        # `with` guarantees the file is closed even if read() raises
        with open(myfile, 'rb') as file:
            html_data = file.read()
        # Send a real MIME type: the original emitted a malformed
        # "Content type: html/jpg" header that clients ignore.
        if myfile.endswith(".html"):
            file_type = 'text/html'
        else:
            file_type = 'image/jpeg'
        server_response = ('HTTP/1.1 200 OK\r\nContent-Type: ' + file_type + '\r\n\r\n').encode()
        server_response += html_data  # append the file data after the header block
        conn.send(server_response)  # send the response message to the client
    # old url: handle the 301 status code response and send the new updated url
    elif myfile == 'oldindex.html':
        # Location must be a response *header* (before the blank line); the
        # original placed it in the body, so clients never followed the
        # redirect.  It also pointed at port 8000 while serving on 8080.
        server_response = ('HTTP/1.0 301 Moved Permanently\r\n'
                           'Location: http://localhost:8080/index.html\r\n\r\n'
                           'Moved Permanently')
        conn.sendall(server_response.encode())
    # if the file isn't found and the url is not old then handle the 404 status code
    else:
        server_response = 'HTTP/1.0 404 Not Found\r\n\r\n404\n\nRequested file not found on this server'
        conn.sendall(server_response.encode())
    conn.close()  # after a request has been handled close the connection
# if loop exits stop the server
make_socket.close()
67d1663516d4baa15e6fbfd478bce11a6225236d | Python | aleksandrawy/flight_optimization | /dijkstra.py | UTF-8 | 3,436 | 3.59375 | 4 | [] | no_license | import json
import sys
class Dijkstra():
    """Shortest-path search over a {city: {neighbor: distance}} adjacency map."""

    # Set to True once a search has finished (kept for API compatibility).
    done = False

    def unroll_shortest_path(self, current, optimal_parent_map, path=()):
        """Walk parent links back from `current` to the start node.

        Returns the path as a tuple of city names, start first.
        """
        if current is None:  # Reached the start node
            return path
        return self.unroll_shortest_path(optimal_parent_map[current], optimal_parent_map, (current,) + path)

    def dijkstra(self, start_city, end_city, city_data, verbose=False):
        """Return the shortest path from start_city to end_city as a tuple,
        or None when end_city is unreachable.

        Bug fix: the original stopped as soon as end_city appeared as *any*
        neighbor of the current node, which is not guaranteed to be the
        shortest route.  The search now terminates only when end_city is
        popped from the priority queue -- the correct Dijkstra stopping rule.
        It also replaces the sorted-list "queue" with a heap.
        """
        import heapq  # local import keeps this fix self-contained

        if start_city == end_city:
            return (start_city,)

        best_dist = {start_city: 0}
        optimal_parent = {start_city: None}
        frontier = [(0, start_city)]
        visited = set()
        destination_reached = False

        while frontier:
            distance_to_current, current = heapq.heappop(frontier)
            if current in visited:
                continue  # stale queue entry from an earlier, worse distance
            visited.add(current)
            if verbose:
                print("CURRENT: {}, DISTANCE: {:,} meters".format(current, distance_to_current))
            if current == end_city:
                destination_reached = True
                break
            for neighbor, edge_len in city_data.get(current, {}).items():
                if neighbor in visited:
                    continue
                candidate = distance_to_current + edge_len
                if candidate < best_dist.get(neighbor, float('inf')):
                    if verbose:
                        print("\t\t\tNEW OPTIMAL PARENT ({}) TO {}".format(current, neighbor))
                    best_dist[neighbor] = candidate
                    optimal_parent[neighbor] = current
                    heapq.heappush(frontier, (candidate, neighbor))

        self.done = True
        if destination_reached:
            return self.unroll_shortest_path(end_city, optimal_parent)
        return None

    def get_city_data(self):
        """Load the adjacency mapping from graf.json in the working directory."""
        with open("graf.json", "r") as f:
            city_data = json.loads(f.read())
        return city_data
# if __name__ == '__main__':
# dij = Dijkstra()
# city_data = dij.get_city_data()
#
# # try:
# # city_from = sys.argv[1]
# # city_to = sys.argv[2]
# # except IndexError:
# # print("Usage:", sys.argv[0], "\"from city\" \"to city>\"")
# # print("City choices:")
# # for city in city_data:
# # print(" -", city)
# # sys.exit(1)
#
# city_from = 'Gdańsk Lech Wałęsa Airport'
# city_to = 'John Paul II International Airport Kraków-Balice Airport'
# print(dij.dijkstra(city_from, city_to, city_data, False))
| true |
c64ad14ef6ebead754cf5af5f86a096131af00bd | Python | almaan/SMC2019-course | /scripts/utils.py | UTF-8 | 12,595 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python3
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
from scipy.special import loggamma
class InverseGamma:
    """Inverse-gamma distribution with shape `a` and scale `b`.

    pdf(x) = b**a / Gamma(a) * x**(-a - 1) * exp(-b / x),  x > 0
    """

    def __init__(self, a, b):
        self.a = a
        self.b = b
        # log normalising constant: a*log(b) - log Gamma(a)
        self.log_z = a*np.log(self.b) - loggamma(self.a)

    def rvs(self, size=None):
        """Draw samples (scalar when size is None, else an array).

        If X ~ Gamma(a, scale=1/b) then 1/X ~ InvGamma(a, b).  The original
        left this as a stub that returned None.
        """
        return 1.0 / np.random.gamma(self.a, 1.0 / self.b, size=size)

    def logpdf(self, x):
        """Log-density at x (x > 0)."""
        return self.log_z + np.log(x) * (-self.a - 1) - self.b/x

    def pdf(self, x):
        """Density at x."""
        return np.exp(self.logpdf(x))
class BSPF:
    """Bootstrap particle filter (sequential importance resampling).

    The constructor arguments are distribution factories: ``xt_xtm1_dist(x_prev)``
    and ``yt_xt_dist(x)`` must return frozen distributions exposing
    ``.rvs()``/``.logpdf()``; ``x0_dist()`` returns the initial-state
    distribution.
    """

    def __init__(self,
                 xt_xtm1_dist,
                 yt_xt_dist,
                 x0_dist,
                 resampling_method = 'multinomial',
                 ):
        # NOTE(review): 10e20 is 1e21, which makes the renormalisation branch
        # in multinomial_resampling unreachable; a *small* tolerance
        # (e.g. 1e-8) was presumably intended -- confirm before changing.
        self.eps = 10e20
        self.xt_xtm1_dist = xt_xtm1_dist
        self.yt_xt_dist = yt_xt_dist
        self.x0_dist = x0_dist
        # Resolve e.g. 'multinomial' -> self.multinomial_resampling.
        # NOTE(review): eval() on a constructor argument; getattr() would be
        # the safer spelling.
        self.resample = eval(''.join(['self.',
                                      resampling_method,
                                      '_resampling']
                                     ))

    def multinomial_resampling(self,ws,n):
        """Draw n ancestor indices with replacement, index i with prob ws[i]."""
        if np.abs(ws.sum() - 1) > self.eps:
            print('Normalizing Weights')
            ws = expNormalize(ws,inlog = True)
        nxs = np.random.choice(np.arange(n),
                               p = ws,
                               size = n,
                               replace = True)
        return nxs.astype(int)

    def run(self,ys,N):
        """Run the filter on observations ys with N particles.

        Returns a dict with the particle matrix 'xs' (shape (N, T+1)),
        normalised 'weights', the 'loglikelihood' estimate, and the ancestor
        index matrix 'aidxs'.
        """
        ll = 0.0
        T = ys.shape[0]
        wss = np.zeros((N,T+1))
        xts = np.zeros((N,T+1))
        aidxs = np.zeros((N,T+1)).astype(int)
        xt = self.x0_dist().rvs(N)
        xts[:,0] = xt
        wss[:,0] = np.ones(N) / N
        for t in range(1,T+1):
            # Resample ancestors according to the previous (normalised) weights
            aidx = self.resample(wss[:,t-1],N)
            aidxs[:,1:(t-1)] = aidxs[aidx,1:(t-1)]
            aidxs[:,t] = aidx
            # Propagate each surviving particle through the transition density
            xts[:,t] = self.xt_xtm1_dist(xts[aidx,t-1]).rvs()
            # Weight by the observation log-density (log-weights at this point)
            wss[:,t] = self.yt_xt_dist(xts[:,t]).logpdf(ys[t-1])
            # Compute LL: accumulate log(mean of unnormalised weights)
            ll += np.log(np.exp(wss[:,t]).sum()) - np.log(N)
            # Normalize Weights (softmax of the log-weights)
            wss[:,t] = expNormalize(wss[:,t],inlog = True)
        res = {'xs':xts,
               'weights':wss,
               'loglikelihood':ll,
               'aidxs':aidxs.astype(int),
               }
        return res
def to_long_format(xs,
                   aidx,
                   T = None):
    """Flatten a particle genealogy into plot-ready coordinate/edge arrays.

    `xs` is the (time, particle) state matrix and `aidx` the matching
    ancestor-index matrix.  Returns a dict with flattened states ('xst'),
    their time stamps ('time'), and two edge lists (index pairs into those
    flat arrays): 'dead_edges' for extinct lineages and 'alive_edges' for
    lineages that survive to the final generation.

    NOTE(review): the `aidx[t+1,:]` lookup inside the loop indexes one row
    past `t`; with the default T == xs.shape[0] this assumes aidx has at
    least T+1 rows -- confirm against the caller.
    """
    if T is None:
        T = xs.shape[0]
    N = xs.shape[1]
    # Trace which initial particles still have descendants at the last step.
    survivors = aidx[0,:]
    for t in range(1,xs.shape[0]):
        survivors = survivors[aidx[t,:]]
    edges = []
    sedges = []
    coords = []
    origin = aidx[0,:]
    for t in range(T):
        # origin[k]: the initial ancestor of particle k at time t.
        origin = origin[aidx[t,:]]
        opos = len(coords)  # offset of this generation in the flat coord list
        for k,n in enumerate(aidx[t,:]):
            coords.append((t,xs[t,k]))
            if len(coords) > N:  # skip generation 0, which has no parents
                source = opos - N + int(n)  # parent's flat index
                target = opos + k           # this particle's flat index
                # Edge survives iff its root survives and k is reselected next step.
                if origin[k] in survivors and k in aidx[t+1,:]:
                    sedges.append((source,target))
                else:
                    edges.append((source,target))
    edges = np.array(edges).astype(int)
    sedges = np.array(sedges).astype(int)
    coords = np.array(coords)
    long_pos = coords[:,1].flatten()
    long_time = coords[:,0].flatten()
    return {'xst':long_pos,
            'time':long_time,
            'dead_edges':edges,
            'alive_edges':sedges}
def visualize_genealogy(xst,
                        time,
                        dead_edges,
                        alive_edges,
                        window,
                        xs_true = None,
                        ):
    """Plot a particle genealogy (as produced by `to_long_format`).

    Draws three panels -- the full run plus zooms on the first and last
    `window` time units.  Dead lineages are black, surviving ones red;
    `xs_true`, when given, is overlaid as a dashed blue reference.
    Returns the matplotlib (fig, ax) pair.
    """
    fig, ax = plt.subplots(1,3, figsize = (15,6))
    tmax = time.max()
    delta = 0.5  # padding around the x-limits of each panel
    tlims = [[-delta*5 ,tmax + delta*5] ,
             [-delta,window + delta],
             [tmax-window-delta,tmax + delta]
             ]
    titles = ['Full','Initial','End']
    for k in range(3):
        ax[k].set_xlim(tlims[k])
        ax[k].set_title(titles[k])
        # Extinct lineages (black) behind surviving ones (red).
        ax[k].plot(time[dead_edges].T,
                   xst[dead_edges].T,
                   '-',
                   color = 'black',
                   alpha = 0.2,
                   )
        ax[k].plot(time[alive_edges].T,
                   xst[alive_edges].T,
                   '-',
                   color = 'red',
                   alpha =0.2,
                   )
        # Shuffle z-order so neither colour systematically hides the other.
        nitems = dead_edges.shape[0] + alive_edges.shape[0]
        order = np.arange(nitems)
        np.random.shuffle(order)
        for l,p in zip(ax[k].get_lines(),order):
            l.set_zorder(p)
        # Individual particle positions as faint markers.
        ax[k].plot(time,
                   xst,
                   'o',
                   markerfacecolor = None,
                   markeredgecolor = 'black',
                   alpha = 0.1,
                   markersize = 2,
                   )
        for pos in ['top','right']:
            ax[k].spines[pos].set_visible(False)
        ax[k].set_xlabel('Time')
        ax[k].set_ylabel(r'State $x_t$')
        tmin = time.min().astype(int)
        tmax = time.max().astype(int)
        if xs_true is not None:
            ax[k].plot(np.arange(tmin,tmax+1),
                       xs_true[tmin:tmax+1],
                       '--',
                       color = 'blue',
                       alpha = 0.2,
                       label = 'True',
                       )
    fig.tight_layout()
    return fig, ax
class APF:
    """Fully adapted auxiliary particle filter.

    Constructor arguments are distribution factories:
      xt_yt_xtm1_dist(y_t, x_prev) -- proposal p(x_t | y_t, x_{t-1})
      yt_xtm1_dist(x_prev)         -- predictive p(y_t | x_{t-1})
      yt_xt_dist(x_t)              -- observation p(y_t | x_t)
      x0_dist                      -- initial-state distribution
    each returning a frozen distribution with .rvs()/.logpdf().
    """

    def __init__(self,
                 xt_yt_xtm1_dist,
                 yt_xtm1_dist,
                 yt_xt_dist,
                 x0_dist,
                 ):
        self.yt_xtm1_dist = yt_xtm1_dist
        self.xt_yt_xtm1_dist = xt_yt_xtm1_dist
        self.yt_xt_dist = yt_xt_dist
        self.x0 = x0_dist

    def _omega(self, xt, yt, xtm1):
        """Placeholder incremental weights (uniform)."""
        return np.ones(xt.shape[0]) / xt.shape[0]

    def _nu_yt_xtm1(self, yt, xtm1, islog=False):
        """Adjustment multipliers nu_{t-1} = p(y_t | x_{t-1})."""
        nu_tm1 = self.yt_xtm1_dist(xtm1).logpdf(yt)
        if not islog:
            nu_tm1 = np.exp(nu_tm1)
        return nu_tm1

    def systematic(self, ww):
        """Systematic resampling: one uniform draw, N evenly spaced pointers."""
        N = len(ww)
        cs = np.cumsum(ww)
        a = np.zeros(N, 'i')
        u = (np.random.random() + np.arange(N)) / N
        r, s = 0, 0
        while r < N:
            if u[r] < cs[s]:
                a[r] = s
                r += 1
            else:
                s += 1
        return a

    def multinomial(self, ww):
        """Multinomial resampling via inverse-CDF lookup."""
        cs = np.cumsum(ww)
        cs[-1] = 1.  # guard against cumulative rounding error
        u = np.random.random(len(ww))
        a = np.searchsorted(cs, u)
        return a

    def _set_resampler(self, name):
        """Bind self.resample.

        `name` may be 'multinomial', 'systematic', None (identity: no
        resampling) or anything else (falls back to multinomial).  Uses
        getattr() instead of the original eval(); 'stratified' used to be
        advertised but had no implementation and crashed with AttributeError
        when selected.
        """
        implemented = ['multinomial', 'systematic']
        if name in implemented:
            self.resample = getattr(self, name)
            print('Using {} resampling'.format(name))
        elif name is None:
            self.resample = lambda w: np.arange(w.shape[0])
        else:
            self.resample = self.multinomial
            print('Using multinomial resampling')

    def run(self, N, ys,
            resampler='multinomial',
            adaptive_resampling=False,
            ess_thrs=50):
        """Run the filter with N particles over observations ys.

        With adaptive_resampling=True, resampling only happens when the
        effective sample size drops below ess_thrs; otherwise every step.
        Returns a dict with the particle matrix 'xts' (T, N), normalised
        'weights', ancestor indices 'aidxs' and the ESS trace 'ess'.
        """
        self._set_resampler(resampler)
        if not adaptive_resampling:
            ess_thrs = np.inf  # i.e. resample at every step

        T = ys.shape[0]
        xtmat = np.zeros((T, N))
        wss = np.zeros((T, N))
        wss[0, :] = np.ones(N) / N
        xtmat[0, :] = self.x0.rvs(N)
        # np.int was removed in NumPy 1.24; plain int is the supported spelling.
        aidxs = np.zeros((T, N), dtype=int)
        aidxs[0, :] = np.arange(N)
        ess_vec = np.zeros(T)
        ess = N
        ess_vec[0] = ess

        for t in range(1, T):
            # Resample ancestors proportionally to w_{t-1} * p(y_t | x_{t-1}).
            if ess < ess_thrs:
                nus = self._nu_yt_xtm1(ys[t], xtmat[t-1, :], islog=True)
                nus = expNormalize(nus + np.log(wss[t-1, :]), inlog=True)
                aidx = self.resample(nus)
                wss[t-1, :] = np.ones(N) / N
            else:
                aidx = np.arange(N)
            aidxs[t, :] = aidx
            # Propagate through the fully adapted proposal.
            xtmat[t, :] = self.xt_yt_xtm1_dist(ys[t], xtmat[t-1, aidx]).rvs()
            # Weight.
            if not adaptive_resampling:
                wss[t, :] = 1/N
            else:
                wss[t, :] = self.xt_yt_xtm1_dist(ys[t], xtmat[t-1, aidx]).logpdf(xtmat[t, :])
                wss[t, :] = expNormalize(wss[t, :] + np.log(wss[t-1, :]), inlog=True)
            # Effective sample size of the current weight vector.
            ess = 1.0 / (wss[t, :] ** 2).sum()
            ess_vec[t] = ess

        return {'xts': xtmat,
                'weights': wss,
                'aidxs': aidxs,
                'ess': ess_vec,
                }
def expNormalize(ws, inlog = False):
    """Exponentiate and normalise a weight vector, numerically stably.

    When `inlog` is False the weights are first mapped into log space; the
    maximum log-weight is subtracted before exponentiating so large values
    cannot overflow.  Returns weights summing to one.
    """
    log_ws = ws if inlog else np.log(ws)
    shifted = np.exp(log_ws - log_ws.max())
    return shifted / shifted.sum()
class KalmanFilter:
    """Scalar Kalman filter for the linear-Gaussian model

        x_t = A x_{t-1} + v_t,  v_t ~ N(0, Q)
        y_t = C x_t + e_t,      e_t ~ N(0, R)

    with prior mean `x0` and prior variance `P0`.  All quantities are scalars.
    """

    def __init__(self,
                 A,
                 C,
                 Q,
                 R,
                 P0,
                 x0,
                 ):
        self.A = A
        self.C = C
        self.Q = Q
        self.R = R
        self.P0 = P0
        self.x0 = x0

    def _getKt(self, Pt_tm1):
        """Kalman gain for the predicted variance P_{t|t-1}."""
        return Pt_tm1 * self.C / (self.C * Pt_tm1 * self.C + self.R)

    def _getPt_tm1(self, Ptm1_tm1):
        """Predicted (prior) variance P_{t|t-1}."""
        return self.A * Ptm1_tm1 * self.A + self.Q

    def _getPt_t(self, Pt_tm1, Kt):
        """Updated (posterior) variance P_{t|t}."""
        return Pt_tm1 - Kt * self.C * Pt_tm1

    def _getxt_t(self, xtm1_tm1, yt, Kt):
        """Updated state estimate x_{t|t} from the innovation y_t - C A x."""
        return self.A * xtm1_tm1 + Kt * (yt - self.C * self.A * xtm1_tm1)

    def estimate_traj(self, ys, T=None):
        """Filter the observation sequence `ys`.

        Returns (means, variances) as NumPy arrays of length T
        (defaulting to len(ys)); entry 0 is the prior (x0, P0).
        """
        steps = T if T else ys.shape[0]
        mean, var = self.x0, self.P0
        means = [mean]
        variances = [var]
        for t in range(1, steps):
            predicted_var = self._getPt_tm1(var)
            gain = self._getKt(predicted_var)
            var = self._getPt_t(predicted_var, gain)
            mean = self._getxt_t(mean, ys[t], gain)
            means.append(mean)
            variances.append(var)
        return np.array(means), np.array(variances)
class GaussianStateSpaceModel:
    """Linear state-space model simulator:

        x_t = a * x_{t-1} + v_t   (v_t drawn from Vt)
        y_t = b * x_t     + e_t   (e_t drawn from Et)

    Vt, Et and X0 must expose a no-argument .rvs() method.
    """

    def __init__(self,
                 xtm1_coef,
                 yt_coef,
                 Vt,
                 Et,
                 X0,
                 ):
        self.a = xtm1_coef
        self.b = yt_coef
        self.Vt = Vt
        self.Et = Et
        self.X0 = X0

    def xt_xtm1(self, xtm1):
        """One state transition: a*x_{t-1} plus process noise."""
        return self.a*xtm1 + self.Vt.rvs()

    def yt_xt(self, xt):
        """One observation: b*x_t plus measurement noise."""
        return self.b*xt + self.Et.rvs()

    def generate_trajectory(self, T):
        """Simulate T steps; returns (ts, xs, ys) as NumPy arrays.

        Bug fix: the original ended with `for it in [xs, ys, ts]:
        it = np.array(it)`, which only rebinds the loop variable -- the
        lists were returned unconverted.  The conversion is now applied to
        the returned values.
        """
        ts, xs, ys = [], [], []
        xt = self.X0.rvs()
        yt = self.yt_xt(xt)
        xs.append(xt)
        ys.append(yt)
        ts.append(0)
        for t in range(1, T):
            xt = self.xt_xtm1(xt)
            yt = self.yt_xt(xt)
            xs.append(xt)
            ys.append(yt)
            ts.append(t)
        return np.array(ts), np.array(xs), np.array(ys)
class Cauchy:
    """Zero-centred Cauchy distribution with scale `gamma`.

    With unscaled=True the 1/pi normalising constant is dropped (useful when
    only density ratios matter).
    """

    def __init__(self,
                 gamma,
                 unscaled = False,
                 ):
        self.gamma = gamma
        self.Z = 1.0 if unscaled else np.pi

    def rvs(self, N):
        """Draw N samples by inverting the CDF of uniform draws."""
        u = np.random.uniform(0, 1, size=N)
        return self.gamma * np.tan(np.pi * (u - 0.5))

    def logpdf(self, x):
        """Log-density: log[gamma / (Z * (gamma^2 + x^2))]."""
        return np.log(self.gamma) - np.log(self.Z) - np.log(self.gamma ** 2 + x ** 2)

    def pdf(self, x):
        """Density at x."""
        return np.exp(self.logpdf(x))
class Normal:
    """Normal distribution N(mu, std^2).

    With unscaled=True the 1/(sqrt(2 pi) std) normalising constant is
    dropped (useful when only density ratios matter).
    """

    def __init__(self,
                 mu,
                 std,
                 unscaled = False):
        self.mu = mu
        self.std = std
        if unscaled:
            self.Z = 1.0
        else:
            self.Z = np.sqrt(2*np.pi)*self.std

    def rvs(self, N):
        """Draw N samples."""
        return np.random.normal(self.mu, self.std, size=N)

    def logpdf(self, x):
        """Log-density at x."""
        return -(x - self.mu)**2 / (2*self.std**2) - np.log(self.Z)

    def pdf(self, x):
        """Density at x.

        Bug fix: the original returned ``np.exp(self.logpdf)`` -- it
        exponentiated the bound *method object* (TypeError) instead of
        calling it with x.
        """
        return np.exp(self.logpdf(x))
class ImportanceSampler:
    """Importance sampling: draw from `proposal_dist`, reweight towards
    `target_dist`.

    Both distributions must expose rvs(N) and logpdf(x).
    """

    def __init__(self,
                 target_dist,
                 proposal_dist,
                 ):
        self.target_dist = target_dist
        self.proposal_dist = proposal_dist

    def sample(self, N):
        """Draw N samples from the proposal distribution."""
        return self.proposal_dist.rvs(N)

    def compute_weights(self, x, inlog=False):
        """Importance weights w(x) = p(x) / q(x) (log-weights when inlog=True)."""
        log_w = self.target_dist.logpdf(x) - self.proposal_dist.logpdf(x)
        return log_w if inlog else np.exp(log_w)

    def normalize_weights(self,):
        # Not implemented in the original either; kept as a stub.
        pass
| true |
98ac4ef1ccf66dd23346545684a86e19b5d6eab9 | Python | larsendt/xhab-spot | /ros/talker.py | UTF-8 | 563 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
def talker():
    """Read '0'/'1' from stdin and publish each value on the ROS 'chatter' topic.

    Loops until ROS shuts down.  Any other input is reported and the user is
    re-prompted (the ValueError raised below is caught by the local handler).
    Python 2 / rospy code -- uses raw_input and print statements.
    """
    pub = rospy.Publisher("chatter", String)
    rospy.init_node("talker")
    while not rospy.is_shutdown():
        try:
            value = raw_input("value (0,1)> ")
            if value != "0" and value != "1":
                print "bad value '%s'" % value
                raise ValueError("Input must be 0 or 1.")
            rospy.loginfo(value)
            pub.publish(String(value))
            print "wrote", value
        except ValueError:
            print "input 0 or 1"
if __name__ == "__main__":
    try:
        talker()
    except rospy.ROSInterruptException:
        # Node shutdown / Ctrl-C during talker() is expected; exit quietly.
        pass
| true |
9db4e30fd197f454bb5764e30a723a4f0816a8c3 | Python | shunsunsun/AgentVillage | /World.py | UTF-8 | 6,223 | 2.90625 | 3 | [] | no_license | from Pencil import Pencil
import pygame
from Timer import Timer
from Vector2 import Vector2
import matplotlib.pyplot as plt
class World:
    """Game world: owns all entities, the mini-map, the clock, and the
    matplotlib "state history" window.

    NOTE(review): the world dimensions 9600x5400 are hard-coded in several
    places below even though WHOLE_MAP_SIZE is passed in -- presumably they
    should agree; confirm before unifying.
    """

    def __init__(self, world_bg, WIDTH_HEIGHT, image_class, WHOLE_MAP_SIZE):
        # background surface blitted behind everything
        self.world_bg = world_bg
        # id -> entity mapping; ids are handed out sequentially by add()
        self.entity_group = {}
        self.entity_id = 0
        # current window size [w, h]
        self.WIDTH_HEIGHT = WIDTH_HEIGHT
        # mini-map size in pixels
        self.sub_map_width_height = [400, 270]
        self.sub_map_surface = self.set_sub_map()
        # viewport rectangle drawn on the mini-map (updated in process())
        self.rect_in_sub_map_width_height = []
        self.rect_in_sub_map_pos = []
        self.image_class = image_class
        self.WHOLE_MAP_SIZE = WHOLE_MAP_SIZE
        self.timer = Timer()
        # resource/population time series for the state-history plot
        self.wood_history = []
        self.mine_history = []
        self.population_history = []
        self.food_history = []
        # refresh the plot window every N render() calls
        self.update_state_window_frequency = 100
        self.update_state_window_step = 0
        self.set_plt()

    def set_plt(self):
        """Configure matplotlib (interactive mode, CJK font, window title)."""
        plt.ion()
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        plt.rcParams['lines.linewidth'] = 1
        plt.style.use("ggplot")
        plt.rcParams['figure.figsize'] = (5, 5)
        fig = plt.gcf()
        # NOTE(review): canvas.set_window_title was deprecated/removed in
        # newer Matplotlib; fig.canvas.manager.set_window_title is the
        # modern spelling -- confirm the pinned Matplotlib version.
        fig.canvas.set_window_title("Stone Age State History")

    def update_state_window(self):
        """Redraw the resource/population history plot from the stored series."""
        plt.clf()
        plt.suptitle("World State")
        gragh = plt.subplot(1, 1, 1)
        gragh.set_xlabel("Time Elapsed(ticks)")
        gragh.set_ylabel("Values")
        gragh.plot(self.food_history, label="Food", linestyle='--', color='orange')
        gragh.plot(self.wood_history, label="Wood", linestyle='--', color='purple')
        gragh.plot(self.mine_history, label="Mine", linestyle='--', color='c')
        gragh.plot(self.population_history, label="Population", color='r')
        self.update_state_window_step = 0
        plt.legend(loc='upper left')
        plt.pause(0.001)

    def set_sub_map(self):
        """Create the semi-transparent grey surface used as the mini-map."""
        sub_map_surface = pygame.Surface(self.sub_map_width_height)
        sub_map_surface.fill(color=(100, 100, 100))
        sub_map_surface.set_alpha(100)
        return sub_map_surface

    def add(self, entity):
        """Register an entity, assigning it the next sequential id."""
        self.entity_group[self.entity_id] = entity
        entity.id = self.entity_id
        self.entity_id += 1

    def remove(self, entity_id):
        """Unregister the entity with the given id (KeyError if absent)."""
        del self.entity_group[entity_id]

    def render(self, screen, start_draw_pos):
        """Draw the world: background, entities (y-sorted), mini-map, HUD text.

        Also feeds the state-history plot every
        `update_state_window_frequency` frames.
        """
        screen.blit(self.world_bg, start_draw_pos)
        # Draw Entity && sort the entity by their y position
        for tuple_element in sorted(self.entity_group.items(), key=lambda item: item[1].location.get_xy()[1]):
            entity = tuple_element[1]
            entity.render(screen, start_draw_pos)
        # Draw Sub Map
        screen.blit(self.sub_map_surface, (0, self.WIDTH_HEIGHT[1] - self.sub_map_width_height[1]))
        Pencil.draw_rect(screen, [*self.rect_in_sub_map_pos, *self.rect_in_sub_map_width_height], (200, 200, 200), 1)
        # Draw entity on Sub Map (world coords scaled down to mini-map pixels)
        for entity in self.entity_group.values():
            if entity.color:
                x, y = entity.location.get_xy()
                entity_in_sub_map_rect = [int(x / 9600 * self.sub_map_width_height[0]),
                                          self.WIDTH_HEIGHT[1] - self.sub_map_width_height[1] + int(
                                              y / 5400 * self.sub_map_width_height[1]), 3, 3]
                Pencil.draw_rect(screen, entity_in_sub_map_rect, entity.color)
        # Write State of Main Tower.
        main_tower = self.get_nearest_entity(Vector2(0, 0), "main_tower")
        Pencil.write_text(screen, "wood:%d" % main_tower.wood, [0, 0], 15, color=(255, 255, 255))
        Pencil.write_text(screen, "food:%d" % main_tower.food, [0, 15], 15, color=(255, 255, 255))
        Pencil.write_text(screen, "mine:%d" % main_tower.mine, [0, 30], 15, color=(255, 255, 255))
        Pencil.write_text(screen, "farmland:%d" % main_tower.get_building_entity_number("planting"), [0, 45], 15,
                          color=(255, 255, 255))
        Pencil.write_text(screen, "population:%d" % len(main_tower.people_list), [0, 60], 15,
                          color=(255, 255, 255))
        Pencil.write_text(screen, "[%02d:%02d:%02d]" % (self.timer.get_hour(), self.timer.get_minute(), self.timer.get_second()), [0, 75], 15, color=(255, 255, 255))
        # Update the World State Window
        if self.update_state_window_step % self.update_state_window_frequency == 0:
            self.food_history.append(main_tower.food)
            self.wood_history.append(main_tower.wood)
            self.mine_history.append(main_tower.mine)
            self.population_history.append(len(main_tower.people_list))
            self.update_state_window()
        self.update_state_window_step += 1

    def process(self, start_draw_pos, WIDTH_HEIGHT, time_passed):
        """Advance simulation state: mini-map viewport, entities, and the clock."""
        # Update the rect pos in sub map
        self.rect_in_sub_map_width_height = [self.WIDTH_HEIGHT[0] / 9600 * self.sub_map_width_height[0],
                                             self.WIDTH_HEIGHT[1] / 5400 * self.sub_map_width_height[1]]
        self.rect_in_sub_map_pos = [int(abs(start_draw_pos[0]) / 9600 * self.sub_map_width_height[0]),
                                    self.WIDTH_HEIGHT[1] - self.sub_map_width_height[1] + int(
                                        abs(start_draw_pos[1]) / 5400 * self.sub_map_width_height[1])]
        self.WIDTH_HEIGHT = WIDTH_HEIGHT
        # NOTE(review): this self-assignment is a no-op; it looks like leftover code.
        time_passed = time_passed
        # list() snapshot: entities may add/remove others while processing.
        for entity in list(self.entity_group.values()):
            entity.process(time_passed)
        self.timer.update_timer(time_passed)

    def get_nearest_entity(self, location, name):
        """Return the entity named `name` closest to `location`, or None."""
        location = location.copy()
        nearest_entity = None
        min_d = 9999999
        for entity in self.entity_group.values():
            if entity.name == name:
                distance = abs(entity.location - location)
                if distance < min_d:
                    min_d = distance
                    nearest_entity = entity
        return nearest_entity

    def get_entity_number(self, name):
        """Count how many registered entities have the given name."""
        number = 0
        for entity in self.entity_group.values():
            if entity.name == name:
                number += 1
        return number
| true |
4d8e48fbea3b9ed38d4f4657835044c228b07bf3 | Python | jeffery1236/Stock-Price-Predictor | /stock_price_LinearRegression.py | UTF-8 | 4,445 | 2.984375 | 3 | [] | no_license | import pandas as pd
import quandl, math, datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import pickle
'''
A stock price predictor based on linear regression; it achieves accuracy of up to 99.05% on Coca-Cola Co. (KO) stock.
The dataset is obtained from Quandl.
'''
def getStockData(ticker):
    """Download the full price history for `ticker` from Quandl as a DataFrame.

    NOTE(review): the API token is hard-coded and committed to source; it
    should be read from an environment variable or config file instead.
    Requires network access.
    """
    df = quandl.get(ticker, authtoken="h2rGzLgJ8dnFfsfXtDAV")
    #get dataframe from quandl ticker
    return df
def dataframe_manipulation(df, forecast_out):
    """Build the feature/label frame used for training.

    Parameters
    ----------
    df : DataFrame with Adj_Open/Adj_High/Adj_Low/Adj_Close/Adj_Volume columns.
    forecast_out : int -- how many rows ahead the 'label' column looks.

    Returns a frame with columns Adj_Close, HL_pct, Pct_change, Adj_Volume
    and 'label' (Adj_Close shifted forecast_out rows into the future; the
    last forecast_out rows therefore have NaN labels).
    """
    # .copy() avoids pandas chained-assignment warnings on the slices below.
    df = df[['Adj_Open', 'Adj_High', 'Adj_Low', 'Adj_Close', 'Adj_Volume']].copy()
    # Daily volatility (high-low spread) and daily percentage move.
    df['HL_pct'] = ((df['Adj_High'] - df['Adj_Low']) / df['Adj_Low'])*100
    df['Pct_change'] = ((df['Adj_Close'] - df['Adj_Open']) / df['Adj_Open'])*100
    # Restrict to the feature columns actually used for training.
    new_df = df[['Adj_Close', 'HL_pct', 'Pct_change', 'Adj_Volume']].copy()
    # Fill gaps with a *numeric* outlier sentinel; the original used the
    # string '-99999', which silently turned numeric columns into object dtype.
    new_df.fillna(-99999, inplace=True)
    # Bug fix: the original recomputed forecast_out here from len(df) and
    # silently ignored the caller-supplied argument.
    # 'label' = adj_close forecast_out rows into the future, so every feature
    # row is paired with a future price; the trailing rows get NaN labels.
    new_df['label'] = new_df['Adj_Close'].shift(-forecast_out)
    return new_df
def data_processing_training_prediction(new_df, forecast_out):
    """Train a LinearRegression model and append forecasts to `new_df`.

    Splits off the last `forecast_out` rows (which have no labels yet) as
    the prediction set, trains/tests on the rest (80/20 split), pickles the
    fitted model, prints the test accuracy, and appends the predicted prices
    under a 'Forecast' column indexed by future weekdays.  Returns `new_df`.
    """
    # Features only; keyword `columns=` replaces the deprecated positional
    # axis argument `.drop(['label'], 1)`.
    X = np.array(new_df.drop(columns=['label']))
    X = preprocessing.scale(X)
    # Rows without labels are the ones we will forecast.
    X_withoutlabels = X[-forecast_out:]
    X_withlabels = X[:-forecast_out]
    y = np.array(new_df['label'])[:-forecast_out]

    # Shuffle and split: 20% of the labelled data held out for scoring.
    X_train, X_test, y_train, y_test = train_test_split(X_withlabels, y, test_size=0.2)

    # n_jobs=-1: use all available CPU threads for fitting.
    clf = LinearRegression(n_jobs=-1)
    clf.fit(X_train, y_train)

    # Persist the fitted model so later runs can skip training.
    with open('CocaColaLinearRegression', 'wb') as f:
        pickle.dump(clf, f)

    # Score once (the original computed the same accuracy twice).
    accuracy = clf.score(X_test, y_test)
    forecast_set = clf.predict(X_withoutlabels)
    print(accuracy, forecast_out)

    new_df['Forecast'] = np.nan

    one_day = 86400  # seconds
    last_date = new_df.iloc[-1].name
    next_unix = last_date.timestamp() + one_day
    for value in forecast_set:
        next_date = datetime.datetime.fromtimestamp(next_unix)
        # Bug fix: the original tested last_date (a constant) inside the
        # loop, so weekend skipping only worked for the first forecast.
        while next_date.weekday() >= 5:  # 5 = Saturday, 6 = Sunday
            next_unix += one_day
            next_date = datetime.datetime.fromtimestamp(next_unix)
        next_unix += one_day
        # New row: NaN for every feature column, forecast in the last column.
        new_df.loc[next_date] = [np.nan] * (len(new_df.columns) - 1) + [value]
    return new_df
def plot_results(new_df):
    """Plot historical Adj_Close together with the appended 'Forecast' column.

    Blocks until the matplotlib window is closed.
    """
    style.use('ggplot')
    new_df['Adj_Close'].plot()
    new_df['Forecast'].plot()
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.show()
def main():
    """Fetch Coca-Cola (EOD/KO) data, engineer features, train, predict, plot."""
    df = getStockData('EOD/KO')
    # The same horizon is passed to both stages below so the label shift and
    # the prediction-set size agree.
    forecast_out = int(math.ceil(0.005*len(df)))
    #set forecast length to be 0.5% of dataframe length
    new_df = dataframe_manipulation(df, forecast_out)
    results = data_processing_training_prediction(new_df, forecast_out)
    plot_results(results)
if __name__ == '__main__':
    main()
| true |
316a2ff386015586437c9fb903b1a774cc4058a9 | Python | rifaasyari/Bioimpedance-classifier | /std-deviation.py | UTF-8 | 542 | 2.546875 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd

df = pd.read_csv('data-sets/impedance-transpose.csv').dropna().reset_index(drop=True)

# Concentration labels for each measurement row (THP / PHA mixtures).
label = ['100_THP', '90_THP', '80_THP', '70_THP', '65_THP', '50_THP', '30_THP', '15_THP', '10_THP', '100_PHA',
         '90_PHA', '80_PHA', '70_PHA', '65_PHA', '50_PHA', '30_PHA', '15_PHA', '10_PHA']

x = df.drop(columns=['Frequency', 'Label']).to_numpy(dtype='float64')
y = df['Label'].to_numpy(dtype='float64')

# zip() pairs each row with its label and stops at the shorter sequence,
# fixing two defects in the original loop: range(len(x) - 1) silently
# skipped the last row, and the format string "{std}" dropped the label it
# was given (the `at=` argument was never printed).
for name, row in zip(label, x):
    print("{at}: {std}".format(at=name, std=np.std(row, dtype=np.float32)))
| true |
fbdfdb9a654cafc3fff128d87c0eb2a5c2e39b51 | Python | RYANCOX00/programming2021 | /JustForFun/functions.py | UTF-8 | 148 | 3.40625 | 3 | [] | no_license | # messing around with functions and different variable types
def my_function(fname, lname):
    """Print the two values separated by three spaces (positional-arg demo)."""
    # print(a, " ", b) with the default separator emits "a   b";
    # sep="   " produces the identical output in a single step.
    print(fname, lname, sep="   ")
my_function(26, "Refsnes")
| true |
b2e5649f05a3e3bd42811b2d913e5cfe6605ea7e | Python | hunse/speedtest | /test_numpy.py | UTF-8 | 1,599 | 3.078125 | 3 | [
"MIT"
] | permissive | from __future__ import print_function
import timeit
import numpy as np
import numpy.random as npr
def test(fn, number):
    """Best-of-`number` wall-clock time (seconds) for a single call of `fn`.

    NOTE(review): the setup string imports `fn` from __main__, so this helper
    only works when `fn` is a global of the executing script -- it cannot be
    reused from an imported module.
    """
    timer = timeit.Timer('fn()', 'from __main__ import fn')
    return min(timer.repeat(repeat=number, number=1))
# NOTE: absolute timings below depend entirely on the BLAS/LAPACK backend
# NumPy is linked against (OpenBLAS, MKL, reference BLAS, ...).
print("Numpy location: %s" % np.__file__)
print("Numpy version: %s" % np.__version__)
print("Numpy config:")
np.show_config()
# --- test matrix-matrix dot (BLAS gemm)
n = 1000
A = npr.randn(n,n)
B = npr.randn(n,n)
fn = lambda: np.dot(A, B)
t = test(fn, number=10)
print("multiplied two (%d,%d) matrices in %0.1f ms" % (n, n, 1e3 * t))
# --- test matrix-vector dot (BLAS gemv)
n = 2000
A = npr.randn(n, n)
B = npr.randn(n)
fn = lambda: np.dot(A, B)
t = test(fn, number=100)
print("multiplied (%d,%d) matrix and (%d) vector in %0.1f ms" % (
    n, n, n, 1e3 * t))
# --- test vector-vector dot (BLAS dot)
n = 4000
A = npr.randn(n)
B = npr.randn(n)
fn = lambda: np.dot(A, B)
t = test(fn, number=1000)
print("dotted two (%d) vectors in %0.2f us" % (n, 1e6*t))
# --- test SVD (LAPACK gesdd)
m, n = (2000,1000)
A = npr.randn(m, n)
fn = lambda: np.linalg.svd(A, full_matrices=False)
t = test(fn, number=1)
print("SVD of ({:d},{:d}) matrix in {:0.3f} s".format(m, n, t))
# --- test Eigendecomposition (LAPACK geev, non-symmetric)
n = 1500
A = npr.randn(n, n)
fn = lambda: np.linalg.eig(A)
t = test(fn, number=1)
print("Eigendecomposition of ({:d},{:d}) matrix in {:0.3f} s".format(n, n, t))
# --- My results
# --- New lab computer, OpenBLAS from source
# multiplied two (1000,1000) matrices in 13.1 ms
# dotted two (4000) vectors in 0.95 us
# SVD of (2000,1000) matrix in 0.504 s
# Eigendecomposition of (1500,1500) matrix in 3.671 s
2863bdfeba7273bfd8bd373a50b1989c0ebcad9b | Python | thijshosman/ThijsTempApp | /main.py | UTF-8 | 3,878 | 2.859375 | 3 | [] | no_license | import time
import lib.observer as observer
import lib.temp as temp
import lib.plotlyObserver as plotly
import lib.lcdObserver as lcdO
import lib.lcdHardware as lcdH
class mainLoop(observer.MultiObserver):
    '''Main event loop: wires the temperature poller and the LCD button
    listener together and reacts to their notifications.'''
    def __init__(self):
        """Build the hardware wrappers and observers, then start both
        worker threads (button listener first, then the temp poller)."""
        super(mainLoop,self).__init__()
        # init hardware class for lcd display and temp sensor
        self.lcd1 = lcdH.LCDHardware()
        self.aSensor = temp.tempSensor()
        # create a poller for the temp sensor (one reading per minute)
        self.aTempPoller = temp.sensorPoller(self.aSensor,interval=60)
        # log temperatures to plotly by adding plotlyobserver
        self.plotlyobstemp = plotly.plotlyObserver(self.aTempPoller.observable,'config.json','live temp plot')
        # add the temppoller observable to the list to be observed
        self.add_observable(self.aTempPoller.observable)
        # init listener thread and start listening to button presses on lcd1
        self.lcdlistener1 = lcdO.LCDButtonListener(1,'button',self.lcd1)
        # add the lcdlistener to the list to be observed
        self.add_observable(self.lcdlistener1.observable)
        # start the buttonlisten thread
        self.lcdlistener1.start()
        self.aTempPoller.start()
    def notify(self,observable, *args, **kwargs):
        """Dispatch on observable.name: 'temp' updates both LCD lines with
        the reading and its timestamp; 'button' cycles the backlight color.
        Called by the observables registered in the constructor."""
        # gets called by observables registered in constructor (atemppoller and buttonlistener)
        print('Got', args, kwargs, 'From', observable.name)
        # temp updated: show Celsius and Fahrenheit on line 1, HH:MM on line 2
        if observable.name == 'temp':
            self.lcd1.update(line1='temp=%.1fC/%.0fF' % (kwargs['value'],kwargs['value']*9.0/5.0+32))
            t = kwargs['timestamp']
            self.lcd1.update(line2='%02d:%02d' % (time.localtime(t).tm_hour,time.localtime(t).tm_min) )
        elif observable.name == 'button':
            buttonName = kwargs['button']
            # Right/Left buttons step forward/backward through the colors
            if buttonName == 'Right':
                newcolor = self.lcd1.nextColor()
                self.lcd1.update(line2=newcolor)
            elif buttonName == 'Left':
                newcolor = self.lcd1.previousColor()
                self.lcd1.update(line2=newcolor)
    def stop(self):
        """Shut down both worker threads so the process can exit cleanly."""
        # stop polling temp
        self.aTempPoller.stop()
        # stop listening for button input
        self.lcdlistener1.stop()
if __name__=='__main__':
    # Construct the loop (this also starts the worker threads), then idle
    # in the main thread until Ctrl+C, which triggers a clean shutdown.
    handler = mainLoop()
    do_exit = False
    while do_exit == False:
        try:
            # sleep some time
            time.sleep(0.1)
        except KeyboardInterrupt:
            # Ctrl+C was hit - exit program
            do_exit = True
            handler.stop()
# ### LCD stuff
# # init hardware
# lcd1 = LCDHardware()
# # create listener/observable for buttons that listens to lcd1
# lcdlistener1 = LCDButtonListener(1,'testlistenerthread',lcd1)
# # create display observer and let it listen to the lcdlistener observable
# # display1 = LCDDisplay(lcdlistener1.LCDButtonObservable,lcd1)
# log1 = LogObserver(lcdlistener1.LCDButtonObservable)
# # start listening for buttons
# lcdlistener1.start()
# # lcdlistener1.broadcast(button='left')
# ### Temp stuff
# aSensor = tempSensor()
# aTempPoller = sensorPoller(aSensor,interval=2)
# # register default observer with the poller
# # firstobserver = Observer(aTempPoller.observable)
# # add the temppoller to the list of observabes display1 observes
# # aTempPoller.registerExtraObserver(display1)
# display1 = LCDDisplay(aTempPoller.observable,lcd1)
# # start polling temperature
# aTempPoller.start()
# do_exit = False
# while do_exit == False:
# try:
# # sleep some time
# time.sleep(0.1)
# except KeyboardInterrupt:
# # Ctrl+C was hit - exit program
# do_exit = True
# # stop all running threads
# lcdlistener1.stop()
# aTempPoller.stop()
| true |
3bd00f19be37a2b8f591ef39b4eaed9b0c2d5f41 | Python | Aasthaengg/IBMdataset | /Python_codes/p03386/s919296793.py | UTF-8 | 248 | 2.890625 | 3 | [] | no_license | import sys
# Read all of stdin; the first line holds three integers: a, b, k.
lines = [s.rstrip("\n") for s in sys.stdin.readlines()]
a, b, k = [int(num) for num in lines.pop(0).split(" ")]
# s1: the first k integers starting at a (capped at b);
# s2: the last k integers ending at b (floored at a).
s1 = set(range(a, min(b, a + k)))
s2 = set(range(max(a, b - k + 1), b + 1))
# Print the union of both windows in ascending order, one per line.
lis = sorted(s1 | s2)
for i in lis:
    print(i)
| true |
bc77e1526e06c7ecfa715e8c871b9eb80f0cd615 | Python | manaswini-b/ml_binary_forest_classifier | /random.py | UTF-8 | 729 | 3.15625 | 3 | [] | no_license | from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn import datasets
# Load the iris flower data set.
iris = datasets.load_iris()
X, y = iris.data, iris.target
# Split data: 70% for training, 30% held out for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
# Fit a random forest classifier on the training partition.
classification_model = RandomForestClassifier()
classification_model.fit(X_train, y_train)
print(classification_model)
# Produce estimations for the held-out *features* and compare them
# against the true test *labels*.  The original called
# predict(y_test) -- predicting on the label vector -- and then scored
# against the feature matrix, which is backwards on both counts.
expected = y_test
estimate = classification_model.predict(X_test)
# Summarize the fit of the model (y_true first, y_pred second).
print(metrics.classification_report(expected, estimate))
print(metrics.confusion_matrix(expected, estimate))
| true |
ea4687f354a85b2d3f43a3cc642f0971f03bf697 | Python | peltonen/physiology | /nbphys.py | UTF-8 | 2,312 | 2.546875 | 3 | [] | no_license | """
A collection of tools for analysis of electrophysiological data that are specific to Ginty lab applications.
These methods mostly wrap tools built by others.
"""
import numpy as np
import traceRoutines as tm
import scipy.signal as signal
import matplotlib as plt
__all__ = ['trace2spikes', 'mergeXSGs', 'parseXSGHeader', 'lpFilter', 'pGrid', 'iGrid']
def trace2spikes(trace, minThresh = 200):
    """Detect spike peak indices in a voltage trace.

    The trace is high-pass filtered (8th-order Butterworth, zero-phase),
    a threshold is set at 66% of the filtered maximum, and each
    rising/falling threshold crossing pair brackets one spike whose peak
    index (into the *unfiltered* trace) is returned.

    Returns [-1] when the filtered trace never clears *minThresh*.
    """
    # NOTE(review): these three accumulators are never used -- candidates
    # for removal.
    all_spike_peaks = []
    all_thresholds = []
    all_spike_boundaries = []
    b, a = signal.butter(8, 0.025, 'high')
    hp_trace = signal.filtfilt(b,a,trace, padlen=150)
    # Adaptive threshold: two thirds of the filtered peak amplitude.
    hp_thresh = hp_trace.max() * 0.66
    if hp_thresh < minThresh:
        return([-1])
    # Pair each rising crossing with the next falling crossing; tm is the
    # project-local traceRoutines module.
    spike_boundaries = zip(tm.findLevels(hp_trace, hp_thresh, mode='rising')[0], tm.findLevels(hp_trace, hp_thresh, mode='falling')[0])
    # Peak = argmax of the raw trace inside each boundary window, shifted
    # back to absolute indices.
    spike_peaks = [np.argmax(trace[s[0]:s[1]])+s[0] for s in spike_boundaries]
    return spike_peaks
def pltTS(trace, spikes, Fs):
    """Plot *trace* against time and mark each spike with a vertical line.

    :param trace: 1-d array of samples.
    :param spikes: iterable of spike peak indices into *trace*.
    :param Fs: sampling rate used to convert indices to seconds.
    """
    # The module-level ``import matplotlib as plt`` binds the bare package,
    # which exposes no plotting functions; import pyplot locally instead.
    import matplotlib.pyplot as plt
    t = np.linspace(0, len(trace) / Fs, len(trace))
    # The original computed ``t`` but plotted plot(trace) without it, and
    # referenced the undefined globals ``sTimes`` and ``hp_trace``; derive
    # everything from the function's own arguments.
    plt.plot(t, trace)
    spike_times = [s / Fs for s in spikes]
    ymax = trace.max()
    plt.vlines(spike_times, ymax * 1.1, ymax * 1.8)
    return
def iGrid(startIndex, row=28, col=5, intervals=[[0, 10000],[10600, 19000],[19000, 28000],[28000,35900]]):
    """Build a (row, col, len(intervals)) grid of per-interval spike counts.

    Traverses ``row * col`` recordings starting at *startIndex* in
    boustrophedon (snake) order: even rows fill left-to-right, odd rows
    right-to-left.

    NOTE(review): relies on a module-level ``xsgs`` sequence and an
    ``nbp`` alias for this module, neither of which is defined here --
    confirm against the caller.  ``len(filter(...))`` only works on
    Python 2 (filter returns an iterator on Python 3), and the mutable
    default for *intervals* is shared across calls.
    """
    iGrid = np.zeros(shape=(row,col, len(intervals)))
    ri, ci = [0,0]
    for x in xsgs[startIndex:(startIndex + row * col)]:
        # Spike peak indices for channel 0 of this recording.
        spikes = nbp.trace2spikes(x['ephys']['chan0'])
        temp = []
        for i in intervals:
            # Count spikes strictly inside the (open) interval.
            temp.append(len(filter(lambda s: s > i[0] and s < i[1], spikes)))
        iGrid[ri,ci] = temp
        if ri%2 == 0: #we are traveling right
            if ci == (col-1): # we are at the end of the range, so we should move down
                ri = ri + 1
            else:
                ci = ci + 1
        else: #we are traveling left
            if ci == 0: # we are at the end of the range, so we should move down
                ri = ri + 1
            else:
                ci = ci - 1
    return(iGrid)
def pGrid(startIndex, row=28, col=5, intervals=[[0, 10000],[10600, 19000],[19000, 28000],[28000,35900]]):
    """Placeholder grid builder; not implemented yet, always returns None."""
    return
def hpFilter(trace):
    """Return *trace* high-pass filtered with an 8th-order Butterworth.

    The cutoff is fixed at 0.025 (normalized frequency) and the filter is
    applied forward and backward (zero phase) with 150 samples of padding.
    """
    numerator, denominator = signal.butter(8, 0.025, btype='high')
    return signal.filtfilt(numerator, denominator, trace, padlen=150)
def lpFilter(trace, Fs, Fcut):
    """Return *trace* low-pass filtered with a 4th-order Bessel filter.

    :param trace: 1-d array of samples.
    :param Fs: value used to normalise the cutoff (Fcut / Fs is handed to
        scipy as the critical frequency).
    :param Fcut: cutoff frequency in the same units as *Fs*.
    """
    normalized_cutoff = Fcut / (Fs * 1.0)
    numerator, denominator = signal.bessel(4, normalized_cutoff, btype='low')
    return signal.filtfilt(numerator, denominator, trace, padlen=150)
1648cbabed2b238ac04170353302f544d1806ea9 | Python | DrScince/Bauernschach_TIT19_1 | /chessgame_main/chess_logik/figure.py | UTF-8 | 1,754 | 3.609375 | 4 | [] | no_license | """Figure base class for the chess game
"""
import sys
try:
from chess_logik import consts
from chess_logik.position import Position
except ImportError as err:
print("ImportError "+str(err))
sys.exit()
class Figure:
    """Base class for every chess piece in the game."""

    def __init__(self, color, position):
        """Create a figure.

        Arguments:
            color {str} -- consts.COLOR_BLACK or consts.COLOR_WHITE
            position {Position} -- the figure's starting square
        """
        assert isinstance(color, str), "color is not a str" + str(type(color))
        assert len(color) == 1, "color doesn't have the length 1, the length is: " + str(len(color))
        assert isinstance(position, Position), "position is not a Position" + str(type(position))
        # Only the two known colors are ever stored; any other one-char
        # string leaves the attribute unset, exactly as before.
        for known_color in (consts.COLOR_WHITE, consts.COLOR_BLACK):
            if color == known_color:
                self.__color = known_color
                break
        if position is not None:
            self.__position = position

    def get_color(self):
        """Return the figure's color (COLOR_BLACK or COLOR_WHITE)."""
        return self.__color

    def get_position(self):
        """Return the figure's current Position."""
        return self.__position

    def do_move(self, new_position):
        """Move the figure to *new_position*.

        Arguments:
            new_position {Position}

        Returns:
            consts.ERROR_CODES["Success"] {str} -- if successful
        """
        assert isinstance(new_position, Position), "new_position is not a Position" + str(type(new_position))
        self.__position = new_position
        return consts.ERROR_CODES["Success"]
| true |
83f24547a1e8fa5c528631a849e4633e6f3d8ae1 | Python | edigiorgio/PythonCode | /Password_protected_encryption3.py | UTF-8 | 6,282 | 3.453125 | 3 | [] | no_license |
import sys
from time import sleep
prompt = ''
option = ''
output = ''
output2 = ''
userPassword = ''
readData = ''
readData2 = ''
input1 = ''
#This is an encryption/decryption program. You will need to set a password, once
#your password is set you will use it to lock/unlock your files. everything you enter
#will encrypted and transfered to a file. Enjoy
def main():
    """Authenticate the user, show the top-level menu and dispatch.

    Uses the module-level globals ``input1`` and ``output`` as the
    accumulator seeds for encryption/decryption.
    """
    getPassword()
    choice = 0
    print("Welcome to the encryption/decryption program")
    sleep(1)
    print("Please make a selection below")
    sleep(1)
    print("1.)Encrypt some text")
    print("2.)Decrypt whats in the file")
    print("3.)Change your password")
    print("4.)Exit the program")
    # NOTE(review): eval() on raw user input executes arbitrary code --
    # int(input(...)) would be the safe equivalent here.
    choice = eval(input("Please make your selection:"))
    if choice == 1:
        enterEncryption(input1)
    elif choice == 2:
        decryptFile(output)
    elif choice == 3:
        changePassword()
    elif choice == 4:
        sys.exit()
    else:
        # Any other choice restarts the whole program (recursively).
        print("Oops something went wrong \nRestarting program")
        main()
def passCheck():
    """Re-prompt for the password before a sensitive action.

    The stored password file holds either the literal default 'Password'
    or a Caesar-shifted (-2) password; in the latter case it is shifted
    back (+2) before comparison.  Three failed attempts exit the program.

    NOTE(review): the bare ``next`` statements below are no-ops (they just
    reference the builtin), so a correct password inside the retry loop
    does not break out -- the loop keeps prompting until counter == 3.
    """
    output = ''
    attempts = 3
    counter = 0
    print("Please confirm your password to continue")
    pwdFile = open('uspwdbfg.txt', 'r')
    storedPassword = pwdFile.readline()
    storedPassword = storedPassword.rstrip('\n')
    if storedPassword == 'Password':
        # Default password path: compare directly.
        userPassword = input('Please enter your Password:')
        if userPassword == storedPassword:
            pwdFile.close()
            next
        else:
            while counter != 3:
                userPassword = input('That was incorrect you have ' + str(attempts-counter) + ' attempts left:')
                counter += 1
                if userPassword == storedPassword:
                    next
                if counter == 3:
                    sys.exit()
    else:
        # Stored password is shifted by -2; undo the shift before comparing.
        for x in range(len(storedPassword)):
            output = output + chr(ord(storedPassword[x]) + 2)
        userPassword = input('Please enter your Password:')
        if userPassword == output:
            pwdFile.close()
            next
        else:
            while counter != 3:
                userPassword = input('That was incorrect you have ' + str(attempts-counter) + ' attempts left:')
                counter += 1
                if userPassword == output:
                    next
                if counter == 3:
                    sys.exit()
def getPassword():
    """Initial login prompt shown at program start.

    Same logic as passCheck(): the stored password is either the literal
    default 'Password' or Caesar-shifted by -2 on disk and shifted back
    (+2) before comparison; three failures exit.

    NOTE(review): duplicated almost verbatim from passCheck() -- the two
    could share one helper.  The bare ``next`` statements are no-ops, so
    a correct retry does not break out of the while loop.
    """
    output = ''
    attempts = 3
    counter = 0
    print("This program is password protected, you have 3 attempts at the password before the system exits")
    sleep(1)
    print("***HINT***\nTry Password if this is your first time using the program(Case sensitive)")
    sleep(1)
    pwdFile = open('uspwdbfg.txt', 'r')
    storedPassword = pwdFile.readline()
    storedPassword = storedPassword.rstrip('\n')
    if storedPassword == 'Password':
        userPassword = input('Please enter your Password:')
        if userPassword == storedPassword:
            pwdFile.close()
            next
        else:
            while counter != 3:
                userPassword = input('That was incorrect you have ' + str(attempts-counter) + ' attempts left:')
                counter += 1
                if userPassword == storedPassword:
                    next
                if counter == 3:
                    sys.exit()
    else:
        # Undo the -2 shift applied by changePassword() before comparing.
        for x in range(len(storedPassword)):
            output = output + chr(ord(storedPassword[x]) + 2)
        userPassword = input('Please enter your Password:')
        if userPassword == output:
            pwdFile.close()
            next
        else:
            while counter != 3:
                userPassword = input('That was incorrect you have ' + str(attempts-counter) + ' attempts left:')
                counter += 1
                if userPassword == output:
                    next
                if counter == 3:
                    sys.exit()
def changePassword():
    """Replace the stored password after re-authenticating.

    The new password is Caesar-shifted by -2 character-by-character and
    written to uspwdbfg.txt; getPassword()/passCheck() reverse the shift
    when comparing.
    """
    passCheck()
    continue1 = ''
    input1 = ''
    print("This is to change your stored password")
    storedPassword = input("Please enter your new password:")
    # Shift every character down by 2 before persisting it.
    for i in range(0, len(storedPassword)):
        input1 = input1 + chr(ord(storedPassword[i]) - 2)
    encryptionFile = open('uspwdbfg.txt', 'w')
    encryptionFile.write(str(input1) + '\n')
    encryptionFile.close()
    continue1 = input("Would you like to continue the program(yes or no)").lower()
    if continue1 == 'yes':
        main()
    elif continue1 == 'no':
        sys.exit()
    else:
        # Unrecognized answer: re-run this flow from the top.
        print("I didnt understand that")
        changePassword()
def enterEncryption(input1):
    """Repeatedly prompt for text, Caesar-shift it by -2 and append the
    result to secrets1.txt.

    :param input1: seed string the encoded text is appended to.

    NOTE(review): ``input1`` is never reset between iterations, so each
    appended line also contains all previously entered text.  The final
    fallback call ``enterEncryption()`` omits the required argument and
    would raise TypeError -- confirm intended behavior.
    """
    continue1 = 'yes'
    print("Any data entered here will be encrypted and written to a file")
    while continue1 == 'yes':
        prompt = input("Please enter what you want encrypted:")
        # Shift every character down by 2 (the inverse of decryptFile).
        for i in range(0, len(prompt)):
            input1 = input1 + chr(ord(prompt[i]) - 2)
        encryptionFile = open('secrets1.txt', 'a')
        encryptionFile.write(str(input1) + '\n')
        encryptionFile.close()
        continue1 = input("Would you like to input anything else(yes or no):").lower()
        if continue1 == 'yes':
            enterEncryption(input1)
        elif continue1 == 'no':
            break
    continue1 = input("Would you like to continue the program(yes or no)").lower()
    if continue1 == 'yes':
        main()
    elif continue1 == 'no':
        sys.exit()
    else:
        print("I didnt understand that")
        enterEncryption()
def decryptFile(output):
    """Decrypt every line of secrets1.txt (Caesar shift +2) and print it.

    Re-authenticates via passCheck() first.  *output* seeds the decoded
    string, matching the original call from main().
    """
    passCheck()
    # Iterate the file directly.  The original read one line, then read
    # the *next* line at the top of its while loop before decoding --
    # silently skipping the first stored secret.
    with open('secrets1.txt', 'r') as encryptionFile:
        for readData in encryptionFile:
            # Strip the stored newline so it is not shifted into a
            # control character, then shift every character up by 2.
            readData = readData.rstrip('\n')
            for ch in readData:
                output = output + chr(ord(ch) + 2)
            output = output + '\n'
    print(output)
    continue1 = input("Would you like to continue the program(yes or no)").lower()
    if continue1 == 'yes':
        main()
    elif continue1 == 'no':
        sys.exit()
    else:
        print("I didnt understand that")
        # The original retried with decryptFile() and no argument, which
        # raised TypeError; pass the accumulator through instead.
        decryptFile(output)
main()  # Program entry point: authenticate via getPassword(), then show the menu.
| true |
38bf21783dbb85a7bea6f59f474238ea5220d032 | Python | WojciechKoz/huzarus | /husarus/slider_demo.py | UTF-8 | 1,021 | 2.5625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
from clustering import cluster_loop
fig, ax = plt.subplots()
# NOTE(review): scatter() is called with no data arguments, and several
# names used below (axfreq, f0, delta_f, axcolor, l) are only defined in
# the commented-out lines -- this script cannot run as-is.
scat = ax.scatter()
# plt.subplots_adjust(left=0.25, bottom=0.25)
# ax.margins(x=0)
# axcolor = 'lightgoldenrodyellow'
# axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
# axamp = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
iteration = Slider(axfreq, 'iter', 1, 10.0, valinit=f0, valstep=delta_f)
def update(val):
    # NOTE(review): assigning to ``iteration`` makes it local to this
    # function, so reading ``iteration.val`` raises UnboundLocalError.
    iteration = iteration.val
    fig.canvas.draw_idle()
iteration.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
    # Restore the slider to its initial value.
    iteration.reset()
button.on_clicked(reset)
rax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)
radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
def colorfunc(label):
    # NOTE(review): ``l`` is never defined -- presumably the line artist
    # of an earlier plot; confirm against the original matplotlib demo.
    l.set_color(label)
    fig.canvas.draw_idle()
radio.on_clicked(colorfunc)
plt.show()
| true |
19e72259837bccbb0aef8d2c77c930d6528a83f2 | Python | lsg921203/SmartHome | /voice_machine.py | UTF-8 | 18,965 | 2.703125 | 3 | [] | no_license | import time, serial
class voice_machine:
    def __init__(self, targetParts):
        """Open the voice recognizer's serial port and store the state table.

        :param targetParts: list of mutable [name, state, ...] entries, one
            per controllable part, with a trailing "not understood" sentinel
            entry used by the *_selection methods.
        """
        port = "/dev/ttyAMA0"
        rate = 9600
        # 8-N (eight data bits, no parity) serial link to the recognizer.
        self.ser = serial.Serial(port, rate)
        self.ser.parity = serial.PARITY_NONE
        self.ser.bytesize = serial.EIGHTBITS
        self.Modes = ["hear2Act", "userSetting"]
        # self.targetParts=[["door","closed"], ["LED","off","None"], ["window","closed"], ["AC","off"], ["TV","off"]]
        self.targetParts = targetParts
    def mode(self, Mode):
        """Enter one of the two operating modes.

        "hear2Act" (Modes[0]) reopens and flushes the serial port, arms the
        recognizer (0xAA 0x21) and returns the result of one hear2Act2()
        listening cycle.  "userSetting" (Modes[1]) currently only prints
        its name.
        """
        if Mode == self.Modes[0]:
            print(self.Modes[0])
            # Reopen from a clean state and drop any stale bytes.
            if self.ser.isOpen():
                self.ser.close()
            self.ser.open()
            # print('serial open')
            self.ser.flushInput()
            self.ser.flushOutput()
            time.sleep(0.1)
            # print('test command')
            self.ser.write(serial.to_bytes([0xAA]))
            self.ser.write(serial.to_bytes([0x21]))
            time.sleep(0.3)
            print('hearing...')
            return self.hear2Act2()
        elif Mode == self.Modes[1]:
            print(self.Modes[1])
    def wait_command(self):
        """Put the recognizer back into its idle/wait state (0xAA 0x00)."""
        self.ser.write(serial.to_bytes([0xAA]))
        self.ser.write(serial.to_bytes([0x00]))
        # Give the device time to process the command.
        time.sleep(0.4)
'''
def hear2Act(self):
cnt=0
msg=""
try:
print("1.hyunkwanmoon, 2.LED 3.changmoon 4.Aircon 5.TV")
command = self.ser.readline()
print(command)
if command == b'Result:11\r\n': # door
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
self.ser.write(serial.to_bytes([0xAA]))
self.ser.write(serial.to_bytes([0x22]))
print('waiting')
print("1.yeoluju")
time.sleep(0.3)
command = self.ser.readline()
print(command)
command = self.ser.readline()
if command == b'Result:13\r\n':
if self.targetParts[0][1] != "opened":
self.targetParts[0][1] = "opened"
print("door opened")
self.wait_command()
self.ser.close()
print('waiting')
return self.targetParts[0] ###############################
else:
print("Already door opened")
self.wait_command()
self.ser.close()
print('waiting')
return self.targetParts[0] ###############################
elif command == b'Result:12\r\n': # LED
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
self.ser.write(serial.to_bytes([0xAA]))
self.ser.write(serial.to_bytes([0x22]))
print('waiting')
time.sleep(0.3)
print("1.kyuzo 2.kkuzo")
# group 2 question
command = self.ser.readline()
if command == b'Result:12\r\n':
if self.targetParts[1][1] != "on":
self.targetParts[1][1] = "on"
print("LED turned on")
else:
print("Already LED on")
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
self.ser.write(serial.to_bytes([0xAA]))
self.ser.write(serial.to_bytes([0x31]))
print('waiting')
time.sleep(0.3)
while True: # group 3 question
print("1.barkye 2.udupkye")
command = self.ser.readline()
if command == b'Result:11\r\n':
if self.targetParts[1][2] != "brightly":
self.targetParts[1][2] = "brightly"
print("LED turned on brightly")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[1] ###############################
else:
print("Already LED turned on brightly")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[1] ###############################
elif command == b'Result:12\r\n':
if self.targetParts[1][2] != "not brightly":
self.targetParts[1][2] = "not brightly"
print("LED turned on brightly")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[1] ###############################
else:
print("Already LED turned on not brightly")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[1] ###############################
elif command == b'Result:15\r\n':
print("Welcome !")
elif command == b'Result:15\r\n':
if self.targetParts[1][1] != "off":
self.targetParts[1][1] = "off"
self.targetParts[1][2] = "None"
print("LED turned off")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[1] ###############################
else:
print("Already LED turned off")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[1] ###############################
elif command == b'Result:13\r\n': # window
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
self.ser.write(serial.to_bytes([0xAA]))
self.ser.write(serial.to_bytes([0x22]))
print('waiting')
time.sleep(0.3)
print("1.yeoluju 2.dadaju")
while True: # group 2 question
command = self.ser.readline()
if command == b'Result:13\r\n':
if self.targetParts[2][1] != "opened":
self.targetParts[2][1] = "opened"
print("window opened")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[2] ###############################
else:
print("Already window opened")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[2] ###############################
elif command == b'Result:11\r\n':
if self.targetParts[2][1] != "closed":
self.targetParts[2][1] = "closed"
print("window opened")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[2] ###############################
else:
print("Already window closed")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[2] ###############################
elif command == b'Result:14\r\n': # AC
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
self.ser.write(serial.to_bytes([0xAA]))
self.ser.write(serial.to_bytes([0x22]))
print('waiting')
time.sleep(0.3)
print("1.teuluju 2.kkujo")
while True: # group 2 question
command = self.ser.readline()
if command == b'Result:14\r\n':
if self.targetParts[3][1] != "on":
self.targetParts[3][1] = "on"
print("AC turned on")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[3] ###############################
else:
print("Already AC on")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[3] ###############################
elif command == b'Result:15\r\n':
if self.targetParts[3][1] != "off":
self.targetParts[3][1] = "off"
print("AC turned off")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[3] ###############################
else:
print("Already AC off")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[3] ###############################
elif command == b'Result:15\r\n': # TV
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
self.ser.write(serial.to_bytes([0xAA]))
self.ser.write(serial.to_bytes([0x22]))
print('waiting')
time.sleep(0.3)
print("1.teuluju 2.kkujo")
while True: # group 2 question
command = self.ser.readline()
if command == b'Result:14\r\n':
if self.targetParts[4][1] != "on":
self.targetParts[4][1] = "on"
print("TV turned on")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[4] ###############################
else:
print("Already TV on")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[4] ###############################
elif command == b'Result:15\r\n':
if self.targetParts[4][1] != "off":
self.targetParts[4][1] = "off"
print("TV turned off")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[4] ###############################
else:
print("Already TV off")
self.wait_command()
print('waiting')
self.ser.close()
return self.targetParts[4] ###############################
except KeyboardInterrupt:
print('voice command ended.')
pass
finally:
self.wait_command()
time.sleep(0.3)
self.ser.close()
'''
def Door_selection(self):
print("1.yeoluju")
command = self.ser.readline()
command = self.ser.readline()
print(command)
if command == b'Result:13\r\n':
return self.targetParts[0] # 알아들음
return self.targetParts[5] # 못 알아들음
def LED_selection(self):
print("1.kyuzo 2.kkuzo")
command = self.ser.readline()
command = self.ser.readline()
print(command)
if command == b'Result:12\r\n':
self.targetParts[1][1] = "on"
print("LED turned on")
return self.targetParts[1]
elif command == b'Result:15\r\n':
self.targetParts[1][1] = "off"
print("LED turned off")
return self.targetParts[1]
return self.targetParts[5] # 못 알아들음
def Window_selection(self):
return self.targetParts[5] # 못 알아들음
def AC_selection(self):
print("1.teuluju 2.kkujo")
command = self.ser.readline()
command = self.ser.readline()
print(command)
if command == b'Result:14\r\n':
self.targetParts[3][1] = "on"
print("AC turned on")
return self.targetParts[3]
elif command == b'Result:15\r\n':
self.targetParts[3][1] = "off"
print("AC turned off")
return self.targetParts[3]
return self.targetParts[5] # 못 알아들음
def TV_selection(self):
print("1.teuluju 2.kkujo")
command = self.ser.readline()
command = self.ser.readline()
print(command)
if command == b'Result:14\r\n' or b'Result:13\r\n' or b'Result:12\r\n':
self.targetParts[4][1] = "on"
print("TV turned on")
return self.targetParts[4]
elif command == b'Result:15\r\n':
self.targetParts[4][1] = "off"
print("TV turned off")
return self.targetParts[4]
return self.targetParts[5] # 못 알아들음
def import_ch_2(self, partsName):
self.ser.write(serial.to_bytes([0xAA]))
self.ser.write(serial.to_bytes([0x22]))
if partsName == "Door":
print('waiting')
time.sleep(0.3)
print(partsName)
return self.Door_selection()
elif partsName == "LED":
print('waiting')
time.sleep(0.3)
print(partsName)
return self.LED_selection()
elif partsName == "Window":
print('waiting')
time.sleep(0.3)
print(partsName)
return self.Window_selection()
elif partsName == "AC":
print('waiting')
time.sleep(0.3)
print(partsName)
return self.AC_selection()
elif partsName == "TV":
print('waiting')
time.sleep(0.3)
print(partsName)
return self.TV_selection()
def hear2Act2(self):
cnt = 0
msg = ""
cnt = 0
msg = ""
try:
command = self.ser.readline()
print(command)
print("1.hyunkwanmoon, 2.LED 3.changmoon 4.Aircon 5.TV")
command = self.ser.readline()
print(command)
if command == b'Result:11\r\n': # Door
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
return self.import_ch_2("Door")
elif command == b'Result:12\r\n': # LED
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
return self.import_ch_2("LED")
elif command == b'Result:13\r\n': # Window
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
return self.import_ch_2("Window")
elif command == b'Result:14\r\n': # AC
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
return self.import_ch_2("AC")
elif command == b'Result:15\r\n': # TV
self.wait_command()
print('waiting')
time.sleep(0.3)
command = self.ser.readline()
print(command)
return self.import_ch_2("TV")
except KeyboardInterrupt:
print('voice command ended.')
pass
finally:
self.wait_command()
time.sleep(0.3)
self.ser.close()
| true |
e733a0dbbd94d6366bf736c4d283c8ba8e6eb5a6 | Python | xiaotuzixuedaima/PythonProgramDucat | /python_program/Def_speed_calcultor.py | UTF-8 | 2,179 | 3.59375 | 4 | [] | no_license | # speed calcultor used def function .....???
def kmh():
    """Prompt for a distance (cm) and a time (sec); print speed in km/hour."""
    cm = float(input("enter the cm : "))
    sec = float(input("enter the sec : "))
    # Only the units actually reported are converted; the original also
    # computed m, feet, miles and minutes (shadowing the ``min`` builtin)
    # and discarded them all.
    km = cm / (10**5)
    hour = sec / 3600
    print((km / hour), " km / hour")
def metersec():
    """Prompt for a distance (cm) and a time (sec); print speed in m/sec."""
    cm = float(input("enter the cm : "))
    sec = float(input("enter the sec : "))
    # Only the metre conversion is needed; the original computed every
    # other unit (km, feet, miles, minutes, hours) and discarded them.
    m = cm / (10**2)
    print((m / sec), " m / sec")
def cmsec():
    """Prompt for a distance (cm) and a time (sec); print speed in cm/sec."""
    cm = float(input("enter the cm : "))
    sec = float(input("enter the sec : "))
    # The inputs are already in the reported units; the original computed
    # six unused conversions (one of which shadowed the ``min`` builtin).
    print((cm / sec), " cm / sec")
def feetsec():
    """Prompt for a distance (cm) and a time (sec); print speed in feet/sec."""
    cm = float(input("enter the cm : "))
    sec = float(input("enter the sec : "))
    # 30.5 cm per foot (the conversion factor the original used); all the
    # other unit conversions it computed were unused and are removed.
    feet = cm / (30.5)
    print((feet / sec), " feet / sec")
def milesh():
    """Prompt for a distance (cm) and a time (sec); print speed in miles/hour."""
    cm = float(input("enter the cm : "))
    sec = float(input("enter the sec : "))
    # 160934 cm per mile (the factor the original used); the remaining
    # unused conversions (m, km, feet, minutes) are removed.
    miles = cm / (160934)
    hour = sec / 3600
    print((miles / hour), " miles / hour")
# Simple text menu: the user types the name of the conversion to run.
print("kmh , metersec , cmsec , feetsec , milesh .")
choice = input("enter the user choice : ")
if choice == 'kmh':
    kmh()
elif choice == 'metersec':
    metersec()
elif choice == 'cmsec':
    cmsec()
elif choice == 'feetsec':
    feetsec()
elif choice == 'milesh':
    milesh()
else:
    # Anything not matching a known conversion name is rejected.
    print("invalid key .")
'''
output ===
kmh , metersec , cmsec , feetsec , milesh .
enter the user choice : cmsec
enter the cm : 45
enter the sec : 89
0.5056179775280899 cm / sec
kmh , metersec , cmsec , feetsec , milesh .
enter the user choice : feetsec
enter the cm : 89
enter the sec : 96
0.03039617486338798 feet / sec
kmh , metersec , cmsec , feetsec , milesh .
enter the user choice : milesh
enter the cm : 986
enter the sec : 89
0.2478229961811409 miles / hour
kmh , metersec , cmsec , feetsec , milesh .
enter the user choice : kmh
enter the cm : 9865
enter the sec : 4589
0.07738940945739813 km / hour
''' | true |
7b63c1375402fd246b4e3027d022b9b6c0dc2bd4 | Python | MickeyLannister17/zc_plugin_noticeboard | /backend/notice_project/notice/test/tests_db.py | UTF-8 | 2,466 | 2.71875 | 3 | [] | no_license | # Create your tests here.
import requests
class Dbnoticeboard:
    """Thin client for reading/writing plugin data through the zc_core
    read and write endpoints."""

    def __init__(self):
        BASE_URL = "https://api.zuri.chat"
        # Template URL; callers are expected to .format() in the plugin,
        # collection, organization ids and query string.
        self.read_endpoint = (
            BASE_URL + "/data/read/{pgn_id}/{collec_name}/{org_id}?{query}"
        )
        self.write_endpoint = BASE_URL + "/data/write"

    def read(self):
        """Gets json data from the Db (placeholder -- not implemented yet)."""
        print("Read api works")

    def save(self, collection_name, data):
        """Write *data* to *collection_name* via the zc_core write endpoint.

        Posts the standard write envelope with the caller's *data* as the
        payload and prints the response body.
        """
        payload = {
            "plugin_id": "6139276099bd9e223a37d91d",
            "organization_id": "613a1a3b59842c7444fb0220",
            "collection_name": collection_name,
            "bulk_write": False,
            "object_id": "55",
            "filter": {},
            # Use the caller's data; the original ignored this parameter
            # and posted the module-level ``tolu`` global instead.
            "payload": data,
        }
        try:
            res = requests.post(self.write_endpoint, payload)
            print(res.text)
            res.raise_for_status()
        # Most-specific exceptions first: the original caught the blanket
        # RequestException before its subclasses, which made the HTTPError
        # and ConnectionError branches unreachable.
        except requests.exceptions.HTTPError as errh:
            print("Http Error:", errh)
        except requests.exceptions.ConnectionError as errc:
            print("Error Connecting:", errc)
        except requests.exceptions.RequestException as err:
            print("OOps: There is a problem with the Request", err)
# Ad-hoc smoke-test statements left over from development.
Database = Dbnoticeboard()
Database.read()
collection_name = "tf"
tolu = dict(name=5)
# Sample write envelope mirroring the one built inside save().
data = {
    "plugin_id": "6139276099bd9e223a37d91d",
    "organization_id": "613a1a3b59842c7444fb0220",
    "collection_name": collection_name,
    "bulk_write": False,
    "object_id": "55",
    "filter": {},
    "payload": tolu,
}
# Database.save("tf")
# print(Database.save)
# Deliberately bogus URL, presumably for testing error handling -- unused.
URL = "http://www.google.com/blahblah"
# SERIALIZERS.PY
# class SchedulesSerializer(serializers.Serializer):
# title = serializers.CharField(max_length=255)
# created = serializers.DateTimeField(default=timezone.now)
# author_name = serializers.CharField()
# author_username = serializers.CharField()
# author_img_url = serializers.CharField()
# message = serializers.CharField()
# scheduled_time=serializers.CharField()
# views = serializers.CharField(default=0)
# org_id = serializers.CharField()
# VIEWS.PY
| true |
c21f22b47a893349d6b5b256779a9ab2fbe441fe | Python | kiddyboots216/pytorch_privacy | /torchprivacy/dp_query/normalized_query.py | UTF-8 | 2,733 | 2.640625 | 3 | [] | no_license | """Implements DPQuery interface for normalized queries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import torch
from torchprivacy.dp_query import dp_query
class NormalizedQuery(dp_query.DPQuery):
    """DPQuery that returns a DP numerator divided by a fixed denominator."""

    # pylint: disable=invalid-name
    _GlobalState = collections.namedtuple(
        '_GlobalState', ['numerator_state', 'denominator'])

    def __init__(self, numerator_query, denominator):
        """Initializer for NormalizedQuery.

        Args:
          numerator_query: A DPQuery for the numerator.
          denominator: A value for the denominator. May be None if it will be
            supplied via the set_denominator function before get_noised_result
            is called.
        """
        self._numerator = numerator_query
        self._denominator = denominator

    def set_ledger(self, ledger):
        """Forward the privacy ledger to the numerator query."""
        self._numerator.set_ledger(ledger)

    def initial_global_state(self):
        """Pair the numerator's initial state with the stored denominator."""
        numerator_state = self._numerator.initial_global_state()
        return self._GlobalState(numerator_state, self._denominator)

    def derive_sample_params(self, global_state):
        """Delegate parameter derivation to the numerator query."""
        return self._numerator.derive_sample_params(global_state.numerator_state)

    def initial_sample_state(self, template):
        """Delegate; there is no sample state beyond the numerator's."""
        return self._numerator.initial_sample_state(template)

    def preprocess_record(self, params, record):
        """Delegate record preprocessing to the numerator query."""
        return self._numerator.preprocess_record(params, record)

    def accumulate_preprocessed_record(
            self, sample_state, preprocessed_record):
        """Delegate accumulation to the numerator query."""
        return self._numerator.accumulate_preprocessed_record(
            sample_state, preprocessed_record)

    def get_noised_result(self, sample_state, global_state):
        """Noise the numerator, then divide it by the fixed denominator."""
        noised_sum, new_numerator_state = self._numerator.get_noised_result(
            sample_state, global_state.numerator_state)
        normalized = torch.div(noised_sum, global_state.denominator)
        new_state = self._GlobalState(
            new_numerator_state, global_state.denominator)
        return normalized, new_state

    def merge_sample_states(self, sample_state_1, sample_state_2):
        """Delegate sample-state merging to the numerator query."""
        return self._numerator.merge_sample_states(sample_state_1, sample_state_2)
| true |
d25a0327b15964456b337a31269e69fcbf1a8a79 | Python | openstack/yaql | /yaql/standard_library/collections.py | UTF-8 | 34,708 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functions that produce or consume finite collections - lists, dicts and sets.
"""
import itertools
from yaql.language import specs
from yaql.language import utils
from yaql.language import yaqltypes
import yaql.standard_library.queries
@specs.parameter('args', nullable=True)
@specs.inject('delegate', yaqltypes.Delegate('to_list', method=True))
def list_(delegate, *args):
""":yaql:list
Returns list of provided args and unpacks arg element if it's iterable.
:signature: list([args])
:arg [args]: arguments to create a list
:argType [args]: chain of any types
:returnType: list
.. code::
yaql> list(1, "", range(2))
[1, "", 0, 1]
"""
def rec(seq):
for t in seq:
if utils.is_iterator(t):
for t2 in rec(t):
yield t2
else:
yield t
return delegate(rec(args))
@specs.method
@specs.parameter('collection', yaqltypes.Iterable())
def flatten(collection):
""":yaql:flatten
Returns an iterator to the recursive traversal of collection.
:signature: collection.flatten()
:receiverArg collection: collection to be traversed
:argType collection: iterable
:returnType: list
.. code::
yaql> ["a", ["b", [2,3]]].flatten()
["a", "b", 2, 3]
"""
for t in collection:
if utils.is_iterable(t):
for t2 in flatten(t):
yield t2
else:
yield t
@specs.method
@specs.parameter('collection', yaqltypes.Iterable())
def to_list(collection):
""":yaql:toList
Returns list built from iterable.
:signature: collection.toList()
:receiverArg collection: collection to be transferred to list
:argType collection: iterable
:returnType: list
.. code::
yaql> range(3).toList()
[0, 1, 2]
"""
if isinstance(collection, tuple):
return collection
return tuple(collection)
@specs.parameter('args', nullable=True)
@specs.name('#list')
def build_list(engine, *args):
""":yaql:list
Returns list of provided args.
:signature: list([args])
:arg [args]: arguments to create a list
:argType [args]: any types
:returnType: list
.. code::
yaql> list(1, "", 2)
[1, "", 2]
"""
utils.limit_memory_usage(engine, *((1, t) for t in args))
return tuple(args)
@specs.no_kwargs
@specs.parameter('args', utils.MappingRule)
def dict_(engine, *args):
""":yaql:dict
Returns dictionary of provided keyword values.
:signature: dict([args])
:arg [args]: arguments to create a dictionary
:argType [args]: mappings
:returnType: dictionary
.. code::
yaql> dict(a => 1, b => 2)
{ "a": 1, "b": 2}
"""
utils.limit_memory_usage(engine, *((1, arg) for arg in args))
return utils.FrozenDict((t.source, t.destination) for t in args)
@specs.parameter('items', yaqltypes.Iterable())
@specs.no_kwargs
def dict__(items, engine):
    """:yaql:dict

    Builds a dictionary out of a collection of [key, value] pairs.

    :signature: dict(items)
    :arg items: list of pairs [key, value] for building dictionary
    :argType items: list
    :returnType: dictionary

    .. code::

        yaql> dict([["a", 2], ["b", 4]])
        {"a": 2, "b": 4}
    """
    result = {}
    for pair in items:
        pair_iter = iter(pair)
        # Take exactly the first two elements of each pair as key/value.
        key = pair_iter.__next__() if False else next(pair_iter)
        result[key] = next(pair_iter)
    utils.limit_memory_usage(engine, (1, result))
    return utils.FrozenDict(result)
@specs.parameter('collection', yaqltypes.Iterable())
@specs.parameter('key_selector', yaqltypes.Lambda())
@specs.parameter('value_selector', yaqltypes.Lambda())
@specs.method
def to_dict(collection, engine, key_selector, value_selector=None):
    """:yaql:dict

    Builds a dict from a collection: keys come from applying keySelector
    to each element, values from applying valueSelector.

    :signature: collection.toDict(keySelector, valueSelector => null)
    :receiverArg collection: collection to build dict from
    :argType collection: iterable
    :arg keySelector: lambda function to get keys from collection elements
    :argType keySelector: lambda
    :arg valueSelector: lambda function to get values from collection
        elements. null by default, which means values to be collection items
    :argType valueSelector: lambda
    :returnType: dictionary

    .. code::

        yaql> [1, 2].toDict($, $ + 1)
        {"1": 2, "2": 3}
    """
    if value_selector is None:
        # Identity selector: the values are the collection items themselves.
        value_selector = lambda item: item
    result = {}
    for item in collection:
        key = key_selector(item)
        result[key] = value_selector(item)
    utils.limit_memory_usage(engine, (1, result))
    return result
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.parameter('key', yaqltypes.Keyword())
@specs.name('#operator_.')
def dict_keyword_access(d, key):
""":yaql:operator .
Returns value of a dictionary by given key.
:signature: left.right
:arg left: input dictionary
:argType left: dictionary
:arg right: key
:argType right: keyword
:returnType: any (appropriate value type)
.. code::
yaql> {"a" => 1, "b" => 2}.a
1
"""
return d[key]
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.name('#indexer')
def dict_indexer(d, key):
""":yaql:operator indexer
Returns value of a dictionary by given key.
:signature: left[right]
:arg left: input dictionary
:argType left: dictionary
:arg right: key
:argType right: keyword
:returnType: any (appropriate value type)
.. code::
yaql> {"a" => 1, "b" => 2}["a"]
1
"""
return d[key]
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.name('#indexer')
def dict_indexer_with_default(d, key, default):
""":yaql:operator indexer
Returns value of a dictionary by given key or default if there is
no such key.
:signature: left[right, default]
:arg left: input dictionary
:argType left: dictionary
:arg right: key
:argType right: keyword
:arg default: default value to be returned if key is missing in dictionary
:argType default: any
:returnType: any (appropriate value type)
.. code::
yaql> {"a" => 1, "b" => 2}["c", 3]
3
"""
return d.get(key, default)
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.name('get')
@specs.method
def dict_get(d, key, default=None):
""":yaql:get
Returns value of a dictionary by given key or default if there is
no such key.
:signature: dict.get(key, default => null)
:receiverArg dict: input dictionary
:argType dict: dictionary
:arg key: key
:argType key: keyword
:arg default: default value to be returned if key is missing in dictionary.
null by default
:argType default: any
:returnType: any (appropriate value type)
.. code::
yaql> {"a" => 1, "b" => 2}.get("c")
null
yaql> {"a" => 1, "b" => 2}.get("c", 3)
3
"""
return d.get(key, default)
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.name('set')
@specs.method
@specs.no_kwargs
def dict_set(engine, d, key, value):
""":yaql:set
Returns dict with provided key set to value.
:signature: dict.set(key, value)
:receiverArg dict: input dictionary
:argType dict: dictionary
:arg key: key
:argType key: keyword
:arg value: value to be set to input key
:argType value: any
:returnType: dictionary
.. code::
yaql> {"a" => 1, "b" => 2}.set("c", 3)
{"a": 1, "b": 2, "c": 3}
yaql> {"a" => 1, "b" => 2}.set("b", 3)
{"a": 1, "b": 3}
"""
utils.limit_memory_usage(engine, (1, d), (1, key), (1, value))
return utils.FrozenDict(itertools.chain(d.items(), ((key, value),)))
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.parameter('replacements', utils.MappingType)
@specs.name('set')
@specs.method
@specs.no_kwargs
def dict_set_many(engine, d, replacements):
""":yaql:set
Returns dict with replacements keys set to replacements values.
:signature: dict.set(replacements)
:receiverArg dict: input dictionary
:argType dict: dictionary
:arg replacements: mapping with keys and values to be set on input dict
:argType key: dictionary
:returnType: dictionary
.. code::
yaql> {"a" => 1, "b" => 2}.set({"b" => 3, "c" => 4})
{"a": 1, "c": 4, "b": 3}
"""
utils.limit_memory_usage(engine, (1, d), (1, replacements))
return utils.FrozenDict(itertools.chain(d.items(), replacements.items()))
@specs.no_kwargs
@specs.method
@specs.parameter('args', utils.MappingRule)
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.name('set')
def dict_set_many_inline(engine, d, *args):
""":yaql:set
Returns dict with args keys set to args values.
:signature: dict.set([args])
:receiverArg dict: input dictionary
:argType dict: dictionary
:arg [args]: key-values to be set on input dict
:argType [args]: chain of mappings
:returnType: dictionary
.. code::
yaql> {"a" => 1, "b" => 2}.set("b" => 3, "c" => 4)
{"a": 1, "c": 4, "b": 3}
"""
utils.limit_memory_usage(engine, (1, d), *((1, arg) for arg in args))
return utils.FrozenDict(itertools.chain(
d.items(), ((t.source, t.destination) for t in args)))
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.name('keys')
@specs.method
def dict_keys(d):
""":yaql:keys
Returns an iterator over the dictionary keys.
:signature: dict.keys()
:receiverArg dict: input dictionary
:argType dict: dictionary
:returnType: iterator
.. code::
yaql> {"a" => 1, "b" => 2}.keys()
["a", "b"]
"""
return d.keys()
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.name('values')
@specs.method
def dict_values(d):
""":yaql:values
Returns an iterator over the dictionary values.
:signature: dict.values()
:receiverArg dict: input dictionary
:argType dict: dictionary
:returnType: iterator
.. code::
yaql> {"a" => 1, "b" => 2}.values()
[1, 2]
"""
return d.values()
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.name('items')
@specs.method
def dict_items(d):
""":yaql:items
Returns an iterator over pairs [key, value] of input dict.
:signature: dict.items()
:receiverArg dict: input dictionary
:argType dict: dictionary
:returnType: iterator
.. code::
yaql> {"a" => 1, "b" => 2}.items()
[["a", 1], ["b", 2]]
"""
return d.items()
@specs.parameter('lst', yaqltypes.Sequence(), alias='list')
@specs.parameter('index', int, nullable=False)
@specs.name('#indexer')
def list_indexer(lst, index):
""":yaql:operator indexer
Returns value of sequence by given index.
:signature: left[right]
:arg left: input sequence
:argType left: sequence
:arg right: index
:argType right: integer
:returnType: any (appropriate value type)
.. code::
yaql> ["a", "b"][0]
"a"
"""
return lst[index]
@specs.parameter('value', nullable=True)
@specs.parameter('collection', yaqltypes.Iterable())
@specs.name('#operator_in')
def in_(value, collection):
""":yaql:operator in
Returns true if there is at least one occurrence of value in collection,
false otherwise.
:signature: left in right
:arg left: value to be checked for occurrence
:argType left: any
:arg right: collection to find occurrence in
:argType right: iterable
:returnType: boolean
.. code::
yaql> "a" in ["a", "b"]
true
"""
return value in collection
@specs.parameter('value', nullable=True)
@specs.parameter('collection', yaqltypes.Iterable())
@specs.method
def contains(collection, value):
""":yaql:contains
Returns true if value is contained in collection, false otherwise.
:signature: collection.contains(value)
:receiverArg collection: collection to find occurrence in
:argType collection: iterable
:arg value: value to be checked for occurrence
:argType value: any
:returnType: boolean
.. code::
yaql> ["a", "b"].contains("a")
true
"""
return value in collection
@specs.parameter('key', nullable=True)
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.method
def contains_key(d, key):
""":yaql:containsKey
Returns true if the dictionary contains the key, false otherwise.
:signature: dict.containsKey(key)
:receiverArg dict: dictionary to find occurrence in
:argType dict: mapping
:arg key: value to be checked for occurrence
:argType key: any
:returnType: boolean
.. code::
yaql> {"a" => 1, "b" => 2}.containsKey("a")
true
"""
return key in d
@specs.parameter('value', nullable=True)
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.method
def contains_value(d, value):
""":yaql:containsValue
Returns true if the dictionary contains the value, false otherwise.
:signature: dict.containsValue(value)
:receiverArg dict: dictionary to find occurrence in
:argType dict: mapping
:arg value: value to be checked for occurrence
:argType value: any
:returnType: boolean
.. code::
yaql> {"a" => 1, "b" => 2}.containsValue("a")
false
yaql> {"a" => 1, "b" => 2}.containsValue(2)
true
"""
return value in d.values()
@specs.parameter('left', yaqltypes.Iterable())
@specs.parameter('right', yaqltypes.Iterable())
@specs.name('#operator_+')
def combine_lists(left, right, engine):
""":yaql:operator +
Returns two iterables concatenated.
:signature: left + right
:arg left: left list
:argType left: iterable
:arg right: right list
:argType right: iterable
:returnType: iterable
.. code::
yaql> [1, 2] + [3]
[1, 2, 3]
"""
if isinstance(left, tuple) and isinstance(right, tuple):
utils.limit_memory_usage(engine, (1, left), (1, right))
return left + right
elif isinstance(left, frozenset) and isinstance(right, frozenset):
utils.limit_memory_usage(engine, (1, left), (1, right))
return left.union(right)
return yaql.standard_library.queries.concat(left, right)
@specs.parameter('left', yaqltypes.Sequence())
@specs.parameter('right', int)
@specs.name('#operator_*')
def list_by_int(left, right, engine):
""":yaql:operator *
Returns sequence repeated count times.
:signature: left * right
:arg left: input sequence
:argType left: sequence
:arg right: multiplier
:argType right: integer
:returnType: sequence
.. code::
yaql> [1, 2] * 2
[1, 2, 1, 2]
"""
utils.limit_memory_usage(engine, (-right + 1, []), (right, left))
return left * right
@specs.parameter('left', int)
@specs.parameter('right', yaqltypes.Sequence())
@specs.name('#operator_*')
def int_by_list(left, right, engine):
""":yaql:operator *
Returns sequence repeated count times.
:signature: left * right
:arg left: multiplier
:argType left: integer
:arg right: input sequence
:argType right: sequence
:returnType: sequence
.. code::
yaql> 2 * [1, 2]
[1, 2, 1, 2]
"""
return list_by_int(right, left, engine)
@specs.parameter('left', utils.MappingType)
@specs.parameter('right', utils.MappingType)
@specs.name('#operator_+')
def combine_dicts(left, right, engine):
    """:yaql:operator +

    Merges the left and right dictionaries into a new one; on duplicate
    keys the right-hand value wins.

    :signature: left + right
    :arg left: left dictionary
    :argType left: mapping
    :arg right: right dictionary
    :argType right: mapping
    :returnType: mapping

    .. code::

        yaql> {"a" => 1, b => 2} + {"b" => 3, "c" => 4}
        {"a": 1, "c": 4, "b": 3}
    """
    utils.limit_memory_usage(engine, (1, left), (1, right))
    # Later pairs overwrite earlier ones, so right takes precedence.
    merged = dict(itertools.chain(left.items(), right.items()))
    return utils.FrozenDict(merged)
def is_list(arg):
""":yaql:isList
Returns true if arg is a list, false otherwise.
:signature: isList(arg)
:arg arg: value to be checked
:argType arg: any
:returnType: boolean
.. code::
yaql> isList([1, 2])
true
yaql> isList({"a" => 1})
false
"""
return utils.is_sequence(arg)
def is_dict(arg):
""":yaql:isDict
Returns true if arg is dictionary, false otherwise.
:signature: isDict(arg)
:arg arg: value to be checked
:argType arg: any
:returnType: boolean
.. code::
yaql> isDict([1, 2])
false
yaql> isDict({"a" => 1})
true
"""
return isinstance(arg, utils.MappingType)
def is_set(arg):
""":yaql:isSet
Returns true if arg is set, false otherwise.
:signature: isSet(arg)
:arg arg: value to be checked
:argType arg: any
:returnType: boolean
.. code::
yaql> isSet({"a" => 1})
false
yaql> isSet(set(1, 2))
true
"""
return isinstance(arg, utils.SetType)
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.extension_method
@specs.name('len')
def dict_len(d):
""":yaql:len
Returns size of the dictionary.
:signature: dict.len()
:receiverArg dict: input dictionary
:argType dict: mapping
:returnType: integer
.. code::
yaql> {"a" => 1, "b" => 2}.len()
2
"""
return len(d)
@specs.parameter('sequence', yaqltypes.Sequence())
@specs.extension_method
@specs.name('len')
def sequence_len(sequence):
""":yaql:len
Returns length of the list.
:signature: sequence.len()
:receiverArg sequence: input sequence
:argType dict: sequence
:returnType: integer
.. code::
yaql> [0, 1, 2].len()
3
"""
return len(sequence)
@specs.method
@specs.parameter('collection', yaqltypes.Iterable())
@specs.parameter('position', int)
@specs.parameter('count', int)
def delete(collection, position, count=1):
    """:yaql:delete

    Lazily yields the collection with elements in the range
    [position, position+count) removed; a negative count removes
    everything from position onward.

    :signature: collection.delete(position, count => 1)
    :receiverArg collection: input collection
    :argType collection: iterable
    :arg position: index to start remove
    :argType position: integer
    :arg count: how many elements to remove, 1 by default
    :argType position: integer
    :returnType: iterable

    .. code::

        yaql> [0, 1, 3, 4, 2].delete(2, 2)
        [0, 1, 2]
    """
    for index, item in enumerate(collection):
        if count < 0:
            removed = index >= position
        else:
            removed = position <= index < position + count
        if not removed:
            yield item
@specs.method
@specs.parameter('collection', yaqltypes.Iterable())
@specs.parameter('position', int)
@specs.parameter('count', int)
def replace(collection, position, value, count=1):
    """:yaql:replace

    Lazily yields the collection with the [position, position+count)
    slice collapsed into a single value; a negative count replaces
    everything from position onward.

    :signature: collection.replace(position, value, count => 1)
    :receiverArg collection: input collection
    :argType collection: iterable
    :arg position: index to start replace
    :argType position: integer
    :arg value: value to be replaced with
    :argType value: any
    :arg count: how many elements to replace, 1 by default
    :argType count: integer
    :returnType: iterable

    .. code::

        yaql> [0, 1, 3, 4, 2].replace(2, 100, 2)
        [0, 1, 100, 2]
    """
    replaced = False
    for index, item in enumerate(collection):
        in_range = (index >= position if count < 0
                    else position <= index < position + count)
        if not in_range:
            yield item
        elif not replaced:
            # Emit the replacement only once, for the whole replaced slice.
            replaced = True
            yield value
@specs.method
@specs.parameter('collection', yaqltypes.Iterable())
@specs.parameter('position', int)
@specs.parameter('count', int)
@specs.parameter('values', yaqltypes.Iterable())
def replace_many(collection, position, values, count=1):
""":yaql:replaceMany
Returns collection where [position, position+count) elements are replaced
with values items.
:signature: collection.replaceMany(position, values, count => 1)
:receiverArg collection: input collection
:argType collection: iterable
:arg position: index to start replace
:argType position: integer
:arg values: items to replace
:argType values: iterable
:arg count: how many elements to replace, 1 by default
:argType count: integer
:returnType: iterable
.. code::
yaql> [0, 1, 3, 4, 2].replaceMany(2, [100, 200], 2)
[0, 1, 100, 200, 2]
"""
yielded = False
for i, t in enumerate(collection):
if (count >= 0 and position <= i < position + count
or count < 0 and i >= position):
if not yielded:
for v in values:
yield v
yielded = True
else:
yield t
@specs.method
@specs.name('delete')
@specs.parameter('d', utils.MappingType, alias='dict')
def delete_keys(d, *keys):
""":yaql:delete
Returns dict with keys removed.
:signature: dict.delete([args])
:receiverArg dict: input dictionary
:argType dict: mapping
:arg [args]: keys to be removed from dictionary
:argType [args]: chain of keywords
:returnType: mapping
.. code::
yaql> {"a" => 1, "b" => 2, "c" => 3}.delete("a", "c")
{"b": 2}
"""
return delete_keys_seq(d, keys)
@specs.method
@specs.name('deleteAll')
@specs.parameter('d', utils.MappingType, alias='dict')
@specs.parameter('keys', yaqltypes.Iterable())
def delete_keys_seq(d, keys):
    """:yaql:deleteAll

    Returns a dict with the given keys excluded. Keys are provided as an
    iterable collection; keys absent from the dict are ignored.

    :signature: dict.deleteAll(keys)
    :receiverArg dict: input dictionary
    :argType dict: mapping
    :arg keys: keys to be removed from dictionary
    :argType keys: iterable
    :returnType: mapping

    .. code::

        yaql> {"a" => 1, "b" => 2, "c" => 3}.deleteAll(["a", "c"])
        {"b": 2}
    """
    # Materialize keys first: the argument may be a one-shot iterator and
    # set membership gives O(1) lookups while filtering.
    to_remove = set(keys)
    return {k: v for k, v in d.items() if k not in to_remove}
@specs.method
@specs.parameter('collection', yaqltypes.Iterable(validators=[
    lambda x: not isinstance(x, utils.SetType)]
))
@specs.parameter('value', nullable=True)
@specs.parameter('position', int)
@specs.name('insert')
def iter_insert(collection, position, value):
    """:yaql:insert

    Returns collection with inserted value at the given position.

    :signature: collection.insert(position, value)
    :receiverArg collection: input collection
    :argType collection: iterable
    :arg position: index for insertion. value is inserted in the end if
        position is greater than collection size and at the beginning if
        position is negative
    :argType position: integer
    :arg value: value to be inserted
    :argType value: any
    :returnType: iterable

    .. code::

        yaql> [0, 1, 3].insert(2, 2)
        [0, 1, 2, 3]
    """
    i = -1
    # Bug fix: a negative position used to drop the value entirely (it was
    # never yielded). Prepend it instead, mirroring insert_many().
    if position < 0:
        yield value
    for i, t in enumerate(collection):
        if i == position:
            yield value
        yield t
    # Position beyond the last index (also covers an empty collection with
    # a non-negative position): append the value at the end.
    if position > i:
        yield value
@specs.method
@specs.parameter('collection', yaqltypes.Sequence())
@specs.parameter('value', nullable=True)
@specs.parameter('position', int)
@specs.name('insert')
def list_insert(collection, position, value):
""":yaql:insert
Returns collection with inserted value at the given position.
:signature: collection.insert(position, value)
:receiverArg collection: input collection
:argType collection: sequence
:arg position: index for insertion. value is inserted in the end if
position greater than collection size
:argType position: integer
:arg value: value to be inserted
:argType value: any
:returnType: sequence
.. code::
yaql> [0, 1, 3].insert(2, 2)
[0, 1, 2, 3]
"""
copy = list(collection)
copy.insert(position, value)
return copy
@specs.method
@specs.parameter('collection', yaqltypes.Iterable())
@specs.parameter('values', yaqltypes.Iterable())
@specs.parameter('position', int)
def insert_many(collection, position, values):
    """:yaql:insertMany
    Returns collection with inserted values at the given position.
    :signature: collection.insertMany(position, values)
    :receiverArg collection: input collection
    :argType collection: iterable
    :arg position: index for insertion. value is inserted in the end if
        position greater than collection size and at the beginning if
        position is negative
    :argType position: integer
    :arg values: items to be inserted
    :argType values: iterable
    :returnType: iterable
    .. code::

        yaql> [0, 1, 3].insertMany(2, [2, 22])
        [0, 1, 2, 22, 3]
    """
    # Lazy generator: exactly one of the three branches below consumes
    # `values`, so a one-shot iterator argument is still handled correctly.
    i = -1
    # A negative position prepends all values before any collection items.
    if position < 0:
        for j in values:
            yield j
    for i, t in enumerate(collection):
        # At the insertion index, emit the new values before the element.
        if i == position:
            for j in values:
                yield j
        yield t
    # Position past the last index (also covers an empty collection with a
    # non-negative position): append the values at the end.
    if position > i:
        for j in values:
            yield j
@specs.parameter('s', utils.SetType, alias='set')
@specs.extension_method
@specs.name('len')
def set_len(s):
""":yaql:len
Returns size of the set.
:signature: set.len()
:receiverArg set: input set
:argType set: set
:returnType: integer
.. code::
yaql> set(0, 1, 2).len()
3
"""
return len(s)
@specs.parameter('args', nullable=True)
@specs.inject('delegate', yaqltypes.Delegate('to_set', method=True))
def set_(delegate, *args):
""":yaql:set
Returns set initialized with args.
:signature: set([args])
:arg [args]: args to build a set
:argType [args]: chain of any type
:returnType: set
.. code::
yaql> set(0, "", [1, 2])
[0, "", [1, 2]]
"""
def rec(seq):
for t in seq:
if utils.is_iterator(t):
for t2 in rec(t):
yield t2
else:
yield t
return delegate(rec(args))
@specs.method
@specs.parameter('collection', yaqltypes.Iterable())
def to_set(collection):
""":yaql:toSet
Returns set built from iterable.
:signature: collection.toSet()
:receiverArg collection: collection to build a set
:argType collection: iterable
:returnType: set
.. code::
yaql> [0, 1, 1, 2].toSet()
[0, 1, 2]
"""
return frozenset(collection)
@specs.parameter('left', utils.SetType)
@specs.parameter('right', utils.SetType)
@specs.method
def union(left, right):
""":yaql:union
Returns union of two sets.
:signature: left.union(right)
:receiverArg left: input set
:argType left: set
:arg right: input set
:argType right: set
:returnType: set
.. code::
yaql> set(0, 1).union(set(1, 2))
[0, 1, 2]
"""
return left.union(right)
@specs.parameter('left', utils.SetType)
@specs.parameter('right', utils.SetType)
@specs.name('#operator_<')
def set_lt(left, right):
""":yaql:operator <
Returns true if left set is subset of right set and left size is strictly
less than right size, false otherwise.
:signature: left < right
:arg left: left set
:argType left: set
:arg right: right set
:argType right: set
:returnType: boolean
.. code::
yaql> set(0) < set(0, 1)
true
"""
return left < right
@specs.parameter('left', utils.SetType)
@specs.parameter('right', utils.SetType)
@specs.name('#operator_<=')
def set_lte(left, right):
""":yaql:operator <=
Returns true if left set is subset of right set.
:signature: left <= right
:arg left: left set
:argType left: set
:arg right: right set
:argType right: set
:returnType: boolean
.. code::
yaql> set(0, 1) <= set(0, 1)
true
"""
return left <= right
@specs.parameter('left', utils.SetType)
@specs.parameter('right', utils.SetType)
@specs.name('#operator_>=')
def set_gte(left, right):
""":yaql:operator >=
Returns true if right set is subset of left set.
:signature: left >= right
:arg left: left set
:argType left: set
:arg right: right set
:argType right: set
:returnType: boolean
.. code::
yaql> set(0, 1) >= set(0, 1)
true
"""
return left >= right
@specs.parameter('left', utils.SetType)
@specs.parameter('right', utils.SetType)
@specs.name('#operator_>')
def set_gt(left, right):
""":yaql:operator >
Returns true if right set is subset of left set and left size is strictly
greater than right size, false otherwise.
:signature: left > right
:arg left: left set
:argType left: set
:arg right: right set
:argType right: set
:returnType: boolean
.. code::
yaql> set(0, 1) > set(0, 1)
false
"""
return left > right
@specs.parameter('left', utils.SetType)
@specs.parameter('right', utils.SetType)
@specs.method
def intersect(left, right):
""":yaql:intersect
Returns set with elements common to left and right sets.
:signature: left.intersect(right)
:receiverArg left: left set
:argType left: set
:arg right: right set
:argType right: set
:returnType: set
.. code::
yaql> set(0, 1, 2).intersect(set(0, 1))
[0, 1]
"""
return left.intersection(right)
@specs.parameter('left', utils.SetType)
@specs.parameter('right', utils.SetType)
@specs.method
def difference(left, right):
""":yaql:difference
Return the difference of left and right sets as a new set.
:signature: left.difference(right)
:receiverArg left: left set
:argType left: set
:arg right: right set
:argType right: set
:returnType: set
.. code::
yaql> set(0, 1, 2).difference(set(0, 1))
[2]
"""
return left.difference(right)
@specs.parameter('left', utils.SetType)
@specs.parameter('right', utils.SetType)
@specs.method
def symmetric_difference(left, right):
""":yaql:symmetricDifference
Returns symmetric difference of left and right sets as a new set.
:signature: left.symmetricDifference(right)
:receiverArg left: left set
:argType left: set
:arg right: right set
:argType right: set
:returnType: set
.. code::
yaql> set(0, 1, 2).symmetricDifference(set(0, 1, 3))
[2, 3]
"""
return left.symmetric_difference(right)
@specs.parameter('s', utils.SetType, alias='set')
@specs.method
@specs.name('add')
def set_add(s, *values):
""":yaql:add
Returns a new set with added args.
:signature: set.add([args])
:receiverArg set: input set
:argType set: set
:arg [args]: values to be added to set
:argType [args]: chain of any type
:returnType: set
.. code::
yaql> set(0, 1).add("", [1, 2, 3])
[0, 1, "", [1, 2, 3]]
"""
return s.union(frozenset(values))
@specs.parameter('s', utils.SetType, alias='set')
@specs.method
@specs.name('remove')
def set_remove(s, *values):
""":yaql:remove
Returns the set with excluded values provided in arguments.
:signature: set.remove([args])
:receiverArg set: input set
:argType set: set
:arg [args]: values to be removed from set
:argType [args]: chain of any type
:returnType: set
.. code::
yaql> set(0, 1, "", [1, 2, 3]).remove("", 0, [1, 2, 3])
[1]
"""
return s.difference(frozenset(values))
def register(context, no_sets=False):
context.register_function(list_)
context.register_function(build_list)
context.register_function(to_list)
context.register_function(flatten)
context.register_function(list_indexer)
context.register_function(dict_)
context.register_function(dict_, name='#map')
context.register_function(dict__)
context.register_function(to_dict)
context.register_function(dict_keyword_access)
context.register_function(dict_indexer)
context.register_function(dict_indexer_with_default)
context.register_function(dict_get)
context.register_function(dict_set)
context.register_function(dict_set_many)
context.register_function(dict_set_many_inline)
context.register_function(dict_keys)
context.register_function(dict_values)
context.register_function(dict_items)
context.register_function(in_)
context.register_function(contains_key)
context.register_function(contains_value)
context.register_function(combine_lists)
context.register_function(list_by_int)
context.register_function(int_by_list)
context.register_function(combine_dicts)
context.register_function(is_dict)
context.register_function(is_list)
context.register_function(dict_len)
context.register_function(sequence_len)
context.register_function(delete)
context.register_function(delete_keys)
context.register_function(delete_keys_seq)
context.register_function(iter_insert)
context.register_function(list_insert)
context.register_function(replace)
context.register_function(replace_many)
context.register_function(insert_many)
context.register_function(contains)
if not no_sets:
context.register_function(is_set)
context.register_function(set_)
context.register_function(to_set)
context.register_function(set_len)
context.register_function(set_lt)
context.register_function(set_lte)
context.register_function(set_gt)
context.register_function(set_gte)
context.register_function(set_add)
context.register_function(set_remove)
context.register_function(union)
context.register_function(intersect)
context.register_function(difference)
context.register_function(
difference, name='#operator_-', function=True, method=False)
context.register_function(symmetric_difference)
| true |
5e8333d4262466b5ba92d713c52903d880837337 | Python | rodolfopfranco/python_projects | /Fractions/Fracao.py | UTF-8 | 2,674 | 3.328125 | 3 | [
"CC0-1.0"
] | permissive | from Maths import Maths
class Fracao:
    """A rational number stored as an integer numerator/denominator pair.

    Instances are normalized on construction: the denominator is kept
    positive (the sign lives on the numerator) and the pair is reduced
    via simplifica().
    """

    def __init__(self, num=1, den=1):
        """Create the fraction num/den.

        :param num: integer numerator (default 1)
        :param den: integer denominator (default 1)
        :raises ValueError: if den is zero
        """
        if den == 0:
            raise ValueError("Zero denominator")
        self.num = num
        self.den = den
        if self.den < 0:  # negative? Then move the sign to the numerator
            self.num = -self.num
            self.den = -self.den
        self.simplifica()

    def __eq__(self, f):
        """Fractions are equal when their reduced forms match."""
        a = self.simplifica()
        b = f.simplifica()
        return a.num == b.num and a.den == b.den

    def __add__(self, f):
        """Return a new Fracao equal to self + f (f is int or Fracao)."""
        if isinstance(f, int):
            num = self.num + f * self.den
            den = self.den
        elif isinstance(f, Fracao):
            den = self.den * f.den
            num = self.num * f.den + self.den * f.num
        else:
            raise TypeError("__add__")
        return Fracao(num, den)

    def __iadd__(self, f):
        """In-place self += f for int or Fracao operands.

        Bug fix: the cross-multiplied numerator must be computed from the
        *original* denominator. The previous code ran ``self.den *= f.den``
        first, so e.g. 1/2 += 1/3 produced 9/6 (= 3/2) instead of 5/6.
        """
        if isinstance(f, int):
            self.num += f * self.den
        elif isinstance(f, Fracao):
            # Compute the new numerator before touching self.den.
            self.num = self.num * f.den + self.den * f.num
            self.den *= f.den
        else:
            raise TypeError("__iadd__")
        return self.simplifica()

    def __sub__(self, f):
        """Return a new Fracao equal to self - f (f is int or Fracao)."""
        if isinstance(f, int):
            num = self.num - f * self.den
            den = self.den
        elif isinstance(f, Fracao):
            den = self.den * f.den
            num = self.num * f.den - self.den * f.num
        else:
            raise TypeError("__sub__")
        return Fracao(num, den)

    def __mul__(self, f):
        """Return a new Fracao equal to self * f (f is int or Fracao)."""
        if isinstance(f, int):
            num = self.num * f
            den = self.den
        elif isinstance(f, Fracao):
            num = self.num * f.num
            den = self.den * f.den
        else:
            raise TypeError("__mul__")
        return Fracao(num, den)

    def __truediv__(self, f):
        """Return a new Fracao equal to self / f (f is int or Fracao).

        Division by a zero fraction surfaces as the ValueError raised by
        the Fracao constructor (zero denominator).
        """
        if isinstance(f, int):
            num = self.num
            den = self.den * f
        elif isinstance(f, Fracao):
            num = self.num * f.den
            den = self.den * f.num
        else:
            raise TypeError("__truediv__")
        return Fracao(num, den)

    def __str__(self):
        """Render as "num/den", plain "num" for whole numbers, "0" for zero."""
        if self.num == 0:
            return "0"
        if self.den == 1:
            return str(self.num)
        return str(self.num) + '/' + str(self.den)

    def simplifica(self):
        """Reduce num/den in place by their gcd and return self."""
        divisor = 1  # renamed from `max`, which shadowed the builtin
        m = Maths()
        if self.num != 0:
            divisor = m.mdc(self.num, self.den)
        if divisor > 1:
            self.num //= divisor
            self.den //= divisor
        return self
550b10c20b5379dca0bed95eaf9d9cd6a2b122ac | Python | ccubed/Dyslexml | /dyslexml/todict.py | UTF-8 | 3,288 | 3.328125 | 3 | [
"MIT"
] | permissive | import io
import xml.etree.ElementTree as ET
def parse(data):
    """
    Given a file like object or xml string, return a dictionary.

    :param data: File like object or xml string
    :return: Parsed XML as Dictionary
    :rtype: dict
    """
    if isinstance(data, io.IOBase):
        xml_text = data.read()
    elif isinstance(data, str):
        xml_text = data
    else:
        raise TypeError("Dyslexml requires a string or file like object.")
    return __parse(ET.fromstring(xml_text))
def __parse(root):
    """
    Parse an XML document into a resulting dictionary.

    :param root: An Element or an ElementTree instance
    :return: Dict keyed by the root tag
    :rtype: dict
    """
    base = root if isinstance(root, ET.Element) else root.getroot()
    node = {'type': 'root', 'children': {}, 'attributes': None}
    result = {base.tag: node}
    if base.attrib:
        # Copy the attribute mapping into a plain dict.
        node['attributes'] = dict(base.attrib)
    # NOTE(review): root-level children sharing a tag overwrite each other
    # here (unlike add_child, which collects duplicates into a 'node' list).
    for child in base:
        node['children'][child.tag] = build_child(child)[child.tag]
    return result
def build_child(element):
    """
    Build a resulting child object given the child element.

    :param element: An XML element
    :return: The Child Element
    :rtype: dict
    """
    # BUGFIX: element.text is None for empty elements (e.g. <a/>), so the
    # old unconditional element.text.strip() raised AttributeError.
    text = (element.text.strip() or None) if element.text is not None else None
    child_object = {
        element.tag: {'type': 'child', 'children': None, 'attributes': None, 'value': text}}
    if element.attrib:
        child_object[element.tag]['attributes'] = {}
        for item in element.attrib:
            if "{" in item:
                # Strip the "{namespace-uri}" prefix from namespaced attributes.
                child_object[element.tag]['attributes'][item.split("}")[1]] = element.attrib[item]
            else:
                child_object[element.tag]['attributes'][item] = element.attrib[item]
    for child in element:
        add_child(build_child(child), child_object, element.tag)
    return child_object
def add_child(child, dictionary, root_tag):
    """
    Attach a parsed child to dictionary[root_tag]['children'], promoting a
    repeated tag into a {'type': 'node', 'children': [...]} accumulator so
    siblings with the same tag are all kept.

    :param child: The parsed Child object from an XML element
    :param dict dictionary: The dictionary so far
    :param str root_tag: The name of the root element
    :return: None
    :rtype: NoneType
    """
    tag = next(iter(child))
    payload = child[tag]
    children = dictionary[root_tag]['children']
    if not children:
        # First child under this parent: start a fresh children mapping.
        dictionary[root_tag]['children'] = {tag: payload}
        return
    existing = children.get(tag)
    if existing is None:
        children[tag] = payload
    elif existing.get('type', None) == "node":
        # Already promoted: just append the new sibling.
        existing['children'].append(payload)
    else:
        # Second occurrence of this tag: promote to a node list.
        children[tag] = {'type': 'node', 'children': [existing, payload]}
| true |
eb20daedf889168407223d595743f612a8b303d3 | Python | williampmb/people-counting | /Blob.py | UTF-8 | 6,356 | 2.8125 | 3 | [] | no_license | import math
import cv2
from Point import Point
class Blob:
    """A tracked contour: bounding box, center history and motion prediction.

    Class-level `conf_*` thresholds are meant to be configured by the caller
    before isObject() is used; `id` doubles as a global id counter (note it
    is shadowed by the per-instance `self.id`).
    """

    id = 0
    conf_area = 0
    conf_min_aspect_ratio = 0
    conf_max_aspect_ratio = 0
    conf_width = 0
    conf_height = 0
    conf_diagonal_size = 0
    conf_contour_area_by_area = 0

    @staticmethod
    def getId():
        """Return the next global blob id (1-based, monotonically increasing)."""
        Blob.id += 1
        return Blob.id

    def __init__(self, contour):
        """Initialise tracking state from an OpenCV contour."""
        self.id = -1
        self.contour = contour
        self.set_bounding_rect(contour)
        # Center of the bounding box: (x + x + w)/2, (y + y + h)/2.
        self.center = Point(((self.position.x + self.position.x + self.width) / 2),
                            ((self.position.y + self.position.y + self.height) / 2))
        self.centerPositions = [Point(self.center.x, self.center.y)]
        self.predictedNextPosition = self.predictNextPosition()
        self.isStillBeingTracked = True
        self.isMatchFoundOrNewBlob = True
        self.numbOfConsecutiveFramesWithoutAMatch = 0

    def set_bounding_rect(self, contour):
        """Recompute bounding box, area, diagonal and aspect ratio from contour."""
        x, y, w, h = cv2.boundingRect(contour)
        self.position = Point(x, y)
        self.width = w
        self.height = h
        self.area = self.width * self.height
        self.diagonalSize = math.sqrt(math.pow(self.width, 2) + math.pow(self.height, 2))
        self.aspectRatio = float(self.width) / float(self.height)

    def set_contour(self, contour):
        """Replace the stored contour (bounding data is NOT recomputed)."""
        self.contour = contour

    def get_contour(self):
        """Return the stored contour."""
        return self.contour

    def __str__(self):
        """Debug summary of geometry, recent centers and tracking flags."""
        # Show at most the last 5 recorded centers.
        times = 0
        if len(self.centerPositions) >= 5:
            times = 5
        else:
            times = len(self.centerPositions) - 1
        # NOTE(review): range(times) starts at 0, and index -0 == 0, so the
        # first element shown is the OLDEST center; kept for output parity.
        return (" id: " + str(self.id) +
                " (x,y):" + str(self.position.x) + "," + str(self.position.y) +
                " (w,h):" + str(self.width) + "," + str(self.height) +
                " area:" + str(self.area) +
                " AS:" + str("%.2f" % self.aspectRatio) +
                " DS: " + str("%.2f" % self.diagonalSize) +
                " area(contour)/area=" + str("%.2f" % (cv2.contourArea(self.contour) / float(self.area))) +
                " center: " + ' '.join(str(self.centerPositions[-i].x) + "," + str(self.centerPositions[-i].y) for i in range(times)) +
                " tracked: " + str(self.isStillBeingTracked) +
                " w/tmatch: " + str(self.numbOfConsecutiveFramesWithoutAMatch)
                )

    def predictNextPosition(self):
        """Predict the next center from a recency-weighted mean of recent deltas.

        Uses up to the last 4 position deltas with linear weights (newest
        heaviest) — the same scheme the original branch-per-count code spelled
        out for histories of 2, 3, 4 and 5+ points, generalized to one loop.
        Improvements over the original: self.predictedNextPosition is now
        updated for histories of length 1 and 2 as well (the old early
        returns skipped it), and the single-delta case is rounded like the
        others.
        """
        numPos = len(self.centerPositions)
        predicted = Point(self.centerPositions[-1].x, self.centerPositions[-1].y)
        if numPos > 1:
            m = min(numPos - 1, 4)            # number of deltas to use
            total_weight = m * (m + 1) // 2   # 1, 3, 6 or 10
            sumChangesX = 0
            sumChangesY = 0
            for k in range(1, m + 1):
                weight = m + 1 - k            # newest delta gets weight m
                sumChangesX += weight * (self.centerPositions[-k].x - self.centerPositions[-k - 1].x)
                sumChangesY += weight * (self.centerPositions[-k].y - self.centerPositions[-k - 1].y)
            predicted.x += int(round(float(sumChangesX) / total_weight))
            predicted.y += int(round(float(sumChangesY) / total_weight))
        self.predictedNextPosition = predicted
        return predicted

    def isObject(self):
        """True when the blob passes every configured detection threshold."""
        if (self.area > Blob.conf_area and
                self.aspectRatio >= Blob.conf_min_aspect_ratio and
                self.aspectRatio <= Blob.conf_max_aspect_ratio and
                self.width > Blob.conf_width and
                self.height > Blob.conf_height and
                self.diagonalSize > Blob.conf_diagonal_size and
                cv2.contourArea(self.contour) / float(self.area) > Blob.conf_contour_area_by_area
                ):
            return True
        return False
| true |
0c72c3eed3a9bec20c27aa780764f4800aa8e5d1 | Python | hanseuljun/unity-rotation-conversion | /python/test_unity_rotation.py | UTF-8 | 1,382 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python3
import unity_rotation
def convert_words(words):
    """Convert every token in `words` to float in place and return the list."""
    for index, token in enumerate(words):
        words[index] = float(token)
    return words
def convert_line(line):
    """Split one test-case line into four float lists.

    Expected format (slash-separated, each field comma-separated):
    quaternion / euler / look_at / look_up.
    """
    fields = line.split('/')
    quaternion = convert_words(fields[0].split(','))
    euler = convert_words(fields[1].split(','))
    look_at = convert_words(fields[2].split(','))
    look_up = convert_words(fields[3].split(','))
    return quaternion, euler, look_at, look_up
def main():
    """Run every case in ../test_case.txt through unity_rotation, printing
    each expected value next to the computed one for manual comparison."""
    with open('../test_case.txt', 'r') as case_file:
        for case in case_file:
            quaternion, euler, look_at, look_up = convert_line(case)
            qw, qx, qy, qz = unity_rotation.convert_euler_to_quaternion(*euler)
            print('quaternion: {}, {}, {}, {}'.format(*quaternion))
            print('q: {}, {}, {}, {}'.format(qw, qx, qy, qz))
            lax, lay, laz = unity_rotation.convert_quaternion_to_look_at(qw, qx, qy, qz)
            print('look_at: {}, {}, {}'.format(*look_at))
            print('la: {}, {}, {}'.format(lax, lay, laz))
            lux, luy, luz = unity_rotation.convert_quaternion_to_look_up(qw, qx, qy, qz)
            print('look_up: {}, {}, {}'.format(*look_up))
            print('lu: {}, {}, {}'.format(lux, luy, luz))
# Run the comparison script when executed directly.
if __name__ == '__main__':
    main()
7d6b26cec526f08206fa0463413caf654dd45252 | Python | PrabhatML/CodeSnippets | /algorithms/datastructures/lru.py | UTF-8 | 868 | 3.796875 | 4 | [] | no_license | from collections import OrderedDict
# https://www.geeksforgeeks.org/lru-cache-in-python-using-ordereddict/
class LRUCache():
def __init__(self,capacity):
self.cache = OrderedDict()
self.capacity = capacity
def get(self, key):
if key not in self.cache:
return -1
else:
return self.cache[key]
def put(self,key, value):
self.cache[key] = value
self.cache.move_to_end(key)
if len(self.cache) > self.capacity:
self.cache.popitem(last=False)
cache = LRUCache(2)
cache.put(1, 1)
print(cache.cache)
cache.put(2, 2)
print(cache.cache)
cache.get(1)
print(cache.cache)
cache.put(3, 3)
print(cache.cache)
cache.get(2)
print(cache.cache)
cache.put(4, 4)
print(cache.cache)
cache.get(1)
print(cache.cache)
cache.get(3)
print(cache.cache)
cache.get(4)
print(cache.cache) | true |
c71c9535575edef957a79494e187ea4f43dbb218 | Python | Papillon-Nebula/case | /14_GMM.py | UTF-8 | 1,280 | 2.875 | 3 | [] | no_license | import sys
sys.path.append('./')
from sklearn.mixture import GaussianMixture
import pandas as pd
import csv
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import StandardScaler
gmm = GaussianMixture(n_components=1, covariance_type='full', max_iter=100)
# 数据加载,避免中文乱码问题
data_ori = pd.read_csv('ML/case/datas/heros.csv', encoding = 'gb18030')
features = [u'最大生命',u'生命成长',u'初始生命',u'最大法力', u'法力成长',u'初始法力',u'最高物攻',u'物攻成长',u'初始物攻',u'最大物防',u'物防成长',u'初始物防', u'最大每5秒回血', u'每5秒回血成长', u'初始每5秒回血', u'最大每5秒回蓝', u'每5秒回蓝成长', u'初始每5秒回蓝', u'最大攻速', u'攻击范围']
data = data_ori[features]
# 对英雄属性之间的关系进行可视化分析
# 设置plt正确显示中文
plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签
plt.rcParams['axes.unicode_minus']=False #用来正常显示负号
# 用热力图呈现features_mean字段之间的相关性
corr = data[features].corr()
plt.figure(figsize=(14,14))
# annot=True显示每个方格的数据
sns.heatmap(corr, annot=True)
plt.show()
| true |
b394dff0ec69719aefc6336e0a8db1a37773ede1 | Python | xavierlu/FIFO-Ledger-Calculator | /testsuite.py | UTF-8 | 4,064 | 2.6875 | 3 | [] | no_license | #!/usr/bin/python
import os
import filecmp
import unittest
"""
To run test suite, type `chmod +x testsuite.py; ./testsuite.py`
or `python3 testsuite.py`
"""
class TestFIFO(unittest.TestCase):
    """Regression tests: run fifo.py on each CSV ledger and diff the output
    against the matching .ref file."""

    def _check(self, number):
        """Run fifo.py on transactions<number>.csv and assert .out matches .ref.

        NOTE(review): os.system's return code is ignored, as in the original;
        a failed run only surfaces through the file comparison.
        """
        os.system("python3 fifo.py transactions%d.csv" % number)
        self.assertTrue(filecmp.cmp("transactions%d.out" % number,
                                    "transactions%d.ref" % number))

    def test_given(self):
        # The sample ledgers shipped with the assignment.
        for n in (1, 2, 3):
            self._check(n)

    def test_sell_all(self):
        # test against buying a small amount of shares many times and then sell all
        for n in (7, 8):
            self._check(n)

    def test_sell_half(self):
        # buy a lot of shares, then sell half
        self._check(9)

    def test_buy_sell_buy_sell(self):
        # buy sell buy sell buy sell buy sell etc.
        for n in (10, 11, 12):
            self._check(n)

    def test_random(self):
        # test against some random ledgers
        for n in (4, 5, 6):
            self._check(n)

    def test_multi(self):
        # multiple buy low sell high; 15 is the same as 13 but randomized order
        for n in (13, 14, 15):
            self._check(n)

    def test_net_lost(self):
        # test transactions that lose money
        for n in (16, 17):
            self._check(n)

    def test_combo(self):
        # test combination of stuff mentioned above
        for n in (18, 19, 20, 21, 22):
            self._check(n)

    def test_speed(self):
        # test against a large file
        self._check(23)
# Discover and run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| true |
cdbc41bf327e430c019a5a0f9c8579e53c054409 | Python | garce1/xortool | /xortool/xortool-xor | UTF-8 | 2,426 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
"""
xor strings
options:
-s - string with \\xAF escapes
-r - raw string
-h - hex-encoded string (non-letterdigit chars are stripped)
-f - read data from file (- for stdin)
-n - no newline at the end
--no-cycle / --nc - pad smaller strings with null bytes
example: xor -s lol -h 414243 -f /etc/passwd
author: hellman ( hellman1908@gmail.com )
"""
from __future__ import print_function
import sys
import string
import getopt
# getopt spec for the four data-source options; each takes an argument.
DATA_OPTS = "s:r:h:f:"
# Characters accepted as the two hex digits of a \xAB escape in from_str().
HEXES = set("0123456789abcdefABCDEF")
def main():
    """Parse command-line options, collect the input chunks, and write their
    XOR to stdout (followed by a newline unless -n was given)."""
    pad = False              # --no-cycle / --nc: zero-pad instead of cycling
    suppress_newline = False
    try:
        opts, _ = getopt.getopt(sys.argv[1:], "n" + DATA_OPTS, ["no-cycle", "nc"])
        chunks = []
        for opt, value in opts:
            if opt in ("--no-cycle", "--nc"):
                pad = True
            elif opt == "-n":
                suppress_newline = True
            else:
                chunks.append(arg_data(opt, value))
        if not chunks:
            raise getopt.GetoptError("no data given")
    except getopt.GetoptError as err:
        print("error:", err, file=sys.stderr)
        print(__doc__, file=sys.stderr)
        quit()

    sys.stdout.buffer.write(xor(chunks, nocycle=pad))
    if not suppress_newline:
        sys.stdout.buffer.write(b"\n")
def xor(args, nocycle=False):
    """XOR all byte strings in `args` together and return the result.

    The longest input sets the output length. Shorter inputs are cycled,
    or — with nocycle=True — applied only over their own length, which is
    equivalent to padding them with zero bytes.

    BUGFIX: the original sorted and pop()ed the caller's list in place;
    this version leaves `args` untouched.
    """
    ordered = sorted(args, key=len, reverse=True)  # longest first
    res = list(ordered[0])
    maxlen = len(res)
    for s in ordered[1:]:
        slen = len(s)
        span = slen if nocycle else maxlen
        for i in range(span):
            res[i] ^= s[i % slen]
    return bytes(res)
def from_str(s):
    """Decode a string that may contain \\xAB escapes into bytes."""
    out = []
    i = 0
    n = len(s)
    # Scan while a 4-char "\xAB" escape could still start at position i.
    while i + 4 <= n:
        if s[i] == "\\" and s[i + 1] == "x" and s[i + 2] in HEXES and s[i + 3] in HEXES:
            out.append(int(s[i + 2:i + 4], 16))
            i += 4
        else:
            out.append(ord(s[i]))
            i += 1
    # Fewer than 4 chars remain: no escape can start here, take them verbatim.
    out += s[i:].encode("ascii")
    return bytes(out)
def from_file(s):
    """Read raw bytes from the file at path `s`, or from stdin when s == '-'."""
    if s == "-":
        return sys.stdin.buffer.read()
    # BUGFIX: use a context manager so the handle is closed (the original
    # open(...).read() leaked the file object).
    with open(s, "rb") as fh:
        return fh.read()
def arg_data(opt, s):
    """Decode one data option into bytes: -s escaped string, -r raw string,
    -h hex string, -f file path (or '-' for stdin)."""
    handlers = {
        "-s": from_str,
        "-r": str.encode,
        "-h": bytes.fromhex,
        "-f": from_file,
    }
    handler = handlers.get(opt)
    if handler is None:
        raise getopt.GetoptError("unknown option -%s" % opt)
    return handler(s)
# Entry point when run as a command-line tool.
if __name__ == '__main__':
    main()
| true |
0fb92ee971e286fb1f9ec401396573b348b057b7 | Python | ryananderson60/Sentiment-Analysis-Vader | /sentiment_twitter.py | UTF-8 | 1,807 | 3.09375 | 3 | [] | no_license | #Ryan Anderson
#NLP assign 3 - Part 3
###USED POLITICAL CORPUS FROM USNA
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
kt = open("keyword-tweets.txt", "r")
data = []
# Keep only the tweet text; drop the leading "POLIT"/"NOT" label token.
for line in kt:
    data.append(line.split()[1:])
kt.close()  # BUGFIX: the input handle was never closed

data_all = []
for tokens in data:
    data_all.append(" ".join(tokens))

tweet = []
vs_compound = []
vs_pos = []
vs_neu = []
vs_neg = []

analyzer = SentimentIntensityAnalyzer()
for i in range(0, len(data_all)):
    tweet.append(data_all[i])
    # Score each tweet once instead of four times (the original called
    # polarity_scores per field).
    scores = analyzer.polarity_scores(data_all[i])
    vs_compound.append(scores['compound'])
    vs_pos.append(scores['pos'])
    vs_neu.append(scores['neu'])
    vs_neg.append(scores['neg'])

import pandas
from pandas import DataFrame
twitter_df = DataFrame({'Tweet': tweet,
                        'Compound': vs_compound,
                        'Positive': vs_pos,
                        'Neutral': vs_neu,
                        'Negative': vs_neg})
twitter_df = twitter_df[['Tweet', 'Compound',
                         'Positive', 'Neutral', 'Negative']]

#part 3-1
# BUGFIX: the original evaluated twitter_df.head() without assigning it and
# later did out.write(head), raising NameError.
head = str(twitter_df.head())

#part 3-2
import matplotlib.pyplot as plt
plt.hist(vs_compound, bins=20)

out = open("output.txt", "w")
out.write("I USED POLITCAL CORPUS FROM USNA. WILL NOT INCLUDE FAVORITES COLUMN")
out.write("\n")
out.write(head)
out.write("\n")
out.write("\n screenshot of histogram will be named screenshot_hist.png in folder")
out.write("\n")

#part 3-3
# `top_tweets` renamed from `sorted`, which shadowed the builtin.
top_tweets = twitter_df.sort_values(by=['Compound'], ascending=False)
top_tweets = top_tweets.head(10)
out.write("\n")
out.write("\n")
out.write(str(top_tweets))
out.write("\n")
out.write("\n")
out.write("NOTE: spyder does not format datafram correctly. I have also put into the folder a python notebook to run if needed to see columns.")
out.close()  # BUGFIX: flush and close the report file
1acae0e8a1c24428bf8bda57d2762b617a9fdbdb | Python | abhishekalbert/Airline-Data-Analyzer-using-Data-Mining | /Python Codes/data_reader_v3.py | UTF-8 | 3,639 | 3.703125 | 4 | [] | no_license | import csv
import pickle
# Indexes of the CSV columns kept from each raw row — presumably month, day,
# departure-time and delay/airport fields of the airline on-time dataset;
# TODO confirm against the source schema.
needed_cols = [1, 2, 3, 4, 8, 10, 15, 16, 17]
# Dataset years to process; one pickle file ("data<year>") is written per year.
years = [2008]
def ComputeDayofYear(row):
    """This function will return an integer to represent the day of the year given an integer
    representing month and an integer representing the day of the month. This number will
    correspond to the ordered day of the year [0-365]. For instance, Jan 1st will be returned
    as 0. Feb 29th will be returned as 59.

    row[0] is the month and row[1] the day of month, both as strings; row[1]
    is rewritten in place with the 0-based day of a (leap) year and the row
    is returned. A month outside '1'..'12' leaves the row unchanged, as in
    the original branch chain.
    """
    # Cumulative days before each month (leap-year offsets: Feb has 29 days).
    offsets = {'1': 0, '2': 31, '3': 60, '4': 91, '5': 121, '6': 152,
               '7': 182, '8': 213, '9': 244, '10': 274, '11': 305, '12': 335}
    month = row[0]
    if month in offsets:
        row[1] = str(offsets[month] + int(row[1]) - 1)
    return row
def DiscretizeDepTime(row):
    """This function takes a scheduled departure time, classifies the departure time as:
    morning (0600 - 1259), afternoon (1300 - 1759), or evening (1800-0559). The input value
    row[3] is assumed to be an integer string in 24-hour HHMM format. The labels correspond
    to '0' = morning, '1' = afternoon, '2' = evening; row[3] is rewritten in
    place and the row is returned. Unmatched values default to morning."""
    dep = int(row[3])
    if dep <= 559 or dep >= 1800:
        row[3] = '2'
    elif 600 <= dep <= 1259:
        row[3] = '0'
    elif 1300 <= dep <= 1759:
        row[3] = '1'
    else:
        row[3] = '0'
    return row
def AddDepVar(row):
    """This function adds a classification label based on the length of the recorded
    Departure Delay in the data set. It assumes row[6] holds the delay in minutes.
    By airline industry standards, flight delays are defined as departure delays greater than
    or equal to 15 minutes. For delayed flights, row[6] becomes "1";
    for on time flights it becomes "0". The row is returned.
    """
    # BUGFIX: the original compared STRINGS (row[6] >= '15'), so '9' was
    # classed as delayed and '100' as on time. Compare numerically instead
    # (float() also tolerates values such as '12.0' or '-5').
    if float(row[6]) >= 15:
        row[6] = '1'
    else:
        row[6] = '0'
    return row
def SaveData(data, pickle_file_name):
    """This function pickles the data to the given file name.

    BUGFIX: pickle writes bytes, so the file must be opened in binary mode —
    the original text-mode "w" raises TypeError under Python 3. A context
    manager also guarantees the handle is closed on error.
    """
    with open(pickle_file_name, "wb") as f:
        pickle.dump(data, f)
for year in years:
    data = []
    # BUGFIX: in the original non-raw literal 'C:\data\airline...', the
    # '\a' was the BEL control character, producing a broken path.
    file_path = r'C:\data\airline' + str(year) + '.csv'
    pickle_file_name = 'data' + str(year)
    with open(file_path, 'r') as data_csv:
        csv_reader = csv.reader(data_csv, delimiter=',')
        # Iterate the reader directly instead of materializing list(csv_reader).
        for row in csv_reader:
            # Keep non-cancelled flights (col 21 == '0') departing SFO/OAK.
            if row[21] == '0' and row[16] in ('SFO', 'OAK'):
                content = [row[ix] for ix in needed_cols]
                content = ComputeDayofYear(content)
                content = DiscretizeDepTime(content)
                content = AddDepVar(content)
                data.append(content)
    SaveData(data, pickle_file_name)
| true |
f75989250a05011f9b3a0ac4e3bb26ec3f4dd642 | Python | lakshmisravyasaripella/Python-Lab-Programs | /Exp-2.2.py | UTF-8 | 279 | 4.03125 | 4 | [] | no_license | name=input("Enter your name:")
n=int(input("Enter the number of times the name to be printed:"))
for i in range(n):
print(name)
'''
0utput:
Enter your name:Sravya
Enter the number of times the name to be printed:5
Sravya
Sravya
Sravya
Sravya
Sravya
'''
| true |
d97da0ec65aaa134dedc7e55cbb083b82b18b8f6 | Python | fabioresta/GeFa | /Animale.py | UTF-8 | 1,401 | 3.484375 | 3 | [] | no_license | #!/usr/bin/python
from time import sleep
class Animale(object):
    """Base animal class with combined getter/setter accessors for name and
    age, shared behaviours (sleep/drink/eat), and per-subclass placeholders."""

    def __init__(self, nome, eta):
        """Store the animal's name (nome) and age (eta)."""
        self.__nome = nome
        self.__eta = eta

    def nome(self, nome=None):
        """Get/set the name: assigns it when an argument is given, then
        returns the current name either way."""
        # Idiom fix: compare with None by identity (PEP 8), not `!=`.
        if nome is not None:
            self.__nome = nome
        return self.__nome

    def eta(self, eta=None):
        """Get/set the age: assigns it when an argument is given, then
        returns the current age either way."""
        if eta is not None:
            self.__eta = eta
        return self.__eta

    def dorme(self, secondi=1):
        """Sleep for `secondi` seconds (default 1) and return the duration."""
        t = secondi
        sleep(t)
        return t

    def beve(self):
        """Drinking is identical for every animal, so it is implemented here."""
        return "beve"

    def mangia(self):
        """Eating is shared by most subclasses, so a default is provided."""
        return "mangia"

    # Behaviours that differ per subclass. Python abstract methods are not
    # used here by design; these no-op placeholders are meant to be
    # overridden (equivalent of abstract methods).
    def info(self):
        pass

    def parla(self):
        pass

    def si_muove(self):
        pass
| true |
class Purchase(object):
    """A collection of purchased items that can render itself as a receipt."""

    RECEIPT_HEADER = {'header': "Items\nNo.\t\tItem(s)\t\tQuantity\t\tCost\n"}

    def __init__(self, items):
        """Keep the raw list of purchased items (duplicate entries allowed)."""
        self.items = items
        self.duplicate_list = []

    def as_string(self):
        """Render the receipt: one line per distinct priced item, then the total.

        BUGFIX: the old version called get_cost_for (which mutated
        item.price) once per occurrence, so duplicated items were printed
        repeatedly with compounding prices and the final total was inflated.
        Items with a falsy price are skipped, as before.
        """
        receipt = self.RECEIPT_HEADER['header']
        listed = []
        for item in self.items:
            if item.price and item not in listed:
                listed.append(item)
                receipt += self.get_cost_for(item)
        receipt += "\tTotal: %d" % self.get_total()
        return receipt

    def __format(self, count, name, cost):
        """Format a single receipt line."""
        return "%s. %s %d\n" % (count, name, cost)

    def exists_times(self, item):
        """Number of occurrences of `item` in the purchase."""
        return self.items.count(item)

    def duplicate(self):
        """Return a shallow copy of the item list."""
        return self.items.copy()

    def get_cost_for(self, item):
        """Receipt line for `item`: its quantity and total cost.

        Computes the cost without mutating item.price (the original
        overwrote it, corrupting later calculations).
        """
        quantity = self.exists_times(item)
        return self.__format(quantity, item.name, int(item.price) * quantity)

    def get_total(self):
        """Sum of the prices of all item occurrences."""
        total = 0
        for item in self.items:
            total += int(item.price)
        return total
3ab2baa2e1175a23f85202bb0994c43b89d07890 | Python | lollipopnougat/AlgorithmLearning | /排序专题/交换排序/快速排序/quick_sort.py | UTF-8 | 1,049 | 3.734375 | 4 | [
"MIT"
] | permissive | # 标准快排
def get_index(numlist: list, low: int, high: int) -> int:
    """Hoare-style partition of numlist[low:high+1] around numlist[low];
    places the pivot at its final sorted position and returns that index."""
    pivot = numlist[low]
    while low < high:
        # From the right, find a value smaller than the pivot.
        while low < high and numlist[high] >= pivot:
            high -= 1
        numlist[low] = numlist[high]
        # From the left, find a value larger than the pivot.
        while low < high and numlist[low] <= pivot:
            low += 1
        numlist[high] = numlist[low]
    numlist[low] = pivot
    return low
def quick_sort_o(numlist: list, low: int, high: int):
    """Standard quicksort driver: partition, then recurse on both halves."""
    if low < high:
        pivot_ix = get_index(numlist, low, high)
        quick_sort_o(numlist, low, pivot_ix - 1)
        quick_sort_o(numlist, pivot_ix + 1, high)
# "one-minute quicksort" — same algorithm with the partition step inlined
def quick_sort(n: list, l: int, h: int):
    """In-place quicksort of n[l:h+1], with the partition written inline."""
    start, end = l, h  # remember the original bounds for the recursive calls
    if l < h:
        pivot = n[l]
        while l < h:
            while l < h and n[h] >= pivot:
                h -= 1
            n[l] = n[h]
            while l < h and n[l] <= pivot:
                l += 1
            n[h] = n[l]
        n[l] = pivot
        quick_sort(n, start, l - 1)
        quick_sort(n, l + 1, end)
# Demo: sort a sample list in place and show the result.
sample = [18, 5, 23, 8, 16, 5]
quick_sort(sample, 0, len(sample) - 1)
print(sample)
| true |
8cfc3e875c3262f0039b7ad68bee46e725c8ac9d | Python | xuguojiande/python | /ex4-3.py | UTF-8 | 202 | 3.890625 | 4 | [] | no_license | #ex4.3
# NOTE(review): eval(input(...)) is kept so "4,6"-style input still parses,
# but it executes arbitrary expressions — unsafe for untrusted input.
a, b = eval(input("输入两个整数:"))
# Remember the original inputs: the Euclidean loop below overwrites a and b.
x, y = a, b
if a > b:
    a, b = b, a
# Euclid's algorithm; when the loop exits, b holds gcd(x, y).
while a % b != 0:
    a, b = b, a % b
c = b
# BUGFIX: the original printed the last remainder as the gcd (which is 0 —
# and divides by zero — whenever b divides a) and computed the "LCM" from
# the mutated values (a*b/c); lcm = x*y // gcd must use the original inputs.
# The output labels were also swapped (gcd is 最大公约数, lcm is 最小公倍数).
g = x * y // c
print("最大公约数是{}\n最小公倍数是{}".format(c, g))
| true |
6f63b0c8a2d9c3663fc2a59cf17d4f6f049279a2 | Python | narien/AdventOfCode2017 | /day14/14.py | UTF-8 | 2,432 | 3.125 | 3 | [] | no_license | import sys
"""START OF MODIFIED DAY 10 SOLUTION"""
from functools import reduce
# Mutable knot-hash state shared by hash()/doHash() via `global`:
currIx = 0  # current position in the circular list
skipSize = 0  # per-round skip increment
myList = list(range(256))  # the 256-element sparse-hash list
def hash(length):
    """One knot-hash round (AoC 2017 day 10): reverse `length` elements of
    the circular list `myList` starting at `currIx`, then advance `currIx`
    by length + skipSize (mod list size) and increment `skipSize`.

    NOTE(review): shadows the builtin hash() and operates entirely through
    module-level mutable state (currIx/skipSize/myList).
    """
    global currIx
    global skipSize
    global myList
    # Extract the segment to reverse, wrapping past the end of the list.
    if (currIx + length) > len(myList):
        reverseList = myList[currIx:]
        reverseList += myList[:length - len(reverseList)]
    else:
        reverseList = myList[currIx:currIx + length]
    reverseList = reverseList[::-1]
    # Write the reversed segment back, wrapping indices the same way.
    tmpIx = currIx
    for val in reverseList:
        if tmpIx == len(myList):
            tmpIx = 0
        myList[tmpIx] = val
        tmpIx += 1
    currIx = (currIx + length + skipSize) % len(myList)
    skipSize += 1
def doHash(hashString):
    """Run the full 64-round knot hash over hashString and return the dense
    hash as 32 lowercase hex characters.

    Operates on the module-level myList/currIx/skipSize state via hash();
    callers are expected to reset that state between hashes.
    """
    # Length sequence: the ASCII codes of the input plus the standard suffix.
    lengths = [ord(ch) for ch in hashString] + [17, 31, 73, 47, 23]
    for _ in range(64):
        for length in lengths:
            hash(length)
    # Dense hash: XOR each 16-element block down to one byte, hex-encoded.
    dense = []
    for start in range(0, 256, 16):
        block = myList[start:start + 16]
        dense.append('%02x' % reduce(lambda a, b: a ^ b, block))
    return ''.join(dense)
"""END OF MODIFIED DAY 10 SOLUTION"""
def removeGroup(i, j):
    """Flood-fill: clear the whole 4-connected group of 1s that contains
    cell (i, j) of the global 128x128 `matrix`."""
    global matrix
    matrix[i][j] = 0
    # Recurse into each in-bounds neighbour that is still set.
    if i > 0 and matrix[i - 1][j] == 1:      # up
        removeGroup(i - 1, j)
    if j > 0 and matrix[i][j - 1] == 1:      # left
        removeGroup(i, j - 1)
    if j < 127 and matrix[i][j + 1] == 1:    # right
        removeGroup(i, j + 1)
    if i < 127 and matrix[i + 1][j] == 1:    # down
        removeGroup(i + 1, j)
# Lookup table: one lowercase hex digit -> its 4-bit binary string.
hex2bin_map = {digit: format(value, "04b")
               for value, digit in enumerate("0123456789abcdef")}
# Build the 128x128 disk grid: row r holds the bit expansion of
# knothash(key + "-" + str(r)).
with open(sys.argv[1], 'r') as key_file:
    inputString = key_file.read()
inputString += "-"

matrix = [[0 for _ in range(128)] for _ in range(128)]

for row in range(128):
    # Reset the shared knot-hash state before hashing this row.
    currIx = 0
    skipSize = 0
    myList = list(range(256))
    digest = doHash(inputString + str(row))
    col = 0
    for hex_digit in digest:
        for bit in hex2bin_map[hex_digit]:
            matrix[row][col] = int(bit)
            col += 1

# Part 1: total number of used (1) squares.
print("Used squares: ", sum(map(sum, matrix)))

# Part 2: count connected regions, erasing each group as it is found.
groups = 0
for i in range(128):
    for j in range(128):
        if matrix[i][j] == 1:
            groups += 1
            removeGroup(i, j)
print("Total groups: ", groups)
| true |
6c5c9184e4ead3b1848cd428951df95b6139735c | Python | AmazaspShumik/mtlearn | /tests/cross_stitch_test.py | UTF-8 | 1,988 | 2.90625 | 3 | [] | no_license | from unittest import TestCase
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model
from mtlearn.layers import CrossStitchBlock
from tests import BaseSpecificationMTL
def get_cross_stitch_model(n_features: int):
    """
    Build a two-branch MLP joined by cross-stitch units.

    Two parallel Dense stacks (12 -> 6 -> 1 units, selu activations) share
    information through a CrossStitchBlock after each hidden layer; the
    model is compiled with MSE loss and the Adam optimizer.

    Parameters
    ----------
    n_features: int
        Shape of the input
    """
    inputs = Input(shape=(n_features,))
    first_hidden = [Dense(12, "selu")(inputs) for _ in range(2)]
    stitched_one = CrossStitchBlock()(first_hidden)
    second_hidden = [Dense(6, "selu")(branch) for branch in stitched_one]
    stitched_two = CrossStitchBlock()(second_hidden)
    outputs = [Dense(1, "selu")(branch) for branch in stitched_two]
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss="mse", optimizer="adam")
    return model
class TestCrossStitch(TestCase, BaseSpecificationMTL):
    """ Test cross-stitch block """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # set_data_and_directory — presumably provided by BaseSpecificationMTL;
        # confirm in the tests package.
        self.set_data_and_directory()

    @property
    def models(self):
        # Model list consumed by the shared BaseSpecificationMTL checks.
        return [get_cross_stitch_model(1)]

    def test_cross_stitch_normalization_and_shape(self):
        """
        Test that elements of each column of the cross-stitch kernel are
        convex combinations (columns sum to one), and that the output is a
        list of tensors with the same shape as the inputs.
        """
        # Four identical (2, 3) inputs feed one cross-stitch block.
        test_input = [tf.ones([2, 3], dtype=tf.float32) for _ in range(4)]
        cross_stitch = CrossStitchBlock()
        output = cross_stitch(test_input)
        # check column normalization: each column of the 4x4 kernel sums to ~1
        col_sum = tf.reduce_sum(cross_stitch.cross_stitch_kernel, 0)
        expected_col_sum = tf.ones(4, dtype=tf.float32)
        abs_diff = tf.abs(col_sum - expected_col_sum)
        self.assertTrue(tf.reduce_sum(abs_diff) < 1e-4)
        # check shape: every output keeps the (2, 3) input shape
        for i in range(4):
            self.assertTrue(tf.reduce_all(output[i].shape == tf.TensorShape([2, 3])))
| true |
4926689441cfb6e22953c526b42ad08898f8aa86 | Python | rfmurray/psyc6273 | /06 - numpy/np_intro.py | UTF-8 | 3,698 | 3.78125 | 4 | [] | no_license | # np_intro.py Introduction to the numpy module
# np is the standard short form for the numpy module
import numpy as np
# - the main class in numpy is the multidimensional array (ndarray)
# - elements are all of the same type; usually numbers
# - elements are indexed by non-negative integers, just like in lists and tuples
# - the dimensions of the array are also called 'axes'
# create an array of zeros
x = np.zeros(shape=(3,4))
x
type(x)
# some important attributes of an array
x.ndim # number of dimensions
x.shape # size of each dimension
x.size # total number of elements
x.dtype # data type of elements
# more ways of creating arrays
x = np.zeros(shape=(3,4))
x = np.ones(shape=(3,4))
x = np.empty(shape=(3,4)) # contents depend on what was previously in memory
x = np.random.normal(loc=0.0, scale=1.0, size=(3,4))
x = np.random.uniform(low=0.0, high=1.0, size=(3,4))
# - transform a sequence into a 1D array
x = np.array([ 1.0, 2.0, 3.0 ]) # transform a list
x = np.array(( 1.0, 2.0, 3.0 )) # transform a tuple
# - transform a sequence of sequences into a 2D array
x = np.array([ [ 1.0, 2.0, 3.0 ], [ 4.0, 5.0, 6.0 ] ]) # list of lists
x = np.array([ ( 1.0, 2.0, 3.0 ), ( 4.0, 5.0, 6.0 ) ]) # list of tuples
# - can optionally specify a data type
x = np.array([1, 2, 3], dtype=np.float64) # a few types: uint8, int64, float32, float64
x.dtype
x = np.array([1, 2, 3], dtype=np.uint8)
x.dtype
# - create 1D sequences of numbers
x = np.arange(10) # data type is np.int64
x = np.arange(start=0.0, stop=100.0, step=20.0) # step is step size
x = np.linspace(start=0.0, stop=100.0, num=6) # num is number of steps
# with floating point numbers, it can be difficult to predict the number
# of elements returned by arange (e.g., rounding errors); better to use
# linspace
# - create an array from a function
def f(i,j):
    # Grid helper for np.fromfunction: tens digit = row index, units = column.
    value = 10 * i
    return value + j
x = np.fromfunction(f, shape=(3,4))
x = np.fromfunction(lambda i, j: 10*i + j, shape=(3,4))
# - load data from a text file
x = np.loadtxt(fname='data.txt', comments='#', skiprows=0, delimiter=',')
# arithmetic operations apply to whole arrays
x = np.random.normal(size=(3,4))
y = np.random.normal(size=(3,4))
z = x + y
z = x - y
z = x * y
z = x / y
x += 1 # these operations act in place
x -= 2
x *= 10
x /= 20
x = x + 1 # these operations do not act in place
x = 10 * x
# arrays are mutable (like lists), and several variables can refer to
# the same array
# compare
z = x
z is x # True
x += 1
z is x # True
z = x
z is x # True
x = x + 1
z is x # False
# if you want a separate, independent copy of an array, use the copy() method
z = x
z is x # True
z = x.copy()
z is x # False
# functions that apply to whole arrays
u = np.exp(x)
v = np.log(u)
# others: all, any, argmax, argmin, ceil, clip, corrcoef, cov, cross, cumprod,
# cumsum, diff, dot, floor, inner, max, mean, median, min, prod, round, sort,
# std, sum, trace, transpose, var
# indexing and slicing
x = np.random.normal(size=(10,5))
y = x[2,3] # pick a single element: row, column; result is a number, not an array
type(y) # np.float64, not np.ndarray
y.ndim # zero-dimensional
y = x[0:5,1] # multiple rows; result is a 1D array
y = x[:,1] # all rows; result is a 1D array
y = x[1:3,:] # multiple rows, all columns; result is a 2D array
x[:,1] = 0 # can also assign new values to parts of an array
x = np.random.normal(size=(10,5))
y = x[:,0] # interesting: y is a "view" of a subarray of x
y[:] = 0 # change y in place
x # changing y in place changes x!
y = 100 # assign a new value to y
x # assigning a new value to y does not change x
| true |
3ba32c7f1108749951c63bc1db8463a993ea0e58 | Python | jeongwook/python_work | /ch_11/test_cities.py | UTF-8 | 675 | 3.296875 | 3 | [] | no_license | import unittest
from city_functions import get_city_country
class CityCountriesTestCase(unittest.TestCase):
    """Tests for city_functions.py."""
    def test_city_country(self):
        """Do city, country such as 'Santiago, Chile' work?"""
        # Inputs are lower-case on purpose: get_city_country is expected to
        # title-case both parts.
        formatted_city_country = get_city_country('santiago', 'chile')
        self.assertEqual(formatted_city_country, 'Santiago, Chile')
    def test_city_country_population(self):
        """
        Do city, country - population such as 'Santiago, Chile -
        population 5000000' work?
        """
        # Optional third argument should be appended as '- population N'.
        formatted_city_country = get_city_country('santiago', 'chile', 5000000)
        self.assertEqual(formatted_city_country, 'Santiago, Chile - population 5000000')
unittest.main() | true |
bd87d498ffb8fc9ebdf7339c38c4df98a656de60 | Python | Ascend/ModelZoo-PyTorch | /PyTorch/built-in/cv/classification/3D_ResNet_ID0421_for_PyTorch/spatial_transforms.py | UTF-8 | 6,163 | 2.59375 | 3 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from torchvision.transforms import transforms
from torchvision.transforms import functional as F
from PIL import Image
class Compose(transforms.Compose):
    # Extends torchvision's Compose with the randomize_parameters() protocol
    # used throughout this module: parameters are drawn once and reused until
    # randomize_parameters() is called again.
    def randomize_parameters(self):
        for t in self.transforms:
            t.randomize_parameters()
class ToTensor(transforms.ToTensor):
    def randomize_parameters(self):
        # Deterministic transform: nothing to (re)draw.
        pass
class Normalize(transforms.Normalize):
    def randomize_parameters(self):
        # Deterministic transform: nothing to (re)draw.
        pass
class ScaleValue(object):
    # Multiplies a tensor by a fixed scalar factor.
    def __init__(self, s):
        self.s = s
    def __call__(self, tensor):
        # In-place multiply: mutates and returns the same tensor object.
        tensor *= self.s
        return tensor
    def randomize_parameters(self):
        pass
class Resize(transforms.Resize):
    def randomize_parameters(self):
        pass
class Scale(transforms.Scale):
    def randomize_parameters(self):
        pass
class CenterCrop(transforms.CenterCrop):
    def randomize_parameters(self):
        pass
class CornerCrop(object):
    """Crop a ``size`` x ``size`` square from one of five positions.

    Positions: 'c' (center), 'tl', 'tr', 'bl', 'br'.  When ``crop_position``
    is None, a position is drawn uniformly at random each time
    ``randomize_parameters`` is called.
    """

    def __init__(self,
                 size,
                 crop_position=None,
                 crop_positions=('c', 'tl', 'tr', 'bl', 'br')):
        # size may be reset later by the caller (MultiScaleCornerCrop passes
        # size=None and fills it in per image).
        self.size = size
        self.crop_position = crop_position
        # Tuple default instead of the original mutable list default; the
        # sequence is only ever indexed, so this is backward compatible.
        self.crop_positions = crop_positions
        # A fixed crop_position disables re-randomization.
        self.randomize = crop_position is None
        self.randomize_parameters()

    def __call__(self, img):
        """Return the cropped PIL image."""
        image_width = img.size[0]
        image_height = img.size[1]
        h, w = (self.size, self.size)
        if self.crop_position == 'c':
            i = int(round((image_height - h) / 2.))
            j = int(round((image_width - w) / 2.))
        elif self.crop_position == 'tl':
            i = 0
            j = 0
        elif self.crop_position == 'tr':
            i = 0
            j = image_width - self.size
        elif self.crop_position == 'bl':
            i = image_height - self.size
            j = 0
        elif self.crop_position == 'br':
            i = image_height - self.size
            j = image_width - self.size
        return F.crop(img, i, j, h, w)

    def randomize_parameters(self):
        """Draw a new crop position unless one was fixed at construction."""
        if self.randomize:
            self.crop_position = random.choice(self.crop_positions)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0}, crop_position={1}, randomize={2})'.format(
            self.size, self.crop_position, self.randomize)
class RandomHorizontalFlip(transforms.RandomHorizontalFlip):
    # Variant whose random draw happens in randomize_parameters() rather than
    # per call, so the same flip decision can be reused across several calls.
    def __init__(self, p=0.5):
        super().__init__(p)
        self.randomize_parameters()
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be flipped.
        Returns:
            PIL.Image: Randomly flipped image.
        """
        # Uses the value drawn in randomize_parameters(), not a fresh one.
        if self.random_p < self.p:
            return F.hflip(img)
        return img
    def randomize_parameters(self):
        # Draw once; __call__ reuses this until the next call here.
        self.random_p = random.random()
class MultiScaleCornerCrop(object):
    """Scale-and-crop augmentation: crop a square of ``short_side * scale``
    pixels from a randomly drawn corner/center position, then resize it to
    ``size`` x ``size``.

    The (scale, position) pair is drawn in ``randomize_parameters`` and
    reused by every subsequent ``__call__`` until it is drawn again.
    """

    def __init__(self,
                 size,
                 scales,
                 crop_positions=('c', 'tl', 'tr', 'bl', 'br'),
                 interpolation=Image.BILINEAR):
        self.size = size
        self.scales = scales
        self.interpolation = interpolation
        # Tuple default instead of the original shared mutable list default;
        # the sequence is only ever read, so this is backward compatible.
        self.crop_positions = crop_positions
        self.randomize_parameters()

    def __call__(self, img):
        short_side = min(img.size[0], img.size[1])
        crop_size = int(short_side * self.scale)
        # Reuse the CornerCrop drawn in randomize_parameters with the
        # per-image crop size.
        self.corner_crop.size = crop_size
        img = self.corner_crop(img)
        return img.resize((self.size, self.size), self.interpolation)

    def randomize_parameters(self):
        """Draw a new (scale, crop position) pair."""
        self.scale = random.choice(self.scales)
        crop_position = random.choice(self.crop_positions)
        self.corner_crop = CornerCrop(None, crop_position)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0}, scales={1}, interpolation={2})'.format(
            self.size, self.scales, self.interpolation)
class RandomResizedCrop(transforms.RandomResizedCrop):
    # Variant that draws the crop rectangle lazily: the first __call__ after
    # randomize_parameters() fixes the rectangle, later calls reuse it.
    def __init__(self,
                 size,
                 scale=(0.08, 1.0),
                 ratio=(3. / 4., 4. / 3.),
                 interpolation=Image.BILINEAR):
        super().__init__(size, scale, ratio, interpolation)
        self.randomize_parameters()
    def __call__(self, img):
        if self.randomize:
            # Drawn from the FIRST image's dimensions in this randomize cycle.
            self.random_crop = self.get_params(img, self.scale, self.ratio)
            self.randomize = False
        i, j, h, w = self.random_crop
        return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
    def randomize_parameters(self):
        # Defer the actual draw to the next __call__ (needs an image size).
        self.randomize = True
class ColorJitter(transforms.ColorJitter):
    # Variant that draws the jitter transform lazily on the first __call__
    # after randomize_parameters() and then reuses it.
    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        super().__init__(brightness, contrast, saturation, hue)
        self.randomize_parameters()
    def __call__(self, img):
        if self.randomize:
            # get_params composes the brightness/contrast/saturation/hue ops
            # with freshly drawn magnitudes.
            self.transform = self.get_params(self.brightness, self.contrast,
                                             self.saturation, self.hue)
            self.randomize = False
        return self.transform(img)
    def randomize_parameters(self):
        # Defer the draw to the next __call__.
        self.randomize = True
class PickFirstChannels(object):
    """Keep only the first ``n`` channels of a channel-first (C, H, W) tensor."""

    def __init__(self, n):
        self.n = n

    def __call__(self, tensor):
        # Slice along the leading (channel) axis; the explicit trailing ':'
        # indices require a 3-D input, matching the original behaviour.
        return tensor[:self.n, :, :]

    def randomize_parameters(self):
        """Deterministic transform: nothing to (re)draw."""
        pass
3ab413634df2e48c7e57d9d7a9669a3e3d845115 | Python | wu137928049/temp | /home_application/celery_tasks.py | UTF-8 | 2,438 | 2.703125 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
celery 任务示例
本地启动celery命令: python manage.py celery worker --settings=settings
周期性任务还需要启动celery调度命令:python manage.py celerybeat --settings=settings
"""
import datetime
from celery import task
from celery.schedules import crontab
from celery.task import periodic_task
from common.log import logger
@task()
def async_task(x, y):
    """
    Define a celery asynchronous task.

    Logs that the task ran (the message embeds both arguments, zero-padded
    to two digits) and returns x + y.
    """
    # Sample code logs at ERROR level so the message is visible regardless of
    # the configured log level.
    logger.error(u"celery 定时任务执行成功,执行结果:{:0>2}:{:0>2}".format(x, y))
    return x + y
def execute_task():
    """
    Trigger the celery asynchronous task.

    Ways to invoke a celery task:
        task.delay(arg1, arg2, kwarg1='x', kwarg2='y')
        task.apply_async(args=[arg1, arg2], kwargs={'kwarg1': 'x', 'kwarg2': 'y'})
    delay(): shorthand, similar to calling a plain function
    apply_async(): required when extra execution options (such as a scheduled
        time, eta) are needed
    See: http://celery.readthedocs.org/en/latest/userguide/calling.html
    """
    now = datetime.datetime.now()
    logger.error(u"celery 定时任务启动,将在60s后执行,当前时间:{}".format(now))
    # Schedule async_task to run 60 seconds from now (eta option).
    async_task.apply_async(args=[now.hour, now.minute], eta=now + datetime.timedelta(seconds=60))
@periodic_task(run_every=crontab(minute='*/5', hour='*', day_of_week="*"))
def get_time():
    """
    Celery periodic task example.

    run_every=crontab(minute='*/5', hour='*', day_of_week="*"): run every
    5 minutes.
    periodic_task: the periodic task is registered automatically when the
    program starts.
    """
    execute_task()
    now = datetime.datetime.now()
    logger.error(u"celery 周期任务调用成功,当前时间:{}".format(now))
| true |
c19a825a40c1c0ea240c9fddd92c5e4d4026f496 | Python | sidprobstein/generate_email | /src/generate_email.py | UTF-8 | 10,722 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/local/bin/python2.7
# encoding: utf-8
'''
@author: Sid Probstein
@copyright: RightWhen, Inc. All Rights Reserved.
@license: MIT License (https://opensource.org/licenses/MIT)
@contact: sid@rightwhen.com
'''
import sys
import os
import argparse
import glob
import json
import datetime
import random
# to do: move to a separate module and/or package (low priority)
# the following is from http://stackoverflow.com/questions/31392361/how-to-read-eml-file-in-python
# author http://stackoverflow.com/users/2247264/dalen
from email import message_from_file
# Path to directory where attachments will be stored:
path = "./msgfiles"
# To have attachments extracted into memory, change behaviour of 2 following functions:
def file_exists (f):
    """Return True when attachment *f* was already extracted into *path*."""
    target = os.path.join(path, f)
    return os.path.exists(target)
def save_file (fn, cont):
    """Saves cont (bytes) to file fn inside *path*, overwriting any previous copy."""
    # Context manager guarantees the handle is closed even if write() raises
    # (the original open/close pair leaked the handle on error).
    with open(os.path.join(path, fn), "wb") as out_file:
        out_file.write(cont)
def construct_name (id, fn):
    """Constructs a file name out of messages ID and packed file name"""
    # Join the first two dot-separated components of the ID, then append fn.
    parts = id.split(".")
    prefix = parts[0] + parts[1]
    return prefix + "." + fn
def disqo (s):
    """Removes a surrounding pair of single or double quotations (after trimming whitespace)."""
    trimmed = s.strip()
    for quote in ("'", '"'):
        if trimmed.startswith(quote) and trimmed.endswith(quote):
            return trimmed[1:-1]
    return trimmed
def disgra (s):
    """Removes one surrounding < and > pair (HTML-like tag, e-mail address or ID)."""
    trimmed = s.strip()
    if trimmed.startswith("<") and trimmed.endswith(">"):
        return trimmed[1:-1]
    return trimmed
def pullout (m, key):
    """Extracts content from an e-mail message.
    This works for multipart and nested multipart messages too.
    m -- email.Message() or mailbox.Message()
    key -- Initial message ID (some string)
    Returns tuple(Text, Html, Files, Parts)
    Text -- All text from all parts.
    Html -- All HTMLs from all parts
    Files -- Dictionary mapping extracted file to message ID it belongs to.
    Parts -- Number of parts in original message.
    """
    Html = ""
    Text = ""
    Files = {}
    Parts = 0
    if not m.is_multipart():
        if m.get_filename(): # It's an attachment
            fn = m.get_filename()
            cfn = construct_name(key, fn)
            Files[fn] = (cfn, None)
            # Skip extraction when the attachment is already on disk.
            if file_exists(cfn): return Text, Html, Files, 1
            save_file(cfn, m.get_payload(decode=True))
            return Text, Html, Files, 1
        # Not an attachment!
        # See where this belongs. Text, Html or some other data:
        # NOTE(py2): get_payload(decode=True) returns a str under Python 2;
        # under Python 3 it would be bytes and these += would fail.
        cp = m.get_content_type()
        if cp=="text/plain": Text += m.get_payload(decode=True)
        elif cp=="text/html": Html += m.get_payload(decode=True)
        else:
            # Something else!
            # Extract a message ID and a file name if there is one:
            # This is some packed file and name is contained in content-type header
            # instead of content-disposition header explicitly
            cp = m.get("content-type")
            try: id = disgra(m.get("content-id"))
            except: id = None
            # Find file name:
            o = cp.find("name=")
            if o==-1: return Text, Html, Files, 1
            ox = cp.find(";", o)
            if ox==-1: ox = None
            o += 5; fn = cp[o:ox]
            fn = disqo(fn)
            cfn = construct_name(key, fn)
            Files[fn] = (cfn, id)
            if file_exists(cfn): return Text, Html, Files, 1
            save_file(cfn, m.get_payload(decode=True))
            return Text, Html, Files, 1
    # This IS a multipart message.
    # So, we iterate over it and call pullout() recursively for each part.
    y = 0
    while 1:
        # If we cannot get the payload, it means we hit the end:
        try:
            pl = m.get_payload(y)
        except: break
        # pl is a new Message object which goes back to pullout
        t, h, f, p = pullout(pl, key)
        Text += t; Html += h; Files.update(f); Parts += p
        y += 1
    return Text, Html, Files, Parts
def extract (msgfile, key):
    """Extracts all data from e-mail, including From, To, etc., and returns it as a dictionary.
    msgfile -- A file-like readable object
    key -- Some ID string for that particular Message. Can be a file name or anything.
    Returns dict()
    Keys: from, to, subject, date, text, html, parts[, files]
    Key files will be present only when message contained binary files.
    For more see __doc__ for pullout() and caption() functions.
    """
    m = message_from_file(msgfile)
    From, To, Subject, Date = caption(m)
    Text, Html, Files, Parts = pullout(m, key)
    Text = Text.strip(); Html = Html.strip()
    msg = {"subject": Subject, "from": From, "to": To, "date": Date,
           "text": Text, "html": Html, "parts": Parts}
    # Only expose the "files" key when attachments were actually extracted.
    if Files: msg["files"] = Files
    return msg
def caption (origin):
    """Extracts: To, From, Subject and Date from email.Message() or mailbox.Message()
    origin -- Message() object
    Returns tuple(From, To, Subject, Date)
    If message doesn't contain one/more of them, the empty strings will be returned.
    """
    # Message.get() returns the supplied default when the header is absent,
    # which replaces the Python-2-only has_key() calls (this version runs
    # unchanged under both Python 2 and Python 3).
    Date = origin.get("date", "").strip()
    From = origin.get("from", "").strip()
    To = origin.get("to", "").strip()
    Subject = origin.get("subject", "").strip()
    return From, To, Subject, Date
# end contribution
# author http://stackoverflow.com/users/2247264/dalen
#############################################
def main(argv):
    """Command-line driver (Python 2).

    Reads .eml files from enron/inbox ("read" mail) and enron/sent ("sent"
    mail), normalises each message to a flat JSON record and writes one
    file per message into args.outputdir, spreading the messages over 20
    simulated days (2016-01-10 .. 2016-01-29) with reduced weekend volume.
    """
    parser = argparse.ArgumentParser(description='Generate email in specific date ranges, using input .eml files')
    parser.add_argument('-o', '--outputdir', default="messages2/", help="subdirectory into which to place enriched files")
    parser.add_argument('-d', '--debug', action="store_true", help="dump diagnostic information for debugging purposes")
    args = parser.parse_args()
    # initialize
    nDays = 20
    nStartDay = 10 # 10th through 30th
    nReadPerDay = 60
    nSentPerDay = 20
    fWeekend = .1
    nWeekDay = 0
    nReadToday = 0
    nSentToday = 0
    nGenerated = 0
    nRead = 0
    nSent = 0
    sReadDir = 'enron/inbox/'
    sSentDir = 'enron/sent/'
    # to do: scan sReadDir and sSentDir and build a list of files
    lstReadFiles = []
    lstSentFiles = []
    lstReadFiles = glob.glob(sReadDir + '*')
    lstSentFiles = glob.glob(sSentDir + '*')
    for nDay in range(0, nDays):
        # day
        nReadToday = nReadPerDay
        nSentToday = nSentPerDay
        nWeekDay = nWeekDay + 1
        if nWeekDay > 5:
            # weekend: only 10% (fWeekend) of the normal volume
            nReadToday = int(nReadToday * fWeekend)
            nSentToday = int(nSentToday * fWeekend)
        if nWeekDay == 7:
            nWeekDay = 0
        for nMessages in range(1, nReadToday + nSentToday + 1):
            # emails for that day
            #####
            # read emails
            # open the next file in ReadDir
            # nRead counts every message; once today's "read" quota is used
            # up, the remaining messages for the day come from the sent list.
            nRead = nRead + 1
            if nRead > nReadToday:
                nSent = nSent + 1
                sFile = lstSentFiles[nSent-1]
            else:
                sFile = lstReadFiles[nRead-1]
            # debug
            # print "generate_email.py: reading:", sFile
            try:
                f = open(sFile, 'rb')
            except Exception, e:
                sys.exit(e)
            # extract the file using the new defs above
            # print json.dumps(extract(f, f.name), sort_keys=True, indent=4, separators=(',', ': '))
            jEml = extract(f, f.name)
            f.close()
            # debug
            # print json.dumps(jEml, sort_keys=True, indent=4, separators=(',', ': '))
            # write the new file out
            jNew = {}
            jNew['user'] = "sid"
            jNew['subject'] = jEml['subject']
            # Flatten newlines/tabs in the subject before whitespace squeeze.
            jNew['subject'] = jNew['subject'].replace('\n', ' ')
            jNew['subject'] = jNew['subject'].replace('\r', ' ')
            jNew['subject'] = jNew['subject'].replace('\t', ' ')
            if nRead > nReadToday:
                # sent
                jNew['from'] = 'sid@rightwhen.com'
                jNew['body'] = 'To: ' + jEml['to'] + ' ' + jEml['text']
            else:
                # read
                jNew['from'] = jEml['from']
                # cleanse from
                jNew['from'] = jNew['from'].replace('\n', ' ')
                jNew['from'] = jNew['from'].replace('\r', ' ')
                jNew['from'] = jNew['from'].replace('\t', ' ')
                jNew['body'] = jEml['text']
            # cleanse body
            jNew['body'] = jNew['body'].replace('\n', ' ')
            jNew['body'] = jNew['body'].replace('\r', ' ')
            jNew['body'] = jNew['body'].replace('\t', ' ')
            # remove extra spaces
            jNew['body'] = " ".join(jNew['body'].split())
            jNew['subject'] = " ".join(jNew['subject'].split())
            jNew['from'] = " ".join(jNew['from'].split())
            # construct date/time
            # e.g. 2016-01-25T09:03:00
            # Random business-hours timestamp (08:00-20:59) on the current day.
            nHour = random.randint(8, 20)
            nMin = random.randint(0, 59)
            sDate = "2016-01-" + str(nStartDay + nDay) + "T" + "%02d" % nHour + ":" + "%02d" % nMin + ":00"
            jNew['date'] = sDate
            # debug
            # print json.dumps(jNew, sort_keys=True, indent=4, separators=(',', ': '))
            # nRead never resets, so output file names are unique per run.
            sOutputFile = args.outputdir + str(nRead) + '.json'
            # debug
            # print "generate_email.py: writing:", sOutputFile
            try:
                fo = open(sOutputFile, 'w')
            except Exception, e: # to do: clean up
                f.close()
                sys.exit(e)
            # write the file
            try:
                json.dump(jNew,fo, sort_keys=True, indent=4, separators=(',', ': '))
            except Exception, e:
                sys.exit(e)
            fo.close()
            nGenerated = nGenerated + 1
            # to do: run these through BT ee???
        # end for
        print "generate_email.py: generated", nMessages, "messages for day", nDay
    # end for
    print "generate_email.py: generated", nGenerated, "messages for", nDays, "days"
# end main
#############################################
if __name__ == "__main__":
main(sys.argv)
# end | true |
bdad291c96ed7f5dedb087f830ea62e4ace31f7e | Python | knightrohit/monthly_challenges | /leetcode_jan_2021/13_boat_to_save_people.py | UTF-8 | 592 | 3.21875 | 3 | [] | no_license | """
Time Complexity = O(N)
Space Complexity = O(1)
"""
class Solution:
    """Greedy two-pointer solution for "Boats to Save People".

    Sort ascending, then repeatedly pair the heaviest remaining person with
    the lightest one when they fit within ``limit``; each boat carries at
    most two people.  Time O(n log n), extra space O(1).
    Note: ``people`` is sorted in place, as in the original implementation.
    """

    def numRescueBoats(self, people: List[int], limit: int) -> int:
        people.sort()
        light, heavy = 0, len(people) - 1
        boats = 0
        # The redundant people_left counter of the original is removed:
        # light <= heavy already covers the termination condition.
        while light <= heavy:
            boats += 1
            # The heaviest person always boards; take the lightest too if
            # the pair fits within the weight limit.
            if people[light] + people[heavy] <= limit:
                light += 1
            heavy -= 1
        return boats
299981060fdb3ddd0fd5c830dd12d9749b32fb6c | Python | odvk/sf-pyfullstack-c02 | /mB2-python-02/script-b0211-01.py | UTF-8 | 1,873 | 4.21875 | 4 | [] | no_license | # B2.11 Модуль collections: defaultdict и Counter
# В этом уроке мы остановимся на двух новых для нас контейнерах: defaultdict и Counter.
# Название этого класса говорит за себя: defaultdict представляет из себя обычный словарь
# с заранее заданным типом, используемым как фабрика значений для отсутствующих ключей.
# если мы создаем словарь. и пытаемся получить значение по отсутствующему в d ключу key,
# то мы получим пустой объект заданного типа some_type_here.
# d = collections.defaultdict(some_type_here)
import collections
# A few examples:
print("1 -----------")
d = collections.defaultdict(list) # create dict d with list as the factory for missing keys
print(d) # d is empty at this point
print(d["key"]) # "key" is absent, so the default factory supplies an empty list
print(d[42]) # the same happens for any missing key
print(d) # NOTE: unlike dict.get, the lookups above INSERTED the keys, so d now holds {'key': [], 42: []}
print("2 -----------")
# This behaviour lets us work with a defaultdict as if the key of interest
# were already present in the dictionary:
d["k"].append(33)
print(d)
print(d["k"])
| true |
0503d94365ad542ee5226f7d613d2318c929389a | Python | nielsr2/P3-G306 | /Tor_test/TorBatch/LowPassFiltering.py | UTF-8 | 1,055 | 2.78125 | 3 | [] | no_license | from scipy import fftpack
import numpy as np
import imageio
from PIL import Image, ImageDraw
import cv2
def lowPass(img):
    """Apply an ideal (elliptical) low-pass filter to the image at path *img*.

    Reads the file as grayscale, zeroes every frequency component outside a
    50x50 ellipse centred on the shifted spectrum, and returns the filtered
    image as a uint8 numpy array.
    """
    image1 = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
    # convert image to numpy array
    image1_np = np.array(image1)
    # fft of image, shifted so low frequencies sit in the middle
    fft1 = fftpack.fftshift(fftpack.fft2(image1_np))
    # Create a low pass filter image: ones inside the ellipse, zeros outside.
    x, y = image1_np.shape[0], image1_np.shape[1]
    # size of circle
    e_x, e_y = 50, 50
    # bounding box of the ellipse, centred on the spectrum
    bbox = ((x / 2) - (e_x / 2), (y / 2) - (e_y / 2),
            (x / 2) + (e_x / 2), (y / 2) + (e_y / 2))
    low_pass = Image.new("L", (image1_np.shape[0], image1_np.shape[1]), color=0)
    draw1 = ImageDraw.Draw(low_pass)
    draw1.ellipse(bbox, fill=1)
    low_pass_np = np.array(low_pass)
    # multiply both the images: keeps only the low-frequency components
    filtered = np.multiply(fft1, low_pass_np)
    # inverse fft back to the spatial domain; clamp to the valid 8-bit range
    ifft2 = np.real(fftpack.ifft2(fftpack.ifftshift(filtered)))
    ifft2 = np.maximum(0, np.minimum(ifft2, 255))
    # BUG FIX: the original called imageio.imshow(imageio), which does not
    # exist and discarded the computed result; return the filtered image so
    # the caller can display or save it.
    return ifft2.astype(np.uint8)
| true |
226b34cdfc78167a62ea12f44c4a9a90fed73595 | Python | JSuelfl/Media-Monitoring | /pyScrape_GN.py | UTF-8 | 3,317 | 2.65625 | 3 | [] | no_license | from lxml import html
from lxml.etree import tostring
from datetime import datetime, timedelta
import requests, csv, re, time
## Function to scrape info
def scrapeInfo(mainURL, mainContent, mainXPath, paraXPath):
    """Return [page_text, link] pairs for result links no older than 7 days.

    mainContent -- lxml HTML tree of a Google News results page
    mainXPath   -- xpath selecting the result elements
    paraXPath   -- xpath selecting paragraph nodes inside each linked article
    """
    li = []
    currDate = datetime.now()
    ## Make currDate 7 day's ago date
    currDate = currDate - timedelta(days=7)
    # Round-tripping through strftime/strptime drops the time-of-day part.
    currDate = currDate.strftime('%b %d, %Y')
    currDate = time.strptime(currDate, "%b %d, %Y")
    mainLinksXPath = mainContent.xpath(mainXPath)
    # De-duplicate result elements (note: set() does not preserve order).
    mainLinksXPath = list(set(mainLinksXPath))
    for mainLinksElements in mainLinksXPath:
        # Pull the href out of the serialized element by string surgery.
        link = tostring(mainLinksElements)
        link = link[(link.index("<a")):]
        link = link[(link.index("href=") + 5):]
        link = link.split()
        link = link[0]
        link = link.replace('"', '')
        # The date lives in a span with Google's "f nsa _uQb" CSS classes.
        # NOTE(review): tied to Google's markup; breaks when the layout changes.
        date = tostring(mainLinksElements)
        date = date[(date.index("f nsa _uQb") + 12):]
        date = date[:date.index("</span>")]
        date = time.strptime(date, "%b %d, %Y")
        if currDate <= date:
            # Recent enough: fetch the article and scrape its paragraphs.
            linkRequest = requests.get(link)
            linkContent = html.fromstring(linkRequest.content)
            linkXPath = linkContent.xpath(paraXPath)
            pageContent = ''
            for linkXElement in linkXPath:
                text = tostring(linkXElement)
                ## Take out small icons in text (HTML numeric entities)
                icons = re.findall(r'&#\d*;', text)
                icons = list(set(icons))
                for icon in icons:
                    text = re.sub(icon, '', text)
                ## Take out HTML tags
                tags = re.findall('<[^>]+>', text)
                tags = list(set(tags))
                for tag in tags:
                    text = text.replace(tag, '')
                pageContent = pageContent + text
            ## Adds list item
            li.append([pageContent,link])
    return li
## Function to create the CSV file
def createCSV(liCSV, f1):
    """Write the scraped [content, link] rows to the already-open file f1 (Python 2)."""
    writer = csv.writer(f1, delimiter=',')
    ## Add a header row
    writer.writerow(["Content","Link"])
    for i in liCSV:
        rowStr = ''
        # Quote each field and comma-join them into one string.
        for e in i:
            rowStr = rowStr + '"' + str(e) + '"'
            rowStr = rowStr + ","
        rowStr = rowStr[:-1]
        # NOTE(review): the whole quoted row is passed as a single-element
        # list, so it lands in ONE csv cell - verify this is the intended
        # output shape.
        print rowStr
        writer.writerow([rowStr])
## Main Function
def main(mainURL, mainXPath, paraXPath, fileName):
    """Scrape the first 10 result pages of mainURL and write the rows to fileName."""
    liData = []
    with open(fileName,'w') as scrapeFile:
        increment = 0
        while increment < 10:
            ## Set the request to seem like a browser to get correct layout of HTML
            header = {'User-Agent': 'Mozilla/Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            if increment == 0:
                mainRequest = requests.get(mainURL, headers=header)
            else:
                # Google paginates with &start=<offset>, 10 results per page.
                url = mainURL + "&start=" + str(increment * 10)
                mainRequest = requests.get(url, headers=header)
            mainContent = html.fromstring(mainRequest.content)
            liData.extend(scrapeInfo(mainURL, mainContent, mainXPath, paraXPath))
            increment = increment + 1
        createCSV(liData, scrapeFile)
## Run main
main('https://www.google.ca/search?q=anti+money+laundering+and+money+laundering+and+arrested+and+convicted+and+sentenced&hl=en&gl=ca&authuser=0&tbm=nws&tbs=sbd:1', '//*[@class="g"]', '//p', 'GN_Scrape.csv')
| true |
cca6305fb6446ba9a9a12fa0e52ca6d36ea33df5 | Python | Aasthaengg/IBMdataset | /Python_codes/p04013/s649990195.py | UTF-8 | 695 | 2.9375 | 3 | [] | no_license | # ABC044
N, A = map(int, input().split())
X = list(map(int, input().split()))
# 2 ** 50 -> brute-force enumeration is too slow -> DP
# dp[i][k][wa] = number of ways to choose k cards among the first i cards
#                so that their values sum to wa
# max wa = max(X) * N
max_wa = max(X) * N
dp = [[[0] * (max(X) * (N+1)) for _ in range(N+1)] for __ in range(N+1)]
dp[0][0][0] = 1
for i in range(N):
    for k in range(i+1):
        for wa in range(max_wa):
            # take card i
            dp[i+1][k+1][wa+X[i]] += dp[i][k][wa]
            # skip card i
            dp[i+1][k][wa] += dp[i][k][wa]
ans = 0
# A selection of k cards has average A exactly when its sum is k * A.
for k in range(1, N+1):
    if k * A > max_wa:
        break
    ans += dp[-1][k][k*A]
print(ans)
| true |
1e11a51c1e95f36ad5caa0efeeb17ae752019352 | Python | DmitryVakhrushev/Python | /3_Coursera_Learn to Program Crafting Quality Code (LPT2)/week1_Crafting Qaulity Code_LPT2/my_restaraunt code.py | UTF-8 | 1,414 | 3.3125 | 3 | [] | no_license |
# Restaraunt recomendation problem
'''
A sample from the big list
Georgie Porgie
87%
$$$
Canadian, Pub Food
Queen St. Cafe
82%
$
Malaysian, Thai
Dumplings R Us
71%
$
Chinese
Mexican Grill
85%
$$
Mexican
Deep Fried Everything
52%
$
Pub Food
'''
#-----------------------------------------
# dict of {str: int}
name_to_rating = {
'Georgie Porgie': 8,
'Queen St. Cafe': 82,
'Dumplings R Us': 71,
'Mexican Grill': 85,
'Deep Fried Everything': 52}
# dict {str: list of str}
price_to_names = {
'$': ['Queen St. Cafe', 'Dumplings R Us', 'Deep Fried Everything'],
'$$': ['Mexican Grill'],
'$$$': ['Georgie Porgie'],
'$$$$':[]}
# dict {str: list if str}
cuisine_to_names = {
'Canadian': ['Georgie Porgie'],
'Pub Food': ['Georgie Porgie', 'Deep Fried Everything'],
'Malaysian': ['Queen St. Cafe'],
'Thai': ['Queen St. Cafe'],
'Chinese': ['Dumplings R Us'],
'Mexican': ['Mexican Grill']}
#-----------------------------------------
# The file containing the restaraunt data
FILENAME = 'lecture_code_w1_restaurants_small.txt'
def recommend(file, price, cuisines_list):
'''(file open for reading, str, list of syt) -> list of [int, str] list
the output will be sorted by rating %
'''
# Read the file and build the data structure
#
def read_restaraunt(file):
'''(file) -> (dict, dict, dict)
Return a tuple of three dictionaries based on the information in the file
'''
# accumulators
| true |
c3a29ec1048baa000ec7abd4a5f058d09ae4ad49 | Python | PabloWually/computer_vision | /Image_Analysis/Mod1/Excercice 01_/Open_image_Subplot.py | UTF-8 | 1,691 | 3.03125 | 3 | [
"MIT"
] | permissive | import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import image as mp_image
from PIL import Image
import skimage as sk
from skimage import io as sk_io
from skimage import transform as sk_transform
import cv2
src_folder = "C:/Repositories/Image_Analysis/data/voc"
images = []
# Load the same kind of data with three different libraries (PIL,
# scikit-image, OpenCV) and collect numpy arrays for plotting.
pil_image = Image.open(os.path.join(src_folder,"automobile","000522.jpg"))
images.append(np.array(pil_image)) #numpy format
#images.append(pil_image) #pil format
sk_image = sk_io.imread(os.path.join(src_folder,"plane","000228.jpg"))
#images.append(Image.fromarray(sk_image)) #pil format
images.append(sk_image) #numpy format
#sk_image.shape #show the shape of the image
cv_image = cv2.imread(os.path.join(src_folder,"train", "000712.jpg"))
#images.append(cv_image) #image without channel correction (OpenCV loads BGR)
images.append(cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)) #converted BGR -> RGB
""" #Image in gray scale
sk_gray_image = sk.color.rgb2gray(sk_image)
plt.imshow(sk_gray_image, 'gray')
sk_gray_image.shape """
""" #Rotation with PIL Library
rotated_pil_image = pil_image.rotate(90, expand=1)
plt.imshow(rotated_pil_image)
#The expand parameter tells PIL to change the image dimenions to fit the rotated orientation. Without this, we'd get an image with the original dimensions with a resized """
""" #Rotation with Scikit-Image Library
rotated_sk_image = sk_transform.rotate(sk_image, 90, resize=True)
plt.imshow(rotated_sk_image) """
# Plot all collected images side by side in one row.
fig = plt.figure(figsize=(12,12))
image_num = 0
num_images = len(images)
for image_idx in range(num_images):
    a = fig.add_subplot(1,num_images,image_idx+1)
    image_plot = plt.imshow(images[image_idx])
    a.set_title("Image" + str(image_idx+1))
plt.show()
| true |
9c403d3e587c0a631469b754e7b25e5b4690167d | Python | MasterGroosha/pybase100 | /pybase100/__init__.py | UTF-8 | 1,308 | 3.140625 | 3 | [
"Unlicense"
] | permissive | # Licensed under UNLICENSE
# See UNLICENSE provided with this file for details
# For more information, please refer to <http://unlicense.org/>
def encode(data, encoding="utf-8"):
    """
    Encodes text to emoji
    :param data: Original text as bytes array or plaintext
    :param encoding: (Optional) encoding if {data} passed as plaintext
    :return: bytes array of encoded text
    """
    if isinstance(data, str):
        data = data.encode(encoding)
    # Each input byte b becomes the 4-byte UTF-8 emoji sequence
    # 0xF0 0x9F x y, where x and y are derived from b + 55.
    encoded = bytearray()
    for b in data:
        shifted = b + 55
        encoded += bytes((240, 159, shifted // 64 + 143, shifted % 64 + 128))
    return bytes(encoded)
def decode(data, encoding="utf-8"):
    """
    Decodes emoji to text
    :param data: Encoded text in form of emoji as bytes array or plaintext
    :param encoding: (Optional) encoding if {data} passed as plaintext
    :return: bytes array of decoded text
    :raises ValueError: if the input length is not a multiple of 4
    """
    if isinstance(data, str):
        data = data.encode(encoding)
    # Every decoded byte is spread over a 4-byte emoji sequence.
    if len(data) % 4 != 0:
        # ValueError instead of the original bare Exception; it is a subclass
        # of Exception, so existing handlers keep working.
        raise ValueError('Length of string should be divisible by 4')
    tmp = 0
    out = [None] * (len(data) // 4)
    for i, b in enumerate(data):
        if i % 4 == 2:
            # Third byte of each group carries the high bits.
            tmp = ((b - 143) * 64) % 256
        elif i % 4 == 3:
            # Fourth byte carries the low bits; undo the +55 shift.
            out[i // 4] = (b - 128 + tmp - 55) & 0xff
    return bytes(out)
| true |
8a8ae2f0430d42da886e8ff499b514e8c1d9dfda | Python | caohanlu/test | /网络编程_单进程_循环/8 群聊_a需求分析.py | UTF-8 | 3,215 | 3.359375 | 3 | [] | no_license | ''
'''
通信协议设计:
这个思想很重要,对应的是server的while if总分结构
通信协议设计 : 数据传输中双方做一些数据格式和含义的约定,客户端和服务端同时遵守就行,
请求类型和数据参量【即传给服务端的数据】都是程序员自己定义的
将请求类型和数据参量一起发给服务端,服务端判断客户端想要做什么
请求类型 数据参量
登录聊天室 L【login】 name
聊天 C【chart】 name 消息内容
退出 E 【exit】 name
需求分析 : 要点 -》构建出软件的基本使用方法
【1】 有人进入聊天室需要输入姓名,姓名不能重复
【2】 有人进入聊天室时,其他人会收到通知:xxx 进入了聊天室
【3】 一个人发消息,其他人会收到:xxx : xxxxxxxxxxx
【4】 有人退出聊天室,则其他人也会收到通知:xxx退出了聊天室
技术分析 : 使用的技术
* C / S 模型
* 服务端存储用户信息 : 姓名 和 address,可以用列表、字典、类
[(name,address),....]
{name:address}
class Person:
def __init__(self,name,address):
self.name = name
self.address = address
* 网络通信 : udp
* 服务端消息传输机制: 客户端 ---》 服务端 --》转发给其他客户端
* 客户端收发消息互不影响 : 多进程,一个进程负责收消息,一个负责发消息
功能模块分析
* 整体框架设计
* 登录聊天室
* 聊天
* 退出聊天室
封装 : 函数
模块逻辑设计
* 整体框架设计
【以服务端为出发点,去思考】
服务端: 1.创建udp网络套接字
2.循环接收来自客户端的请求
3.根据请求调用不同的函数去解决
客户端: 1. 建立网络套接字
*不同请求如下:
【不同的请求,或不同的功能,以客户端为出发点去思考,因为客户端是功能的发出者,服务端只是满足功能】
* 进入聊天室
客户端 : 1.输入用户名
2. 发送用户名
3. 等待进入聊天室 --》 Y 进入聊天室聊天
N 重新输入
服务端: 1. 接收用户名
2. 判断是否已经存在
3. 存在 --》 告知客户端 无法进入
不存在 --》 告知客户端进入聊天室
4. 客户端进入聊天室则 存储用户信息,告知其他用户
* 聊天
客户端 : 创建子进程
父进程循环发送消息
子进程循环接收消息【因为子进程不能使用input】
服务端 : 接收消息
转发给其他人
* 退出聊天室
优化完善
* 显示效果
* 客户端强行退出
''' | true |
data = []
# Load every review (one per line) from the source file.
with open('reviews.txt', 'r') as f:
    for line in f:
        data.append(line)
print('讀取完成,總共有', len(data), '筆資料')

# Total character count across all reviews (for the average below).
sum_len = sum(len(review) for review in data)

# Show the first six reviews, numbered from 1.
for idx in range(6):
    print(str(idx + 1) + '.', data[idx].strip())
print('留言的平均長度為', sum_len/len(data), '個字元')

# Reviews shorter than 30 characters.
new = [review for review in data if len(review) < 30]
print('一共有', len(new), '筆留言長度小於30')

# Reviews mentioning the keyword "if".
word = [review for review in data if 'if' in review]
print('共有', len(word), '筆留言提到if')
print(word[0])

# Same keyword check expressed as one boolean per review.
www = ['if' in review for review in data]
print(www)
def maior_elemento(t):
    """Return the largest element of the non-empty sequence *t*.

    Raises IndexError on an empty sequence (same as the original, which
    indexed t[0] first).
    """
    # Iterate over the values directly instead of indexing via range(len(t)).
    maior = t[0]
    for valor in t:
        if valor > maior:
            maior = valor
    return maior
| true |
242f7161d4bd5d965be37813619ffb014a34c6cc | Python | malay95/ctci | /2. Linked List/fruit_in_basket.py | UTF-8 | 427 | 3.296875 | 3 | [
"MIT"
] | permissive | from collections import Counter
def total_fruit(tree):
    """Length of the longest contiguous window of *tree* holding at most
    two distinct values (LeetCode 904, "fruit into baskets").

    Sliding window: grow on the right, shrink from the left whenever a
    third fruit type appears. Returns 0 for an empty list (the original
    crashed with ValueError via max() on an empty sequence).
    """
    count = Counter()
    start = 0
    best = 0
    for j, fruit in enumerate(tree):
        count[fruit] += 1
        while len(count) >= 3:
            # Too many fruit types: shrink the window from the left.
            count[tree[start]] -= 1
            if count[tree[start]] == 0:
                count.pop(tree[start])
            start += 1
        # Track the running maximum instead of storing every window size.
        best = max(best, j - start + 1)
    return best


assert total_fruit([1,0,1,4,1,4,1,2,3]) == 5
33417315618fd539ab91a419c2aa783c4a6bcc9f | Python | MarcinKonowalczyk/mazes-for-programmers-python-src | /algorithms/sidewinder.py | UTF-8 | 1,136 | 3.390625 | 3 | [] | no_license | from random import choice, randint
from typing import TYPE_CHECKING
from algorithms.algorithm import Algorithm
if TYPE_CHECKING:
from base.grid import Grid
else:
Grid = 'Grid'
'''
A sidewinder visits each cell in the grid and chooses to carve a passage either north or east (similar to Binary Tree),
but running row by row.
Causes topmost row to always be a straight line.
'''
class Sidewinder(Algorithm):
    """Row-by-row maze carver.

    Visits each cell, extending the current horizontal run eastward or
    closing the run by carving north from a random member (like Binary
    Tree, but processed per row). The topmost row always ends up as one
    straight corridor.
    """

    def on(self, grid: Grid) -> None:
        for row in grid.eachRow():
            current_run = []
            for cell in row:
                current_run.append(cell)
                eastern_edge = cell.east is None
                northern_edge = cell.north is None
                # Close out at the east wall, or with a coin flip when a
                # northern neighbour exists (same short-circuit order as
                # before, so the RNG is consumed identically).
                close_out = eastern_edge or (not northern_edge and randint(0, 1) == 0)
                if close_out:
                    chosen = choice(current_run)
                    if chosen.north:
                        chosen += chosen.north
                    current_run.clear()
                else:
                    cell += cell.east  # type: ignore # cell is not on the eastern boundary here
                self.step()
| true |
df92ce8db9b840ae0f4373af7ee15c65ceffcc4e | Python | tvachev/Nand2Tetris | /11/VMWriter.py | UTF-8 | 2,638 | 2.9375 | 3 | [] | no_license | '''
Created on Mar 22, 2016
@author: saikumar
'''
class VMWriter:
    """
    Writes Jack VM commands to an output file (Nand2Tetris compiler back end).

    Segments
    'argument', 'local', 'static', 'field', 'temp'
    Id_Type
    'user_class', 'int', 'char', 'boolean'
    """
    # NOTE(review): Python 2 source (see the bare `print ... ,` statement in
    # write_subroutine) - do not run under Python 3 without porting.
    def __init__(self, out_file_name):
        # The handle stays open for the writer's lifetime; the owner must
        # call writer_close() once code generation is finished.
        self._out_file_name = out_file_name
        self._file_object = open(out_file_name, 'w')
    def write_subroutine(self, class_name, sub_name, local_var_count):
        # Emit a VM function declaration: "function Class.sub nLocals".
        print 'Subroutine',
        temp_buffer = 'function ' + class_name + '.' +\
        sub_name + ' ' + str(local_var_count) + '\n'
        self.flush(temp_buffer)
    def write_call(self, class_name, sub_name, argument_count):
        # Emit "call Class.sub nArgs".
        temp_buffer = 'call ' + class_name + '.' +\
        sub_name + ' ' + str(argument_count) + '\n'
        self.flush(temp_buffer)
    def write_if_goto(self, label):
        # Conditional jump: pops the stack top, jumps when it is non-zero.
        temp_buffer = 'if-goto ' + label + '\n'
        self.flush(temp_buffer)
    def write_goto(self, label):
        # Unconditional jump.
        temp_buffer = 'goto ' + label + '\n'
        self.flush(temp_buffer)
    def write_label(self, label):
        # Jump-target declaration.
        temp_buffer = 'label ' + label + '\n'
        self.flush(temp_buffer)
    def write_push(self, segment, index):
        # "push <segment> <index>": copy segment[index] onto the stack.
        temp_buffer = 'push ' + segment + ' ' + str(index) + '\n'
        self.flush(temp_buffer)
    def write_pop(self, segment, index):
        # "pop <segment> <index>": store the stack top into segment[index].
        temp_buffer = 'pop ' + segment + ' ' + str(index) + '\n'
        self.flush(temp_buffer)
    def write_arithmatic(self, operator, helper=None):
        # Map a Jack operator to its VM command. helper='NEG' distinguishes
        # unary minus from binary subtraction; '*' and '/' have no VM opcode
        # and compile to Math OS calls. Unknown operators emit nothing
        # (flush("") writes an empty string).
        temp_buffer = ""
        if operator == '+':
            temp_buffer = 'add\n'
        elif operator == '-' and helper == None:
            temp_buffer = 'sub\n'
        elif operator == '-' and helper == 'NEG':
            temp_buffer = 'neg\n'
        elif operator == '~':
            temp_buffer = "not\n"
        elif operator == '<':
            temp_buffer = "lt\n"
        elif operator == '>':
            temp_buffer = "gt\n"
        elif operator == '&':
            temp_buffer = "and\n"
        elif operator == '|':
            temp_buffer = "or\n"
        elif operator == '=':
            temp_buffer = "eq\n"
        elif operator == '/':
            temp_buffer = "call Math.divide 2\n"
        elif operator == '*':
            temp_buffer = 'call Math.multiply 2\n'
        self.flush(temp_buffer)
    def write_return(self):
        self.flush('return\n')
    def flush(self, temp_buffer):
        # Write and flush immediately so output is complete even if the
        # process exits without writer_close().
        self._file_object.write(temp_buffer)
        self._file_object.flush()
    def writer_close(self):
        self._file_object.close()
eb3b757a0e29bfc764bdc3c3e2f6d481533ce8c7 | Python | HananeOB/RechercheInformationsInex | /main.py | UTF-8 | 4,803 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 11:10:56 2020
@author: Hanane OBLOUHOU
Jérémy LEMÉE
William LIM
Yana SOARES DE PAULA
"""
from numpy import argmax
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer as tf_idf
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
# from util import XMLFile, TextFile, XMLFile, Request, TextArticle
import re
from scipy.sparse import csr_matrix
import nltk
# Fetch the corpora needed below; downloads over the network on first run.
nltk.download('wordnet')
nltk.download('punkt')
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
# Module-wide singletons shared by lemmetizing()/stemming() below.
ps = PorterStemmer()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
def lemmetizing(doc):
    """Tokenize *doc*, drop English stop words, and lemmatize each token."""
    tokens = nltk.word_tokenize(doc)
    kept = [token for token in tokens if token.lower() not in stop_words]
    return ' '.join(lemmatizer.lemmatize(token) for token in kept)
def stemming(doc):
    """Tokenize *doc*, drop English stop words, and stem each token."""
    tokens = nltk.word_tokenize(doc)
    kept = [token for token in tokens if token.lower() not in stop_words]
    return ' '.join(ps.stem(token) for token in kept)
def get_all_docs(textfile="Collection_Texte"):
    """Read the collection file and return [doc_ids, doc_texts].

    Each document looks like ``<doc><docno>ID</docno> TEXT </doc>``;
    newlines are flattened to spaces first so a document may span lines.
    """
    # `with` closes the handle even on error (the original leaked it).
    with open(textfile) as file:
        text = file.read()
    text = re.sub(r'\n', ' ', text)
    patternN = r'<doc><docno>(\d+)</docno>'
    patternD = r'<doc><docno>\d+</docno>(.*?)</doc>'
    DocN = re.findall(patternN, text)  ## list of DocIds
    DocD = re.findall(patternD, text)  ## list of DocTexts
    return [DocN, DocD]
def pretraitement(DocD, Bol=True):
    """Clean each document: strip digits, collapse whitespace, then remove
    stop words and normalize tokens.

    Bol selects the normalizer: True -> lemmatization (slower),
    False -> stemming.
    """
    cleaned = [re.sub(r'\d+', ' ', doc) for doc in DocD]
    cleaned = [re.sub(r'\s+', ' ', doc) for doc in cleaned]
    normalize = lemmetizing if Bol else stemming
    return [normalize(doc) for doc in cleaned]
def get_all_queries(queryfile="requetes.txt"):
    """Parse the query file into (query_id, lemmatized_text) tuples.

    '+' separators are turned into spaces before matching; each query is
    ``<digits><whitespace><non-digit text>``.
    """
    # `with` closes the handle even if read() raises (the original's
    # explicit close() was skipped on error).
    with open(queryfile) as file:
        req = file.read()
    req = re.sub(r'\+', ' ', req)
    patternq = r'(\d*)\s(\D*)'
    q = re.findall(patternq, req)
    return [(num, lemmetizing(text)) for (num, text) in q]
def put_query(query, docs_matrix, num_matrix):
    """Append the query's text and id so it becomes the collection's last row."""
    qnum, qtext = query[0], query[1]
    docs_matrix.append(qtext)
    num_matrix.append(qnum)
    return
def see_doc(index):
    """Print the document number and text stored at *index* in the corpus."""
    print(f"Doc number = {num_m[index]}\n")
    print(f"{doc_m[index]}\n")
    return
def evaluation_ltn(docs, query):
    """Score *docs* against *query* with a dot product (ltn weighting) and
    return up to the top 1500 (index, score) pairs, best first."""
    raw = linear_kernel(query, docs, dense_output=False).toarray()[0]
    ranked = sorted(enumerate(raw), key=lambda pair: pair[1], reverse=True)
    return ranked[:1500]  # Maximum results = 1500
def evaluation_ltc(docs, query):
    """Score *docs* against *query* with cosine similarity (ltc weighting)
    and return up to the top 1500 (index, score) pairs, best first."""
    sims = cosine_similarity(query, docs)[0]  # cosine_similarity returns a 2-D array
    ranked = sorted(enumerate(sims), key=lambda pair: pair[1], reverse=True)
    return ranked[:1500]  # Maximum results = 1500
# Get documents and their document numbers :
all_docs = get_all_docs()
num_m, doc_m = all_docs[0], all_docs[1]
# Pretraitement (Bol=False -> stemming rather than lemmatization)
doc_m = pretraitement(doc_m, Bol=False)
# # Visualize first document
# see_doc(0)
# Get queries
query_list = get_all_queries()


def _write_run(sorted_scores, run_file):
    """Append one run to *run_file*: one line per result with its rank,
    document id and score (TREC-style submission format). Assumes the
    current query id is the last entry of num_m (set by put_query)."""
    rang = 1
    for e in sorted_scores:
        with open(run_file, 'a') as f:
            f.write("{} Q0 {} {} {} HananeJeremyWilliamYana /article[1]\n".format(num_m[-1], num_m[e[0]], rang, e[1]))
        rang += 1


# Process each query against the collection. The two ranking-output loops
# that were duplicated for ltn/ltc are factored into _write_run above.
for q in query_list:
    # add query to matrix of documents (as the last row)
    put_query(q, doc_m, num_m)
    # Creates vectorial matrix with corresponding weights :
    vectorizer = tf_idf(sublinear_tf=True, norm="l1")
    matrix = vectorizer.fit_transform(doc_m)
    # Separate docs and query
    docs = matrix[0:-1]
    query = matrix[-1]
    sorted_scores_ltn = evaluation_ltn(docs, query)
    sorted_scores_ltc = evaluation_ltc(docs, query)
    # Writing the results to the run files (append mode).
    _write_run(sorted_scores_ltn, "HananeJeremyWilliamYana_01_01_ltn_article_stemming.txt")
    _write_run(sorted_scores_ltc, "HananeJeremyWilliamYana_01_02_ltc_article_stemming.txt")
    ## eliminer la requête de la collection
    num_m.pop()
    doc_m.pop()
| true |
96295dbd2bda477f1959fda9bf41b33a1578dbb5 | Python | EdgarLozano185519/100-Days-of-Python | /projects-source/day17/hurst-painting/main.py | UTF-8 | 1,829 | 3.546875 | 4 | [] | no_license | from turtle import Turtle, Screen, colormode
from random import randint, choice
import colorgram
# Pull a 10-colour palette from the reference image with colorgram
# (third-party; requires download.jpg next to this script).
colors = colorgram.extract('download.jpg', 10)
colors_list = []
print("Colors extracted (rgb values): ")
for color in colors:
    # Store each colour as a plain (r, g, b) tuple, compatible with
    # turtle's colormode(255) set below.
    colors_list.append((color.rgb[0], color.rgb[1], color.rgb[2]))
# Handy helper used throughout the program for random colours.
def generate_rgb():
    """Return a random (r, g, b) tuple with each channel in 0..255."""
    return tuple(randint(0, 255) for _ in range(3))
#print(generate_rgb())
# Create and configure turtle
timmy_the_turtle = Turtle()
timmy_the_turtle.shape("turtle")
timmy_the_turtle.color("red")
# 255-based RGB tuples (matches the colors_list entries above).
colormode(255)
# Pen up so lines will not show
timmy_the_turtle.penup()
timmy_the_turtle.hideturtle()
# Adjust turtle to be at a spot
# (walk to the lower-left, then set up the first row's starting position).
timmy_the_turtle.setheading(225)
timmy_the_turtle.forward(250)
timmy_the_turtle.setheading(90)
timmy_the_turtle.forward(50)
timmy_the_turtle.left(90)
timmy_the_turtle.forward(500)
timmy_the_turtle.right(90)
# Move turtle to draw a shape
#timmy_the_turtle.forward(100)
#timmy_the_turtle.right(90)
#timmy_the_turtle.forward(100)
#timmy_the_turtle.right(90)
#timmy_the_turtle.forward(100)
#timmy_the_turtle.right(90)
#timmy_the_turtle.forward(100)
# Draw alternating line and gap combinations
#for i in range(1,51,2):
#    if i%2 == 0:
#        timmy_the_turtle.pendown()
#        timmy_the_turtle.forward(10)
#    else:
#        timmy_the_turtle.penup()
#        timmy_the_turtle.forward(10)
# Draw dots in different colors in grid fashion
# NOTE(review): there is no forward() between consecutive dots, so each
# row's 10 dots land on the same spot; a missing move seems likely -
# verify against the intended Hirst-style grid.
for i in range(1,101):
    timmy_the_turtle.dot(20, choice(colors_list))
    if i%10 == 0:
        # End of a row: go up 50 and back to the left edge.
        timmy_the_turtle.setheading(90)
        timmy_the_turtle.forward(50)
        timmy_the_turtle.left(90)
        timmy_the_turtle.forward(500)
        timmy_the_turtle.right(90)
# End code to allow window to stay and allow for exit
off_screen= Screen()
off_screen.exitonclick()
816a358b49138c5b5c7af49e5bd9efd82594a761 | Python | walteranyika/python-image-writer | /image_writer.py | UTF-8 | 336 | 2.921875 | 3 | [] | no_license | from PIL import Image, ImageFont, ImageDraw
# Open the source photo and measure it.
img = Image.open("tribal.jpg", "r")
width, height = img.size

# Render the caption centred on the image in white, then save a copy.
font = ImageFont.truetype("monaco.ttf", 16)
draw = ImageDraw.Draw(img)
caption = "Tribal Ethiopia"
text_x, text_y = font.getsize(caption)
position = ((width - text_x) / 2, (height - text_y) / 2)
draw.text(position, caption, (255, 255, 255), font=font)
img.save("branded.jpg")
| true |
c37cb1e7e419b9edcb73b454f897b6bd2114fe66 | Python | hakobian4/BattleShip_Python_BackEnd | /api/game.py | UTF-8 | 417 | 3.21875 | 3 | [] | no_license | from player import Player
import numpy as np
class Game:
    """Two-player battleship session: holds both players and picks who starts."""

    def __init__(self):
        # Players are filled in later by registration().
        self.player1 = None
        self.player2 = None

    def registration(self, name1="Player1", name2="Player2"):
        """Create both Player objects from the given display names."""
        self.player1 = Player(name1)
        self.player2 = Player(name2)

    def starting(self):
        """Randomly pick which player's name goes first."""
        candidates = [self.player1.name, self.player2.name]
        return np.random.choice(candidates)
| true |
d0bea24fe2339519f8b286ac804c4a6b148f9b1e | Python | Panlq/Algorithm | /Leetcode/166-分数到小数.py | UTF-8 | 1,928 | 4.25 | 4 | [] | no_license | """
LeetCode 上第 153 号问题:
给定两个整数, 分别表示分数的分子numerator 和 分母 denominator,
以字符串形式返回小数,如果小数部分为循环小数,则将循环的部分括在括号内。
"""
"""
思路:模式消除出发,当小数部分出现重复的时候, 余数也开始重复
这种题有几种情况
正负号问题
加小数点的情况, 比如 8/ 2 不需要加小数点
小数部分,如何判断是否开始循环了
解决方法,
先判断结果的正负
直接相除, 通过余数,看能否整除
开始循环的时候, 说明之前已经出现过这个余数, 我们只要记录前面出现余数的位置,插入括号即可!
参考:https://leetcode-cn.com/problems/fraction-to-recurring-decimal/solution/ji-lu-yu-shu-by-powcai/
"""
class Solution:
    def fractionToDecimal(self, numerator: int, denominator: int) -> str:
        """Return numerator/denominator as a decimal string, wrapping any
        repeating fractional part in parentheses (e.g. 2/3 -> "0.(6)").

        A cycle is detected when a remainder repeats during long division:
        from that point the digits must repeat too, so the opening '(' is
        inserted where that remainder was first seen.
        """
        if numerator == 0:
            return "0"
        # Sign of the result; operands are made positive for the division.
        sign = '-' if (numerator < 0) != (denominator < 0) else ''
        numerator, denominator = abs(numerator), abs(denominator)
        whole, remainder = divmod(numerator, denominator)
        if remainder == 0:
            return sign + str(whole)
        digits = [sign, str(whole), '.']
        # Remember where each remainder first produced a digit.
        seen = {remainder: len(digits)}
        while remainder:
            whole, remainder = divmod(remainder * 10, denominator)
            digits.append(str(whole))
            if remainder in seen:
                digits.insert(seen[remainder], '(')
                digits.append(')')
                break
            seen[remainder] = len(digits)
        return ''.join(digits)
if __name__ == '__main__':
    solver = Solution()
    # Same four demo conversions as before, printed one per line.
    for num, den in ((1, 2), (2, 3), (1, 6), (8, 2)):
        print(solver.fractionToDecimal(num, den))
| true |
4230f6a2ad377459ecbf2125f9828222d134578c | Python | Leputa/Leetcode | /python/143.Reorder List.py | UTF-8 | 1,521 | 3.5625 | 4 | [] | no_license | class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reorderList(self, head):
"""
:type head: ListNode
:rtype: void Do not return anything, modify head in-place instead.
"""
length = self.getLength(head)
if length == 0 or length == 1:
return
reversehead = self.reverseList(head, length)
seqNode, reverseNode = head, reversehead
while(reverseNode != None):
nextSeqNode = seqNode.next
nextReverseNode = reverseNode.next
seqNode.next = reverseNode
reverseNode.next = nextSeqNode
seqNode = nextSeqNode
reverseNode = nextReverseNode
def getLength(self, head):
length = 0
tmpNode = head
while (tmpNode != None):
length += 1
tmpNode = tmpNode.next
return length
def reverseList(self, head, length):
tmpNode = head
if length % 2 == 0:
steps = length//2
else:
steps = length//2 + 1
for i in range(steps):
preNode = tmpNode
tmpNode = tmpNode.next
preNode.next = None
Head = ListNode(None)
while(tmpNode != None):
nextNode = tmpNode.next
tmpNode.next = Head.next
Head.next = tmpNode
tmpNode = nextNode
return Head.next
| true |
dbee8592addcd20a8e166fcf2da5d95b1e7f5d54 | Python | zzhu24/Machine-Learning | /zzhu24-hw3/python code/Question3.py | UTF-8 | 4,700 | 3.0625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from add_noise import add_noise
from gen import gen
from Perceptron import Perceptron
from Perceptron_Margin import Perceptron_Margin
from Winnow import Winnow
from Winnow_Margin import Winnow_Margin
from AdaGrad import AdaGrad
"""
Function that test the algoritem and plot for question 3
"""
# The three sections below differed only in the value of m (100, 500, 1000);
# the repeated code is factored into one experiment function.
def _run_experiment(m):
    """Generate data with dimension parameter *m*, then tune (where the
    original did), train each algorithm for 20 epochs, and print its
    hyper-parameters and test accuracy."""
    (train_y, train_x) = gen(10, m, 1000, 50000, True)
    (test_y, test_x) = gen(10, m, 1000, 10000, False)

    # Perceptron without margin (no tuning step in the original either).
    p_s = Perceptron(train_x.shape[1], None)
    for i in range(0, 20):
        p_s.train(train_x, train_y)
    print("perceptron without margin learning rate:" + str(p_s.learning_rate) + "accuracy:" + str(p_s.test(test_x, test_y)))

    # Perceptron with margin 1
    p_m = Perceptron_Margin(train_x.shape[1], 1, None)
    p_m.tune_parameter_perceptron(train_x, train_y)
    for i in range(0, 20):
        p_m.train(train_x, train_y)
    print("perceptron margin learning rate:" + str(p_m.learning_rate) + "accuracy:" + str(p_m.test(test_x, test_y)))

    # Winnow without margin
    w_s = Winnow(train_x.shape[1], None)
    w_s.tune_parameter_winnow(train_x, train_y)
    for i in range(0, 20):
        w_s.train(train_x, train_y)
    print("winnow without margin alpha:" + str(w_s.alpha) + "accuracy:" + str(w_s.test(test_x, test_y)))

    # Winnow with margin
    w_m = Winnow_Margin(train_x.shape[1], 2.0, None)
    w_m.tune_parameter_winnow_margin(train_x, train_y)
    for i in range(0, 20):
        w_m.train(train_x, train_y)
    print("winnow with margin alpha:" + str(w_m.alpha) + "margin:" + str(w_m.margin) + "accuracy:" + str(w_m.test(test_x, test_y)))

    # AdaGrad
    adaGrad = AdaGrad(train_x.shape[1], None)
    adaGrad.tune_parameter_adagrad(train_x, train_y)
    for i in range(0, 20):
        adaGrad.train(train_x, train_y)
    print("adagrad learning rate:" + str(adaGrad.learning_rate) + "accuracy:" + str(adaGrad.test(test_x, test_y)))


# Same three runs as the original: m = 100, 500, 1000.
for m in (100, 500, 1000):
    _run_experiment(m)
| true |
fdb1dde640d173dad97a2f0b76602649a7a5d7dd | Python | kellpossible/blog | /content/convert_img.py | UTF-8 | 296 | 2.53125 | 3 | [] | no_license | import os, csv
matches = []
# Markdown image syntax ![caption](/path): capture caption and path.
pattern = r"[!]\[(.*)\]\(/(.*)\)"
# Shortcode template for the replacement. Single outer quotes so the inner
# double quotes do not terminate the literal - the original line
# (`"{{ img(path="{2}", ...`) was a Python syntax error.
replacement = '{{ img(path="{2}", caption="{1}") }}'

# Collect every Markdown file under the current directory.
for root, dirnames, filenames in os.walk("."):
    for filename in filenames:
        if filename.endswith(".md"):
            matches.append(os.path.join(root, filename))
| true |
6c102eead034aeabcc9e8799bae701b67e149133 | Python | dominhhieu1019/EARIN-Introduction_to_Artificial_Intelligence | /Project1-Optimization/algorithm.py | UTF-8 | 3,655 | 3.515625 | 4 | [] | no_license | from heapq import heappop, heappush
from graph import (edges_to_graph, graph_to_edges)
def find_path(graph, no_cities, w1, w2):
    """Algorithm implementation to find the highway.

    Builds a spanning tree with a Prim-style cheapest-edge pass, then
    greedily re-adds unused edges (cheapest first) whenever they do not
    increase the objective function.

    Args:
        graph (dict): A non-empty graph.
        no_cities (int): number of cities in the network
        w1 (float): parameter w1
        w2 (float): parameter w2

    Returns:
        list: Returns a list of tuples representing the edges of the path
    """
    if not graph or not isinstance(graph, dict):
        raise ValueError("graph must be a dict.")
    min_path = []
    start = next(iter(graph))
    # A set gives O(1) membership tests; the original used a list, making
    # every `in explored` check O(n).
    explored = set()
    unexplored = [(0, start, None)]
    unused = []
    while unexplored:
        w, u, v = heappop(unexplored)
        if u in explored:
            continue
        if v is not None:
            min_path.append((v, u, w))
        explored.add(u)
        for n in graph[u]:
            if n not in explored:
                heappush(unexplored, (graph[u][n], n, u))
            else:
                # Edge closes a cycle; keep it as a candidate for later.
                heappush(unused, (graph[u][n], n, u))
    minf, distance_between_cities = objective_function(min_path, no_cities, w1, w2)
    while unused:
        path = min_path.copy()
        w, u, v = heappop(unused)
        if (v, u, w) in min_path or (u, v, w) in min_path or w == distance_between_cities[u][v]:
            # Edge already chosen, or it would not shorten the u-v route.
            continue
        path.append((v, u, graph[v][u]))
        f, dis = objective_function(path, no_cities, w1, w2)
        if f > minf:
            continue
        # Keep the augmented path: it improves (or matches) the objective.
        min_path = path
        minf = f
        distance_between_cities = dis
    return min_path
def cities_distance(path, no_cities):
    """Compute all-pairs shortest-path lengths over the edges in *path*
    (Floyd-Warshall with symmetric in-place updates).

    Args:
        path (list): A list of edges
        no_cities (int): number of cities in the network

    Returns:
        dict: Returns a weighted graph (weights are length of path between two cities)
            represented as {src: {dst: weight}, ...}
    """
    distance_between_cities = edges_to_graph(path)
    # Initialise every missing pair to infinity (both directions at once).
    for src in range(no_cities):
        for dst in range(no_cities):
            if dst == src:
                continue
            if dst not in distance_between_cities[src]:
                distance_between_cities[src][dst] = distance_between_cities[dst][src] = float('inf')
    # Floyd-Warshall relaxation: try routing src->dst through every k.
    for k in range(no_cities):
        for src in range(no_cities):
            if k == src:
                continue
            for dst in range(no_cities):
                # Skip unreachable intermediates to avoid inf + inf work.
                if dst == src or dst == k or distance_between_cities[src][k] == float('inf') or distance_between_cities[k][dst] == float('inf'):
                    continue
                distance_between_cities[src][dst] = distance_between_cities[dst][src] = min(
                    distance_between_cities[src][dst], distance_between_cities[src][k] + distance_between_cities[k][dst])
    return distance_between_cities
def objective_function(path, no_cities, w1, w2):
    """Evaluate a candidate highway network.

    Combines total edge length (t) and the sum of all inter-city distances
    (d) into f = w1*t/2 + w2*d/(n*(n-1)).

    Args:
        path (list): A list of edges
        no_cities (int): number of cities in the network
        w1 (float): parameter w1
        w2 (float): parameter w2

    Returns:
        tuple: Returns result of objective function f and
            a weighted graph (weights are length of path between two cities)
            represented as {src: {dst: weight}, ...}
    """
    distance_between_cities = cities_distance(path, no_cities)
    total_length = sum(w for _, _, w in path)
    total_distance = sum(sum(row.values()) for row in distance_between_cities.values())
    f = w1 * total_length / 2 + w2 * total_distance / (no_cities * (no_cities - 1))
    return f, distance_between_cities
| true |
91fd208b75565c6a823411bf26fd676ee7b692ed | Python | tungdo204/dongoctung-fundamental-c4e29 | /Labs/homework/ex1.py | UTF-8 | 1,222 | 2.859375 | 3 | [] | no_license | from urllib.request import urlopen
from bs4 import BeautifulSoup
from pyexcel import *
from pyexcel_xls import *
from pyexcel_xlsx import *
from collections import OrderedDict
from youtube_dl import YoutubeDL
# 1. Open connection and download the chart page.
url = "https://www.apple.com/itunes/charts/songs"
# Context manager closes the HTTP response (the original leaked it).
with urlopen(url) as conn:
    raw_data = conn.read()
html_content = raw_data.decode('utf8')

# 2. Find ROI (Region of Interest): the chart grid, one <li> per song.
soup = BeautifulSoup(html_content, 'html.parser')
section = soup.find("section", "section chart-grid")
div = section.div
ul = div.ul
li_list = ul.find_all("li")

song_list = []
for li in li_list:
    # Each entry holds <h3><a>title</a></h3> and <h4><a>artist</a></h4>.
    title = li.h3.a.string.strip()
    artist = li.h4.a.string.strip()
    song_list.append(OrderedDict({
        'Title': title,
        'Artist': artist
    }))

# save_as(records=song_list, dest_file_name="itunes.xls")

options = {
    'default_search': 'ytsearch',  # tell downloader to search instead of directly downloading
    'max_downloads': 1  # Tell downloader to download only the first entry (audio)
}

# Download the first YouTube hit for every charted song
# (direct iteration instead of range(len(...)) indexing).
for song in song_list:
    ytsearch = song['Title'] + ' ' + song['Artist']
    YoutubeDL(options).download([ytsearch])
| true |
5c99ec2a20c92ba4f25aa6718638980939753188 | Python | scooterman/pymunch | /munch/utils/__init__.py | UTF-8 | 603 | 3.140625 | 3 | [] | no_license |
def apply_pipeline(lst, *items):
    '''
    Feed *items* through each callable in lst: the unpacked result of one
    stage becomes the arguments of the next. Returns the final result
    (or the original items tuple if lst is empty).
    '''
    current = items
    for stage in lst:
        current = stage(*current)
    return current
def flatten(exprs, prepend='', append=lambda expr: str(expr)):
    """Recursively join *exprs* into a single string.

    ``None`` becomes '', non-lists are stringified, and lists are flattened
    recursively: elements are joined with *prepend* and leaves rendered
    with *append*.

    Bug fixes: ``type(exprs) is None`` was always False (type() never
    returns None), so ``flatten(None)`` returned the string 'None'; and
    the recursive call passed *append* positionally into the *prepend*
    slot, which crashed (a function has no .join) on any nested list.
    """
    if exprs is None:
        return ''
    if type(exprs) != list:
        return str(exprs)
    return prepend.join(
        '' if expr is None
        else (flatten(expr, prepend, append) if type(expr) == list else append(expr))
        for expr in exprs)
| true |
a994f7a9f54543cd1c1f567532baeb6a6d4459bd | Python | MoisesHer/gluon-nlp | /scripts/tests/test_bleu.py | UTF-8 | 5,811 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test BLEU."""
import string
import os
import io
import re
import subprocess
import codecs
import numpy as np
from numpy.testing import assert_allclose
from ..machine_translation.bleu import compute_bleu, _bpe_to_words, _split_compound_word
actions = ['deletion', 'replacement', 'add']
def _sample_translation(reference, max_len):
translation = reference[:]
while np.random.uniform() < 0.8 and 1 < len(translation) < max_len:
trans_len = len(translation)
ind = np.random.randint(trans_len)
action = np.random.choice(actions)
if action == 'deletion':
del translation[ind]
elif action == 'replacement':
ind_rep = np.random.randint(trans_len)
translation[ind] = translation[ind_rep]
else:
ind_insert = np.random.randint(trans_len)
translation.insert(ind, translation[ind_insert])
return translation
def _sample_reference(vocabulary, k):
return np.random.choice(vocabulary, size=k).tolist()
def _sample_translation_corpus(reference_corpus_list, max_len):
    """For each sentence, perturb one randomly chosen reference into a
    translation and collect the results."""
    corpus = []
    for refs in zip(*reference_corpus_list):
        chosen = refs[np.random.randint(len(refs))]
        corpus.append(_sample_translation(chosen, max_len))
    return corpus
def _sample_reference_corpus(vocabulary, n, max_len, n_refs=5):
    """Build n_refs parallel reference corpora of *n* random sentences each,
    with sentence lengths drawn uniformly from 1..max_len."""
    corpora = [[] for _ in range(n_refs)]
    for _ in range(n):
        for corpus in corpora:
            length = np.random.randint(1, max_len + 1)
            corpus.append(_sample_reference(vocabulary, length))
    return corpora
def _write_translaton(translations, path='hypothesis'):
out_file = codecs.open(path, 'w', 'utf-8')
preds = [' '.join(translation) for translation in translations]
out_file.write('\n'.join(preds) + '\n')
out_file.flush()
out_file.close()
def _write_reference(references, path='reference'):
for i, reference in enumerate(references):
out_file = codecs.open(path + str(i), 'w', 'utf-8')
refs = [' '.join(ref) for ref in reference]
out_file.write('\n'.join(refs) + '\n')
out_file.flush()
out_file.close()
def test_bleu():
    """Cross-check compute_bleu against the Moses multi-bleu.perl script
    on a randomly generated parallel corpus."""
    n = 100
    max_len = 50
    n_refs = 5
    # Work next to this file so the perl script path resolves.
    path = os.path.dirname(os.path.realpath(__file__))
    ref_path = os.path.join(path, 'reference')
    trans_path = os.path.join(path, 'hypothesis')
    vocabulary = list(string.ascii_lowercase)
    # n_refs references per sentence, plus one perturbed "translation" of
    # a randomly chosen reference for each sentence.
    reference_corpus_list = _sample_reference_corpus(vocabulary, n, max_len, n_refs)
    translation_corpus = _sample_translation_corpus(reference_corpus_list, max_len)
    _write_reference(reference_corpus_list, path=ref_path)
    _write_translaton(translation_corpus, path=trans_path)
    ret_bleu, _, _, _, _ = compute_bleu(reference_corpus_list, translation_corpus)
    # Ground truth from the Moses script (it globs reference0..reference4).
    mose_ret = subprocess.check_output('perl %s/multi-bleu.perl %s < %s'
                                       % (path, ref_path, trans_path),
                                       shell=True).decode('utf-8')
    m = re.search('BLEU = (.+?),', mose_ret)
    gt_bleu = float(m.group(1))
    # multi-bleu reports a percentage rounded to 2 decimals.
    assert_allclose(round(ret_bleu * 100, 2), gt_bleu)
    os.remove(trans_path)
    for i in range(n_refs):
        os.remove(ref_path + str(i))
def test_detok_bleu():
    """Cross-check detokenized BLEU against multi-bleu-detok.perl using
    the fixture files shipped next to this test."""
    path = os.path.dirname(os.path.realpath(__file__))
    ref_path = os.path.join(path, 'test_references.txt')
    trans_path = os.path.join(path, 'test_translations.txt')
    with io.open(trans_path, 'r', encoding='utf-8') as f:
        translations = f.readlines()
    with io.open(ref_path, 'r', encoding='utf-8') as f:
        references = f.readlines()
    # tokenized=False: compute_bleu applies its own tokenization first.
    ret_bleu, _, _, _, _ = compute_bleu([references], translations, tokenized=False)
    mose_ret = subprocess.check_output('perl %s/multi-bleu-detok.perl %s < %s'
                                       % (path, ref_path, trans_path),
                                       shell=True).decode('utf-8')
    m = re.search('BLEU = (.+?),', mose_ret)
    gt_bleu = float(m.group(1))
    # The perl script reports a percentage rounded to 2 decimals.
    assert_allclose(round(ret_bleu * 100, 2), gt_bleu)
def test_bpe():
    """_bpe_to_words must merge '@@'-suffixed BPE pieces back into words."""
    sequence = ['Th@@', 'is', 'man', 'is', 'ma@@', 'rr@@', 'ied', 'wi@@', 'th', 'her']
    gt_sequence = ['This', 'man', 'is', 'married', 'with', 'her']
    merged_sequence = _bpe_to_words(sequence)
    # Compare whole lists: the original zip()-based loop silently ignored
    # any extra or missing tokens in the merged output.
    assert list(merged_sequence) == gt_sequence
def test_split_compound_word():
    """_split_compound_word must expand hyphens into '##AT##-##AT##' tokens."""
    sequence = ['rich-text', 'man', 'feed-forward', 'yes', 'true', 'machine-learning', 'language-model']
    gt_sequence = ['rich', '##AT##-##AT##', 'text', 'man', 'feed', '##AT##-##AT##', 'forward',
                   'yes', 'true', 'machine', '##AT##-##AT##', 'learning', 'language', '##AT##-##AT##', 'model']
    split_sequence = _split_compound_word(sequence)
    # Whole-list comparison also catches length mismatches, unlike the
    # original zip()-based pairwise loop.
    assert list(split_sequence) == gt_sequence
| true |
ab866bdf0676996f71f7e79e131489d0a50e0335 | Python | bearwork/TelegramBot | /app.py | UTF-8 | 1,754 | 3.046875 | 3 | [] | no_license | import telebot
from config import keys, token
from extensions import APIException, CryptoConverter
bot = telebot.TeleBot(token)
@bot.message_handler(commands=['start', 'help'])
def help1(message: telebot.types.Message):
    """Reply with usage instructions for the /start and /help commands."""
    text = ('Чтобы начать работу введите команду боту в следующем формате: \n <имя валюты> '
            '<в какую валюту перевести> '
            '<колличество переводимой валюты> \n '
            'Чтобы увидить список всех доступных валют введите команду /values')
    bot.reply_to(message, text)
@bot.message_handler(commands=['values'])
def values1(message: telebot.types.Message):
    """Reply to /values with the list of supported currency names."""
    lines = ['Доступные для конвертации валюты: ']
    lines.extend(keys.keys())
    bot.reply_to(message, '\n'.join(lines))
@bot.message_handler(content_types=['text', ])
def convert1(message: telebot.types.Message):
    """Parse "<base> <quote> <amount>" from the message and reply with the price.

    User mistakes (APIException) and unexpected failures are reported back
    in the chat instead of crashing the handler.
    """
    try:
        parts = message.text.split(' ')
        if len(parts) != 3:
            raise APIException('Не верное количество параметров')
        base, quote, amount = parts
        total_base = CryptoConverter.get_price(base, quote, amount)
    except APIException as e:
        bot.reply_to(message, f'Ошибка пользователя \n{e}')
    except Exception as e:
        bot.reply_to(message, f'Не удалось обработать команду \n{e}')
    else:
        bot.send_message(message.chat.id,
                         f'Стоимость {amount} {base} равна {total_base} {quote}')
bot.polling() | true |
ea07eaf569f666c2d3c5264d623cdfc6e8783bda | Python | shallcro/Various-Tests | /cdrdao_test.py | UTF-8 | 2,032 | 2.546875 | 3 | [] | no_license | '''
Script to test content and metadata extraction with cdrdao
'''
import subprocess
import os
import random
import string
# Random 7-digit barcode used to name all output files for this disc.
barcode = ''.join(random.choice(string.digits) for _ in range(7))

# Working directory for all extracted audio/session files.
if not os.path.exists('C:\\temp\\cdda'):
    os.makedirs('C:\\temp\\cdda')

# Determine the drive ID cdrdao should use; keep the command output as a log.
cdr_scan_log = 'C:\\temp\\cdda\\scan.log'
scan_cmd = 'cdrdao scanbus > %s 2>&1' % cdr_scan_log
subprocess.check_output(scan_cmd, shell=True)

# Pull the drive ID from the scan log.
# BUGFIX: open in text mode -- the original used 'rb', and bytes.split(':')
# raises TypeError on Python 3.
with open(cdr_scan_log, 'r') as f:
    drive_id = f.read().splitlines()[8].split(':')[0]

# Get info about the CD; the session count is parsed from this log below.
disk_info_log = 'C:\\temp\\cdda\\cdr_info.log'
cmd = 'cdrdao disk-info --device %s --driver generic-mmc-raw > %s 2>&1' % (drive_id, disk_info_log)
exitcode = subprocess.call(cmd, shell=True)

# Read the log file to determine the number of sessions on the disk.
# NOTE(review): the fixed line/field indices (8 above, 21 here) assume a
# specific cdrdao output layout -- confirm against the installed version.
with open(disk_info_log, 'r') as f:
    sessions = int(f.read().splitlines()[21].split(':')[1].strip())

# For each session, create a bin/toc pair and convert the TOC to a CUE sheet.
for x in range(1, sessions + 1):
    # BUGFIX: name files after the current session number x, not the total
    # session count (the original used `sessions` inside the loop).
    session_tag = str(x).zfill(2)
    cdr_bin = os.path.join("C:\\temp\\cdda", "%s-%s.bin") % (barcode, session_tag)
    cdr_toc = os.path.join("C:\\temp\\cdda", "%s-%s.toc") % (barcode, session_tag)
    # BUGFIX: --datafile takes the .bin path and --device the drive ID; the
    # original swapped drive_id into --datafile and the .bin path into --device.
    cdr_cmd = 'cdrdao read-cd --read-raw --datafile %s --device %s --driver generic-mmc-raw %s' % (cdr_bin, drive_id, cdr_toc)
    exitcode = subprocess.call(cdr_cmd, shell=True)
    # TODO: record this extraction step as a PREMIS event.
    # Convert the TOC to a CUE sheet for this session.
    cue = os.path.join("C:\\temp\\cdda", "%s-%s.cue") % (barcode, session_tag)
    t2c_cmd = 'toc2cue %s %s' % (cdr_toc, cue)
    subprocess.check_output(t2c_cmd, shell=True)

# Rip the whole disc to WAV with cd-paranoia, logging to a per-disc file.
paranoia_log = os.path.join('C:\\temp\\cdda', '%s-cdparanoia.log' % barcode)
paranoia_out = os.path.join('C:\\temp\\cdda', '%s.wav' % barcode)
paranoia_cmd = 'cd-paranoia -l %s -w [00:00:00.00]- %s' % (paranoia_log, paranoia_out)
exitcode = subprocess.call(paranoia_cmd, shell=True)
| true |
e7d14de52c1c1218cec5ba88818bbb199247d1bb | Python | SurajPatil314/Leetcode-problems | /dfs_numberOf_island.py | UTF-8 | 2,181 | 3.34375 | 3 | [] | no_license | """
https://leetcode.com/problems/number-of-islands/
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and
is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all
surrounded by water.
"""
class Solution:
    """LeetCode 200: count connected groups of '1' cells (4-directional)."""

    def numIslands(self, grid):
        """Return the number of islands in *grid*.

        *grid* is a sequence of rows, each indexable by column, holding the
        characters '1' (land) and '0' (water).  The original implementation
        tracked visited cells in a *list*, making each membership test O(n)
        (quadratic-or-worse overall), and its ``List[List[str]]`` annotation
        referenced an unimported name.  This version is an iterative flood
        fill with a set of visited coordinates: O(rows * cols) time.
        """
        if not grid or not grid[0]:
            return 0
        rows, cols = len(grid), len(grid[0])
        seen = set()
        islands = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] != '1' or (r, c) in seen:
                    continue
                # Unvisited land cell: flood-fill its whole component.
                islands += 1
                seen.add((r, c))
                stack = [(r, c)]
                while stack:
                    i, j = stack.pop()
                    for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
                        if (0 <= ni < rows and 0 <= nj < cols
                                and grid[ni][nj] == '1' and (ni, nj) not in seen):
                            seen.add((ni, nj))
                            stack.append((ni, nj))
        return islands
3270d23957da4c1aa4cddafc1b4280e1d1723709 | Python | red-fox-yj/Clock-In-Server | /tests/client.py | UTF-8 | 388 | 2.8125 | 3 | [] | no_license | import asyncio
import websockets
async def hello(uri):
    """Connect to *uri*, send one greeting, then echo every message received."""
    async with websockets.connect(uri) as websocket:
        greeting = "Hello world!"
        await websocket.send(greeting)
        print("< " + greeting)
        # Echo loop: runs until the connection is closed by the server.
        while True:
            reply = await websocket.recv()
            print("> {}".format(reply))

# Run the client against the local development server.
asyncio.get_event_loop().run_until_complete(hello("ws://localhost:8000"))
| true |
28c42c7d714f759554d0a1a0de56b576296de8fc | Python | joepatten/CPTS_451_milestone2 | /LuJoRo_ParseAndInsert.py | UTF-8 | 10,014 | 3.125 | 3 | [] | no_license | import json
import zipfile
import psycopg2
import os
# All relative data paths below (./data/...) assume this working directory,
# which contains the zip with the Yelp JSON files.
os.chdir(r'C:\Users\josep\python_files\CptS_451\project\milestone1')
# change the parameters (like dbname and password) for it to work on your computer
def psycopg2_connect():
    """Open a connection to the local 'yelp' PostgreSQL database.

    NOTE(review): credentials are hard-coded in the DSN; consider moving
    them to environment variables or a config file.
    """
    dsn = "dbname='yelp' user='postgres' host='localhost' password='soccer'"
    return psycopg2.connect(dsn)
# put data files in data folder
def extract_zipfiles(filepath=r'./data/yelp_CptS451_2020.zip', folderpath=r'./data'):
    """Unpack every member of the zip archive *filepath* into *folderpath*."""
    archive = zipfile.ZipFile(filepath, 'r')
    with archive:
        archive.extractall(folderpath)
def cleanStr4SQL(s):
    """Sanitise *s* for naive SQL embedding: ' becomes ` and newlines become spaces."""
    return s.translate(str.maketrans({"'": "`", "\n": " "}))
def int2BoolStr(value):
    """Map 0 to the string 'False' and any other value to 'True'."""
    return 'False' if value == 0 else 'True'
def flatten_json(y):
    """Flatten nested dicts/lists in *y* into one dict with '_'-joined keys.

    Dict keys contribute their name, list elements their index, e.g.
    {'a': {'b': 1}, 'c': [2]}  ->  {'a_b': 1, 'c_0': 2}.
    """
    flat = {}

    def _walk(node, path):
        # Exact-type checks mirror the original behaviour for subclasses.
        if type(node) is dict:
            for key, value in node.items():
                _walk(value, path + (key,))
        elif type(node) is list:
            for idx, value in enumerate(node):
                _walk(value, path + (str(idx),))
        else:
            flat['_'.join(path)] = node

    _walk(y, ())
    return flat
def create_cmd(d, tablename):
    """Build an INSERT statement for *tablename* from the column->value dict *d*.

    NOTE(review): this is string-built SQL (injection-prone); values are only
    escaped via cleanStr4SQL upstream.  Prefer parameterized queries
    (cur.execute(sql, params)) when reworking the loader.
    """
    columns = ", ".join(d.keys())
    values = ", ".join("'%s'" % (v,) for v in d.values())
    return f"INSERT INTO {tablename} ({columns}) VALUES ({values});"
def create_d(data, strings, floats, ints, bools):
    """Extract the named fields of *data* into a flat dict of SQL-ready values.

    Each field name list selects a conversion: strings are cleaned only,
    floats/ints are cleaned then cast, bools are cleaned, cast to int, and
    rendered as 'True'/'False' via int2BoolStr.
    """
    row = {}
    for key in strings:
        row[key] = cleanStr4SQL(str(data[key]))
    for key in floats:
        row[key] = float(cleanStr4SQL(str(data[key])))
    for key in ints:
        row[key] = int(cleanStr4SQL(str(data[key])))
    for key in bools:
        row[key] = int2BoolStr(int(cleanStr4SQL(str(data[key]))))
    return row
def insert_row(conn, cur, d, tablename):
    """Execute an INSERT built from dict *d* on cursor *cur*, then commit.

    Failures are printed and swallowed (deliberate best-effort bulk load);
    the commit still runs so earlier successful inserts are persisted.
    """
    try:
        cmd = create_cmd(d, tablename)
        cur.execute(cmd)
    except Exception as e:
        print(f"Insert to {tablename} failed!",e)
    conn.commit()
def parseBusinessData():
    """Convert yelp_business.JSON (one JSON object per line) into a
    tab-separated flat file at ./data/business.txt.

    NOTE(review): *outfile* is opened without a with-statement; if a line
    fails to parse, the handle is only closed on interpreter exit.
    """
    # read the JSON file
    with open(r'./data/yelp_business.JSON','r') as f:
        outfile = open(r'./data/business.txt', 'w')
        line = f.readline()
        count_line = 0
        # Read each JSON object and write its fields, tab-delimited.
        while line:
            data = json.loads(line)
            outfile.write(cleanStr4SQL(data['business_id'])+'\t') #business id
            outfile.write(cleanStr4SQL(data['name'])+'\t') #name
            outfile.write(cleanStr4SQL(data['address'])+'\t') #full_address
            outfile.write(cleanStr4SQL(data['state'])+'\t') #state
            outfile.write(cleanStr4SQL(data['city'])+'\t') #city
            outfile.write(cleanStr4SQL(data['postal_code']) + '\t') #zipcode
            outfile.write(str(data['latitude'])+'\t') #latitude
            outfile.write(str(data['longitude'])+'\t') #longitude
            outfile.write(str(data['stars'])+'\t') #stars
            outfile.write(str(data['review_count'])+'\t') #reviewcount
            outfile.write(str(data['is_open'])+'\t') #openstatus
            # Categories arrive as one comma-separated string.
            categories = data["categories"].split(', ')
            outfile.write(str(categories)+'\t') #category list
            # Attributes are flattened, then dumped as a JSON string column.
            attributes = json.dumps(flatten_json(data['attributes']))
            outfile.write(attributes +'\t')
            # Hours: keep only the hour ranges (day names dropped).
            week_hours = [hours for day, hours in data['hours'].items()]
            outfile.write(str(week_hours) + '\t')
            # newline terminates the record
            outfile.write('\n');
            line = f.readline()
            count_line += 1
        print('Number of observations parsed:', count_line)
        outfile.close()
def insertBusinessData():
    """Load yelp_business.JSON into the business, businesscategories,
    WeekHours and additional_attribute tables (one commit per row via
    insert_row)."""
    with open(r'./data/yelp_business.JSON','r') as f:
        line = f.readline()
        count_line = 0
        #connect to database
        conn = psycopg2_connect()
        cur = conn.cursor()
        # Field names grouped by the conversion create_d applies to them.
        strings = ['business_id', 'name', 'address', 'state', 'city', 'postal_code']
        floats = ['latitude', 'longitude', 'stars']
        ints = ['review_count']
        bools = ['is_open']
        while line:
            data = json.loads(line)
            business_id = data["business_id"]
            # One row per business in the business table.
            d = create_d(data, strings, floats, ints, bools)
            insert_row(conn, cur, d, tablename='business')
            # One row per (business, category) pair.
            categories = [cleanStr4SQL(category) for category in data["categories"].split(', ')]
            for category in categories:
                cat_d = {'business_id':business_id, 'category':category}
                insert_row(conn, cur, d=cat_d, tablename='businesscategories')
            # One row per weekday with opening hours.
            for day, hours in data['hours'].items():
                #TODO:
                # - separate start and end hour into two separate columns
                # - change the sql table accordingly
                day_d = {'business_id':business_id, 'day':day, 'hours':hours}
                insert_row(conn, cur, d=day_d, tablename='WeekHours')
            # One row per attribute whose flattened value is the string 'True'.
            attributes = flatten_json(data['attributes'])
            for attribute_name, value in attributes.items():
                if value == 'True':
                    att_d = {'business_id': business_id, 'attribute_name': attribute_name}
                    insert_row(conn, cur, d=att_d, tablename='additional_attribute')
            # Advance to the next JSON line; report progress every 100 rows.
            count_line += 1
            line = f.readline()
            if count_line % 100 == 0:
                print(f'Finshed reading row {count_line}.')
        cur.close()
        conn.close()
    print('Finished with yelp_business.JSON file\n\n')
def insertUserData_1():
    """Load yelp_user.JSON into the usertable table (user rows only;
    friendships are loaded separately by insertUserData_2)."""
    with open(r'./data/yelp_user.JSON','r') as f:
        line = f.readline()
        count_line = 0
        #connect to database
        conn = psycopg2_connect()
        cur = conn.cursor()
        # Field names grouped by the conversion create_d applies to them.
        strings = ['user_id', 'name', 'yelping_since']
        floats = ['average_stars']
        ints = ['cool', 'fans', 'funny', 'tipcount', 'useful']
        bools = []
        while line:
            data = json.loads(line)
            # One row per user.
            d = create_d(data, strings, floats, ints, bools)
            insert_row(conn, cur, d, tablename='usertable')
            # Advance to the next JSON line; report progress every 1000 rows.
            count_line += 1
            line = f.readline()
            if count_line % 1000 == 0:
                print(f'Finshed reading row {count_line}.')
        cur.close()
        conn.close()
    print('Finished with yelp_user.JSON file (Part 1/2)\n\n')
def insertUserData_2():
    """Load the friendship edges from yelp_user.JSON into the Friendship table.

    NOTE(review): this iterates data["friends"] directly -- if the JSON
    stores friends as one comma-separated string (as other fields here do),
    this would iterate characters.  Confirm the JSON shape.
    """
    with open(r'./data/yelp_user.JSON','r') as f:
        line = f.readline()
        count_line = 0
        #connect to database
        conn = psycopg2_connect()
        cur = conn.cursor()
        while line:
            data = json.loads(line)
            # One row per (user, friend) pair.
            user_id = data["user_id"]
            for friend_id in data["friends"]:
                friend_d = {'first_user_id':user_id, 'second_user_id':friend_id}
                insert_row(conn, cur, d=friend_d, tablename='Friendship')
            # Advance to the next JSON line; report progress every 1000 rows.
            count_line += 1
            line = f.readline()
            if count_line % 1000 == 0:
                print(f'Finshed reading row {count_line}.')
        cur.close()
        conn.close()
    print('Finished with yelp_user.JSON file (Part 2/2)\n\n')
def insertCheckinData():
    """Load yelp_checkin.JSON into the CheckIn table, one row per
    (business, check-in date) pair."""
    with open(r'./data/yelp_checkin.JSON','r') as f:
        line = f.readline()
        count_line = 0
        #connect to database
        conn = psycopg2_connect()
        cur = conn.cursor()
        while line:
            data = json.loads(line)
            business_id = cleanStr4SQL(data['business_id'])
            # Check-in timestamps arrive as one comma-separated string.
            dates = data["date"].split(',')
            for date in dates:
                date_d = {'business_id':business_id, 'checkin_date':date}
                insert_row(conn, cur, d=date_d, tablename='CheckIn')
            # Advance to the next JSON line; report progress every 100 rows.
            count_line += 1
            line = f.readline()
            if count_line % 100 == 0:
                print(f'Finshed reading row {count_line}.')
        cur.close()
        conn.close()
    print('Finished with yelp_checkin.JSON file\n\n')
def insertTipData():
    """Load yelp_tip.JSON into the Tip table, one row per tip."""
    with open(r'./data/yelp_tip.JSON','r') as f:
        line = f.readline()
        count_line = 0
        #connect to database
        conn = psycopg2_connect()
        cur = conn.cursor()
        # Field names grouped by the conversion create_d applies to them.
        strings = ['business_id', 'user_id', 'text', 'date']
        floats = []
        ints = ['likes']
        bools = []
        while line:
            data = json.loads(line)
            # One row per tip in the Tip table.
            d = create_d(data, strings, floats, ints, bools)
            insert_row(conn, cur, d, tablename='Tip')
            # Advance to the next JSON line; report progress every 1000 rows.
            count_line += 1
            line = f.readline()
            if count_line % 1000 == 0:
                print(f'Finshed reading row {count_line}.')
        cur.close()
        conn.close()
    print('Finished with yelp_tip.JSON file\n\n')
# Entry point: load each JSON dump into the database.  The flat-file
# export (parseBusinessData) is intentionally not run here.
if __name__ == "__main__":
    insertBusinessData()
    insertUserData_1()
    insertUserData_2()
    insertCheckinData()
    insertTipData()
| true |
a0161d6a6f3162b41bb640dc4c9d5e42025b483d | Python | Divaon/ISP_SEM4 | /Lab2/Parser/Jsonserialzerliy.py | UTF-8 | 7,071 | 2.59375 | 3 | [] | no_license |
import re
import inspect
from types import FunctionType
import sys
import types
class JsonSerializer:
    """Hand-rolled JSON-ish (de)serializer.

    Besides plain JSON values, dicts tagged with '__type__' ('function' or
    'class') can be converted back into live functions/classes when
    _convert_complex is True.  SECURITY NOTE: _simple_to_complex exec()s
    code from the parsed payload -- never feed it untrusted input.
    """
    # When True, tagged dicts are rebuilt into live objects during loads().
    _convert_complex = False
    # NOTE(review): appears unused within this class -- confirm callers.
    _sended_globals = {}
    def loads(self, s):
        """Tokenize string *s* and return the parsed Python object."""
        raw_data = self._splitted_str(s)
        obj = self._parse_string_to_obj(raw_data, 0)['result']
        return obj
    def _complex_to_simple(self, obj):
        """Encode a class or function *obj* as a tagged dict; pass other
        values through unchanged."""
        result = obj
        if inspect.isclass(obj):
            result = {'__type__': 'class'}
            result['name'] = obj.__name__
            # Keep only non-dunder members, except __init__.
            allowed_keys = ['__init__']
            result['members'] = dict(
                (key, value)
                for (key, value) in obj.__dict__.items()
                if not key.startswith('__') or key in allowed_keys
            )
        elif callable(obj):
            result = {'__type__': 'function'}
            # NOTE(review): `getsource` is used unqualified -- presumably
            # inspect.getsource; as written this raises NameError unless it
            # is imported elsewhere in the module.  Confirm.
            code = getsource(obj).strip()
            if 'lambda ' in code:
                # Keep only the lambda expression itself, not the assignment.
                code = code[code.find('lambda '):]
            result['code'] = code
            result['globals'] = {}
            # Capture only globals whose names start with 'global'.
            excluded_keys = ['global_vars']
            for key, value in list(obj.__globals__.items()):
                if key.startswith('global') and not key in excluded_keys:
                    result['globals'][key] = value
            result['name'] = obj.__name__
            # NOTE(review): inspect.getargspec is deprecated (removed in
            # Python 3.11); getfullargspec/signature is the replacement.
            result['args'] = inspect.getargspec(obj).args
        return result
    def _simple_to_complex(self, obj):
        """Rebuild a live function/class from a tagged dict; pass any other
        value through unchanged."""
        if type(obj) is dict and '__type__' in obj.keys():
            if obj['__type__'] == 'function':
                # Lambdas were stored as a bare expression: bind to a name
                # so exec() leaves it in the local namespace.
                if 'lambda ' in obj['code']:
                    obj['code'] = 'new_lambda = ' + obj['code']
                result = {}
                # SECURITY: executes payload-supplied source code.
                exec(obj['code'], obj['globals'], result)
                func_name = 'new_lambda' if 'lambda ' in obj['code'] else obj['name']
                new_func = result[func_name]
                return new_func
            elif obj['__type__'] == 'class':
                # Recreate the class with object as sole base.
                new_class = type(obj['name'], (object,), obj['members'])
                return new_class
        return obj
    def _splitted_str(self, input):
        """Tokenize *input*: quoted strings and <...> spans are kept intact,
        everything else is split on whitespace/commas, with a trailing ':'
        emitted as its own token."""
        processed_data = []
        # Odd-indexed pieces of this split are the quoted-string contents.
        splitted_str = re.split(r"\"", input)
        for i in range(len(splitted_str)):
            if i % 2 == 1:
                processed_data.append('"' + splitted_str[i] + '"')
            else:
                # Odd-indexed pieces here were inside <...> markers.
                escaped_from_funcs = re.split(r"[<>]", splitted_str[i])
                for j in range(len(escaped_from_funcs)):
                    if j % 2 == 1:
                        processed_data.append('<' + escaped_from_funcs[j] + '>')
                    else:
                        arr = re.split(r"[\s,]", escaped_from_funcs[j])
                        for elem in arr:
                            if elem:
                                # Split 'key:' into 'key' and ':' tokens.
                                if elem[-1] == ':' and len(elem) > 1:
                                    processed_data.append(elem[0:-1])
                                    processed_data.append(elem[-1])
                                else:
                                    processed_data.append(elem)
        return processed_data
    def _parse_string_to_obj(self, raw_data, i):
        """Recursive-descent parse of token list *raw_data* starting at
        index *i*; returns {'result': value, 'i': next_index}."""
        result = {'result': None, 'i': i + 1}
        # Number: leading digit after stripping quotes; '.' selects float.
        if re.match(r'\d', raw_data[i].strip('"')[0]):
            result['result'] = int(raw_data[i].strip('"')) if raw_data[i].find('.') == -1 else float(
                raw_data[i].strip('"'))
            result['i'] = i + 1
        elif re.match(r'true|false', raw_data[i].strip('"')):
            raw_data[i].strip('"')
            # Anything other than the exact token 'true' becomes False.
            result['result'] = raw_data[i] == 'true'
            result['i'] = i + 1
        elif raw_data[i][0] == '"':
            # Quoted string: drop the surrounding quotes.
            result['result'] = raw_data[i][1:(len(raw_data[i]) - 1)]
            result['i'] = i + 1
        elif raw_data[i][0] == '[':
            # Array: parse elements until the matching ']' token.
            result_arr = []
            i += 1
            while i < len(raw_data):
                if raw_data[i] == ']':
                    i += 1
                    break
                return_dict = self._parse_string_to_obj(raw_data, i)
                result_arr.append(return_dict['result'])
                i = return_dict['i']
            result['result'] = result_arr
            result['i'] = i
        elif raw_data[i][0] == '{':
            # Object: parse key ':' value triples until the matching '}'.
            result_dict = {}
            i += 1
            while i < len(raw_data):
                if raw_data[i] == '}':
                    i += 1
                    break
                return_dict = self._parse_string_to_obj(raw_data, i)
                key = return_dict['result']
                i = return_dict['i']
                # NOTE(review): a missing ':' is silently ignored here.
                if raw_data[i] != ':': pass # raise exeption
                i += 1
                return_dict = self._parse_string_to_obj(raw_data, i)
                value = return_dict['result']
                i = return_dict['i']
                result_dict[key] = value
            # Optionally revive tagged function/class dicts.
            if self._convert_complex:
                result_dict = self._simple_to_complex(result_dict)
            result['result'] = result_dict
            result['i'] = i
        return result
| true |
898f1817c9070ffcb26162fcc31d19d045ffe50b | Python | alphafan/Data_Structure_And_Algorithms | /array/Maximum_Sum_Path_in_Two_Arrays.py | UTF-8 | 1,604 | 4.25 | 4 | [] | no_license | """ Maximum Sum Path in Two Arrays
Given two sorted arrays such the arrays may have some common elements.
Find the sum of the maximum sum path to reach from beginning of any array
to end of any of the two arrays. We can switch from one array to another
array only at common elements.
Expected time complexity is O(m+n) where m is the number of elements in ar1[]
and n is the number of elements in ar2[].
https://www.geeksforgeeks.org/maximum-sum-path-across-two-arrays/
"""
def maxSumPath(nums1, nums2):
    """Return the maximum-sum path through two sorted arrays.

    The path may switch between arrays only at elements common to both.
    Returns the list of elements on that path (so ``sum(result)`` is the
    classic "maximum sum path" value).

    The previous implementation only detected common elements at *identical
    indices* (the two-pointer merge below handles the general case) and had
    a slice typo (``nums2[len(result):2]``) in its tail handling.

    Runs in O(m + n) time.
    """
    result = []
    i = j = 0
    # Pending segments since the last common element, one per array.
    seg1, seg2 = [], []
    while i < len(nums1) and j < len(nums2):
        if nums1[i] < nums2[j]:
            seg1.append(nums1[i])
            i += 1
        elif nums1[i] > nums2[j]:
            seg2.append(nums2[j])
            j += 1
        else:
            # Common element: keep the heavier pending segment, then the
            # shared value itself, and start fresh segments.
            result.extend(seg1 if sum(seg1) >= sum(seg2) else seg2)
            result.append(nums1[i])
            seg1, seg2 = [], []
            i += 1
            j += 1
    # Whatever remains after the last common element: take the heavier tail.
    seg1.extend(nums1[i:])
    seg2.extend(nums2[j:])
    result.extend(seg1 if sum(seg1) >= sum(seg2) else seg2)
    return result
if __name__ == '__main__':
    # Demo with the sample arrays from the problem statement.
    input1 = [2, 3, 7, 10, 12, 15, 30, 34]
    input2 = [1, 5, 7, 8, 10, 15, 16, 19]
    print(maxSumPath(input1, input2))
| true |
f650f17287cd69d22ff8b9981e7b424e3bddfc60 | Python | Fabius716/RomansToArabic | /Romanstonumber.py | UTF-8 | 2,263 | 3.046875 | 3 | [] | no_license | #sorry for the messy code and the broken english
# Convert a Roman numeral read from stdin to an Arabic number, validating
# the common formation rules and printing an error message on violations.
raw=input()
# State variables: pn/cn hold the previous/current symbol (as a char first,
# then as an index into l), count is the running total, al echoes the input
# characters seen so far (used for the repeated-subtraction check).
pn=0
cn=0
c=0
count=0
al=[]
# e is set on any validation error and suppresses the final output.
e=False
sv=""
dsub=False
# Symbol values; lowercase letters extend the range (i = 1000 "bar" forms).
table={
    "I":1,
    "V":5,
    "X":10,
    "L":50,
    "C":100,
    "D":500,
    "M":1000,
    "i":1000,
    "v":5000,
    "x":10000,
    "l":50000,
    "c":100000,
    "d":500000,
    "m":1000000
}
# Symbols in ascending-value order; index distance encodes "grade" distance.
l=['I', 'V', 'X', 'L', 'C', 'D', 'M', 'i', 'v', 'x', 'l', 'c', 'd', 'm']
# Rule: the 1/10/100/1000 symbols may not repeat four or more times.
if "IIII" in raw or "XXXX" in raw or "CCCC" in raw or "MMMM" in raw:
    print("Error: repetition of the 1,10,100,1000 characters 4 or more times")
    e=True
# Rule: the 5/50/500 symbols may never repeat.
if "VV" in raw or "LL" in raw or "DD" in raw:
    print("Error: repetition of 5,50,500 characters")
    e=True
# Main scan: compare each symbol against the previous one.
for h in raw:
    al.append(h)
    c+=1
    if pn == 0:
        # First symbol: just record it and seed the total.
        pn = h
        count= table[pn]
    else:
        pn=l.index(pn)
        cn=l.index(h)
        # Previous symbol larger: plain additive case (e.g. MC = 1000+100).
        if pn>cn:
            count= count+ table[l[cn]]
        # Equal symbols: also additive; illegal repetitions were caught above.
        if pn==cn:
            count= count+ table[l[cn]]
        # Previous smaller and within two grades: subtractive pair (e.g. XC);
        # the previous symbol was already added, so subtract it twice.
        if pn<cn and cn-pn<=2 :
            count+= table[l[cn]]- (2*(table[l[pn]]))
            sv=pn
        # Rule: only one symbol may be subtracted at a time (e.g. IIX invalid).
        if pn<cn and cn-pn<=2 and len(al) >= 3 and al[-3] == al[-2]:
            print("Error: subtract only one number at a time")
            e=True
            break
        # Rule: subtraction may only skip at most one grade (e.g. IC invalid).
        if pn<cn and cn-pn>2:
            print("Error: treat 1,10,100,1000 as separate items")
            e=True
            break
        # Rule: the same symbol may not be used as a subtrahend twice.
        if pn<cn and cn-pn<=2 and pn==sv:
            e=True
            print("Error: double subtraction")
            dsub=True
        pn = h
        if dsub ==True:
            break
    # debugging aids (left disabled)
    #print(al)
    #print(count,h)
    #print(count,h,table[l[cn]],table[l[cn]])
    #print(h,raw[((raw.index(h))-2)],raw[((raw.index(h))-1)])
# Print the result only for non-empty, error-free input.
if count != 0 and raw != "" and e==False:
    print(count) #output in stdout
94e7877e1a73b2f4f6d5e138e79f55459112b2fe | Python | mesnardo/petibm-decoupledibpm | /runs/cylinder2dRe40/500_markers/scripts/plot_pressure_coefficient.py | UTF-8 | 2,748 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | """Plot the surface pressure coefficient at final time step."""
from matplotlib import pyplot
import numpy
import pathlib
import petibmpy
import rodney
name = 'p'  # name of the field variable to load
timestep = 5000  # final time-step index
# Set the simulation and data directories.
args = rodney.parse_command_line()
simudir = pathlib.Path(__file__).absolute().parents[1]
datadir = simudir / 'output'
# Load the gridlines from file.
filepath = datadir / 'grid.h5'
x, y = petibmpy.read_grid_hdf5(filepath, name)
# Load the field from file.
filepath = datadir / f'{timestep:0>7}.h5'
p = petibmpy.read_field_hdf5(filepath, name)
# Load boundary coordinates from file.
# NOTE(review): xb/yp are not used below (the extended circle is used
# instead); 'yp' also looks like a typo for 'yb'.  Confirm before removing.
filepath = simudir / 'cylinder.body'
xb, yp = petibmpy.read_body(filepath, skiprows=1)
# Define circle outside support region of delta function.
N = 500
dx = 1.5 / 90  # grid-spacing size in the uniform region
R = 0.5 + 3 * dx  # radius 3 cells away from real boundary
theta = numpy.linspace(0.0, 2 * numpy.pi, num=N + 1)[:-1]
xc, yc = 0.0, 0.0  # cylinder center
xb_ext, yb_ext = xc + R * numpy.cos(theta), yc + R * numpy.sin(theta)
# Interpolate the field on extended boundary (bilinear: first along y,
# then along x).
pb = numpy.empty_like(xb_ext)
for i, (xbi, ybi) in enumerate(zip(xb_ext, yb_ext)):
    pi = petibmpy.linear_interpolation(p, y, ybi)
    pb[i] = petibmpy.linear_interpolation(pi, x, xbi)
# Compute the pressure coefficient Cp = (p - p_inf) / (0.5 rho U^2).
rho = 1.0  # fluid density
U_inf = 1.0  # freestream speed
p_inf = 0.0  # far-away pressure
cp = (pb - p_inf) / (0.5 * rho * U_inf**2)
# Re-arrange values to split apart lower and upper surfaces,
# each parameterized by the angle in [0, 180] degrees.
cp_lower = numpy.append(cp[N // 2:], [cp[-1]])
theta_lower = numpy.linspace(0.0, 180.0, num=cp_lower.size)
cp_upper = cp[:N // 2 + 1][::-1]
theta_upper = numpy.linspace(0.0, 180.0, num=cp_upper.size)
# Plot the distribution of the surface pressure coefficient.
pyplot.rc('font', family='serif', size=14)
fig, ax = pyplot.subplots(figsize=(6.0, 4.0))
ax.set_xlabel(r'$\theta$')
ax.set_ylabel('$C_p$')
ax.plot(theta_lower, cp_lower, label='Lower surface')
ax.plot(theta_upper, cp_upper, label='Upper surface', linestyle='--')
if args.extra_data:
    # Overlay digitized reference values from Li et al. (2016).
    theta_li, cp_li = rodney.lietal2016_load_cp(40)
    ax.scatter(theta_li, cp_li, label='Li et al. (2016)',
               c='black', marker='s', s=10)
ax.legend(frameon=False)
ax.set_xlim(0.0, 180.0)
ax.set_ylim(-1.5, 1.5)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
fig.tight_layout()
if args.save_figures:
    # Save the figure under <simulation>/figures/.
    figdir = simudir / 'figures'
    figdir.mkdir(parents=True, exist_ok=True)
    filepath = figdir / f'cp_{timestep:0>7}.png'
    fig.savefig(filepath, dpi=300, bbox_inches='tight')
if args.show_figures:
    pyplot.show()
387ea72d41aca8a77f33f79510f6efcf917d8a1b | Python | cletourneau/yose_challenge | /features/challenges/world2/level2_1_test.py | UTF-8 | 935 | 2.78125 | 3 | [] | no_license | from httplib import OK
import unittest
from hamcrest import assert_that, is_
from nose.plugins.attrib import attr
from nose.tools import istest
import requests
from testconfig import config
@attr(needs_server=True)
class PowerOfTwoChallenge(unittest.TestCase):
    """Acceptance tests for /primeFactors with a power of two (16 -> [2,2,2,2]).

    Requires a live server (nose attribute ``needs_server``); the base URL
    comes from the ``server_url`` entry of the test configuration.
    """
    def setUp(self):
        # One GET per test method; every check below inspects this response.
        self.response = requests.get('{0}/primeFactors?number=16'.format(config['server_url']))
    @istest
    def responds_json_content_type(self):
        assert_that(self.response.headers['content-type'], is_('application/json'))
    @istest
    def responds_status_OK(self):
        assert_that(self.response.status_code, is_(OK))
    @istest
    def responds_number_in_request(self):
        # The response echoes back the queried number.
        response = self.response.json()
        assert_that(response['number'], is_(16))
    @istest
    def responds_decomposition_in_request(self):
        # 16 = 2 * 2 * 2 * 2.
        response = self.response.json()
        assert_that(response['decomposition'], is_([2, 2, 2, 2]))
| true |
61bfccec6a3b86fcbfb155762ba19141fdca1449 | Python | gaoyang836/python_text | /RF/testlib3.py | UTF-8 | 294 | 2.65625 | 3 | [] | no_license | # -*-coding:utf-8 -*-
from robot.api.logger import console #在控制台引用
def check_score(score):
    """Print and log to the RF console whether *score* is a passing grade (>= 60)."""
    # Pick the message once, then emit it to both stdout and the console log.
    message = "恭喜你及格了" if int(score) >= 60 else "回去复习吧!"
    print(message)
    console(message)
| true |
89c9eabed98df728521b66f97d8e9e0154729397 | Python | sdlylzhr/pyCrawler | /crawler.py | UTF-8 | 1,308 | 3.34375 | 3 | [] | no_license | from bs4 import BeautifulSoup
from urllib import request
# Fetch the data of a single Zhihu answer page.
def zhihuAn(url):
    """Download *url*, print the page title and the text of every answer."""
    # Fake a desktop-browser User-Agent so the request is not rejected.
    headerList = {"User-Agent": "Mozilla/5.0(WindowsNT6.1;rv:2.0.1)Gecko/20100101Firefox/4.0.1"}
    # Build the request.
    req = request.Request(url, headers=headerList)
    # Fetch the raw response body.
    result = request.urlopen(req).read()
    # Parse the HTML with BeautifulSoup.
    soup = BeautifulSoup(result, "html5lib")
    # Print the page title.
    print(soup.title.string)
    # Answer bodies live in ".RichContent-inner" elements.
    anList = soup.select(".RichContent-inner")
    # Print the text of each answer.
    for an in anList:
        print(an.get_text())
# Crawl all answers from the Zhihu "recommendations" page.
def zhihuQuestions():
    """Fetch the recommendation page and crawl every linked question."""
    questionUrl = "https://www.zhihu.com/explore/recommendations"
    # Same fake browser header as zhihuAn.
    headerList = {"User-Agent": "Mozilla/5.0(WindowsNT6.1;rv:2.0.1)Gecko/20100101Firefox/4.0.1"}
    req = request.Request(questionUrl, headers=headerList)
    result = request.urlopen(req).read()
    soup = BeautifulSoup(result, "html5lib")
    # Question links carry the "question_link" class; hrefs are site-relative.
    quList = soup.select(".question_link")
    for qu in quList:
        zhUrl = "https://www.zhihu.com" + qu.attrs['href']
        print(zhUrl)
        zhihuAn(zhUrl)
# Entry point: crawl the recommendation page and print each question's answers.
zhihuQuestions()
c8294ce1e4f8c6b4b0f37f048075e9b76b9e33c3 | Python | William-SKC/SLAM_Simulation | /Tsang-Kai-multi-robot-local/simulation/test.py | UTF-8 | 1,375 | 2.625 | 3 | [] | no_license | from numpy import matrix
import robot
N = 5 # number of robots
M = 1 # number of landmarks
# Shared initial state: a 10x1 column vector -- presumably an (x, y) pair
# per robot; confirm against robot.Robot's expected state layout.
initial = matrix([1, 1, 1, 2, 2, 1, -1, -1, 1, 3], dtype=float).T
robots = [None] * N
for n in range(N):
    robots[n] = robot.Robot(n, initial.copy())
landmarks = [None] * M
for m in range(M):
    landmarks[m] = robot.Landmark(m, matrix([0.01, 0.02], dtype=float).getT())
### simulation parameters
dt = 1  # time step (not referenced below)
# Run 10 steps of propagate / communicate / observe / report.
for i in range(10):
    # motion propagation for every robot
    robots[0].prop_update()
    robots[1].prop_update()
    robots[2].prop_update()
    robots[3].prop_update()
    robots[4].prop_update()
    # communication: robot 0 receives robot 4's state estimate and covariance
    robots[0].comm(robots[4].s, robots[4].sigma)
    # observation - robot 0 measures robots 1-3 (range and bearing)
    [dis, phi] = robot.relative_measurement(robots[0], robots[1])
    robots[0].rela_obsv(1, [dis, phi])
    [dis, phi] = robot.relative_measurement(robots[0], robots[2])
    robots[0].rela_obsv(2, [dis, phi])
    [dis, phi] = robot.relative_measurement(robots[0], robots[3])
    robots[0].rela_obsv(3, [dis, phi])
    # observation - robot 4 measures the landmark and robot 3
    [dis, phi] = robot.relative_measurement(robots[4], landmarks[0])
    robots[4].ablt_obsv([dis, phi], landmarks[0])
    [dis, phi] = robot.relative_measurement(robots[4], robots[3])
    robots[4].rela_obsv(3, [dis, phi])
    # per-step report for the two robots of interest
    print('\n\nt = ' + str(i) + ' ===================================\n\n\n')
    robots[0].status()
    #robots[1].status()
    #robots[2].status()
    #robots[3].status()
    robots[4].status()
| true |
61b48853f407dc11ee2f19d23e262eee33e69932 | Python | Ericgoodboy/mayeye | /mayeye/mo2.py | UTF-8 | 1,065 | 2.65625 | 3 | [] | no_license | import math
def func(string:str):
    """Count arrangements of the letters of *string* subject to a
    vowel/consonant constraint (exact combinatorial intent unclear --
    see the NOTE comments below)."""
    # Build the letter-frequency table of the input.
    j=list(string)
    mapc = {}
    for i in j:
        if i in mapc:
            mapc[i]+=1
        else:
            mapc[i]=1
    # des = product of the factorials of the letter multiplicities
    # (the usual divisor for permutations of a multiset).
    des = 1
    for i in mapc:
        nn = mapc[i]
        k = 1
        for j in range(1, nn + 1):
            k *= j
        des *= k
    # Count consonants (x) and vowels (y), case-insensitively.
    # NOTE(review): the vowel set omits 'u' -- confirm whether intentional.
    m=string.lower()
    c=list(m)
    s = set("aeio")
    x = 0
    y = 0
    for i in c:
        if i not in s:
            x+=1
        else:
            y+=1
    #print(x,y)
    # Too many vowels relative to consonants: no valid arrangement.
    if x<y-1:
        return 0
    # num = (y+1)^(x-y+1).  NOTE(review): formula unverified; the special
    # case below forces num=1 when the exponent is 0 (math.pow would
    # already return 1.0 there, but as a float).
    num = math.pow(y+1,x-y+1)
    #print(y+1,x-y+1)
    if x-y+1==0:
        num=1
    # nx = x!; ny captures y! while x! is being built (stays 1 if y == 0
    # or y > x).
    nx=1
    ny=1
    for i in range(1,x+1):
        nx*=i
        if i==y:
            ny=nx
    # for i in range(1,y+1):
    #     nx*=i
    # NOTE(review): leftover debug print; remove once verified.
    print(nx,ny,num)
    if y==0:
        return nx/des
    if y==1:
        return nx*(x+1)
    return nx*ny*num/des
# line = "aefgh"
# print(int(func(line)))
import sys
# Read the number of test cases, then one input string per line, printing
# func()'s result truncated to an int for each.
n = int(sys.stdin.readline().strip())
for i in range(n):
    # read one input line
    line = sys.stdin.readline().strip()
    print(int(func(line)))
| true |
3570d58a5e8e49ce6dc744b5345706f0d1254e8c | Python | Code-Institute-Submissions/PRO4-Mealdeals | /memberships/tests/test_models.py | UTF-8 | 759 | 2.5625 | 3 | [] | no_license | from django.test import TestCase
from memberships.models import Customer
from django.contrib.auth.models import User
class TestCustomer(TestCase):
    """Tests for the Customer membership model.

    NOTE(review): the original method was a mis-declared ``@classmethod``
    taking ``self`` and was named ``CustomerTestData``, so the test runner
    never executed it; it also called ``Customer.get(pk=1)`` instead of
    ``Customer.objects.get(pk=1)`` and fetched ``user`` twice.
    """

    def test_create_customer_increments_count(self):
        """Creating a Customer for a user adds exactly one row for that user."""
        product = Customer.objects.get(pk=1)
        user = User.objects.get(pk=1)
        num_orders = Customer.objects.filter(user=user).count()
        Customer.objects.create(
            payment_status="payment_collected",
            total=product.price,
            product=product,
            user=user,
        )
        self.assertEqual(
            num_orders + 1, Customer.objects.filter(user=user).count())
| true |
d31d3b08255014d4c75d9adb15131b40800089f6 | Python | Lanckie/Image-Retrieval | /functions/Node.py | UTF-8 | 1,192 | 2.6875 | 3 | [] | no_license | import numpy as np
import math
flatten = lambda x : [z for y in x for z in y]
class Node(object):
    """Node of a hierarchical vocabulary tree used for image retrieval.

    Leaves accumulate per-image occurrence counts (self.Images); inner
    nodes route feature descriptors to the child with the closest Feat
    centroid.
    """
    def __init__(self, Parent, Feat, Depth):
        super(Node, self).__init__()
        self.Parent = Parent
        # Feat: cluster-centroid descriptor for this node (numpy array).
        self.Feat = Feat
        self.Depth = Depth
        self.children = []
        self.isLeaf = False
    def pushChild(self, child):
        """Attach *child* as a sub-node."""
        self.children.append(child)
    def setLeaf(self):
        """Mark this node as a leaf and create its image-count table."""
        self.isLeaf = True
        setattr(self, "Images", {})
    def tfidf(self, des, i):
        """Route descriptor *des* of image *i* down to its leaf and bump
        that leaf's count for image *i*."""
        if self.isLeaf:
            self.Images[i] = self.Images[i]+1 if i in self.Images else 1
        else:
            #print('Push Images Not a Leaf ')
            # Descend into the child whose centroid is nearest to des.
            index = np.argsort([np.linalg.norm(x.Feat-des) for x in self.children])[0]
            self.children[index].tfidf(des, i)
    def allLeaves(self):
        """Return the list of all leaf nodes under this node."""
        if self.isLeaf:
            return [self]
        else:
            return flatten([x.allLeaves() for x in self.children])
    def weight(self):
        """Leaf weight for scoring; returns None on non-leaf nodes.

        NOTE(review): IDF is conventionally ln(N / N_i); this computes
        log1p(500 * N_i) (500/1.0*len multiplies rather than divides) --
        confirm whether the intended formula was log(500 / len(self.Images)).
        """
        if self.isLeaf:
            return math.log1p(500/1.0*len(self.Images))
        else:
            print('weight : not a leaf')
            return None
    def query(self, des):
        """Return the leaf that descriptor *des* routes to."""
        if self.isLeaf:
            return self
        else:
            #print('Push Images Not a Leaf ')
            index = np.argsort([np.linalg.norm(x.Feat-des) for x in self.children])[0]
            return self.children[index].query(des)
b07c80d4d3e41dfc70ba1082f866037205cef079 | Python | chenshiyang/Algorithm-python | /sort/quick_sort.py | UTF-8 | 729 | 3.828125 | 4 | [] | no_license | def quickSort(array):
if array is None or len(array) <= 1:
return array
partition(array, 0, len(array) - 1)
def partition(array, start, end):
    """Recursively quicksort array[start..end] in place.

    Uses array[start] (via getPivot) as the pivot.  After the scan,
    elements < pivot end up left of index i-1, the pivot is swapped into
    position i-1, and the two sides are sorted recursively.
    """
    if end <= start:
        return
    pivot = getPivot(array, start, end)
    # Skip the initial run of elements already <= pivot.
    i = start + 1;
    while i <= end and array[i] <= array[pivot]:
        i += 1
    # Lomuto-style pass: compact all remaining elements < pivot to the left.
    j = i
    while j <= end:
        if array[j] < array[pivot]:
            array[i], array[j] = array[j], array[i]
            i += 1
        j += 1
    # Move the pivot between the two partitions.
    array[pivot], array[i - 1] = array[i - 1], array[pivot]
    partition(array, start, i - 2)
    partition(array, i, end)
def getPivot(array, i, j):
    """Pivot-selection strategy: always choose the leftmost index *i*.

    *array* and *j* are accepted so alternative strategies (median-of-three,
    random) can be swapped in without changing call sites.
    """
    return i
if __name__ == '__main__':
    # Smoke test: sort a small list in place and print the result.
    array = [5, 3, 4, 9, 1]
    quickSort(array)
    print(array)
a5629e1b47e81da79c797d5403dc02778f686d2c | Python | henrylindev/yahtzee_game | /unit_tests/test_is_yahtzee.py | UTF-8 | 469 | 2.921875 | 3 | [] | no_license | from unittest import TestCase
from yahtzee import is_yahtzee
class TestIsYahtzee(TestCase):
    """Unit tests for is_yahtzee: all five held dice show the same face."""

    def test_is_yahtzee_yes(self):
        # Five identical dice -> yahtzee.
        dice = [1, 1, 1, 1, 1]
        self.assertEqual(True, is_yahtzee(dice))

    def test_is_yahtzee_no(self):
        # One die differs -> not a yahtzee.
        dice = [1, 1, 1, 1, 6]
        self.assertEqual(False, is_yahtzee(dice))
| true |
db7182ba59e04cfd998c29f48c614622a6c75a89 | Python | june07/packagecontrol.io | /app/lib/package_control/downloaders/basic_auth_downloader.py | UTF-8 | 2,100 | 3.171875 | 3 | [
"MIT"
] | permissive | import base64
try:
# Python 3
from urllib.parse import urlparse
except (ImportError):
# Python 2
from urlparse import urlparse
class BasicAuthDownloader(object):
    """
    A base (mixin) for downloaders that adds an HTTP basic auth header.

    Credentials are looked up in ``self.settings['http_basic_auth']``,
    which is assumed to map a domain name to a ``(username, password)``
    pair. NOTE(review): ``self.settings`` is provided by the concrete
    downloader subclass — confirm it is always dict-like.
    """

    def build_auth_header(self, url):
        """
        Constructs an HTTP basic auth header for a URL, if present in
        settings

        :param url:
            A unicode string of the URL being downloaded

        :return:
            A dict with an HTTP header name as the key and the value as the
            value. Both are unicode strings. Empty when no credentials are
            configured for the URL's domain.
        """
        auth_string = self.get_auth_string(url)
        if not auth_string:
            return {}
        b64_auth = base64.b64encode(auth_string.encode('utf-8')).decode('utf-8')
        return {"Authorization": "Basic %s" % b64_auth}

    def get_auth_string(self, url):
        """
        Constructs a string of username:password for use in HTTP basic auth

        :param url:
            A unicode string of the URL being downloaded

        :return:
            None, or a unicode string of the username:password for the URL
        """
        username, password = self.get_username_password(url)
        if username and password:
            return "%s:%s" % (username, password)
        return None

    def get_username_password(self, url):
        """
        Returns a tuple of (username, password) for use in HTTP basic auth

        :param url:
            A unicode string of the URL being downloaded

        :return:
            A 2-element tuple of either (None, None) or (username, password)
            as unicode strings
        """
        # Fix: the original computed ``urlparse(url).netloc`` twice; the
        # single lookup below is done only once the settings exist.
        auth_settings = self.settings.get('http_basic_auth')
        if auth_settings and isinstance(auth_settings, dict):
            domain_name = urlparse(url).netloc
            params = auth_settings.get(domain_name)
            if params and isinstance(params, (list, tuple)) and len(params) == 2:
                return (params[0], params[1])
        return (None, None)
| true |
ab800c626814ad0a320e18a85b05b215fec31b57 | Python | nishultomar/lockebot | /util.py | UTF-8 | 4,021 | 2.84375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import re
import os
import logging
from colored import fore, back, style
# ANSI colour/style presets for terminal output, composed from the
# third-party `colored` package's fore/back/style escape fragments.
STY_DESC = fore.LIGHT_GREEN + back.BLACK
STY_DESC_DEBUG = fore.SKY_BLUE_1 + back.BLACK + style.DIM
STY_USER = style.RESET + fore.WHITE + back.BLACK
STY_CURSOR = fore.LIGHT_GOLDENROD_2B + back.BLACK + style.BOLD
STY_RESP = fore.WHITE + back.MEDIUM_VIOLET_RED + style.BOLD
STY_RECIPIENT = fore.WHITE + back.DODGER_BLUE_2 + style.BOLD
# STY_RESP = fore.WHITE + back.GREY_11 + style.BOLD #+ style.NORMAL
STY_EMAIL = fore.WHITE + back.GREY_11 + style.BOLD
def setup_custom_logger(name):
    """Return a logger named *name* with a colourised stream handler attached.

    Records are rendered with the module's STY_DESC_DEBUG prefix and a
    `%Y-%b-%d %H:%M:%S` timestamp; the logger level is set to WARN.
    """
    log_format = (STY_DESC_DEBUG +
                  '%(asctime)s - %(module)s - %(levelname)8s - %(message)s' +
                  style.RESET)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter(log_format, datefmt='%Y-%b-%d %H:%M:%S'))
    custom_logger = logging.getLogger(name)
    custom_logger.setLevel(logging.WARN)
    custom_logger.addHandler(stream_handler)
    return custom_logger
# self.logger = logging.getLogger(self.BOTNAME)
# self.logger.setLevel(logging.DEBUG)
# self.ch.setLevel(logging.WARN)
# # ch.setLevel(logging.DEBUG)
# self.ch.setFormatter(self.formatter)
# self.logger.addHandler(self.ch)
def clear_screen():
    """Clear the terminal in a cross-platform way (Windows vs. POSIX)."""
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
def nthwords2int(nthword):
    """Convert an "nth-word" (e.g. ``3rd``, ``21st``, ``28th``) to an int.

    Strips the ordinal-suffix characters (the letters of st/nd/rd/th)
    from both ends and parses the remainder.

    :param nthword: ordinal string such as ``'3rd'``
    :return: the plain integer, e.g. ``3``
    :raises Exception: if the remainder is not a valid integer
    """
    ordinal_ending_chars = 'stndrh'  # union of the letters in 'st', 'nd', 'rd', 'th'
    try:
        return int(nthword.strip(ordinal_ending_chars))
    except (ValueError, AttributeError) as e:
        # Narrowed from a blanket `except Exception`, chained so the
        # original error stays in the traceback, and str() used so a
        # non-string argument cannot crash the message concatenation.
        raise Exception('Illegal nth-word: ' + str(nthword)) from e
def text2int(textnum, numwords={}):
    """Convert number words ("one", "ninety") or ordinal words ("first",
    "thirteenth") — optionally combined, e.g. "twenty-first" — to an int.
    Adapted from http://stackoverflow.com/a/598322/142780

    NOTE: the mutable default argument is deliberate here — the word
    lookup table is built on the first call and cached across calls.

    :param textnum: whitespace/hyphen separated number words
    :return: the parsed integer
    :raises Exception: on any token that is not a known number word
    """
    if not numwords:
        # Build the word -> (scale, increment) table once.
        units = [
            'zero', 'one', 'two', 'three', 'four', 'five', 'six',
            'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve',
            'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen',
            'eighteen', 'nineteen']
        tens = [
            '', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
            'seventy', 'eighty', 'ninety']
        scales = [
            'hundred', 'thousand', 'million', 'billion', 'trillion',
            'quadrillion', 'quintillion', 'sexillion', 'septillion',
            'octillion', 'nonillion', 'decillion']
        numwords['and'] = (1, 0)  # "one hundred and five" — 'and' is a no-op
        for idx, word in enumerate(units):
            numwords[word] = (1, idx)
        for idx, word in enumerate(tens):
            numwords[word] = (1, idx * 10)
        for idx, word in enumerate(scales):
            numwords[word] = (10 ** (idx * 3 or 2), 0)

    # Irregular ordinals that cannot be derived by suffix stripping.
    ordinal_words = {
        'first': 1, 'second': 2, 'third': 3, 'fifth': 5, 'eighth': 8,
        'ninth': 9, 'twelfth': 12}
    # Regular ordinal endings: 'twentieth' -> 'twenty', 'fourth' -> 'four'.
    ordinal_endings = [('ieth', 'y'), ('th', '')]

    current = result = 0
    tokens = re.split(r'[\s-]+', textnum)
    for word in tokens:
        if word in ordinal_words:
            scale, increment = (1, ordinal_words[word])
        else:
            # Reduce a regular ordinal to its cardinal form before lookup.
            for ending, replacement in ordinal_endings:
                if word.endswith(ending):
                    word = '%s%s' % (word[:-len(ending)], replacement)

            if word not in numwords:
                raise Exception('Illegal word: ' + word)

            scale, increment = numwords[word]

        if scale > 1:
            current = max(1, current)  # bare "hundred" means 1 * 100

        current = current * scale + increment
        if scale > 100:
            # 'thousand' and above closes the current group.
            result += current
            current = 0

    return result + current
def clean_input(u_input):
    """Whitelist-filter user text.

    Keeps alphanumerics plus a small set of punctuation characters,
    then strips trailing whitespace.
    """
    allowed_punct = {' ', '.', ',', ';', '\'', '?', '-'}
    kept = [ch for ch in u_input if ch.isalnum() or ch in allowed_punct]
    return ''.join(kept).rstrip()
| true |
d1f9bea5bdfa2d1784bc082e326a22c80203f51a | Python | kundan4U/Python | /Core Python/Function/GlobalFun.py | UTF-8 | 138 | 3.15625 | 3 | [] | no_license | a=5000
def show():
a=10000
print("Local Variable A : ",a)
x=globals()['a']
print( " X is :",x)
show()
print(" global Varialle A : ",a) | true |
735f79c2a8e8aea378278ff67f73e4027cccb0fd | Python | hiop5155/minst_CNN | /minst_prediction_use_CNN.py | UTF-8 | 3,245 | 2.78125 | 3 | [] | no_license | from keras.datasets import mnist
from keras.utils import np_utils
import numpy as np
import tensorflow as tf
np.random.seed(10)
#讀dataset
(x_Train, y_Train),(x_Test, y_Test) = mnist.load_data()
#資料預處理
x_Train4D = x_Train.reshape(x_Train.shape[0],28,28,1).astype('float32')
x_Test4D = x_Test.reshape(x_Test.shape[0],28,28,1).astype('float32')
#normalize
x_Train4D_normalize = x_Train4D/255
x_Test4D_normalize = x_Test4D/255
y_TrainOneHot = np_utils.to_categorical(y_Train)
y_TestOneHot = np_utils.to_categorical(y_Test)
#modeling
from keras.models import Sequential
from keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D
model = Sequential()
#convolution layer 1
model.add(Conv2D(filters=16,
kernel_size=(5,5),
padding='same',
input_shape=(28,28,1),
activation='relu'))
#pooling layer 1
model.add(MaxPooling2D(pool_size=(2,2)))
#covolution layer 2
model.add(Conv2D(filters=36,
kernel_size=(5,5),
padding='same',
activation='relu'))
#pooling layer 2
model.add(MaxPooling2D(pool_size=(2,2)))
#Dropout to avoid overfitting
model.add(Dropout(0.25))
#reshape to 1D input
model.add(Flatten())
#hidden layer 128 units
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
#output layer
model.add(Dense(10,activation='softmax'))
#training
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
for i in range(10):
train_history=model.fit(x=x_Train4D_normalize,
y=y_TrainOneHot,validation_split=0.2,
epochs=10, batch_size=300,verbose=2)
import matplotlib.pyplot as plt
def show_train_history(train_history, train, validation):
    """Plot a training metric and its validation counterpart on one chart.

    *train* and *validation* are key names into the Keras History dict,
    e.g. 'acc'/'val_acc' or 'loss'/'val_loss'.
    """
    history = train_history.history
    plt.plot(history[train])
    plt.plot(history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
# Plot accuracy and loss curves of the last fit() call.
# NOTE(review): 'acc'/'val_acc' history keys are Keras-version specific
# (newer releases use 'accuracy'/'val_accuracy') — confirm against the
# installed Keras.
show_train_history(train_history,'acc','val_acc')
show_train_history(train_history,'loss','val_loss')
# Evaluate on the held-out test set; evaluate() returns [loss, accuracy].
scores = model.evaluate(x_Test4D_normalize, y_TestOneHot)
print('Test loss:', scores[0])
print('accuracy',scores[1])
# Predicted class labels for the test set (predict_classes is the
# pre-TF2 Sequential API).
prediction = model.predict_classes(x_Test4D_normalize)
prediction[:10]  # evaluated and discarded outside a notebook
def plot_images_labels_prediction(images, labels, prediction, idx, num=10):
    """Show up to 25 images in a 5x5 grid with their labels as titles.

    :param images: indexable collection of 2-D images
    :param labels: true label per image
    :param prediction: predicted label per image; pass an empty sequence
        to omit predictions from the titles
    :param idx: index of the first image to display
    :param num: how many consecutive images to show (capped at 25)
    """
    fig = plt.gcf()
    fig.set_size_inches(12, 14)
    if num > 25:
        num = 25  # the grid is 5x5, so at most 25 panels fit
    for i in range(0, num):
        ax = plt.subplot(5, 5, 1 + i)
        ax.imshow(images[idx], cmap='binary')
        title = "label=" + str(labels[idx])
        if len(prediction) > 0:
            title += ",prediction=" + str(prediction[idx])
        ax.set_title(title, fontsize=10)
        ax.set_xticks([])
        ax.set_yticks([])
        idx += 1
    # BUG FIX: the original had `plt.show` without parentheses, which only
    # references the function and never renders the figure.
    plt.show()
# Visual sanity check of the first test images and their predictions.
plot_images_labels_prediction(x_Test,y_Test,prediction,idx=0)
# Confusion matrix of true label vs predicted label.
import pandas as pd
# NOTE: outside a notebook these bare expressions are computed and discarded.
pd.crosstab(y_Test,prediction,
            rownames=['label'],colnames=['predict'])
df = pd.DataFrame({'label':y_Test, 'predict':prediction})
df[:2]
# Persist the trained model to disk.
model.save('minst_prediction_use_CNN.h5')
# Reload it. NOTE(review): tf.contrib was removed in TensorFlow 2.x;
# this load path only works on TF 1.x — confirm the target TF version.
model = tf.contrib.keras.models.load_model('minst_prediction_use_CNN.h5')
01d26444b7cb9a535ae267ea3c0f8cf8137b01cf | Python | iintarnoo/Pgame-16070 | /code_tugas2.py | UTF-8 | 265 | 2.65625 | 3 | [] | no_license | # Tugas Ke2. MK Pemrograman Game
print("Halo nama saya Iin Tarno")
print("Halo angkatan 2016")
print("Halo Teknik Info UMMU Ternate")
#baris ini tidak akan dieksekusi
print ("Halo Anak Info....") # baris ini dieksekui
#Baris ini juga merupakan Baris Komentar
| true |
e90abc7db3e08380d7d6e26dbbfa1bd8c88070ac | Python | devscheffer/IGTI-Cientista_de_Dados | /Modulo 2/Arquivos Modulo 2/3 - PraticaColetaMongoDB/PyCode/coletaMongoDB.py | UTF-8 | 4,005 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Coleta de Dados no MongoDB com Python
# **OBSERVAÇÃO:**
#
# Antes de usar o pymongo pela primeira vez, é necessário instalar o pacote. Para isso, acesse o prompt do Anaconda e execute o comando abaixo:
#
# *conda install -c wakari pymongo*
# In[ ]:
#importação de biblioteca
import pymongo
# In[ ]:
# Connect to the local MongoDB server.
#from pymongo import MongoClient
con = pymongo.MongoClient('localhost', 27017)


# In[ ]:


con


# In[ ]:


# List the available databases.
con.list_database_names()


# In[ ]:


# Select the 'igti' database.
db = con.igti


# In[ ]:


db


# In[ ]:


# List the collections in the database.
db.list_collection_names()


# In[ ]:


# Select the 'megasena' collection.
collection = db.megasena


# In[ ]:


# Fetch the first document of the collection.
collection.find_one()


# In[ ]:


# Fetch the first document matching the condition.
print(collection.find_one({"Ganhadores_Sena": "0"}))


# In[ ]:


# Import pprint for nicer document output.
import pprint #another package for printing


# In[ ]:


pprint.pprint(collection.find_one({"Ganhadores_Sena": "0"}))


# In[ ]:


# Fetch the first document matching the condition.
pprint.pprint(collection.find_one({"Concurso":"100"}))


# In[ ]:


# Select all documents matching the condition and print them.
for documets in collection.find({"Ganhadores_Sena": "5"}):
    pprint.pprint(documets)


# In[ ]:


# List the collections again.
db.list_collection_names()


# #### Create a database and a collection

# In[ ]:


con = pymongo.MongoClient("mongodb://localhost:27017/")


# In[ ]:


# List the databases.
con.list_database_names()


# In[ ]:


db = con["Vendas"]


# In[ ]:


# NOTE: MongoDB creates databases/collections lazily, on first insert.
con.list_database_names()


# In[ ]:


colecao = db["clientes"]


# In[ ]:


db.list_collection_names()


# In[ ]:


documento = {"nome" : "maria", "idade" : 23}


# In[ ]:


resultado = colecao.insert_one(documento)


# In[ ]:


print(resultado)


# In[ ]:


con.list_database_names()


# In[ ]:


db.list_collection_names()


# In[ ]:


print(colecao.find_one())


# In[ ]:


# Select all documents in the collection and print them.
for resultado in colecao.find():
    pprint.pprint(resultado)


# In[ ]:


documento = [
    {"nome" : "jorge", "idade" : 33},
    {"nome" : "ana"},
    {"nome": "William", "endereco": "Avenida Central n. 954"},
    {"nome" : "ana", "endereco": "Avenida Central n. 954"},
    {"nome": "William", "endereco": "Avenida Central n. 954"},
    {"nome": "William"}
]


# In[ ]:


pprint.pprint(documento)


# In[ ]:


# NOTE(review): insert_one expects a single document; passing this list
# raises a TypeError — the insert_many call below is the correct one.
resultado = colecao.insert_one(documento)


# In[ ]:


# Insert several documents into the collection at once.
resultado = colecao.insert_many(documento)


# In[ ]:


print(resultado)


# In[ ]:


for resultado in colecao.find():
    pprint.pprint(resultado)


# In[ ]:


# Update the FIRST document whose name is 'ana'.
condicao = { 'nome': 'ana' }
valor = { "$set": { "logradouro": "Avenida JK","num": 345 } }
colecao.update_one(condicao, valor)
for resultado in colecao.find():
    pprint.pprint(resultado)


# In[ ]:


for resultado in colecao.find({ 'nome': 'ana' }):
    pprint.pprint(resultado)


# In[ ]:


# Update ALL documents whose name is 'ana'.
condicao = { 'nome': 'ana' }
valor = { "$set": { "logradouro": "Avenida JK","num": 345 } }
colecao.update_many(condicao, valor)


# In[ ]:


for resultado in colecao.find({ 'nome': 'ana' }):
    pprint.pprint(resultado)


# In[ ]:


for resultado in colecao.find({ 'nome': 'William' }):
    pprint.pprint(resultado)


# In[ ]:


# Delete the FIRST document whose name is 'William'.
condicao = { 'nome': 'William' }
colecao.delete_one(condicao)


# In[ ]:


for resultado in colecao.find({ 'nome': 'William' }):
    pprint.pprint(resultado)


# In[ ]:


# Delete ALL documents whose name is 'William'.
condicao = { 'nome': 'William' }
colecao.delete_many(condicao)


# In[ ]:


for resultado in colecao.find({ 'nome': 'William' }):
    pprint.pprint(resultado)


# In[ ]:


for resultado in colecao.find():
    pprint.pprint(resultado)


# In[ ]:
be0eeb55b554145caf3edc74fe1990a2f7c940cb | Python | psj8532/problem_solving | /BOJ/브루트포스/영화감독숌.py | UTF-8 | 327 | 3.109375 | 3 | [] | no_license | #08:49
# BOJ 1436: print the n-th positive integer (starting from 666)
# whose decimal representation contains the substring "666".
n = int(input())

found = 0
candidate = 665
while found < n:
    candidate += 1
    if '666' in str(candidate):
        found += 1

print(candidate)
#09:15 | true |
e6d101d6e0c44b12001fe44f2a6340b1dca8f5a5 | Python | mpUrban/python_problems | /problem6.py | UTF-8 | 566 | 3.890625 | 4 | [
"MIT"
] | permissive | # written in VS Code with jupyter extension
#https://simpleprogrammer.com/programming-interview-questions/
# How are duplicates removed from a given array in Java?
#%%
import numpy as np
#%%
# %% sample data containing duplicates
testArray = [2, 1, 5, 8, 4, 5, 4, 7, 3, 9]

# %% sort a copy so the original list keeps its order
testArraySorted = sorted(testArray)
print(testArraySorted)

# %% walk the sorted list, keeping each value only on its first appearance
previous = testArraySorted[0]
resultArray = [previous]
for element in testArraySorted:
    if element != previous:
        resultArray.append(element)
        previous = element

# %% unique values in ascending order
print(resultArray)
5ce56e9a21f3fc2386b643cbd7e1027eaff45e6c | Python | SnowSongLabs/PyAPI | /PyApi.py | UTF-8 | 2,706 | 2.828125 | 3 | [] | no_license | # This API test is based off of the flask API tutorial located at:
# https://www.codementor.io/sagaragarwal94/building-a-basic-restful-api-in-python-58k02xsiq
# and
# https://blog.miguelgrinberg.com/post/designing-a-restful-api-with-python-and-flask
#
# This will be the basic, barebones API as descried on this site
from flask import Flask, request
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from json import dumps
from flask import jsonify
from EmployeeHandler import EmployeeHandler
db_connect = create_engine('sqlite:///chinook.db')
app = Flask(__name__)
api = Api(app)
@app.route('/employees', methods=['GET'])
def get_employees():
    """Return id and name columns for every employee as JSON."""
    connection = db_connect.connect()
    records = connection.execute("select EmployeeId, LastName, FirstName from employees")
    columns = tuple(records.keys())
    payload = [dict(zip(columns, row)) for row in records.cursor]
    return jsonify({'data': payload})
@app.route('/tracks', methods=['GET'])
def get_tracks():
    """Get all track information"""
    connection = db_connect.connect()
    records = connection.execute('select trackid, name, composer, unitprice from tracks;')
    columns = tuple(records.keys())
    payload = [dict(zip(columns, row)) for row in records.cursor]
    return jsonify({'data': payload})
@app.route('/employees/<employee_id>', methods=['GET', 'POST'])
def employee_id(employee_id):
    # GET: return all columns of the matching employee row as JSON.
    if request.method == 'GET':
        """Get the information for the <employee_id>"""
        conn = db_connect.connect()
        # NOTE(review): int() blocks classic string-based SQL injection
        # here, but a parameterized query would still be safer — consider
        # conn.execute(text(...), id=...) instead of %-formatting.
        query = conn.execute('select * from employees where EmployeeID = %d;' % int(employee_id))
        result = {'data': [dict(zip(tuple (query.keys()) ,i)) for i in query.cursor]}
        return jsonify(result)
    # POST: forward the query-string parameters to EmployeeHandler.
    if request.method == 'POST':
        """Modify/Update the information for <employee_id>"""
        # Make a dictionary of received params
        receivedArgs = {}
        for sentArgs in request.args:
            receivedArgs[sentArgs] = request.args.get(sentArgs)
        # Send the dictionary to the employee handler and then return the status to the
        # API caller.
        statusDict = EmployeeHandler(employee_id, receivedArgs)
        print(statusDict)
        # NOTE(review): this returns on the FIRST key inspected — "Failed"
        # only if that key equals 1, "Pass" otherwise; with several keys the
        # outcome depends on dict order. Verify EmployeeHandler's contract
        # (presumably a single {code: message} entry).
        for key in statusDict:
            if key == 1:
                return "Failed - %s " % (statusDict[key])
            else:
                return "Pass"
    # If something goes wrong - the default it to fail
    return "Failed - Reached end of method without any actions"
@app.route('/genres', methods=['GET'])
def get_genres():
    """Return every row of the genres table as JSON."""
    connection = db_connect.connect()
    records = connection.execute('select * from genres')
    columns = tuple(records.keys())
    payload = [dict(zip(columns, row)) for row in records.cursor]
    return jsonify({'data': payload})
if __name__ == '__main__':
    # Start the Flask development server on port 5002.
    app.run(port='5002')
86e42d754185838b9ef4021e27a563cb6ffbeab2 | Python | osynetskyi/coursera-crypto | /task4.py | UTF-8 | 3,388 | 2.75 | 3 | [] | no_license | import urllib2
import sys
import requests as rq
# Target ciphertext as hex — 64 bytes, i.e. presumably an IV followed by
# three 16-byte AES-CBC blocks (it is split into 4 x 32 hex chars below).
ct = "f20bdba6ff29eed7b046d1df9fb7000058b1ffb4210a580f748b4ac714c001bd4a61044426fb515dad3f21f18aa577c0bdf302936266926ff37dbf7035d5eeb4"
net = rq.session()  # one HTTP session reused for every oracle query
TARGET = 'http://crypto-class.appspot.com/po?er='
#--------------------------------------------------------------
# padding oracle
#--------------------------------------------------------------
class PaddingOracle(object):
    """Client for the remote CBC padding oracle.

    The server answers 404 when the submitted ciphertext decrypts to a
    valid padding, and any other status otherwise.
    """

    def query(self, q):
        """Return True when hex ciphertext *q* has valid padding."""
        response = net.get(TARGET + q)
        return response.status_code == 404
def form_pad(guess, pad):
    """Return the byte-string *guess* repeated *pad* times.

    BUG FIX: the original iterated over the literal tuple ``(0, pad)`` —
    exactly two iterations no matter what *pad* was — instead of
    ``range(pad)``, so it always returned the guess doubled.
    """
    return guess * pad
def dec2hex(num):
    """Hex-encode *num* without the '0x' prefix, left-padded to two digits."""
    digits = hex(num).split('x')[1]
    return digits.zfill(2)
#print int(str(9) + '09', 16)
#print form_pad('02', 2)
#ct = "f20bdba6ff29eed7b046d1df9fb7000058b1ffb4210a580f748b4ac714c001bd4a61044426fb515dad3f21f18aa577c0bdf302936266926ff37dbf7035d5eeb4"
# Split the ciphertext into 16-byte (32 hex char) blocks.
chunks = ['f20bdba6ff29eed7b046d1df9fb70000', '58b1ffb4210a580f748b4ac714c001bd', '4a61044426fb515dad3f21f18aa577c0', 'bdf302936266926ff37dbf7035d5eeb4']
cur = chunks[2]
po = PaddingOracle()
#print po.query("f20bdba6ff29eed7b046d1df9fb7000058b1ffb4210a580f748b4ac714c001bd4a61044426fb515dad3f21f18aa577c0bdf302936266926ff37dbf7035d5eeb4")
# Break the current block into its individual bytes (Python 2: / is
# integer division here).
bytes_ = []
for i in range(0, len(cur)/2):
    bytes_.append(cur[2*i:2*(i+1)])
'''for guess in range(0, 255):
    byte = bytes_[-1]
    #print byte, guess,
    byte = hex((int(byte, 16) ^ guess )^ 1).split('x')[1]
    if len(byte) == 1:
        byte = "0" + byte
    guessed = ''.join(bytes_[:-1]) + byte
    send = chunks[0] + chunks[1] + guessed + chunks[3]
    if po.query(send):
        print "The correct guess is", guess'''
#correct = ['73', '69', '66', '72', '61', '67', '65','09', '09','09','09','09','09','09','09','09']
# Classic CBC padding-oracle loop: recover the plaintext of block 3 one
# byte at a time, right to left. For padding length `pad`, forge the
# previous block as C1 XOR (guess || recovered bytes) XOR (pad repeated),
# and ask the oracle whether the padding is valid.
correct = []
for pad in range(1, 17):
    #print "padding is", dec2hex(pad)*pad
    for guess in range(0, 255):
        #print byte, guess,
        #print dec2hex(pad)*pad
        '''if len(correct) > 7:
            print "BOOM!", dec2hex(guess) + ''.join(correct)'''
        pwn = dec2hex(int(chunks[1], 16) ^ int(dec2hex(guess) + ''.join(correct), 16) ^
            int(dec2hex(pad)*pad, 16))
        #guessed = ''.join(bytes_[:-1]) + byte
        # pwn[:-1] drops the trailing 'L' that Python 2's hex() appends to
        # long integers.
        send = chunks[0] + pwn[:-1] + chunks[2]
        print "Sending", pwn[:-1],
        if po.query(send):
            print "\nThe correct guess is", guess
            correct.insert(0, dec2hex(guess))
            print "correct is now", ''.join(correct), "\n"
            break
'''for guess in range(0, 255):
    #print byte, guess,
    #print dec2hex(pad)*pad
    pwn = hex((int(chunks[2], 16) ^ int((dec2hex(guess) + '0909090909090909'), 16) ^
        int('090909090909090909', 16))).split('x')[1]
    #guessed = ''.join(bytes_[:-1]) + byte
    send = chunks[0] + chunks[1] + pwn[:-1] + chunks[3]
    #print send
    if po.query(send):
        print "The correct guess is", guess
        #correct.insert(0, dec2hex(guess))
        #print "correct is now", ''.join(correct)
        break'''
cfb972524e0fdb4973ec0430aa673096aea717e0 | Python | JamesHizon/Hadoop-Mini-Project | /Hadoop_Mini_Project/final_reducer.py | UTF-8 | 2,019 | 3.640625 | 4 | [] | no_license | # Reducer 1 - Python script used to populate values and obtain value count
import sys
# Initialize master vehicle_info dictionary
master_info = {}  # vin -> {"make", "year", "accident_count"}


def flush():
    """Print one line per vehicle: the (make, year) tuple, a tab, then the
    accident count — the format the downstream mapper script reads."""
    for vin, info in master_info.items():
        print(f"{(info['make'], info['year'])}\t{info['accident_count']}")
# Debug - Ignore
# count = 0
# for line in sys.stdin:
# line = line.strip()
# print(line)
# count += 1
# if count == 3:
# break
for line in sys.stdin:
# Recall that each line has four values.
# I may need to edit code so that I instead have other three values vs. a values_arr.
line = line.strip()
# print(line)
vin_number, values = line.split('\t')
# List comprehension to remove unnecessary characters and extract desired values.
values_list = [val.replace("'", "").replace("(", "").replace(")", "").replace(" ", "") for val in values.split(",")]
incident_type = values_list[0]
vehicle_make = values_list[1]
vehicle_year = values_list[2]
# Check for whether vin_number is present in dictionary
if vin_number not in master_info:
# Use master_info dictionary to keep track of accident_count for each make and year
master_info[vin_number] = {"make": None, "year": None, "accident_count": 0}
# Collect the vehicle make and year data from master_info where incident type == "I" to propagate
if incident_type == "I":
master_info[vin_number]["make"] = vehicle_make
master_info[vin_number]["year"] = vehicle_year
# Increment the count for each incident type == A (accident records)
if incident_type == "A":
master_info[vin_number]["accident_count"] += 1
# Output reducer values
flush() | true |