blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ecf5ccf221ab260be60c64afe8d24592fcc7deaa | Python | jxnding/leetcode | /find-all-numbers-disappeared-in-an-array.py | UTF-8 | 776 | 3.296875 | 3 | [] | no_license | class Solution:
def findDisappearedNumbers(self, nums: List[int]) -> List[int]:
def follow(n):
nonlocal nums
if n>len(nums): return
curr = nums[n-1]
if curr > 0: # actual number
nums[n-1] = -1
follow(curr)
elif curr < 0: # already a counter
nums[n-1] -= 1
else:
nums[n-1] = -1
if nums==None or nums==[]: return []
for i, val in enumerate(nums):
if val > 0:
n = val
nums[i] = 0
follow(n)
ans = []
for i, val in enumerate(nums):
if val == 0: ans.append(i+1)
return ans
#### O(n), O(1); 14, 46 Python3 | true |
dabf61970b64fb558612f623398c3a53b3d062f2 | Python | FranFer03/Practica | /Curso/Class_3/practice_class_3.7.py | UTF-8 | 401 | 3.1875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np

# Sample [0, 2*pi) finely enough for smooth curves.
valores = np.arange(0, 2 * np.pi, 0.01)
seno = np.sin(valores)
coseno = np.cos(valores)
# BUG FIX: plt.subplot(1,2) returns a single Axes and cannot be unpacked into
# (fig, (ax1, ax2)); plt.subplots(1, 2) returns the (figure, axes) pair intended here.
fig, (gr1, gr2) = plt.subplots(1, 2)
fig.subplots_adjust(left=0.15, wspace=0.7)
# Style boxes for axis labels (currently unused by the plots; kept for the exercise).
box1 = dict(facecolor="aliceblue", pad=5, alpha=0.4)
box2 = dict(facecolor="darkviolet", pad=5, alpha=0.4)
gr1.plot(valores, seno)
gr2.plot(valores, coseno)
plt.show()
| true |
7f76c31a9f51c02c5742769901383856ab88bfc7 | Python | rajasriramoju/CS247-Project-STGCN | /STGCN/data_reader.py | UTF-8 | 10,911 | 3.046875 | 3 | [] | no_license | import os
import gzip
import pandas as pd
from io import StringIO
import numpy as np
import sys
from tqdm import tqdm
from pprint import pprint
import math
import geopy.distance
np.set_printoptions(threshold=sys.maxsize)
# Set up our metadata dictionary {station_id : (latitude, longitutde) }
def create_station_metadata():
    """Parse the PEMS station metadata file into a lookup dict.

    Returns {station_id: [(latitude, longitude), freeway, direction]},
    read from the tab-separated 'meta2.txt' export in the working directory.
    """
    # Only the identifying and positional columns are needed downstream.
    meta = pd.read_csv('meta2.txt', delimiter="\t")
    meta = meta[["ID", "Fwy", "Dir", "Latitude", "Longitude"]]
    return {
        int(row[0]): [(row[3], row[4]), row[1], row[2]]
        for row in meta.values
    }
# Get n_stations randomly (requires that the station have metadata, and it is in the dataset)
def get_n_stations(n_stations, dataset_stations, metadata_stations):
    """Randomly pick n_stations from stations present in both sources.

    A station is usable only if it appears in the capture data AND has a
    metadata (lat/long) entry.
    """
    usable = set(dataset_stations.tolist()).intersection(metadata_stations)
    # NOTE(review): np.random.choice samples WITH replacement here, so the
    # result can contain duplicates when n_stations approaches the pool
    # size — confirm this is intended.
    return np.random.choice(list(usable), n_stations)
# Map station ids to indexes (maps from ID to a value between 0 and n_stations-1 )
def map_station_to_index(station_id, chosen_stations):
    """Return the positions of station_id within chosen_stations (np.where tuple)."""
    matches = chosen_stations == station_id
    return np.nonzero(matches)
def map_index_to_station(index, chosen_stations):
    """Inverse of map_station_to_index: look up the station id at a position."""
    station_id = chosen_stations[index]
    return station_id
# Calculate distance in latlong between two nodes
def latlong_distance(latlong1, latlong2):
    """Euclidean distance between two (lat, long) pairs.

    Note: this is a distance in degrees, not meters (the geopy geodesic
    alternative was left commented out by the original author).
    """
    delta = np.array(latlong1) - np.array(latlong2)
    return np.linalg.norm(delta)
# Compute the adjaceny matrix for our chosen stations
def compute_weight_matrix(chosen_stations, station_location_dict):
    """Build the (n, n) pairwise-distance matrix for the chosen stations.

    Entry (i, j) is the lat/long euclidean distance between stations i and j;
    the diagonal stays zero.  The exponential-kernel weighting from the STGCN
    paper (exp(-d^2/sigma^2) with a threshold) is intentionally NOT applied
    here — raw distances are saved and any kernel is applied downstream.

    Cleanup: removed dead locals (unused sigma and the freeway/direction
    lookups) and the commented-out kernel experiments that obscured the loop.
    """
    n = chosen_stations.shape[0]
    weight_matrix = np.zeros((n, n))
    for i in tqdm(range(n)):
        for j in range(n):
            if i == j:
                continue  # self-distance stays 0
            station_i = map_index_to_station(i, chosen_stations)
            station_j = map_index_to_station(j, chosen_stations)
            latlong_i = station_location_dict[station_i][0]
            latlong_j = station_location_dict[station_j][0]
            weight_matrix[i, j] = latlong_distance(latlong_i, latlong_j)
    return weight_matrix
# Reorder our stations so those from the same freeway cluster together
def cluster_stations(chosen_stations, chosen_indexes, station_location_dict):
    """Reorder stations (and their indexes) so same-freeway stations are adjacent.

    Returns (stations, indexes) as ndarrays, stably sorted by freeway id so
    later visualization groups each freeway's sensors together.
    """
    annotated = [
        (station, chosen_indexes[pos], station_location_dict[station][1])
        for pos, station in enumerate(chosen_stations)
    ]
    annotated.sort(key=lambda entry: entry[2])  # stable sort by freeway
    reordered_stations = np.array([entry[0] for entry in annotated])
    reordered_indexes = np.array([entry[1] for entry in annotated])
    return reordered_stations, reordered_indexes
# Create our dataset for the adjancency matrix and historical road data
# We randomly select n_stations which form the adjanceny matrix.
def create_custom_dataset(n_stations=228):
    """Build and save a PEMSD7-style dataset for n_stations random stations.

    Reads every daily gzip export under the data directory, collects average
    speeds for a random subset of stations (with metadata), reorders them by
    freeway, computes the pairwise lat/long distance matrix, and writes the
    results under preprocessed/ as .npz and .csv.

    NOTE(review): assumes 'meta2.txt', the gzip data directory, and a
    'preprocessed/' output directory exist relative to the CWD — confirm.
    """
    # Get station metadata (locations for each station)
    station_location_dict = create_station_metadata()
    metadata_stations = list(station_location_dict.keys())
    # print(station_location_dict.keys())
    # Open up the PEMS dataset
    data_dir = "PEMS_7_May_June_include_weekends"
    data_files = os.listdir(data_dir)
    chosen_stations = [] # Used to select which stations we are interested in.
    # There are a number of errors that arise with the stations
    # sometimes a text file won't have data for a particular id
    # or all values will be nan. In these cases, we actually take more stations
    # than n_stations to give us more options later
    buffer_n_stations = int(n_stations * 3)
    # Some overall notes:
    # There are 4589 stations - we form our adjacency matrix based on
    # these stations, so we should shrink this value down
    # There are only 21 freeways, but that value doesn't matter in our system.
    # To get latlong data for each station, you must find the PEMS metadata:
    # https://pems.dot.ca.gov/?dnode=Clearinghouse&type=meta&district_id=7&submit=Submit
    # 2d list, where each inner list is a column for a station's avg speed over time
    new_data = [[] for x in range(buffer_n_stations)]
    found_stations = []
    # Open up a file (make sure our data is aligned over time, which requires sorting)
    # One special note - the first file in our list actually is missing some data - skip it.
    # Otherwise, each file has 288 datapoints across each station (meaning that all stations)
    # have a full day's worth of data.
    for file_item in tqdm(sorted(data_files)[1:]):
        # Get filedir and data
        filedir = os.path.join(data_dir, file_item)
        gzip_file = gzip.open(filedir, "rb")
        # First, decode from bytes, then StringIO for reading into pandas as csv
        data = StringIO(gzip_file.read().decode("utf-8"))
        df = pd.read_csv(data, header=None) # There are no column names here...
        # Drop columns where all columns are NaN
        # df = df.dropna(axis=1, how='all')
        # Convert to NP - pandas DF is too slow for row iteration.
        numpy_data = df.values
        # If we haven't chosen our list of stations for our dataset, do so now
        if not len(chosen_stations):
            # Check what stations are available in the dataset
            unique_stations = np.unique(numpy_data[:,1])
            # Randomly get some stations for the new dataset
            chosen_stations = get_n_stations(buffer_n_stations, unique_stations, metadata_stations)
        # Iterate through each row, getting the station and freeway
        # There's probably a faster way to do this, but I'm too tired.
        # For info on each row:
        # https://drive.google.com/file/d/1muiKe1uAWJwz2uIz5DZHR1GTEYPa2uGw/view?usp=sharing
        for row in numpy_data: # Assuming this is aligned in time
            station = row[1] # Station
            freeway = row[3] # Freeway number
            avg_occupancy = row[10] # Average occupancy of all lanes over 5 mins [0,1]
            avg_speed = row[11] # Average mph of cars over all lanes in 5 mins
            # If this is a chosen station, keep the data
            if station in chosen_stations:
                # Add to our new data
                idx = map_station_to_index(station, chosen_stations)[0][0]
                new_data[idx].append(avg_speed)
                found_stations.append(station)
            # if freeway in freeway_dict:
            #     freeway_dict[freeway].add(station)
            # else:
            #     freeway_dict[freeway] = set([station])
        # break
    # Now that we are done with the files, get the new data
    # new_data should be (n_stations, num datapoints)
    n_datapoints = max([len(x) for x in new_data])
    # Iterate through new_data, and make sure we get rid of the bad cases
    bad_indexes = []
    for i,x in enumerate(new_data):
        # Check if the number of datapoints is correct
        if len(x) != n_datapoints:
            bad_indexes.append(i)
        # Make sure values are not all nan for this data
        if np.isnan(x).all():
            bad_indexes.append(i)
    # Drop stations with missing days or all-NaN series (indexes may repeat
    # in bad_indexes; the 'not in' test tolerates that).
    new_data = [np.array(x) for i,x in enumerate(new_data) if i not in bad_indexes]
    chosen_stations = [x for i,x in enumerate(chosen_stations) if i not in bad_indexes]
    # Convert new_data to ndarray
    new_data = np.array(new_data)
    chosen_stations = np.array(chosen_stations)
    print(new_data.shape)
    print(chosen_stations.shape)
    # Now we actually pick the correct number of stations from our better list
    chosen_station_indexes = np.random.choice( np.arange(chosen_stations.shape[0]), n_stations)
    chosen_stations = chosen_stations[chosen_station_indexes]
    # One note - for the sake of later visualization, we need to cluster these stations
    # where those that share a freeway are closer together
    chosen_stations, chosen_station_indexes = cluster_stations(\
        chosen_stations, chosen_station_indexes, station_location_dict)
    # Transpose so the saved matrix is (time, station).
    new_data = np.transpose(new_data[chosen_station_indexes])
    print(new_data.shape)
    print(chosen_stations.shape)
    # Now calculate our adjacency matrix based on lat/long
    weight_matrix = compute_weight_matrix(chosen_stations, station_location_dict)
    # Now we save our data
    filename = "preprocessed/PEMSD7_" + str(n_stations) + ".npz"
    with open(filename, 'wb') as f:
        np.savez(f, processed_dataset=new_data, adj_matrix=weight_matrix, station_ids=chosen_stations)
    # Save the V and W matrices
    np.savetxt("preprocessed/V_" + str(n_stations) + ".csv", new_data, delimiter=',')
    np.savetxt("preprocessed/W_" + str(n_stations) + ".csv", weight_matrix, delimiter=',')
if __name__ == "__main__":
    # Guard the build behind __main__ so importing this module (e.g. to reuse
    # the helper functions) no longer triggers the full, heavy dataset build.
    create_custom_dataset(n_stations=228) # TODO: make sure you iterate through all the files
# TODOS:
# - The distance might actually be in meters, so you have to convert from latlong
#   double check this with the original W by checking range of values
| true |
117a1b4f62b1622ffffd7d4868abd31a40beb043 | Python | shafferjohn/toolkit | /MD5 File of Directory Traversal/md5.py | UTF-8 | 1,435 | 3.28125 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
# !/bin/python2
# name: MD5 encrypted File of Directory Traversal
# author: Shaffer John
# homepage: http://www.shaffer.cn
import os
import sys, getopt
from hashlib import md5
def md5_file(name):
    """Return '<name> : <hex md5>' for the file at path `name`.

    Reads in binary mode in fixed-size chunks so arbitrarily large files do
    not get loaded into memory at once, and uses `with` so the handle is
    closed even if reading raises.
    """
    m = md5()
    with open(name, 'rb') as f:  # binary mode so the digest matches raw bytes
        for chunk in iter(lambda: f.read(8192), b''):
            m.update(chunk)
    return name + " : " + m.hexdigest()
# Parse -d (directory to traverse) and -o (output file, appended to).
opts, args = getopt.getopt(sys.argv[1:], "d:o:")
folderpath = None
output = None
for op, value in opts:
    if op == "-d":
        # Normalize Windows separators so output paths are uniform.
        folderpath = value.replace('\\', '/')
    elif op == "-o":
        output = value

# Collect every file path under the target directory.
filepaths = list()
for dirpath, dirnames, filenames in os.walk(folderpath):
    for filename in filenames:
        filepaths.append(os.path.join(dirpath, filename).replace('\\', '/'))

# Hash each file once (the previous version called md5_file twice per file,
# hashing every file's contents twice), echo to stdout, and append to the log.
with open(output, 'a') as f:
    for filename in filepaths:
        line = md5_file(filename)
        print(line)
        f.write(line + '\n')
# Readme:
# launch with params, like:
# python md5.py -d ./file -o md5.txt
# the filename of this script is md5.py
# -d means which folder (all files in it) do you traverse
# -o output a bunch of md5 value to a certain file (append method).
# You can do what you want about this script.
# By shaffer.cn
# 使用方法:
# 带参数执行,例如python md5.py -d ./file -o md5.txt
# 脚本文件是md5.py
# -d 遍历文件夹(下的所有文件)
# -o 输出MD5值到某一文件(追加方式)
# 你可以任意修改此脚本
# By shaffer.cn | true |
24545f8f88e6578e374497cef65704a5a65810cc | Python | pkarthik15/pytorch-lightning-image-classification | /model.py | UTF-8 | 2,000 | 2.578125 | 3 | [] | no_license | import torch
from torch import nn, optim
from torchvision import models
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional import accuracy
from config import learning_rate, total_number_of_classes, pre_trained
class ClassificationModel(pl.LightningModule):
    """ResNet-50 image classifier wrapped as a PyTorch-Lightning module.

    Hyperparameters (learning rate, class count, pretrained flag) come from
    the project's config module.
    """
    def __init__(self):
        super(ClassificationModel, self).__init__()
        # Backbone: torchvision ResNet-50, optionally ImageNet-pretrained.
        self.net = models.resnet50(pretrained=pre_trained)
        # Replace the final FC layer to match this task's class count.
        self.net.fc = nn.Linear(in_features=self.net.fc.in_features, out_features=total_number_of_classes)
    def forward(self, x):
        # Delegate straight to the backbone; returns raw class logits.
        return self.net(x)
    def configure_optimizers(self):
        # Single Adam optimizer over all parameters, no LR scheduler.
        return optim.Adam(self.parameters(), lr=learning_rate)
    def training_step(self, batch, batch_idx):
        # Output from Dataloader
        imgs, labels = batch
        # Prediction
        preds = self.forward(imgs)
        # Calc Loss (CrossEntropyLoss applies softmax to the raw logits)
        loss = nn.CrossEntropyLoss()(preds, labels)
        # Calc Accuracy
        acc = accuracy(preds, labels)
        logs = {
            'loss': loss,
            'accuracy': acc
        }
        return {'loss': loss, 'logs': logs}
    def validation_step(self, batch, batch_idx):
        # Validation uses the same forward/loss/accuracy path as training.
        results = self.training_step(batch, batch_idx)
        return results
    def validation_epoch_end(self, outputs):
        # Average the per-step losses/accuracies collected over the epoch.
        avg_loss = torch.tensor([x['logs']['loss'] for x in outputs]).mean()
        avg_accu = torch.tensor([x['logs']['accuracy'] for x in outputs]).mean()
        self.log('validation loss', avg_loss, logger=True, prog_bar=True)
        self.log('validation accuracy', avg_accu, logger=True, prog_bar=True)
    def training_epoch_end(self, outputs):
        # Same epoch-level aggregation for the training metrics.
        avg_loss = torch.tensor([x['logs']['loss'] for x in outputs]).mean()
        avg_accu = torch.tensor([x['logs']['accuracy'] for x in outputs]).mean()
        self.log('training loss', avg_loss, logger=True, prog_bar=True)
        self.log('training accuracy', avg_accu, logger=True, prog_bar=True)
| true |
a5e01e1150f53194865b9015af66ca2aa5a367e1 | Python | mspranger/icdl2016language | /general_multiclass_multilabel_mlp3.py | UTF-8 | 8,888 | 2.578125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import numpy
import pickle
import keras.models
import keras.layers
import keras.optimizers
import description_game
class MLP3:
    """Multi-label MLP classifier built on Keras.

    Hidden layers use the configured activation; the output layer is sigmoid
    with one unit per label, trained with binary cross-entropy, so each label
    is predicted independently (multi-label, not softmax multi-class).
    """
    # Trained Keras model; populated by fit().
    clf = None;
    # Training hyperparameters shared by all instances.
    batch_size = 20
    nb_epoch = 100
    def __init__( self, layers, dropout, activation, input_dim = 17, output_dim = 100):
        self.layers = layers
        self.dropout = dropout
        self.activation = activation
        self.input_dim = input_dim # can be different from actual
        self.output_dim = output_dim # can be different from actual
    def __str__( self):
        return "MLP3(layers=%s,dropout=%f,activation=%s,input_dim=%i,output_dim=%i)" % ( self.layers, self.dropout, self.activation, self.input_dim, self.output_dim)
    def fit( self, X, y, verbose = 1):
        # Actual dims come from the data, not the constructor arguments.
        input_dim = X.shape[1]
        output_dim = y.shape[1]
        # create model
        clf = keras.models.Sequential()
        for idx, l in enumerate( self.layers):
            if idx == 0:
                clf.add( keras.layers.Dense( l, input_dim = input_dim, activation = self.activation))
            else:
                clf.add( keras.layers.Dense( l, activation = self.activation))
            # Dropout after every hidden layer for regularization.
            clf.add( keras.layers.Dropout( self.dropout))
        # Sigmoid output: independent per-label probabilities.
        clf.add( keras.layers.Dense( output_dim, activation = 'sigmoid'))
        clf.compile( optimizer = keras.optimizers.Adam(), loss = 'binary_crossentropy')
        # fit
        self.clf = clf
        self.clf.fit( X, y, batch_size = self.batch_size, nb_epoch = self.nb_epoch, verbose = verbose)
    def predict( self, X):
        # Threshold the sigmoid outputs at 0.5 into a hard 0/1 label matrix.
        y_pred = self.clf.predict( X)
        y_pred[y_pred >= .5] = 1
        y_pred[y_pred < .5] = 0
        return y_pred
    def clone( self):
        # Fresh, untrained copy with the same hyperparameters.
        return MLP3( self.layers, self.dropout, self.activation, self.input_dim, self.output_dim)
def create_mlp3_from_hyper_optimize( optimization_result, input_dim = 17, output_dim = 100):
    """Build an MLP3 configured from a hyper-optimization result dict.

    The dict must carry 'layers', 'dropout', and 'activation' keys as
    produced by hyper_optimize()/load_optimized().
    """
    params = optimization_result
    return MLP3(
        layers = params["layers"],
        dropout = params["dropout"],
        activation = params["activation"],
        input_dim = input_dim,
        output_dim = output_dim,
    )
def hyper_optimize( nr_samples = 4532,
                    nr_dimensions = 17,
                    nr_words = 100,
                    nr_words_per_utterance = 5,
                    batch_size = 20,
                    nb_epoch = 100,
                    file_name = "mlp3_optimize_results_17.pickle"):
    """Grid-search MLP3 layer size/depth/dropout on simulated language data.

    Generates a random dataset via description_game, trains one MLP3 per
    hyperparameter combination, appends every result to `file_name` (pickle),
    and returns the configuration with the best test f-score.

    NOTE(review): batch_size/nb_epoch parameters are accepted but never
    forwarded to MLP3 (which uses its class-level defaults) — confirm intent.
    """
    print( "hyper_optimize MLP3 %s" % [nr_samples, nr_dimensions, nr_words, nr_words_per_utterance, batch_size, nb_epoch])
    X = numpy.random.uniform( size = ( nr_samples, nr_dimensions))
    y_cat, y_bin = description_game.compute_tutor_weighted( X, nr_words, nr_words_per_utterance)
    # Fixed 3399/rest train/test split (75% of the default 4532 samples).
    X_train, y_train, X_test, y_test = X[:3399], y_bin[:3399], X[3399:], y_bin[3399:]
    best_result = { "f-score" : 0.0 }
    for layer_size in [64,128,256,512,1024]:
        for nr_layers in range( 1, 3):
            for dropout in numpy.arange( 0.1, 1.0, 0.2):
                for activation in ["relu"]: #, "tanh", "sigmoid"
                    layers = [layer_size for i in range(nr_layers)]
                    print( "\n-----\nMLP3 %s with %s layers and dropout %.2f" % ( activation, layers, dropout))
                    # NOTE(review): the bare excepts below swallow ALL errors
                    # (including KeyboardInterrupt) so one bad combination
                    # does not abort the whole grid — but failures are silent.
                    try:
                        clf = MLP3( layers, dropout, activation, X_train.shape[1], y_train.shape[1])
                        clf.fit( X_train, y_train)
                        y_pred = clf.predict( X_train)
                        # print( sklearn.metrics.f1_score( y_train, y_pred, average = "samples"))
                        f_score_train = description_game.compute_f_scores( y_train, y_pred)
                        print( f_score_train)
                        y_pred = clf.predict( X_test)
                        # print( sklearn.metrics.f1_score( y_test, y_pred, average = "samples"))
                        f_score_test = description_game.compute_f_scores( y_test, y_pred)
                        result = { "layers" : layers,
                                   "dropout" : dropout,
                                   "activation" : activation,
                                   "f-score" : f_score_test[2],
                                   "result-train" : f_score_train,
                                   "result-test" : f_score_test }
                        print( f_score_test)
                        if result["f-score"] > best_result["f-score"]:
                            best_result = result
                            print("\n\n Best so far %s" % best_result)
                        # Append this run to the on-disk result log
                        # (load-modify-rewrite of the whole pickle each time).
                        all_results = []
                        try:
                            all_results = pickle.load( open( file_name, "rb"))
                        except:
                            pass
                        all_results.append( result)
                        pickle.dump( all_results, open( file_name, "wb"))
                    except:
                        print( "ERROR processing MLP3 %s layers, dropout %.2f, activation %s" % ( layers, dropout, activation))
    return best_result
def load_optimized( nr_dimensions = 17):
    """Return the stored optimization result with the best test f-score.

    Reads the pickle written by hyper_optimize() for the given dimension
    count and picks the entry whose result-test f-score is highest.
    """
    with open( "mlp3_optimize_results_%i.pickle" % nr_dimensions, "rb") as fh:
        stored = pickle.load( fh)
    return max( stored, key = lambda r: r["result-test"][2])
if __name__ == "__main__":
    import argparse
    # CLI: choose which experiment(s) to run; flags are independent so several
    # can be combined in one invocation.
    parser = argparse.ArgumentParser()
    parser.add_argument( '--experiment-1',
                         action = "store_true",
                         dest = "experiment_1",
                         default = False,
                         help = "")
    parser.add_argument( '--experiment-scaling-dimensions',
                         action = "store",
                         dest = "experiment_scaling_dimensions",
                         default = None,
                         type = int,
                         help = "")
    parser.add_argument( '--experiment-scaling-p',
                         action = "store",
                         dest = "experiment_scaling_p",
                         default = None,
                         type = float,
                         help = "")
    parser.add_argument('--hyper-optimize',
                        action = "store_true",
                        dest = "hyper_optimize",
                        default = False,
                        help= "grid search for best number of layers, activations and dropout")
    args = parser.parse_args()
    # optimize: run the grid search for the default and several input sizes.
    if args.hyper_optimize:
        hyper_optimize()
        for nr_dimensions in [ 10, 100, 1000, 10000]:
            hyper_optimize( nr_dimensions = nr_dimensions, file_name = "mlp3_optimize_results_%i.pickle" % nr_dimensions)
    # experiment robot data: run the best stored classifier on all datasets.
    # NOTE(review): the "optimized"/"non-optimized" loop reuses the SAME
    # optimized clf both iterations — only the output filename changes.
    if args.experiment_1:
        clf = create_mlp3_from_hyper_optimize( load_optimized())
        import general_multiclass_multilabel
        for optimized_str in ["optimized","non-optimized"]:
            general_multiclass_multilabel.run_simulated_data_clfs( [clf],
                save_results = "gmm-results-simulated-data-%s.pickle" % optimized_str,
                workers = None)
            general_multiclass_multilabel.run_robot_data_all_clfs( [clf],
                save_results = "gmm-results-robot-data-all-%s.pickle" % optimized_str,
                workers = None)
            general_multiclass_multilabel.run_robot_data_clfs( [clf],
                save_results = "gmm-results-robot-data-%s.pickle" % optimized_str,
                workers = None)
    # scaling dimensions: simulated data with a caller-chosen input size.
    if args.experiment_scaling_dimensions:
        clf = create_mlp3_from_hyper_optimize( load_optimized())
        import general_multiclass_multilabel
        for optimized_str in ["optimized","non-optimized"]:
            general_multiclass_multilabel.run_simulated_data_clfs( [clf],
                nr_dimensions = args.experiment_scaling_dimensions,
                save_results = "gmm-results-simulated-data--nr-dimensions=%i-%s.pickle" % (args.experiment_scaling_dimensions, optimized_str),
                workers = None)
    # scaling p: simulated data sweeping the p parameter at 10 dimensions.
    if args.experiment_scaling_p:
        clf = create_mlp3_from_hyper_optimize( load_optimized())
        import general_multiclass_multilabel
        for optimized_str in ["optimized","non-optimized"]:
            general_multiclass_multilabel.run_simulated_data_clfs( [clf],
                nr_dimensions = 10,
                p = args.experiment_scaling_p,
                save_results = "gmm-results-simulated-data--p=%.2f-%s.pickle" % (args.experiment_scaling_p, optimized_str),
                workers = None)
b8b72f6ee1b6a6c7968a2184e4033ba06f1df2f2 | Python | ashmastaflash/gwdetect | /gwdfunctions.py | UTF-8 | 12,984 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
'''
this is why we can't have nice things. Also, where
we hide our functions.
'''
import gwdglobals
import pcapy
import ConfigParser
#import os.path
from netaddr import IPNetwork, IPAddress
from connected import Connected
from remote import Remote
from router import Router
def eth_addr(a):
    """Format the first six octets of raw packet bytes as 'aa:bb:cc:dd:ee:ff'."""
    octets = ['%.2x' % ord(a[i]) for i in range(6)]
    return ':'.join(octets)
def createNode(ip, mac):
    # Classify an observed (ip, mac) pair as inside or outside the protected
    # subnet and log the result.
    #
    # NOTE(review): the Connected/Remote instances built below are bound to
    # the local parameter names and then discarded — nothing is appended to
    # the gwdglobals node lists, so this function only prints; disposition()
    # performs the real node creation. Confirm whether this is dead code.
    subnet = gwdglobals.subnet
    if ipInSubnet(ip,subnet):
        print ip, ' is in subnet ' , subnet
        mac = Connected(ip, mac)
    else:
        ip = Remote(ip)
        print ip, ' is not in subnet ' , subnet
print ip, ' is not in subnet ' , subnet
def ipInSubnet(ipaddr, subnet):
    # Return True when ipaddr (dotted-quad string) falls inside subnet (CIDR
    # string), using netaddr for the membership test. Side effect: fires
    # audit message 1017 on every positive match.
    if IPAddress(ipaddr) in IPNetwork(subnet):
        messagebody = 'IP: ' + ipaddr + ' Subnet: ' + subnet
        firemessage('1017',messagebody)
        return True
    else:
        return False
def printoutput():
outfile = gwdglobals.outlog
insidehost = ''
gateway = ''
if not outfile == '':
f = open(outfile,'a')
f.write('Routes: inside, outside, gateway, observed direction:\n')
for h in gwdglobals.routes:
route_entry = h[0] + '\t' + h[1] + '\t' + h[2] + '\t' + h[3] +'\n'
f.write(str(route_entry))
f.write('Connected Nodes:\n')
for i in gwdglobals.connected_nodes:
f.write(i.mac + ' ' + i.ip + '\n')
f.write('Remote Nodes:\n')
for j in gwdglobals.connected_nodes:
f.write(j.ip + '\n')
f.write('Routers:\n')
for k in gwdglobals.routers:
f.write(k.mac + '\n')
if any(m.mac == k.mac for m in gwdglobals.connected_nodes):
routerIP = [connNode for connNode in gwdglobals.connected_nodes if connNode.mac == k.mac]
f.write(k.mac + ' ' + routerIP[0].ip)
return()
print 'Routes: inside, outside, gateway, observed direction'
for i in gwdglobals.routes:
print i
print 'Connected Nodes: '
for j in gwdglobals.connected_nodes:
print j.mac , ' ' , j.ip
print 'Remote Nodes:'
for k in gwdglobals.remote_nodes:
print k.ip
print 'Routers:'
for l in gwdglobals.routers:
print l.mac
if any(m.mac == l.mac for m in gwdglobals.connected_nodes):
routerIP = [connNode for connNode in gwdglobals.connected_nodes if connNode.mac == l.mac]
print l.mac , ' ' , routerIP[0].ip
def write_circos():
    # Write a Circos tableviewer matrix: one column per gateway, one row per
    # connected host, each cell counting confirmed routes host->gateway.
    outfile = gwdglobals.circos_report
    outmatrix = []
    router_labels = []
    host_labels = []
    # Create the gateway labels (only routers whose MAC maps to a known
    # connected node get a label, keyed by that node's IP).
    for l in gwdglobals.routers:
        if any(m.mac == l.mac for m in gwdglobals.connected_nodes):
            routerIP = [connNode for connNode in gwdglobals.connected_nodes if connNode.mac == l.mac]
            router_labels.append(routerIP[0].ip)
    # Now, node labels: hosts that appear as the inside end of some route.
    for o in gwdglobals.connected_nodes:
        if any(x[0] == o.mac for x in gwdglobals.routes):
            host_labels.append(o.ip)
    # Build tabular data structure:
    # Write header
    # NOTE(review): the file is opened twice (truncate then append); the
    # first handle is never closed — consider a single open(outfile, 'w').
    f = open(outfile,'w+')
    f.truncate()
    f = open(outfile,'a')
    topline = 'data'
    headercolors = 'data'
    for i in router_labels:
        # NOTE(review): `in` on gateway_whitelist is a substring test when
        # the whitelist is a raw config string — confirm this matching is
        # strict enough (e.g. '10.0.0.1' matches inside '10.0.0.10').
        if i in gwdglobals.gateway_whitelist :
            headercolors = headercolors + ' 100,100,100'
        else:
            headercolors = headercolors + ' 200,0,0'
        topline = topline + ' GW_' + str(i)
    # Circos labels cannot contain '.' or ':'.
    topline = topline.replace('.','_').replace(':','_')
    f.write('#Please visit http://mkweb.bcgsc.ca/tableviewer/visualize to generate this graphic the easy way.\n')
    f.write('#Here is a hint: Row with Column Colors...\n')
    f.write(headercolors + '\n')
    f.write(topline + '\n')
    for y in gwdglobals.connected_nodes:
        line = y.ip
        for i in gwdglobals.routers:
            line = line + ' ' + count_routers(y.mac,i.mac)
        # Zero counts become '-' so Circos draws no ribbon for them.
        line = 'IP_' + line.replace('.','_').replace(':','_').replace(' 0',' -')
        f.write(line + '\n')
    f.close()
def count_routers(host_mac,rtr_mac):
    # Count confirmed routes whose inside host is host_mac and whose gateway
    # is rtr_mac; returned as a string for direct use in the Circos table.
    #
    # Cleanup: removed the unreachable debug statements that followed the
    # return (they referenced router_labels/host_labels, locals of
    # write_circos, and could never execute).
    count = 0
    for i in gwdglobals.routes:
        if i[3] == 'confirmed' and i[0] == host_mac and i[2] == rtr_mac:
            count += 1
    return(str(count))
def disposition(sip,dip,smac,dmac):
    # Core bookkeeping for one observed packet: classify source/destination
    # as local/remote, create node records for first sightings, infer the
    # gateway MAC from traffic direction, and maintain the route table
    # (outbound + inbound sightings of the same route promote it to
    # 'confirmed').
    mac_src = smac
    mac_dest = dmac
    ip_src = sip
    ip_dest = dip
    # Flags start as '' (unset) and become 0/1 once determined.
    sip_local = ''
    dip_local = ''
    sip_unique = ''
    dip_unique = ''
    router = ''
    nodename = ''
    subnet = gwdglobals.subnet
    direction = 'indeterminite'
    # Test if IPs exist in connected net, then determine if they are unique
    # Determine if source IP is local
    if ipInSubnet(ip_src,subnet):
        sip_local = 1
    else:
        sip_local = 0
    # If the source IP is local, determine if it is new
    if sip_local == 1:
        if not check_local_exists(mac_src):
            sip_unique = 1
        else:
            sip_unique = 0
    # If the source IP is not local, determine if it is new
    elif sip_local == 0:
        if not check_remote_exists(ip_src):
            sip_unique = 1
        else:
            sip_unique = 0
    else:
        # Unreachable: sip_local is always 0 or 1 by this point.
        print 'Source IP is in Schrodinger box. with the cat.'
    # Determine if destination IP is local
    if ipInSubnet(ip_dest,subnet):
        dip_local = 1
    else:
        dip_local = 0
    # If dest IP is local, determine if it is new
    if dip_local == 1:
        if not check_local_exists(mac_dest):
            dip_unique = 1
        else:
            dip_unique = 0
    # If destinationIP is not local, determine if it is new
    elif dip_local == 0:
        if not check_remote_exists(ip_dest):
            dip_unique = 1
        else:
            dip_unique = 0
    else:
        # Unreachable: dip_local is always 0 or 1 by this point.
        print ' Destination IP is in Schrodinger box, playing with the cat.'
    # If Create nodes if necessary (first sighting of an address).
    if sip_unique == 1:
        if sip_local == 1:
            nodename = mac_src
            gwdglobals.connected_nodes.append(Connected(ip_src,mac_src))
            messagebody = 'IP: ' + ip_src + ' MAC: ' + mac_src
            firemessage('1010',messagebody)
        elif sip_local == 0:
            nodename = ip_src
            gwdglobals.remote_nodes.append(Remote(ip_src))
            messagebody = 'IP: ' + ip_src
            firemessage('1011',messagebody)
        else:
            print 'Failed node disposition, creation with source ' , mac_src , ' ' , ip_src
    if dip_unique == 1:
        if dip_local == 1:
            nodename = mac_dest
            gwdglobals.connected_nodes.append(Connected(ip_dest,mac_dest))
            messagebody = 'IP: ' + ip_dest + ' MAC: ' + mac_dest
            firemessage('1010',messagebody)
        elif dip_local == 0:
            nodename = ip_dest
            gwdglobals.remote_nodes.append(Remote(ip_dest))
            messagebody = 'IP: ' + ip_dest
            firemessage('1011',messagebody)
        else:
            print 'Failed node disposition, creation with destination ' , mac_dest , ' ' , ip_src
    #Now we want to know if they are inbound or outbound
    # Purely local or purely remote traffic carries no gateway information.
    if sip_local == 1 and dip_local == 1:
        router = 'Layer 2'
        return()
    if sip_local == 0 and dip_local == 0:
        router = 'Who Cares'
        return()
    if sip_local == 1 and dip_local == 0:
        direction = 'outbound'
    if sip_local == 0 and dip_local == 1:
        direction = 'inbound'
    # Now, we determine router MAC address: for crossing traffic, the
    # local-side MAC that is NOT the local host must be the gateway.
    if direction == 'outbound':
        router = mac_dest
    if direction == 'inbound':
        router = mac_src
    # Now, we create a router if none exists already...
    if not check_router_exists(router):
        rtrmac = router
        gwdglobals.routers.append(Router('undefined',rtrmac))
    else:
        if router == '':
            print 'Source: '+ mac_src + ':' + ip_src +' Destination: ' + mac_dest + ':' + ip_dest + 'EMPTY ROUTER'
    # Depending on direction, we create routes
    # Routes will be consolidated if inverse routes
    # of opposite directions exist. The inbound and outbound
    # routes will be replaced by confirmed when both directions
    # have been observed
    if direction == 'outbound':
        if check_route_exists(mac_src,ip_dest,router,'confirmed'):
            return()
        if check_route_exists(mac_src,ip_dest,router,'outbound'):
            return()
        if check_route_exists(mac_src,ip_dest,router,'inbound'):
            gwdglobals.routes.remove([mac_src,ip_dest,router,'inbound'])
            gwdglobals.routes.append([mac_src,ip_dest,router,'confirmed'])
            messagebody = 'Inside: ' + mac_src + ' Outside: ' + ip_dest + \
                ' Router: ' + router
            # NOTE(review): fires '1015' here but the mirror-image inbound
            # confirmation below fires 1016 — confirm which code is right.
            firemessage('1015',messagebody)
            return()
        else:
            gwdglobals.routes.append([mac_src,ip_dest,router,'outbound'])
            messagebody = 'Inside: ' + mac_src + ' Outside: ' + ip_dest + \
                ' Router: ' + router
            firemessage('1014',messagebody)
            return()
    if direction == 'inbound':
        if check_route_exists(mac_dest,ip_src,router,'confirmed'):
            return()
        if check_route_exists(mac_dest,ip_src,router,'inbound'):
            return()
        if check_route_exists(mac_dest,ip_src,router,'outbound'):
            gwdglobals.routes.remove([mac_dest,ip_src,router,'outbound'])
            gwdglobals.routes.append([mac_dest,ip_src,router,'confirmed'])
            messagebody = 'Inside: ' + mac_dest + ' Outside: ' + ip_src + \
                ' Router: ' + router
            # NOTE(review): 1016 is passed as an int while every other call
            # uses a string code; firemessage matches codes by equality, so
            # this lookup will never match a string-keyed message table.
            firemessage(1016,messagebody)
            return()
        else:
            gwdglobals.routes.append([mac_dest,ip_src,router,'inbound'])
            messagebody = 'Inside: ' + mac_dest + ' Outside: ' + ip_src + \
                ' Router: ' + router
            firemessage('1015',messagebody)
            return()
def check_ip_is_local(ipaddress):
    """Return True when ipaddress falls inside the protected subnet."""
    return ipInSubnet(ipaddress, gwdglobals.subnet)
def check_local_exists(macaddress):
    """Return True when macaddress is already a known connected node.

    Fires audit message 1018 on entry, then 1022 (found) or 1023 (new).
    """
    firemessage('1018', macaddress)
    known = any(node.mac == macaddress for node in gwdglobals.connected_nodes)
    firemessage('1022' if known else '1023', macaddress)
    return known
def check_remote_exists(ipaddress):
    """Return True when ipaddress is already tracked as a remote node.

    Fires audit message 1019 on entry.
    """
    firemessage('1019', ipaddress)
    return any(node.ip == ipaddress for node in gwdglobals.remote_nodes)
def check_router_exists(macaddress):
    """Return True when macaddress is already a known router.

    An empty MAC is reported (message 1024) and treated as existing so no
    bogus router record gets created for it. Otherwise fires 1020 on entry,
    then 1021 (found) or 1012 (new).
    """
    if macaddress == '':
        firemessage('1024', macaddress)
        return True
    firemessage('1020', macaddress)
    known = any(rtr.mac == macaddress for rtr in gwdglobals.routers)
    firemessage('1021' if known else '1012', macaddress)
    return known
def check_route_exists(inner,outer,rtr,direction):
    """Return True when the exact [inner, outer, router, direction] route is recorded."""
    return [inner, outer, rtr, direction] in gwdglobals.routes
def firemessage(code , message):
    # Emit one audit message identified by its string code, either to the
    # output log (when configured) or to stdout, respecting the global
    # debug level (1=ERROR, 2=INFO, 3=DEBUG).
    messages = gwdglobals.messages
    level = gwdglobals.debuglevel
    leveltext = ''
    if not gwdglobals.outlog == '':
        outlog = gwdglobals.outlog
        f=open(outlog,'a')
    # NOTE(review): the two branches below have IDENTICAL conditions, so the
    # elif (stdout) branch is unreachable; and when outlog is empty, `f` is
    # unbound and the first branch's f.write will raise NameError. The file
    # branch likely needs to be guarded on outlog being set.
    # NOTE(review): `f` is also never closed in the file branch.
    if any(i[0] == code for i in messages):
        # The comprehension variable shadows `messages`; it still works
        # because the filtered list is assigned before reuse.
        messagematch = [messages for messages in messages if messages[0] == code]
        if messagematch[0][1] <= level:
            if messagematch[0][1] == 1:
                leveltext = 'ERROR'
            elif messagematch[0][1] == 2:
                leveltext = 'INFO'
            elif messagematch[0][1] == 3:
                leveltext = 'DEBUG'
            else:
                f.write('Undefined Message Alert Level!!\n')
            f.write(leveltext + ':' + code + ' ' + messagematch[0][2] + message + '\n')
            return()
    elif any(i[0] == code for i in messages):
        messagematch = [messages for messages in messages if messages[0] == code]
        if messagematch[0][1] <= level :
            if messagematch[0][1] == 1:
                leveltext = 'ERROR'
            elif messagematch[0][1] == 2:
                leveltext = 'INFO'
            elif messagematch[0][1] == 3:
                leveltext = 'DEBUG'
            else:
                print 'Undefined message alert level!!'
            print leveltext + ':' + code , ' ' + messagematch[0][2] , message
            return()
def parse_config_file():
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.read(gwdglobals.configfile)
gwdglobals.infile = config.get("Input","filename")
gwdglobals.interface = config.get("Input","interface")
gwdglobals.gateway_whitelist = config.get("Filter","whitelist_gateways")
gwdglobals.subnet = config.get("Filter","protected_subnet")
gwdglobals.circos_report = config.get("Output","circos_report")
gwdglobals.timedebug = config.get("Output","time_debug")
| true |
d589913cda46f248368f87d89eca102cc0b4cc48 | Python | Feng-Xu/TechNotes | /python/geektime/exercise/3_1.py | UTF-8 | 361 | 4 | 4 | [] | no_license | # 练习一 变量的定义和使用
# 定义两个变量分别为美元和汇率
# 通过搜索引擎找到美元兑人民币汇率
# 使用Python计算100美元兑换的人民币数量并用print( )进行输出
# 美元
dollar = 100
# 汇率
exchange = 6.8846
print('{dol}美元兑换的人民币数量为{yuan}'.format(dol=dollar, yuan=dollar * exchange)) | true |
8925ba76bdbb868098bafa88d61e4f39f5f69188 | Python | Qiskit/rustworkx | /tests/rustworkx_tests/graph/test_pickle.py | UTF-8 | 1,606 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pickle
import unittest
import rustworkx as rx
class TestPickleGraph(unittest.TestCase):
def test_noweight_graph(self):
g = rx.PyGraph()
for i in range(4):
g.add_node(None)
g.add_edges_from_no_data([(0, 1), (1, 2), (3, 0), (3, 1)])
g.remove_node(0)
gprime = pickle.loads(pickle.dumps(g))
self.assertEqual([1, 2, 3], gprime.node_indices())
self.assertEqual([None, None, None], gprime.nodes())
self.assertEqual({1: (1, 2, None), 3: (3, 1, None)}, dict(gprime.edge_index_map()))
def test_weight_graph(self):
g = rx.PyGraph()
g.add_nodes_from(["A", "B", "C", "D"])
g.add_edges_from([(0, 1, "A -> B"), (1, 2, "B -> C"), (3, 0, "D -> A"), (3, 1, "D -> B")])
g.remove_node(0)
gprime = pickle.loads(pickle.dumps(g))
self.assertEqual([1, 2, 3], gprime.node_indices())
self.assertEqual(["B", "C", "D"], gprime.nodes())
self.assertEqual({1: (1, 2, "B -> C"), 3: (3, 1, "D -> B")}, dict(gprime.edge_index_map()))
| true |
3d02d863d3068f1e6df752f8d2ca79f044a28baf | Python | kathleen-cavanagh/dynamic-systems-control-estimation | /systems/base.py | UTF-8 | 1,350 | 2.953125 | 3 | [
"MIT"
] | permissive | """Define dynamic policy and system"""
from abc import ABC, abstractmethod
import numpy as np
class DynamicSystem(ABC):
"""Define a dynamic system such as a pendulum, cart pole, acrobot."""
@abstractmethod
def derivative(
self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Calculate system derivative at ``t`` given ``state`` and ``u``."""
pass
@abstractmethod
def jacobian(
self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Calculate jacobian of system with respect to state."""
pass
@abstractmethod
def linearization(
self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Linearize system."""
pass
def validate_state(self, state: np.ndarray):
"""Validate or modify state value for any constraints."""
pass
class MeasurementRelation(ABC):
"""Define a relationship between a measurement and the system."""
@abstractmethod
def jacobian(
self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Calculate jacobian of measurement with respect to state."""
pass
@abstractmethod
def calculate(self, state: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Calculate measurement given state and input."""
pass | true |
9d81fa27e12311fb9f6d9176e620b47e864fea0a | Python | Aasthaengg/IBMdataset | /Python_codes/p03448/s196449981.py | UTF-8 | 221 | 3 | 3 | [] | no_license | A,B,C,X = map(int,[input() for i in range(4)])
ans = 0
for i in range(A+1):
for j in range (B+1):
for k in range(C+1):
if i *500 + j * 100 + k * 50 == X:
ans = ans + 1
print(ans)
| true |
b0dc74193a48ff02f406699cbb2040c1a3ba6ed5 | Python | project-renard-survey/sarasvati | /plugins/storage/local/cache.py | UTF-8 | 1,926 | 3.109375 | 3 | [] | no_license | from sarasvati.brain import Thought
class StorageCache:
"""Storage cache"""
def __init__(self):
"""
Initializes new instance of the StorageCache class.
"""
self.thoughts = {}
self.lazy = {}
def status(self, key):
"""
Returns thought by key, None if nothing found
:type key: str
:rtype: Thought
:param key: Key
:return: Thought
"""
return self.thoughts.get(key, None), self.lazy.get(key, False)
def get(self, key):
"""
Returns thought by key, None if nothing found
:type key: str
:rtype: Thought
:param key: Key
:return: Thought
"""
return self.thoughts.get(key, None)
def add(self, thought, lazy=False):
"""
Adds thought to cache
:type lazy: bool
:type thought: Thought
:param thought: Thought
:param lazy: Is thought lazy?
"""
self.thoughts[thought.key] = thought
self.lazy[thought.key] = lazy
def remove(self, thought):
"""
Remove thought from cache
:type thought: Thought
:param thought: Thought
"""
if thought.key in self.thoughts:
del self.thoughts[thought.key]
if thought.key in self.lazy:
del self.lazy[thought.key]
def is_cached(self, key):
"""
Is thought cached?
:rtype: bool
:type key: str
:param key: Key
:return: True if thought cached
"""
return key in self.thoughts
def is_lazy(self, key):
"""
Is thought lazy?
:rtype: bool
:param key: Key
:return: True if thought lazy
"""
return self.lazy.get(key, False)
def clear(self):
"""
Clears cache
"""
self.thoughts.clear()
self.lazy.clear()
| true |
5c3de699a19e4dd88b14a8423531b984031983a9 | Python | alexdavidkim/Python3-Notes | /iterables_sequence_types/unpacking.py | UTF-8 | 3,000 | 4.46875 | 4 | [] | no_license | # All iterables, including strings can be unpacked
# a, b, c, d, e = 'hello'
# print(a, b, c, d, e)
# The Pythonic way to swap variables is below. The reason this works in Python and not languages like Java, is because Python interprets the right hand side and packs it into a tuple (a, b). Then it can unpack those items without having a temporary pointer which is required in Java. (Part 1 - Functional - Section 5:64)
# a, b = 10, 20
# print(a, b)
# b, a = a, b
# print(a, b)
# Unpacking *args and **kwargs - custom_print takes in a function, print. *args represent the objects that are passed into the standard print function. **kwargs are optional parameters where sep=' ' and end='\n' are defaults.
# def custom_print(f, *args, **kwargs):
# f(*args, **kwargs)
# custom_print(print, 'i', 'love', 'life', sep='-', end=' :) :) :)\n')
# The exception to unpacking this way are dictionaries and sets because they are unordered and therefore you can not rely on them to unpack in the order which you specify.
# my_dict = {
# 'key1': 1,
# 'key2': 2,
# 'key3': 3,
# }
# my_set = {'p', 'y', 't', 'h', 'o', 'n'}
# a, b, c = my_dict
# print(a, b, c)
# a, b, c, d, e, f = my_set
# print(a, b, c, d, e, f)
# * for unpacking ORDERED types (not dictionaries and sets)
# my_list = [1,2,3,4,5,6,7]
# a, *b = my_list
# print(a, b)
# Regardless of the iterable type, the remaining variables will be put into a list
# my_tuple = (1,2,3,4,5,6,7)
# c, *d = my_tuple
# print(c, d)
# my_str = 'hello world'
# e, f, *g = my_str
# print(e, f, g)
# Variations of below also work
# my_list = [1,2,3,4,5,6,7]
# a, b, *c, d = my_list
# print(a, b, c, d)
# Unpacking on the right hand side of an expression
# my_list_1 = [None, {'1': 1, '2': 2}, 3.14, 'hello']
# my_list_2 = [True, False, None, 'world']
# my_new_tuple = *my_list_1, *my_list_2
# print(type(my_new_tuple))
# print(my_new_tuple)
# for unpacking UNORDERED types (dictionaries and sets) - Unpacking on the left hand side the way ordered types are unpacked is pointless due to the unordered nature. However, unpacking on the right hand side is useful. (Order still not guaranteed)
# my_dict_1 = {'one': 1, 'two': 2}
# my_dict_2 = {'three': 3, 'four': 4}
# my_dict_3 = {'four': 4, 'five': 4}
# new_unpacked_list = [*my_dict_1, *my_dict_2, *my_dict_3]
# new_unpacked_set = {*my_dict_1, *my_dict_2, *my_dict_3}
# print(new_unpacked_list)
# print(new_unpacked_set)
# Nested unpacking - can perform with any iterable
# a, b, (c, d) = [1, 2, [3, 4]]
# print(a)
# print(b)
# print(c)
# print(d)
# Dummy variables - when we don't care about a particular variable (it still counts as a variable but we are indicating we don't need it)
# city, _, population = ('Beijing', 'China', 21_000_000)
# Dummy variables (more than one)
# record = ('DJIA', 2018, 1, 19, 25987.35, 26071.72, 25942.83, 26071.71)
# Instead of this
# symbol, year, month, day, open, high, low, close = record
# Do this
# symbol, year, month, day, *_, close = record
# print(*_) | true |
7cbfe88a76cce94f42bccf82a5c0aee8f976d5e2 | Python | krfurlong/hdx-python-utilities | /src/hdx/utilities/__init__.py | UTF-8 | 994 | 3.046875 | 3 | [
"MIT"
] | permissive | import sys
from uuid import UUID
import six
def raisefrom(exc_type, message, exc):
# type: (Any, str, BaseException) -> None
"""Call Python 3 raise from or emulate it for Python 2
Args:
exc_type (Any): Type of Exception
message (str): Error message to display
exc (BaseException): original exception
Returns:
None
"""
if sys.version_info[:2] >= (3, 2):
six.raise_from(exc_type(message), exc)
else:
six.reraise(exc_type, '%s - %s' % (message, exc), sys.exc_info()[2])
def is_valid_uuid(uuid_to_test, version=4):
# type: (str, int) -> bool
"""
Check if uuid_to_test is a valid UUID.
Args:
uuid_to_test (str): UUID to test for validity
version (int): UUID version. Defaults to 4.
Returns:
str: Current script's directory
"""
try:
uuid_obj = UUID(uuid_to_test, version=version)
except:
return False
return str(uuid_obj) == uuid_to_test
| true |
0a216e63b54c6fddf72b25741b1f4f25571ce95b | Python | tinyurl-com-ItsBigBrainTimeXD/backend | /handler/frontend_helper/put_helper.py | UTF-8 | 490 | 2.5625 | 3 | [
"MIT"
] | permissive | from Database.database import Database
from Core.ResponseBuilder import ResponseBuilder
def handle_put(serial_no: str, name: str, location: str, count: int, db: Database):
"""Handle the data"""
if not db.get(serial_no):
status_code = 404
else:
try:
db.update(serial_no, name, count, location)
status_code = 200
except Exception as e:
status_code = 500
return ResponseBuilder(status_code).get_response()
| true |
9a15004d03cf3825dffbec7cf1a45f4aa13e26d7 | Python | Nelg4242/StructPy | /pickle_sections.py | UTF-8 | 816 | 3 | 3 | [] | no_license | import pickle
import openpyxl as xl
def loadAISC():
wb2 = xl.load_workbook('shapes.xlsx')
item2pickle = wb2.get_sheet_by_name('Database v15.0')
return item2pickle
def database2list():
sheet = xl.load_workbook('shapes.xlsx')['Database v15.0']
data = []
labels = []
for row in sheet.iter_rows(max_row=2092):
labels.append(row[2].value)
row_data = []
for cell in row:
row_data.append(cell.value)
data.append(row_data)
return (data, labels)
def pickleObject(item2pickle, filename='pickleditem.txt'):
fileObject = open(filename, 'wb')
pickle.dump(item2pickle, fileObject)
fileObject.close()
def unPickleObject(filename):
fileObject = open(filename, 'rb')
b = pickle.load(fileObject)
return b
def main():
a = database2list()
pickleObject(a)
if __name__ == '__main__':
main()
| true |
711e1bf2751eb7a77007833a7ddc022876ff9bfb | Python | betty29/code-1 | /recipes/Python/577838_Credit_Card_Validation/recipe-577838.py | UTF-8 | 473 | 3.203125 | 3 | [
"MIT"
] | permissive | import re
def validate(number):
'Validates any credit card number using LUHN method'
number = str(number)
re.sub(r' ', '', number)
count = 0
for i in range(len(number)):
val = int(number[-(i+1)])
if i % 2 == 0:
count += val
else:
count += int(str(2 * val)[0])
if val > 5:
count += int(str(2 * val)[1])
if count % 10 == 0:
return True
else:
return False
| true |
08476faaea699e754d4e6d61e25345bee127b8bc | Python | DenisSaraev/test_troykahat | /fan.py | UTF-8 | 531 | 2.625 | 3 | [] | no_license | import troykahat
from time import sleep
PIN_AP_BUZZER = 7
PIN_AP_BUZZERs = 3
#ap = troykahat.analog_io()
#ap.pinMode(PIN_AP_BUZZER, ap.OUTPUT)
wp = troykahat.wiringpi_io()
wp.pinMode(PIN_AP_BUZZER, wp.OUTPUT)
try:
while True:
wp.digitalWrite(PIN_AP_BUZZER, True) #Hight brightness in sensor = Low brightness in LED
except KeyboardInterrupt:
print('The program was stopped by keyboard.')
finally:
wp.digitalWrite(PIN_AP_BUZZER, False)
wp.digitalWrite(PIN_AP_BUZZERs, False)
print('LED disabled.')
| true |
daa745dda0bb75bf6d4994d8ee32bd30da366451 | Python | muremwa/Python-Pyramids | /inverted_triangle.py | UTF-8 | 281 | 3.828125 | 4 | [] | no_license | """
Triangle in the following manner
********
*******
******
*****
****
***
**
*
"""
def inverted_right(num_rows):
num_rows += 1
for i in range(1, num_rows):
line = "*"*(num_rows-i)
print(line)
rows = int(input("How many rows?: "))
inverted_right(rows)
| true |
e8401bd951d15749bc666fa41dc8f27ce0e5ed65 | Python | antonioqc/Programacion1920 | /primerTrimestre/introduccion_programacion_python/2alternativas_py/ej18alt.py | UTF-8 | 990 | 4.84375 | 5 | [] | no_license | # Programa: ej18alt.py
# Propósito: Realiza un programa que pida el día de la semana (del 1 al 7) y escriba el día correspondiente.
# Si introducimos otro número nos da un error.
#
# Autor: Antonio Quesada
# Fecha: 23/10/2019.
#
# Variables a usar:
# * dia
#
# Algoritmo:
# Si el dia es 1 es lunes
# Si el dia es 2 es martes
# Si el dia es 3 es miercoles
# Si el dia es 4 es jueves
# Si el dia es 5 es viernes
# Si el dia es 6 es sabado
# Si el dia es 7 es domingo
#Petición de datos.
dia = int(input("Introduce el día de la semana (del 1 al 7): "))
print("--------------------------------------------------------------------")
#Proceso y salida.
if dia == 1:
print("Es Lunes")
elif dia == 2:
print("Es Martes")
elif dia == 3:
print("Es Miércoles")
elif dia == 4:
print("Es Jueves")
elif dia == 5:
print("Es Viernes")
elif dia == 6:
print("Es Sábado")
elif dia == 7:
print("Es Domingo")
else:
print("Numero de la semana incorrecto")
| true |
bf525375b36ce994eae6381b4975190e2564fe46 | Python | eugeneai/paropt | /socktest.py | UTF-8 | 820 | 2.75 | 3 | [] | no_license | from sympy import Symbol
from sympy.matrices import *
from sympy.printing import *
from sympy import sin, cos, Function, diff
from sympy.parsing import Maxima
x=Symbol('x')
y=Symbol('y')
f = Function('f')
M=Matrix([[x**2,y,0],[0,0,0],[2,2,2]])
DM=diff(M,x)
print ()
print(DM)
pprint (DM)
m = Maxima()
m.run_command("factor(8);")
m.run_command("factor(x^2 + 2*x*y + y^2);")
quit()
"""
import socket
HOST = '' # Symbolic name meaning the local host
PORT = 5007 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
print ('Connected by', addr)
while 1:
# import pudb; pu.db
data = conn.recv(1024)
print ("Received:", data)
if not data: break
conn.send(data)
conn.close()
"""
| true |
0e52af0c89d65c32211f706640d23c67377cf1cb | Python | Printutcarsh/Stitching-images-and-detecting-humans-using-OpenCV | /stitch_detect.py | UTF-8 | 1,263 | 3 | 3 | [] | no_license | import cv2
import imutils
# Reading the Images
left = cv2.imread("left.png")
right = cv2.imread("right.png")
cv2.imshow("Image_1", left)
cv2.imshow("Image_2", right)
images = []
images.append(left)
images.append(right)
#Stitching the two images
stitcher = cv2.Stitcher.create()
ret, pano = stitcher.stitch(images)
#It will only stitch if the left and right image has something common
if ret==cv2.STITCHER_OK:
cv2.imshow("Stitched_image", pano)
# Initializing the HOG person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# Detecting all the regions in the image that has a pedestrians inside it
(regions, _) = hog.detectMultiScale(pano,
winStride=(4, 4),
padding=(4, 4),
scale=1.05)
# Drawing the regions in the Image
for (x, y, w, h) in regions:
cv2.rectangle(pano, (x, y),
(x + w, y + h),
(0, 0, 255), 2)
# Showing the output Image
cv2.imshow("Final_output", pano)
cv2.waitKey()
else:
print("Not possible")
cv2.destroyAllWindows()
| true |
3ad4473bcf6973583ae95c52166bd3563bfaf953 | Python | inwardik/YoutubeSearch | /Youtube/video.py | UTF-8 | 1,478 | 2.578125 | 3 | [] | no_license | import json
import requests
API_KEY = 'AIzaSyAmVfqK9tJKNKcV9ochOOSetUyb_cGKo6Y'
def get_videos_links(params):
params['key'] = API_KEY
params['part'] = 'snippet'
params['maxResults'] = str(params['maxResults'])
if params.get('publishedAfter'):
params['publishedAfter'] = params['publishedAfter'].strftime("%Y-%m-%dT%H:%M:%SZ")
if params.get('publishedBefore'):
params['publishedBefore'] = params['publishedBefore'].strftime("%Y-%m-%dT%H:%M:%SZ")
if params['location_radius']:
params['type'] = 'video'
params['location_radius'] = str(params['location_radius']) + 'km'
for key, value in params.copy().items():
if not value:
del (params[key])
print(params)
url = 'https://youtube.googleapis.com/youtube/v3/search'
r = requests.get(url, params=params)
resp_dict = json.loads(r.text)
if resp_dict.get('items'):
return resp_dict['items']
return []
def get_video_stat(video_id):
params = {'key': API_KEY, 'part': 'statistics', 'id': video_id}
url = 'https://youtube.googleapis.com/youtube/v3/videos'
r = requests.get(url, params=params)
resp_dict = json.loads(r.text)
statistics = resp_dict['items'][0]['statistics']
print(resp_dict)
return statistics
#https://youtube.googleapis.com/youtube/v3/videos?part=statistics&id=bh9Txxt8z2M&key=AIzaSyAmVfqK9tJKNKcV9ochOOSetUyb_cGKo6Y
#https://www.googleapis.com/youtube/v3/commentThreads?key=AIzaSyAmVfqK9tJKNKcV9ochOOSetUyb_cGKo6Y&textFormat=plainText&part=snippet&videoId=l3Px1lru8OI&maxResults=50
| true |
f21096d66814a7b4ab8fd3f2411be5660f9899da | Python | RobertYin-SA/CNN-binary-test-classification | /code/gen_split_words.py | UTF-8 | 1,162 | 3.078125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
author : Robert Yin
date : 2017/05/27
usage :
python gen_split_words.py --input_dir ../data/whole.txt --output_dir ../data/whole_split_words.txt
"""
import argparse
import jieba
def main(args):
num_bugs = 0
with open(args.output_dir, 'w') as output_file:
with open(args.input_dir, 'r') as input_file:
for line in input_file:
try:
_, label, sentence = line.decode('utf-8').strip().split('\t')
except Exception:
num_bugs += 1
continue
sentence_words = list(jieba.cut(sentence))
new_sentence = ' '.join(sentence_words)
output_file.write((label + '\t' + new_sentence + '\n').encode('utf-8'))
print 'There are %s bugs' % str(num_bugs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Specify arguments')
parser.add_argument('--input_dir', help='Original classification text file path')
parser.add_argument('--output_dir', help='Output split words classification text file path')
args = parser.parse_args()
main(args)
| true |
2856d26437b664b165bed51a111992de1ef3d8cf | Python | Mrzhenc/DataManage | /MainWindow.py | UTF-8 | 4,491 | 2.53125 | 3 | [] | no_license | #!/usr/bin/python
# encoding=utf-8
from utils import *
from FuncWindow import CFuncWindow, CRegisterDlg, CForgotPassword
class CMainWindow(gtk.Window):
def __init__(self):
super(CMainWindow, self).__init__(gtk.WINDOW_TOPLEVEL)
self.fixed = gtk.Fixed()
self.func_fixed = gtk.Fixed()
self.init_window()
self.login_btn = gtk.Button('登录')
self.exit_btn = gtk.Button('关机')
self.username_entry = gtk.Entry()
self.password_entry = gtk.Entry()
self.login = False
self.__conf = CConfig(os.getcwd()+ "/conf.ini")
self.user_name = self.__conf.get('user_info', 'user_name')
self.password = self.__conf.get('user_info', 'password')
self.init_login_window()
self.init_func_window()
def init_window(self):
self.set_modal(True)
self.set_decorated(False)
self.connect("destroy", gtk.main_quit)
self.set_position(gtk.WIN_POS_CENTER)
self.set_size_request(WINDOW_X_SIZE, WINDOW_Y_SIZE)
self.set_keep_above(True)
self.set_title("医药管理系统")
image = new_image_from_name("bg.jpg")
self.fixed.put(image, 0, 0)
self.add(self.fixed)
def switch_ui(self):
if not self.login:
self.fixed.set_no_show_all(0)
self.func_fixed.set_no_show_all(1)
self.fixed.show_all()
self.func_fixed.hide()
else:
self.fixed.set_no_show_all(1)
self.func_fixed.set_no_show_all(0)
self.fixed.hide()
self.func_fixed.show_all()
def init_func_window(self):
image = new_image_from_name('bg.jpg')
self.func_fixed.put(image, 0, 0)
def init_login_window(self):
label_size_x = 100
label_size_y = 30
entry_size_x = 300
entry_size_y = 30
start_x = (WINDOW_X_SIZE-label_size_x-entry_size_x)/2
start_y = (WINDOW_Y_SIZE)/2
user_label = gtk.Label('用户名:')
user_label.set_size_request(label_size_x, label_size_y)
self.fixed.put(user_label, start_x, start_y)
self.username_entry.set_size_request(entry_size_x, entry_size_y)
self.fixed.put(self.username_entry, start_x+100, start_y)
start_y += 40
password_label = gtk.Label('密码:')
password_label.set_size_request(label_size_x, label_size_y)
self.fixed.put(password_label, start_x, start_y)
self.password_entry.set_size_request(entry_size_x, entry_size_y)
self.password_entry.set_visibility(False)
self.fixed.put(self.password_entry, start_x+100, start_y)
start_y += 40
start_x += 320
self.login_btn.set_size_request(80, 30)
self.login_btn.connect("clicked", self.btn_cb, "login")
self.fixed.put(self.login_btn, start_x, start_y)
start_x -= 90
self.exit_btn.set_size_request(80, 30)
self.exit_btn.connect("clicked", self.btn_cb, "shutdown")
self.fixed.put(self.exit_btn, start_x, start_y)
start_x -= 90
_btn = gtk.Button('忘记密码')
_btn.set_size_request(80, 30)
_btn.connect("clicked", self.btn_cb, "forgot_password")
self.fixed.put(_btn, start_x, start_y)
start_x -= 90
_btn = gtk.Button('注册')
_btn.set_size_request(80, 30)
_btn.connect("clicked", self.btn_cb, "register")
self.fixed.put(_btn, start_x, start_y)
def btn_cb(self, widget, opt):
if "shutdown" == opt:
os.system("shutdown -h now")
elif "login" == opt:
self.login_system()
elif "forgot_password" == opt:
CForgotPassword(self, self.__conf)
elif "register" == opt:
CRegisterDlg(self, self.__conf)
def login_system(self):
_password = self.password_entry.get_text()
if _password == "" or self.username_entry.get_text() == "":
CNotifyDlg('请输入用户名和密码')
return
self.password = self.__conf.get('user_info', 'password')
self.user_name = self.__conf.get('user_info', 'user_name')
if self.user_name != self.username_entry.get_text():
CNotifyDlg('用户名或密码不正确')
return
if self.password != _password:
CNotifyDlg('密码不正确')
return
CFuncWindow(self)
def run(self):
self.show_all()
gtk.main()
| true |
e0a931027134431c617e8180f3852ed31aab6ea1 | Python | abhinavgunwant/hackerrank-solutions | /Tutorials/10 Days of Statistics/Day 0 - Mean, Median, and Mode/solution_numpy.py | UTF-8 | 426 | 3.15625 | 3 | [
"MIT"
] | permissive | #### Note: This example uses numpy! It would be better to
#### consider the solution.py example for learning
#### perspective. This example here is to demonstrate
#### the ease with which programs can be written using
#### numpy...
import numpy
from scipy import stats
n = int(input())
xArr = [int(i) for i in input().split()]
X = numpy.array(xArr)
print(numpy.mean(X))
print(numpy.median(X))
print(stats.mode(X).mode[0]) | true |
d8cd2874dd758f7280e8be05e9ad5c58a3ae4476 | Python | aCoffeeYin/pyreco | /repoData/serkanyersen-underscore.py/allPythonContent.py | UTF-8 | 97,163 | 3.046875 | 3 | [] | no_license | __FILENAME__ = underscore
#!/usr/bin/env python
import inspect
from types import *
import re
import functools
import random
import time
from threading import Timer
class _IdCounter(object):
""" A Global Dictionary for uniq IDs
"""
count = 0
pass
class __(object):
"""
Use this class to alter __repr__ of
underscore object. So when you are using
it on your project it will make sense
"""
def __init__(self, repr, func):
self._repr = repr
self._func = func
functools.update_wrapper(self, func)
def __call__(self, *args, **kw):
return self._func(*args, **kw)
def __repr__(self):
return self._repr(self._func)
def u_withrepr(reprfun):
""" Decorator to rename a function
"""
def _wrap(func):
return __(reprfun, func)
return _wrap
@u_withrepr(lambda x: "<Underscore Object>")
def _(obj):
"""
_ function, which creates an instance of the underscore object,
We will also assign all methods of the underscore class as a method
to this function so that it will be usable as a static object
"""
return underscore(obj)
class underscore(object):
"""
Instead of creating a class named _ (underscore) I created underscore
So I can use _ function both statically and dynamically just it
is in the original underscore
"""
object = None
""" Passed object
"""
VERSION = "0.1.6"
chained = False
""" If the object is in a chained state or not
"""
Null = "__Null__"
"""
Since we are working with the native types
I can't compare anything with None, so I use a Substitute type for checking
"""
_wrapped = Null
"""
When object is in chained state, This property will contain the latest
processed Value of passed object, I assign it no Null so I can check
against None results
"""
def __init__(self, obj):
""" Let there be light
"""
self.chained = False
self.object = obj
class Namespace(object):
""" For simulating full closure support
"""
pass
self.Namespace = Namespace
def __str__(self):
if self.chained is True:
return "Underscore chained instance"
else:
return "Underscore instance"
def __repr__(self):
if self.chained is True:
return "<Underscore chained instance>"
else:
return "<Underscore instance>"
@property
def obj(self):
"""
Returns passed object but if chain method is used
returns the last processed result
"""
if self._wrapped is not self.Null:
return self._wrapped
else:
return self.object
@obj.setter
def obj(self, value):
""" New style classes requires setters for @propert methods
"""
self.object = value
return self.object
def _wrap(self, ret):
"""
Returns result but ig chain method is used
returns the object itself so we can chain
"""
if self.chained:
self._wrapped = ret
return self
else:
return ret
@property
def _clean(self):
"""
creates a new instance for Internal use to prevent problems
caused by chaining
"""
return _(self.obj)
def _toOriginal(self, val):
""" Pitty attempt to convert itertools result into a real object
"""
if self._clean.isTuple():
return tuple(val)
elif self._clean.isList():
return list(val)
elif self._clean.isDict():
return dict(val)
else:
return val
"""
Collection Functions
"""
def each(self, func):
"""
iterates through each item of an object
:Param: func iterator function
"""
if self._clean.isTuple() or self._clean.isList():
for index, value in enumerate(self.obj):
r = func(value, index, self.obj)
if r is "breaker":
break
else:
for index, key in enumerate(self.obj):
r = func(self.obj[key], key, self.obj, index)
if r is "breaker":
break
return self._wrap(self)
forEach = each
def map(self, func):
""" Return the results of applying the iterator to each element.
"""
ns = self.Namespace()
ns.results = []
def by(value, index, list, *args):
ns.results.append(func(value, index, list))
_(self.obj).each(by)
return self._wrap(ns.results)
collect = map
def reduce(self, func, memo=None):
"""
**Reduce** builds up a single result from a list of values,
aka `inject`, or foldl
"""
if memo is None:
memo = []
ns = self.Namespace()
ns.initial = True # arguments.length > 2
ns.memo = memo
obj = self.obj
def by(value, index, *args):
if not ns.initial:
ns.memo = value
ns.initial = True
else:
ns.memo = func(ns.memo, value, index)
_(obj).each(by)
return self._wrap(ns.memo)
foldl = inject = reduce
def reduceRight(self, func):
""" The right-associative version of reduce, also known as `foldr`.
"""
#foldr = lambda f, i: lambda s: reduce(f, s, i)
x = self.obj[:]
x.reverse()
return self._wrap(functools.reduce(func, x))
foldr = reduceRight
def find(self, func):
"""
Return the first value which passes a truth test.
Aliased as `detect`.
"""
self.ftmp = None
def test(value, index, list):
if func(value, index, list) is True:
self.ftmp = value
return True
self._clean.any(test)
return self._wrap(self.ftmp)
detect = find
def filter(self, func):
""" Return all the elements that pass a truth test.
"""
return self._wrap(list(filter(func, self.obj)))
select = filter
def reject(self, func):
""" Return all the elements for which a truth test fails.
"""
return self._wrap(list(filter(lambda val: not func(val), self.obj)))
def all(self, func=None):
""" Determine whether all of the elements match a truth test.
"""
if func is None:
func = lambda x, *args: x
self.altmp = True
def testEach(value, index, *args):
if func(value, index, *args) is False:
self.altmp = False
self._clean.each(testEach)
return self._wrap(self.altmp)
every = all
def any(self, func=None):
"""
Determine if at least one element in the object
matches a truth test.
"""
if func is None:
func = lambda x, *args: x
self.antmp = False
def testEach(value, index, *args):
if func(value, index, *args) is True:
self.antmp = True
return "breaker"
self._clean.each(testEach)
return self._wrap(self.antmp)
some = any
def include(self, target):
"""
Determine if a given value is included in the
array or object using `is`.
"""
if self._clean.isDict():
return self._wrap(target in self.obj.values())
else:
return self._wrap(target in self.obj)
contains = include
def invoke(self, method, *args):
""" Invoke a method (with arguments) on every item in a collection.
"""
def inv(value, *ar):
if (
_(method).isFunction() or
_(method).isLambda() or
_(method).isMethod()
):
return method(value, *args)
else:
return getattr(value, method)(*args)
return self._wrap(self._clean.map(inv))
def pluck(self, key):
"""
Convenience version of a common use case of
`map`: fetching a property.
"""
return self._wrap([x.get(key) for x in self.obj])
def where(self, attrs=None, first=False):
"""
Convenience version of a common use case of `filter`:
selecting only objects
containing specific `key:value` pairs.
"""
if attrs is None:
return None if first is True else []
method = _.find if first else _.filter
def by(val, *args):
for key, value in attrs.items():
try:
if attrs[key] != val[key]:
return False
except KeyError:
return False
return True
return self._wrap(method(self.obj, by))
def findWhere(self, attrs=None):
"""
Convenience version of a common use case of `find`:
getting the first object
containing specific `key:value` pairs.
"""
return self._wrap(self._clean.where(attrs, True))
def max(self):
""" Return the maximum element or (element-based computation).
"""
if(self._clean.isDict()):
return self._wrap(list())
return self._wrap(max(self.obj))
def min(self):
""" Return the minimum element (or element-based computation).
"""
if(self._clean.isDict()):
return self._wrap(list())
return self._wrap(min(self.obj))
def shuffle(self):
""" Shuffle an array.
"""
if(self._clean.isDict()):
return self._wrap(list())
cloned = self.obj[:]
random.shuffle(cloned)
return self._wrap(cloned)
def sortBy(self, val=None):
""" Sort the object's values by a criterion produced by an iterator.
"""
if val is not None:
if _(val).isString():
return self._wrap(sorted(self.obj, key=lambda x,
*args: x.get(val)))
else:
return self._wrap(sorted(self.obj, key=val))
else:
return self._wrap(sorted(self.obj))
def _lookupIterator(self, val):
""" An internal function to generate lookup iterators.
"""
if val is None:
return lambda el, *args: el
return val if _.isCallable(val) else lambda obj, *args: obj[val]
def _group(self, obj, val, behavior):
""" An internal function used for aggregate "group by" operations.
"""
ns = self.Namespace()
ns.result = {}
iterator = self._lookupIterator(val)
def e(value, index, *args):
key = iterator(value, index)
behavior(ns.result, key, value)
_.each(obj, e)
if len(ns.result) == 1:
try:
return ns.result[0]
except KeyError:
return list(ns.result.values())[0]
return ns.result
def groupBy(self, val):
    """
    Groups the object's values by a criterion. Pass either a string
    attribute to group by, or a function that returns the criterion.
    """
    # Each key accumulates a list of all matching values.
    def by(result, key, value):
        if key not in result:
            result[key] = []
        result[key].append(value)
    res = self._group(self.obj, val, by)
    return self._wrap(res)

def indexBy(self, val=None):
    """
    Indexes the object's values by a criterion, similar to
    `groupBy`, but for when you know that your index values will be unique.
    """
    if val is None:
        val = lambda *args: args[0]

    # Later values silently overwrite earlier ones for a duplicate key.
    def by(result, key, value):
        result[key] = value
    res = self._group(self.obj, val, by)
    return self._wrap(res)

def countBy(self, val):
    """
    Counts instances of an object that group by a certain criterion. Pass
    either a string attribute to count by, or a function that returns the
    criterion.
    """
    def by(result, key, value):
        if key not in result:
            result[key] = 0
        result[key] += 1
    res = self._group(self.obj, val, by)
    return self._wrap(res)
def sortedIndex(self, obj, iterator=lambda x: x):
    """Binary-search the smallest index at which *obj* could be inserted
    into the (already sorted) wrapped array while keeping it ordered.

    *iterator* extracts the comparison key from each element.
    """
    target = iterator(obj)
    lo, hi = 0, len(self.obj)
    while lo < hi:
        mid = (lo + hi) // 2
        if iterator(self.obj[mid]) < target:
            lo = mid + 1
        else:
            hi = mid
    return self._wrap(lo)
def toArray(self):
    """ Safely convert anything iterable into a real, live array.
    """
    return self._wrap(list(self.obj))

def size(self):
    """ Return the number of elements in an object.
    """
    # Relies on len(); the wrapped object must be sized.
    return self._wrap(len(self.obj))
def first(self, n=1):
    """
    Get the first element of an array. Passing **n** will return the
    first N values in the array. Aliased as `head` and `take`.
    A single result is unwrapped from its list.
    """
    res = self.obj[0:n]
    # BUG FIX: the original tested `len(res) is 1` -- an identity
    # comparison that only works through CPython small-int caching and is
    # a SyntaxWarning on modern Python. Use equality.
    if len(res) == 1:
        res = res[0]
    return self._wrap(res)
head = take = first
def initial(self, n=1):
    """Return everything but the last *n* entries of the wrapped array."""
    return self._wrap(self.obj[0:-n])
def last(self, n=1):
    """
    Get the last element of an array. Passing **n** will return the last N
    values in the array. A single result is unwrapped from its list.
    """
    res = self.obj[-n:]
    # BUG FIX: as in `first`, replace the fragile `len(res) is 1` identity
    # check with equality.
    if len(res) == 1:
        res = res[0]
    return self._wrap(res)
def rest(self, n=1):
    """
    Returns everything but the first entry of the array. Aliased as `tail`.
    Especially useful on the arguments object.
    Passing an **index** will return the rest of the values in the
    array from that index onward.
    The **guard** check allows it to work with `_.map`.
    """
    return self._wrap(self.obj[n:])
tail = rest

def compact(self):
    """ Trim out all falsy values from an array.
    """
    # Keeps only truthy elements; order is preserved.
    return self._wrap(self._clean.filter(lambda x: x))
def _flatten(self, input, shallow=False, output=None):
    # Internal recursive worker for `flatten`. When *shallow* is true, one
    # level of nesting is concatenated; otherwise recursion flattens fully.
    ns = self.Namespace()
    ns.output = output
    if ns.output is None:
        ns.output = []

    def by(value, *args):
        if _.isList(value) or _.isTuple(value):
            if shallow:
                # NOTE(review): rebinds ns.output to a new list rather than
                # extending in place, so a caller-supplied *output* list is
                # not mutated on this branch -- only the return value is
                # reliable. Confirm before relying on the out-param.
                ns.output = ns.output + value
            else:
                self._flatten(value, shallow, ns.output)
        else:
            ns.output.append(value)
    _.each(input, by)
    return ns.output

def flatten(self, shallow=None):
    """ Return a completely flattened version of an array.
    """
    return self._wrap(self._flatten(self.obj, shallow))
def without(self, *values):
    """
    Return a version of the array (or dict, by key) that does not contain
    the specified value(s). Comparison uses object identity, via indexOf.
    """
    if self._clean.isDict():
        newlist = {}
        for i, k in enumerate(self.obj):
            # BUG FIX: the original called `newlist.set(k, ...)`, but dicts
            # have no `set` method, so this branch always raised
            # AttributeError. Use item assignment instead.
            # (indexOf is used deliberately to compare by identity.)
            if _(values).indexOf(k) == -1:
                newlist[k] = self.obj[k]
    else:
        newlist = []
        for i, v in enumerate(self.obj):
            # `== -1` replaces the fragile `is -1` int-identity check.
            if _(values).indexOf(v) == -1:
                newlist.append(v)
    return self._wrap(newlist)
def partition(self, predicate=None):
    """Split the wrapped array into two lists: [passing, failing],
    according to *predicate* (normalized via _lookupIterator).
    """
    predicate = self._lookupIterator(predicate)
    passing = []
    failing = []

    def sort_into(elem, index, *args):
        bucket = passing if predicate(elem) else failing
        bucket.append(elem)
    _.each(self.obj, sort_into)
    return self._wrap([passing, failing])
def uniq(self, isSorted=False, iterator=None):
    """
    Produce a duplicate-free version of the array. If the array has already
    been sorted, you have the option of using a faster algorithm.
    Aliased as `unique`. *iterator* maps elements to the comparison key;
    the returned values are the original (unmapped) elements.
    """
    ns = self.Namespace()
    ns.results = []
    ns.array = self.obj
    initial = self.obj
    if iterator is not None:
        initial = _(ns.array).map(iterator)

    # Reduce over the (possibly mapped) keys; `memo` tracks keys seen so
    # far while ns.results collects the corresponding original elements.
    def by(memo, value, index):
        if ((_.last(memo) != value or
            not len(memo)) if isSorted else not _.include(memo, value)):
            memo.append(value)
            ns.results.append(ns.array[index])
        return memo
    ret = _.reduce(initial, by)
    return self._wrap(ret)
    # seen = set()
    # seen_add = seen.add
    # ret = [x for x in seq if x not in seen and not seen_add(x)]
    # return self._wrap(ret)
unique = uniq
def union(self, *args):
    """Distinct elements from the wrapped array plus every array in *args*,
    in first-seen order (shallow flatten, then uniq).
    """
    arrays = [self.obj]
    arrays.extend(args)
    flattened = self._flatten(arrays, True, [])
    return self._wrap(_.uniq(flattened))
def intersection(self, *args):
    """
    Produce an array that contains every item shared between all the
    passed-in arrays.
    """
    # NOTE(review): the first branch assumes "list of ints" vs "first
    # element is itself a sequence" (e.g. an accidentally nested tuple from
    # a trailing comma). Set-based, so the result order and duplicates of
    # the input are not preserved -- confirm callers tolerate this.
    if type(self.obj[0]) is int:
        a = self.obj
    else:
        a = tuple(self.obj[0])
    setobj = set(a)
    for i, v in enumerate(args):
        setobj = setobj & set(args[i])
    return self._wrap(list(setobj))
def difference(self, *args):
    """Elements of the wrapped array that appear in none of the arrays in
    *args* (set-based; order/duplicates not guaranteed).
    """
    remaining = set(self.obj)
    for other in args:
        remaining = remaining - set(other)
    return self._wrap(self._clean._toOriginal(remaining))
def zip(self, *args):
    """
    Zip together multiple lists into a single array -- elements that share
    an index go together. Shorter lists are padded in place with None up
    to the length of the longest list.
    """
    args = list(args)
    args.insert(0, self.obj)
    maxLen = _(args).chain().collect(lambda x, *args: len(x)).max().value()
    for i, v in enumerate(args):
        l = len(args[i])
        if l < maxLen:
            # BUG FIX: removed a stray no-op expression statement
            # (`args[i]` on its own line) left in the original.
            for x in range(maxLen - l):
                args[i].append(None)
    return self._wrap(zip(*args))
def zipObject(self, values):
    """Pair the wrapped list of keys with *values* into a single dict.

    Raises IndexError if *values* is shorter than the key list.
    """
    keys = self.obj
    result = {}
    for index in range(len(keys)):
        result[keys[index]] = values[index]
    return self._wrap(result)
def indexOf(self, item, isSorted=False):
    """
    Return the position of the first occurrence of an
    item in an array, or -1 if the item is not included in the array.
    Comparison is by identity (`is`), which `without` relies on.
    """
    array = self.obj
    ret = -1
    # Non-sequences (including None) report "not found".
    if not (self._clean.isList() or self._clean.isTuple()):
        return self._wrap(-1)
    if isSorted:
        # Binary search for the insertion point, then verify identity.
        i = _.sortedIndex(array, item)
        ret = i if array[i] is item else -1
    else:
        i = 0
        l = len(array)
        while i < l:
            if array[i] is item:
                return self._wrap(i)
            i += 1
    return self._wrap(ret)

def lastIndexOf(self, item):
    """
    Return the position of the last occurrence of an
    item in an array, or -1 if the item is not included in the array.
    Identity comparison, scanning from the end.
    """
    array = self.obj
    i = len(array) - 1
    if not (self._clean.isList() or self._clean.isTuple()):
        return self._wrap(-1)
    while i > -1:
        if array[i] is item:
            return self._wrap(i)
        i -= 1
    return self._wrap(-1)
def range(self, *args):
    """Generate an integer progression; the wrapped value becomes the
    first argument to the builtin range().
    """
    full_args = [self.obj]
    full_args.extend(args)
    return self._wrap(range(*full_args))
"""
Function functions
"""
def bind(self, context):
    """
    Create a function bound to a given object (assigning `this`,
    and arguments, optionally).
    Binding with arguments is also known as `curry`.
    """
    # Python has no `this` rebinding; this is a deliberate no-op kept for
    # underscore.js API parity.
    return self._wrap(self.obj)
curry = bind

def partial(self, *args):
    """
    Partially apply a function by creating a version that has had some of
    its arguments pre-filled, without changing its dynamic `this` context.
    """
    def part(*args2):
        # Pre-filled positionals come first, call-time positionals after.
        args3 = args + args2
        return self.obj(*args3)
    return self._wrap(part)

def bindAll(self, *args):
    """
    Bind all of an object's methods to that object.
    Useful for ensuring that all callbacks defined on an
    object belong to it.
    """
    # No-op in Python (bound methods already close over their instance).
    return self._wrap(self.obj)
def memoize(self, hasher=None):
    """ Memoize an expensive function by storing its results.
    """
    # Cache lives on a per-call Namespace; unbounded growth, not
    # thread-safe. *hasher* maps the positional args to a cache key
    # (identity by default -- kwargs are NOT part of the key).
    ns = self.Namespace()
    ns.memo = {}
    if hasher is None:
        hasher = lambda x: x

    def memoized(*args, **kwargs):
        key = hasher(*args)
        if key not in ns.memo:
            ns.memo[key] = self.obj(*args, **kwargs)
        return ns.memo[key]
    return self._wrap(memoized)

def delay(self, wait, *args):
    """
    Delays a function for the given number of milliseconds, and then calls
    it with the arguments supplied.
    """
    def call_it():
        self.obj(*args)
    # *wait* is given in milliseconds; threading.Timer wants seconds.
    t = Timer((float(wait) / float(1000)), call_it)
    t.start()
    return self._wrap(self.obj)

def defer(self, *args):
    """
    Defers a function, scheduling it to run after
    the current call stack has cleared.
    """
    # I know! this isn't really a defer in python. I'm open to suggestions
    return self.delay(1, *args)

def throttle(self, wait):
    """
    Returns a function, that, when invoked, will only be triggered
    at most once during a given window of time (*wait* milliseconds).
    """
    ns = self.Namespace()
    ns.timeout = None
    ns.throttling = None
    ns.more = None
    ns.result = None

    def done():
        ns.more = ns.throttling = False
    whenDone = _.debounce(done, wait)
    wait = (float(wait) / float(1000))

    def throttled(*args, **kwargs):
        def later():
            ns.timeout = None
            if ns.more:
                # A call arrived during the window: run one trailing call.
                self.obj(*args, **kwargs)
            whenDone()
        if not ns.timeout:
            ns.timeout = Timer(wait, later)
            ns.timeout.start()
        if ns.throttling:
            ns.more = True
        else:
            ns.throttling = True
            ns.result = self.obj(*args, **kwargs)
        whenDone()
        return ns.result
    return self._wrap(throttled)

# https://gist.github.com/2871026
def debounce(self, wait, immediate=None):
    """
    Returns a function, that, as long as it continues to be invoked,
    will not be triggered. The function will be called after it stops
    being called for N milliseconds. If `immediate` is passed, trigger
    the function on the leading edge, instead of the trailing.
    """
    # NOTE(review): `immediate` is accepted but never consulted below --
    # leading-edge mode appears unimplemented. Confirm before relying on it.
    wait = (float(wait) / float(1000))

    def debounced(*args, **kwargs):
        def call_it():
            self.obj(*args, **kwargs)
        try:
            # Each new call cancels the pending timer, restarting the wait.
            debounced.t.cancel()
        except(AttributeError):
            pass
        debounced.t = Timer(wait, call_it)
        debounced.t.start()
    return self._wrap(debounced)

def once(self):
    """
    Returns a function that will be executed at most one time,
    no matter how often you call it. Useful for lazy initialization.
    """
    ns = self.Namespace()
    ns.memo = None
    ns.run = False

    def work_once(*args, **kwargs):
        if ns.run is False:
            ns.memo = self.obj(*args, **kwargs)
            ns.run = True
        # Subsequent calls return the first invocation's result.
        return ns.memo
    return self._wrap(work_once)
def wrap(self, wrapper):
    """
    Returns the first function passed as an argument to the second,
    allowing you to adjust arguments, run code before and after, and
    conditionally execute the original function.
    """
    def wrapped(*args, **kwargs):
        if kwargs:
            # When kwargs are present, the wrapped function is passed under
            # the reserved keyword "object" instead of positionally.
            kwargs["object"] = self.obj
        else:
            args = list(args)
            args.insert(0, self.obj)
        return wrapper(*args, **kwargs)
    return self._wrap(wrapped)

def compose(self, *args):
    """
    Returns a function that is the composition of a list of functions, each
    consuming the return value of the function that follows.
    """
    args = list(args)

    def composed(*ar, **kwargs):
        # The wrapped function runs first; *args are then applied in order.
        lastRet = self.obj(*ar, **kwargs)
        for i in args:
            lastRet = i(lastRet)
        return lastRet
    return self._wrap(composed)

def after(self, func):
    """
    Returns a function that will only be executed after being
    called N times (N = the wrapped value).
    """
    ns = self.Namespace()
    ns.times = self.obj
    if ns.times <= 0:
        # NOTE(review): returns func's *result* unwrapped here, unlike the
        # wrapped callable below -- presumably intentional; confirm.
        return func()

    def work_after(*args):
        if ns.times <= 1:
            return func(*args)
        ns.times -= 1
    return self._wrap(work_after)
"""
Object Functions
"""
def keys(self):
    """ Retrieve the names of an object's properties.
    """
    return self._wrap(self.obj.keys())

def values(self):
    """ Retrieve the values of an object's properties.
    """
    return self._wrap(self.obj.values())

def pairs(self):
    """ Convert an object into a list of `[key, value]` pairs.
    """
    keys = self._clean.keys()
    pairs = []
    for key in keys:
        pairs.append([key, self.obj[key]])
    return self._wrap(pairs)

def invert(self):
    """
    Invert the keys and values of an object.
    The values must be serializable (hashable); duplicate values collapse,
    keeping the last key seen.
    """
    keys = self._clean.keys()
    inverted = {}
    for key in keys:
        inverted[self.obj[key]] = key
    return self._wrap(inverted)

def functions(self):
    """ Return a sorted list of the function names available on the object.
    """
    names = []
    for i, k in enumerate(self.obj):
        if _(self.obj[k]).isCallable():
            names.append(k)
    return self._wrap(sorted(names))
methods = functions
def extend(self, *args):
    """
    Extend a given object with all the properties in
    passed-in object(s). Mutates the wrapped dict in place; later sources
    overwrite earlier keys.
    """
    args = list(args)
    for i in args:
        self.obj.update(i)
    return self._wrap(self.obj)

def pick(self, *args):
    """
    Return a copy of the object only containing the
    whitelisted properties. Key arguments may be given flat or nested in
    lists (shallow-flattened first).
    """
    ns = self.Namespace()
    ns.result = {}

    def by(key, *args):
        if key in self.obj:
            ns.result[key] = self.obj[key]
    _.each(self._flatten(args, True, []), by)
    return self._wrap(ns.result)

def omit(self, *args):
    # Inverse of `pick`: copy of the dict without the blacklisted keys.
    copy = {}
    keys = _(args).flatten()
    for i, key in enumerate(self.obj):
        if not _.include(keys, key):
            copy[key] = self.obj[key]
    return self._wrap(copy)
def defaults(self, *args):
    """
    Fill in missing keys of the wrapped dict from the dicts in *args*.
    Existing keys are never overwritten; earlier sources win.
    Mutates the wrapped dict in place.
    """
    # BUG FIX: the original wrote `ns = self.Namespace` (no call), storing
    # state on the shared class object instead of a fresh instance and
    # leaking it between invocations. Instantiate the namespace.
    ns = self.Namespace()
    ns.obj = self.obj

    def by(source, *ar):
        for i, prop in enumerate(source):
            if prop not in ns.obj:
                ns.obj[prop] = source[prop]
    _.each(args, by)
    return self._wrap(ns.obj)
def clone(self):
    """ Create a (shallow-cloned) duplicate of an object.
    """
    # Local import keeps the dependency out of module load.
    import copy
    return self._wrap(copy.copy(self.obj))

def tap(self, interceptor):
    """
    Invokes interceptor with the obj, and then returns obj.
    The primary purpose of this method is to "tap into" a method chain, in
    order to perform operations on intermediate results within the chain.
    """
    interceptor(self.obj)
    return self._wrap(self.obj)

def isEqual(self, match):
    """ Perform a deep comparison to check if two objects are equal.
    """
    # Python's == already recurses into containers, so delegate to it.
    return self._wrap(self.obj == match)
def isEmpty(self):
    """
    Is a given array, string, or object empty?
    An "empty" object has no enumerable own-properties. Whitespace-only
    strings count as empty. NOTE: None returns a bare True (unwrapped),
    matching the original behavior.
    """
    if self.obj is None:
        return True
    if self._clean.isString():
        # BUG FIX: the original used `self.obj.strip() is ""` -- identity
        # comparison with a string literal that only works via CPython
        # interning and raises SyntaxWarning on modern Python.
        ret = self.obj.strip() == ""
    elif self._clean.isDict():
        ret = len(self.obj.keys()) == 0
    else:
        ret = len(self.obj) == 0
    return self._wrap(ret)
# --- Type predicates -------------------------------------------------------
# NOTE(review): several predicates below reference Python-2-only names
# (`long`, `unicode`, `UnboundMethodType`, `XRangeType`, `BufferType`,
# `DictProxyType`, `InstanceType`). On Python 3 those raise NameError when
# the predicate is *called*; they are kept for py2 compatibility.
def isElement(self):
    """ No use in python
    """
    return self._wrap(False)

def isDict(self):
    """ Check if given object is a dictionary
    """
    return self._wrap(type(self.obj) is dict)

def isTuple(self):
    """ Check if given object is a tuple
    """
    return self._wrap(type(self.obj) is tuple)

def isList(self):
    """ Check if given object is a list
    """
    return self._wrap(type(self.obj) is list)

def isNone(self):
    """ Check if the given object is None
    """
    return self._wrap(self.obj is None)

def isType(self):
    """ Check if the given object is a type
    """
    return self._wrap(type(self.obj) is type)

def isBoolean(self):
    """ Check if the given object is a boolean
    """
    return self._wrap(type(self.obj) is bool)
isBool = isBoolean

def isInt(self):
    """ Check if the given object is an int
    """
    return self._wrap(type(self.obj) is int)

# :DEPRECATED: Python 2 only.
# 3 removes this.
def isLong(self):
    """ Check if the given object is a long
    """
    return self._wrap(type(self.obj) is long)

def isFloat(self):
    """ Check if the given object is a float
    """
    return self._wrap(type(self.obj) is float)

def isComplex(self):
    """ Check if the given object is a complex
    """
    return self._wrap(type(self.obj) is complex)

def isString(self):
    """ Check if the given object is a string
    """
    return self._wrap(type(self.obj) is str)

def isUnicode(self):
    """ Check if the given object is a unicode string
    """
    return self._wrap(type(self.obj) is unicode)

def isCallable(self):
    """ Check if the given object is any of the function types
    """
    return self._wrap(callable(self.obj))

def isFunction(self):
    """ Check if the given object is FunctionType
    """
    return self._wrap(type(self.obj) is FunctionType)

def isLambda(self):
    """ Check if the given object is LambdaType
    """
    # LambdaType is an alias of FunctionType, so this matches plain
    # functions too.
    return self._wrap(type(self.obj) is LambdaType)

def isGenerator(self):
    """ Check if the given object is GeneratorType
    """
    return self._wrap(type(self.obj) is GeneratorType)

def isCode(self):
    """ Check if the given object is CodeType
    """
    return self._wrap(type(self.obj) is CodeType)

def isClass(self):
    """ Check if the given object is ClassType
    """
    return self._wrap(inspect.isclass(self.obj))

# :DEPRECATED: Python 2 only.
# 3 removes this.
def isInstance(self):
    """ Check if the given object is InstanceType
    """
    return self._wrap(type(self.obj) is InstanceType)

def isMethod(self):
    """ Check if the given object is MethodType
    """
    return self._wrap(inspect.ismethod(self.obj))

# :DEPRECATED: Python 2 only.
# 3 removes this.
def isUnboundMethod(self):
    """ Check if the given object is UnboundMethodType
    """
    return self._wrap(type(self.obj) is UnboundMethodType)

def isBuiltinFunction(self):
    """ Check if the given object is BuiltinFunctionType
    """
    return self._wrap(type(self.obj) is BuiltinFunctionType)

def isBuiltinMethod(self):
    """ Check if the given object is BuiltinMethodType
    """
    return self._wrap(type(self.obj) is BuiltinMethodType)

def isModule(self):
    """ Check if the given object is ModuleType
    """
    return self._wrap(type(self.obj) is ModuleType)

def isFile(self):
    """ Check if the given object is a file
    """
    # py2 has the `file` builtin; py3 falls back to io.IOBase.
    try:
        filetype = file
    except NameError:
        filetype = io.IOBase
    return self._wrap(type(self.obj) is filetype)

# :DEPRECATED: Python 2 only.
# 3 removes this.
def isXRange(self):
    """ Check if the given object is XRangeType
    """
    return self._wrap(type(self.obj) is XRangeType)

def isSlice(self):
    """ Check if the given object is SliceType
    """
    # NOTE(review): `type(slice)` is `type`, not the slice type, so this
    # actually tests "is a class" -- likely meant `is slice`. Confirm.
    return self._wrap(type(self.obj) is type(slice))

def isEllipsis(self):
    """ Check if the given object is EllipsisType
    """
    return self._wrap(type(self.obj) is type(Ellipsis))

def isTraceback(self):
    """ Check if the given object is TracebackType
    """
    return self._wrap(type(self.obj) is TracebackType)

def isFrame(self):
    """ Check if the given object is FrameType
    """
    return self._wrap(type(self.obj) is FrameType)

# :DEPRECATED: Python 2 only.
# 3 uses memoryview.
def isBuffer(self):
    """ Check if the given object is BufferType
    """
    return self._wrap(type(self.obj) is BufferType)

# :DEPRECATED: Python 2 only.
# 3 uses mappingproxy.
def isDictProxy(self):
    """ Check if the given object is DictProxyType
    """
    return self._wrap(type(self.obj) is DictProxyType)

def isNotImplemented(self):
    """ Check if the given object is NotImplementedType
    """
    return self._wrap(type(self.obj) is type(NotImplemented))

def isGetSetDescriptor(self):
    """ Check if the given object is GetSetDescriptorType
    """
    return self._wrap(type(self.obj) is GetSetDescriptorType)

def isMemberDescriptor(self):
    """ Check if the given object is MemberDescriptorType
    """
    return self._wrap(type(self.obj) is MemberDescriptorType)
def has(self, key):
    """
    Shortcut function for checking if an object has a
    given property directly on itself (in other words, not on a prototype).
    """
    # NOTE(review): uses hasattr, so for a wrapped dict this checks
    # attributes, not keys -- confirm that is the intended semantics.
    return self._wrap(hasattr(self.obj, key))

def join(self, glue=" "):
    """ Javascript's join implementation
    """
    # Elements are str()-ed first, so mixed-type lists join cleanly.
    j = glue.join([str(x) for x in self.obj])
    return self._wrap(j)

def constant(self, *args):
    """ High order of identity
    """
    # Returns a function that always yields the wrapped value.
    return self._wrap(lambda *args: self.obj)

def identity(self, *args):
    """ Keep the identity function around for default iterators.
    """
    return self._wrap(self.obj)

def property(self):
    """
    For easy creation of iterators that pull
    specific properties from objects.
    """
    return self._wrap(lambda obj, *args: obj[self.obj])

def matches(self):
    """
    Returns a predicate for checking whether an object has a given
    set of `key:value` pairs.
    """
    def ret(obj, *args):
        if self.obj is obj:
            return True  # avoid comparing an object to itself.
        # KeyError propagates if obj lacks one of the required keys.
        for key in self.obj:
            if self.obj[key] != obj[key]:
                return False
        return True
    return self._wrap(ret)
def times(self, func, *args):
    """
    Run *func* n times (n = the wrapped value), passing the 0-based
    iteration index. Returns the function wrapped.
    """
    n = self.obj
    i = 0
    # BUG FIX: the original looped `while n is not 0` -- an int identity
    # check relying on CPython small-int caching, which also never
    # terminates for negative n. `n > 0` is both correct and safe.
    while n > 0:
        n -= 1
        func(i)
        i += 1
    return self._wrap(func)
def now(self):
    # Current Unix timestamp in seconds (float), wrapped.
    return self._wrap(time.time())

def random(self, max_number=None):
    """ Return a random integer between min and max (inclusive).
    """
    # With one argument, the wrapped value is the upper bound and the
    # lower bound is 0; with two, wrapped value is the lower bound.
    min_number = self.obj
    if max_number is None:
        min_number = 0
        max_number = self.obj
    # NOTE(review): returned unwrapped (no _wrap), unlike sibling methods,
    # and randrange's upper bound is exclusive despite the docstring --
    # confirm before changing.
    return random.randrange(min_number, max_number)

def result(self, property, *args):
    """
    If the value of the named property is a function then invoke it;
    otherwise, return it.
    """
    if self.obj is None:
        return self._wrap(self.obj)
    # Attribute access first, dict lookup as fallback.
    if(hasattr(self.obj, property)):
        value = getattr(self.obj, property)
    else:
        value = self.obj.get(property)
    if _.isCallable(value):
        return self._wrap(value(*args))
    return self._wrap(value)

def mixin(self):
    """
    Add your own custom functions to the Underscore object, ensuring that
    they're correctly added to the OOP wrapper as well.
    """
    # Mutates the underscore class itself, then refreshes the static `_`
    # facade via makeStatic().
    methods = self.obj
    for i, k in enumerate(methods):
        setattr(underscore, k, methods[k])
    self.makeStatic()
    return self._wrap(self.obj)

def uniqueId(self, prefix=""):
    """
    Generate a unique integer id (unique within the entire client session).
    Useful for temporary DOM ids.
    """
    # Backed by the module-level _IdCounter singleton.
    _IdCounter.count += 1
    id = _IdCounter.count
    if prefix:
        return self._wrap(prefix + str(id))
    else:
        return self._wrap(id)
_html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def escape(self):
    """ Escape the wrapped string for HTML interpolation.
    """
    # '&' must be replaced first so the entities inserted afterwards are
    # not themselves re-escaped.
    self.obj = self.obj.replace("&", self._html_escape_table["&"])
    for i, k in enumerate(self._html_escape_table):
        v = self._html_escape_table[k]
        # BUG FIX: the original tested `k is not "&"` -- string identity
        # comparison that only works through interning and is a
        # SyntaxWarning on modern Python. Use inequality.
        if k != "&":
            self.obj = self.obj.replace(k, v)
    return self._wrap(self.obj)
def unescape(self):
    """
    Within an interpolation, evaluation, or escaping, remove HTML escaping
    that had been previously added. Inverse of `escape`; mutates the
    wrapped string in place.
    """
    for i, k in enumerate(self._html_escape_table):
        v = self._html_escape_table[k]
        self.obj = self.obj.replace(v, k)
    return self._wrap(self.obj)
"""
Template Code will be here
"""
templateSettings = {
"evaluate": r"<%([\s\S]+?)%>",
"interpolate": r"<%=([\s\S]+?)%>",
"escape": r"<%-([\s\S]+?)%>"
}
escapes = {
'\\': '\\',
"'": r"'",
"r": r'\r',
"n": r'\n',
"t": r'\t',
"u2028": r'\u2028',
"u2029": r'\u2029',
r'\\': '\\',
r"'": "'",
'br': "r",
'bn': "n",
'bt': "t",
'bu2028': "u2028",
'bu2029': "u2029"
}
def template(self, data=None, settings=None):
    """
    Python micro-templating, similar to John Resig's implementation.
    Underscore templating handles arbitrary delimiters, preserves
    whitespace, and correctly escapes quotes within interpolated code.

    Compiles the wrapped template string into Python source (built below
    line by line, with `indent()` tracking the generated code's nesting)
    and hands it to `create_function` for exec. If *data* is given the
    compiled template is rendered immediately; otherwise the callable is
    returned.
    """
    if settings is None:
        settings = {}
    ts = _.templateSettings
    _.defaults(ts, self.templateSettings)
    _.extend(settings, ts)
    # settings = {
    #     "interpolate": self.templateSettings.get('interpolate'),
    #     "evaluate": self.templateSettings.get('evaluate'),
    #     "escape": self.templateSettings.get('escape')
    # }
    _.extend(settings, {
        "escaper": r"\\|'|\r|\n|\t|\u2028|\u2029",
        "unescaper": r"\\(\\|'|r|n|t|u2028|u2029)"
    })
    src = self.obj
    #src = re.sub('"', r'\"', src)
    #src = re.sub(r'\\', r"\\", src)
    ns = self.Namespace()
    ns.indent_level = 1

    def unescape(code):
        # Reverse the escaping applied by `escapes` below so the embedded
        # Python fragments compile; keys come from the class-level
        # `escapes` table (whitespace escapes use 'b'-prefixed aliases
        # because '\n' etc. cannot round-trip through %r cleanly).
        def unescapes(matchobj):
            a = re.sub("^[\'\"]|[\'\"]$", "", ("%r" % matchobj.group(1)))
            # Python doesn't accept \n as a key
            if a == '\n':
                a = "bn"
            if a == '\r':
                a = "br"
            if a == '\t':
                a = "bt"
            if a == '\u2028':
                a = 'bu2028'
            if a == '\u2029':
                a = 'bu2029'
            return self.escapes[a]
        return re.sub(settings.get('unescaper'), unescapes, code)

    def escapes(matchobj):
        # Escape characters that would break the generated single-quoted
        # string literals.
        a = matchobj.group(0)
        # Python doesn't accept \n as a key
        if a == '\n':
            a = "bn"
        if a == '\r':
            a = "br"
        if a == '\t':
            a = "bt"
        if a == '\u2028':
            a = 'bu2028'
        if a == '\u2029':
            a = 'bu2029'
        return '\\' + self.escapes[a]

    def indent(n=None):
        # Adjust (optionally) and return the current generated-source
        # indentation prefix.
        if n is not None:
            ns.indent_level += n
        return "  " * ns.indent_level

    def interpolate(matchobj):
        # <%= expr %> -> append str(expr or '') to the output buffer.
        if getattr(str, 'decode', False):
            key = (matchobj.group(1).decode('string-escape')).strip()
        else:
            key = (bytes(matchobj.group(1), "utf-8").decode()).strip()
        return "' + str(" + unescape(key) + " or '') + '"

    def evaluate(matchobj):
        # <% stmt %> -> splice raw Python; "end" closes a block, a trailing
        # ':' opens one (indentation adjusted accordingly).
        if getattr(str, 'decode', False):
            code = (matchobj.group(1).decode('string-escape')).strip()
        else:
            code = (bytes(matchobj.group(1), "utf-8").decode()).strip()
        if code.startswith("end"):
            return "')\n" + indent(-1) + "ns.__p += ('"
        elif code.endswith(':'):
            return "')\n" + indent() + unescape(code) + \
                   "\n" + indent(+1) + "ns.__p += ('"
        else:
            return "')\n" + indent() + unescape(code) + \
                   "\n" + indent() + "ns.__p += ('"

    def escape(matchobj):
        # <%- expr %> -> interpolate with HTML escaping via _.escape.
        if getattr(str, 'decode', False):
            key = (matchobj.group(1).decode('string-escape')).strip()
        else:
            key = (bytes(matchobj.group(1), "utf-8").decode()).strip()
        return "' + _.escape(str(" + unescape(key) + " or '')) + '"
    source = indent() + 'class closure(object):\n    pass' + \
             '  # for full closure support\n'
    source += indent() + 'ns = closure()\n'
    source += indent() + "ns.__p = ''\n"
    #src = re.sub("^[\'\"]|[\'\"]$", "", ("%r" % src))
    src = re.sub(settings.get("escaper"), escapes, src)
    source += indent() + "ns.__p += ('" + \
              re.sub(settings.get('escape'), escape, src) + "')\n"
    source = re.sub(settings.get('interpolate'), interpolate, source)
    source = re.sub(settings.get('evaluate'), evaluate, source)
    if getattr(str, 'decode', False):
        source += indent() + 'return ns.__p.decode("string_escape")\n'
    else:
        source += indent() + 'return bytes(ns.__p, "utf-8").decode()\n'
    f = self.create_function(settings.get("variable")
                             or "obj=None", source)
    if data is not None:
        return f(data)
    return f
def create_function(self, args, source):
    """Compile generated template *source* into a callable wrapper.

    NOTE(review): the `exec(code) in globals(), locals()` line is leftover
    Python-2 statement syntax; on Python 3 it parses as a membership test
    on exec's None return and still happens to work -- confirm before
    refactoring. The compiled function is published through a `global
    func` stub.
    """
    source = "global func\ndef func(" + args + "):\n" + source + "\n"
    ns = self.Namespace()
    try:
        code = compile(source, '', 'exec')
        exec(code) in globals(), locals()
    except:
        print(source)
        raise Exception("template error")
    ns.func = func

    def _wrap(obj={"this": ""}):
        # Inject the render context into the function's globals, then call
        # it. (The mutable default is only read, never mutated.)
        for i, k in enumerate(obj):
            if getattr(ns.func, 'func_globals', False):
                ns.func.func_globals[k] = obj[k]
            else:
                ns.func.__globals__[k] = obj[k]
        return ns.func(obj)
    _wrap.source = source
    return _wrap
def chain(self):
    """ Add a "chain" function, which will delegate to the wrapper.
    """
    # Marks this instance as chained so subsequent calls keep returning
    # wrapped results until `.value()` is called.
    self.chained = True
    return self

def value(self):
    """ returns the object instead of instance
    """
    # Prefer the accumulated wrapped result when one exists.
    if self._wrapped is not self.Null:
        return self._wrapped
    else:
        return self.obj
@staticmethod
def makeStatic():
    """ Provide static access to underscore class
    """
    # Mirrors every underscore method onto the `_` singleton so that
    # _.map(obj, fn) works like _(obj).map(fn).
    p = lambda value: inspect.ismethod(value) or inspect.isfunction(value)
    for eachMethod in inspect.getmembers(underscore,
                                         predicate=p):
        m = eachMethod[0]
        if not hasattr(_, m):
            # `caller` captures the method name `a` per iteration,
            # avoiding the late-binding closure pitfall.
            def caller(a):
                def execute(*args):
                    # First positional arg becomes the wrapped object;
                    # the remainder are forwarded as method arguments.
                    if len(args) == 1:
                        r = getattr(underscore(args[0]), a)()
                    elif len(args) > 1:
                        rargs = args[1:]
                        r = getattr(underscore(args[0]), a)(*rargs)
                    else:
                        r = getattr(underscore([]), a)()
                    return r
                return execute
            _.__setattr__(m, caller(m))
# put the class itself as a parameter so that we can use it on outside
_.__setattr__("underscore", underscore)
_.templateSettings = {}
# Imediatelly create static object
underscore.makeStatic()
# The end
########NEW FILE########
__FILENAME__ = test_arrays
import unittest
from unittesthelper import init
init() # will let you import modules from upper folder
from src.underscore import _
class TestArrays(unittest.TestCase):
def test_first(self):
res = _([1, 2, 3, 4, 5]).first()
self.assertEqual(1, res, "first one item did not work")
res = _([1, 2, 3, 4, 5]).first(3)
self.assertEqual([1, 2, 3], res, "first multi item did not wok")
def test_initial(self):
res = _([1, 2, 3, 4, 5]).initial()
self.assertEqual([1, 2, 3, 4], res, "initial one item did not work")
res = _([1, 2, 3, 4, 5]).initial(3)
self.assertEqual([1, 2], res, "initial multi item did not wok")
def test_last(self):
res = _([1, 2, 3, 4, 5]).last()
self.assertEqual(5, res, "last one item did not work")
res = _([1, 2, 3, 4, 5]).last(3)
self.assertEqual([3, 4, 5], res, "last multi item did not wok")
def test_rest(self):
res = _([1, 2, 3, 4, 5]).rest()
self.assertEqual([2, 3, 4, 5], res, "rest one item did not work")
res = _([1, 2, 3, 4, 5]).rest(3)
self.assertEqual([4, 5], res, "rest multi item did not wok")
def test_compact(self):
res = _([False, 1, 0, "foo", None, -1]).compact()
self.assertEqual([1, "foo", -1], res, "compact did not work")
def test_flatten(self):
llist = [1, [2], [3, [[[4]]]]]
self.assertEqual(_.flatten(llist),
[1, 2, 3, 4], 'can flatten nested arrays')
self.assertEqual(_.flatten(llist, True),
[1, 2, 3, [[[4]]]], 'can shallowly'
' flatten nested arrays')
def test_uniq(self):
tlist = [1, 2, 1, 3, 1, 4]
self.assertEqual([1, 2, 3, 4], _.uniq(tlist),
'can find the unique values of an unsorted array')
tlist = [1, 1, 1, 2, 2, 3]
self.assertEqual([1, 2, 3], _.uniq(tlist, True),
'can find the unique values of a sorted array faster')
tlist = [{"name": 'moe'}, {"name": 'curly'},
{"name": 'larry'}, {"name": 'curly'}]
iterator = lambda value, *args: value.get('name')
self.assertEqual(
["moe", "curly", "larry"], _.uniq(tlist, False, iterator),
'can find the unique values of an array using a custom iterator')
tlist = [1, 2, 2, 3, 4, 4]
iterator = lambda value, *args: value + 1
self.assertEqual([2, 3, 4, 5], _.uniq(tlist, True, iterator),
'iterator works with sorted array')
def test_without(self):
tlist = [1, 2, 1, 0, 3, 1, 4]
self.assertEqual([2, 3, 4], _.without(tlist, 0, 1),
'can remove all instances of an object')
tlist = [{"one": 1}, {"two": 2}]
self.assertTrue(len(_.without(tlist, {"one": 1}))
== 2, 'uses real object identity for comparisons.')
self.assertTrue(len(_.without(tlist, tlist[0])) == 1, 'ditto.')
def test_intersection(self):
stooges = ['moe', 'curly', 'larry'],
leaders = ['moe', 'groucho']
self.assertEqual(['moe'], _.intersection(stooges, leaders),
'can take the set intersection of two string arrays')
self.assertEqual(
[1, 2], _.intersection([1, 2, 3], [101, 2, 1, 10], [2, 1]),
'can take the set intersection of two int arrays')
self.assertEqual(['moe'], _(stooges).intersection(leaders),
'can perform an OO-style intersection')
def test_union(self):
result = _.union([1, 2, 3], [2, 30, 1], [1, 40])
self.assertEqual([1, 2, 3, 30, 40], result,
'takes the union of a list of arrays')
result = _.union([1, 2, 3], [2, 30, 1], [1, 40, [1]])
self.assertEqual([1, 2, 3, 30, 40, [1]], result,
'takes the union of a list of nested arrays')
def test_difference(self):
result = _.difference([1, 2, 3], [2, 30, 40])
self.assertEqual([1, 3], result, 'takes the difference of two arrays')
result = _.difference([1, 2, 3, 4], [2, 30, 40], [1, 11, 111])
self.assertEqual([3, 4], result,
'takes the difference of three arrays')
def test_zip(self):
names = ['moe', 'larry', 'curly']
ages = [30, 40, 50]
leaders = [True]
stooges = list(_(names).zip(ages, leaders))
self.assertEqual("[('moe', 30, True), ('larry', 40, None),"
" ('curly', 50, None)]", str(
stooges), 'zipped together arrays of different lengths')
def test_zipObject(self):
result = _.zipObject(['moe', 'larry', 'curly'], [30, 40, 50])
shouldBe = {"moe": 30, "larry": 40, "curly": 50}
self.assertEqual(result, shouldBe,
"two arrays zipped together into an object")
def test_indexOf(self):
numbers = [1, 2, 3]
self.assertEqual(_.indexOf(numbers, 2), 1,
'can compute indexOf, even '
'without the native function')
self.assertEqual(_.indexOf(None, 2), -1, 'handles nulls properly')
numbers = [10, 20, 30, 40, 50]
num = 35
index = _.indexOf(numbers, num, True)
self.assertEqual(index, -1, '35 is not in the list')
numbers = [10, 20, 30, 40, 50]
num = 40
index = _.indexOf(numbers, num, True)
self.assertEqual(index, 3, '40 is in the list')
numbers = [1, 40, 40, 40, 40, 40, 40, 40, 50, 60, 70]
num = 40
index = _.indexOf(numbers, num, True)
self.assertEqual(index, 1, '40 is in the list')
def test_lastIndexOf(self):
numbers = [2, 1, 0, 1, 0, 0, 1, 0, 0, 0]
self.assertEqual(_.lastIndexOf(numbers, 1), 6,
'can compute lastIndexOf, '
'even without the native function')
self.assertEqual(_.lastIndexOf(numbers, 0), 9,
'lastIndexOf the other element')
self.assertEqual(_.lastIndexOf(numbers, 2), 0,
'lastIndexOf the other element')
self.assertEqual(_.indexOf(None, 2), -1, 'handles nulls properly')
    def test_range(self):
        # Exercises range() with zero/one/two/three arguments, including
        # negative steps and the Python-docs countdown example.
        self.assertEqual(
            list(_.range(0)), [], 'range with 0 as a first argument'
            ' generates an empty array')
        self.assertEqual(list(_.range(4)), [0, 1, 2, 3],
                         'range with a single positive argument generates'
                         ' an array of elements 0,1,2,...,n-1')
        self.assertEqual(list(_.range(5, 8)),
                         [5, 6, 7], 'range with two arguments a & b,'
                         ' a<b generates an array of elements '
                         ' a,a+1,a+2,...,b-2,b-1')
        self.assertEqual(list(_.range(8, 5)),
                         [], 'range with two arguments a & b, b<a'
                         ' generates an empty array')
        self.assertEqual(list(_.range(3, 10, 3)),
                         [3, 6, 9], 'range with three arguments a & b'
                         ' & c, c < b-a, a < b generates an array '
                         ' of elements a,a+c,a+2c,...,b - (multiplier of a) '
                         ' < c')
        self.assertEqual(list(_.range(3, 10, 15)),
                         [3], 'range with three arguments a & b &'
                         ' c, c > b-a, a < b generates an array with '
                         'a single element, equal to a')
        self.assertEqual(list(_.range(12, 7, -2)), [12, 10, 8],
                         'range with three arguments a & b & c, a'
                         ' > b, c < 0 generates an array of elements'
                         ' a,a-c,a-2c and ends with the number not less than b')
        self.assertEqual(list(_.range(0, -10, -1)),
                         [0, -1, -2, -3, -4, -5, -6, -7, -8, -9], 'final'
                         ' example in the Python docs')
# Allow direct execution; normally these tests are collected with
# `python -m unittest discover` from the unittests folder.
if __name__ == "__main__":
    print("run these tests by executing `python -m unittest"
          " discover` in unittests folder")
    unittest.main()
########NEW FILE########
__FILENAME__ = test_collections
import unittest
from unittesthelper import init
init() # will let you import modules from upper folder
from src.underscore import _
class TestCollections(unittest.TestCase):
    """Tests for the collection helpers of the underscore.py port:
    iteration (each/map/reduce), searching, grouping and sizing utilities.

    Every test goes through the public ``_`` entry point, either OO-style
    (``_(obj).fn()``) or statically (``_.fn(obj)``).
    """

    # shared accumulator mutated by the each/forEach callbacks below
    eachList = []

    def test_each_list(self):
        def eachTest(val, *args):
            self.eachList.append(val + 1)
        _([1, 2, 3, 4]).each(eachTest)
        self.assertEqual([2, 3, 4, 5], self.eachList,
                         "each for lists did not work for all")
        # test alias
        self.eachList = []
        _([1, 2, 3, 4]).forEach(eachTest)
        self.assertEqual([2, 3, 4, 5], self.eachList,
                         "forEach for lists did not work for all")

    # shared accumulator for the dict-iteration tests
    eachSet = set()

    def test_each_dict(self):
        # dict callbacks receive (value, key, ...)
        def eachTest(val, key, *args):
            self.eachSet.add(val)
            self.eachSet.add(key)
        _({"foo": "bar", "fizz": "buzz"}).each(eachTest)
        self.assertEqual({"foo", "bar", "fizz", "buzz"},
                         self.eachSet, "each for dicts did not work for all")
        # alias
        self.eachSet = set()
        _({"foo": "bar", "fizz": "buzz"}).forEach(eachTest)
        # NOTE(review): the two adjacent literals below concatenate to
        # "...didnot work for all" (missing space) — message-only typo.
        self.assertEqual({"foo", "bar", "fizz", "buzz"},
                         self.eachSet, "forEach for dicts did"
                         "not work for all")

    def test_map_list(self):
        def mapTest(val, *args):
            return val * 2
        map = _([1, 2, 3, 4]).map(mapTest)
        self.assertEqual([2, 4, 6, 8], map, "map for list did not work")
        # alias
        map = _([1, 2, 3, 4]).collect(mapTest)
        self.assertEqual([2, 4, 6, 8], map, "collect for list did not work")

    def test_map_dict(self):
        # mapping a dict yields values (order unspecified, hence set())
        def mapTest(val, key, *args):
            return val.upper()
        map = _({"foo": "bar", "bar": "foo"}).map(mapTest)
        self.assertEqual({"BAR", "FOO"}, set(map),
                         "map for dicts did not work")
        # alias
        map = _({"foo": "bar", "bar": "foo"}).collect(mapTest)
        self.assertEqual({"BAR", "FOO"}, set(map),
                         "collect for dicts did not work")

    def test_reduce(self):
        res = _([1, 2, 3, 4, 5, 6]).reduce(
            lambda sum, num, *args: sum + num, 0)
        self.assertEqual(21, res, "did not reduced correctly")
        # alias
        res = _([1, 2, 3, 4, 5, 6]).foldl(lambda sum, num, *args: sum + num, 0)
        self.assertEqual(21, res, "did not foldl correctly")
        # alias
        res = _([1, 2, 3, 4, 5, 6]).inject(
            lambda sum, num, *args: sum + num, 0)
        self.assertEqual(21, res, "did not inject correctly")

    def test_reduce_right(self):
        # reduceRight folds from the right, hence the reversed concatenation
        res = _(["foo", "bar", "baz"]).reduceRight(
            lambda sum, num, *args: sum + num)
        self.assertEqual("bazbarfoo", res, "did not reducedRight correctly")
        # alias
        res = _(["foo", "bar", "baz"]).foldr(lambda sum, num, *args: sum + num)
        self.assertEqual("bazbarfoo", res, "did not foldr correctly")

    def test_find(self):
        # find/detect return only the first matching element
        res = _([1, 2, 3, 4, 5]).find(lambda x, *args: x > 2)
        self.assertEqual(3, res, "find didn't work")
        # alias
        res = _([1, 2, 3, 4, 5]).detect(lambda x, *args: x > 2)
        self.assertEqual(3, res, "detect didn't work")

    def test_filter(self):
        res = _(["foo", "hello", "bar", "world"]
                ).filter(lambda x, *args: len(x) > 3)
        self.assertEqual(["hello", "world"], res, "filter didn't work")
        # alias
        res = _(["foo", "hello", "bar", "world"]
                ).select(lambda x, *args: len(x) > 3)
        self.assertEqual(["hello", "world"], res, "select didn't work")

    def test_reject(self):
        # reject is filter's complement
        res = _(["foo", "hello", "bar", "world"]
                ).reject(lambda x, *args: len(x) > 3)
        self.assertEqual(["foo", "bar"], res, "reject didn't work")

    def test_all(self):
        res = _([True, True, True, True]).all()
        self.assertTrue(res, "all was not true")
        res = _([True, True, False, True]).all()
        self.assertFalse(res, "all was not false")

    def test_any(self):
        res = _([False, False, False, True]).any()
        self.assertTrue(res, "any was not true")
        res = _([False, False, False, False]).any()
        self.assertFalse(res, "any was not false")

    def test_include(self):
        res = _(["hello", "world", "foo", "bar"]).include('foo')
        self.assertTrue(res, "include was not true")
        res = _(["hello", "world", "foo", "bar"]).include('notin')
        self.assertFalse(res, "include was not false")

    def test_include_dict(self):
        # for dicts, include() tests the *values*, not the keys
        res = _({"foo": "bar", "hello": "world"}).include('bar')
        self.assertTrue(res, "include was not true")
        res = _({"foo": "bar", "hello": "world"}).include('notin')
        self.assertFalse(res, "include was not false")

    def test_invoke(self):
        # invoke accepts either a callable or a method name
        res = _(["foo", "bar"]).invoke(lambda x, *args: x.upper())
        self.assertEqual(["FOO", "BAR"], res,
                         "invoke with lambda did not work")
        res = _(["foo", "bar"]).invoke("upper")
        self.assertEqual(["FOO", "BAR"], res, "invoke with name did not work")

    def test_pluck(self):
        res = _([{"name": "foo", "age": "29"}, {"name": "bar", "age": "39"},
                 {"name": "baz", "age": "49"}]).pluck('age')
        self.assertEqual(["29", "39", "49"], res, "pluck did not work")

    def test_min(self):
        res = _([5, 10, 15, 4, 8]).min()
        self.assertEqual(4, res, "min did not work")

    def test_max(self):
        res = _([5, 10, 15, 4, 8]).max()
        self.assertEqual(15, res, "max did not work")

    def test_sortBy(self):
        # sortBy accepts a key name, a key function, or nothing (natural sort)
        res = _([{'age': '59', 'name': 'foo'},
                 {'age': '39', 'name': 'bar'},
                 {'age': '49', 'name': 'baz'}]).sortBy('age')
        self.assertEqual([{'age': '39', 'name': 'bar'},
                          {'age': '49', 'name': 'baz'},
                          {'age': '59', 'name': 'foo'}], res,
                         "filter by key did not work")
        res = _([{'age': '59', 'name': 'foo'},
                 {'age': '39', 'name': 'bar'},
                 {'age': '49', 'name': 'baz'}]).sortBy(lambda x: x['age'])
        self.assertEqual(
            [{'age': '39', 'name': 'bar'}, {'age': '49', 'name': 'baz'},
             {'age': '59', 'name': 'foo'}], res,
            "filter by lambda did not work")
        res = _([50, 78, 30, 15, 90]).sortBy()
        self.assertEqual([15, 30, 50, 78, 90], res, "filter list did not work")

    def test_groupby(self):
        parity = _.groupBy([1, 2, 3, 4, 5, 6], lambda num, *args: num % 2)
        self.assertTrue(0 in parity and 1 in parity,
                        'created a group for each value')
        self.assertEqual(_(parity[0]).join(', '), '2, 4, 6',
                         'put each even number in the right group')
        self.assertEqual(_.groupBy([1], lambda num, *args: num), [1])
        llist = ["one", "two", "three", "four", "five",
                 "six", "seven", "eight", "nine", "ten"]
        grouped = _.groupBy(llist, lambda x, *args: len(x))
        self.assertEqual(_(grouped[3]).join(' '), 'one two six ten')
        self.assertEqual(_(grouped[4]).join(' '), 'four five nine')
        self.assertEqual(_(grouped[5]).join(' '), 'three seven eight')

    def test_countby(self):
        parity = _.countBy([1, 2, 3, 4, 5], lambda num, *args: num % 2 == 0)
        self.assertEqual(parity[True], 2)
        self.assertEqual(parity[False], 3)
        self.assertEqual(_.countBy([1], lambda num, *args: num), 1)
        llist = ["one", "two", "three", "four", "five",
                 "six", "seven", "eight", "nine", "ten"]
        grouped = _.countBy(llist, lambda x, *args: len(x))
        self.assertEqual(grouped[3], 4)
        self.assertEqual(grouped[4], 3)
        self.assertEqual(grouped[5], 3)

    def test_sortedindex(self):
        # sortedIndex: insertion point that keeps the list sorted
        numbers = [10, 20, 30, 40, 50]
        num = 35
        indexForNum = _.sortedIndex(numbers, num)
        self.assertEqual(3, indexForNum, '35 should be inserted at index 3')
        indexFor30 = _.sortedIndex(numbers, 30)
        self.assertEqual(2, indexFor30, '30 should be inserted at index 2')

    def test_shuffle(self):
        # NOTE(review): probabilistic — a shuffle may legitimately reproduce
        # the original order, so this assertion can fail spuriously (rarely).
        res = _([5, 10, 15, 4, 8]).shuffle()
        self.assertNotEqual([5, 10, 15, 4, 8], res,
                            "shuffled array was the same")

    def test_size(self):
        self.assertEqual(_.size({"one": 1, "two": 2, "three": 3}),
                         3, 'can compute the size of an object')
        self.assertEqual(_.size([1, 2, 3]), 3,
                         'can compute the size of an array')

    def test_where(self):
        # where() filters dicts by attribute equality; the third argument
        # switches between "all matches" (False) and "first match" (True)
        List = [{"a": 1, "b": 2}, {"a": 2, "b": 2},
                {"a": 1, "b": 3}, {"a": 1, "b": 4}]
        result = _.where(List, {"a": 1})
        self.assertEqual(_.size(result), 3)
        self.assertEqual(result[-1]['b'], 4)
        result = _.where(List, {"a": 1}, True)
        self.assertEqual(result["b"], 2)
        result = _.where(List, {"a": 1}, False)
        self.assertEqual(_.size(result), 3)

    def test_findWhere(self):
        List = [{"a": 1, "b": 2}, {"a": 2, "b": 2},
                {"a": 1, "b": 3}, {"a": 1, "b": 4}]
        result = _.findWhere(List, {"a": 1})
        self.assertEqual(result["a"], 1)
        self.assertEqual(result["b"], 2)
        result = _.findWhere(List, {"b": 4})
        self.assertEqual(result["a"], 1)
        self.assertEqual(result["b"], 4)
        # no match (and empty input) must yield None, not raise
        result = _.findWhere(List, {"c": 1})
        self.assertEqual(result, None)
        result = _.findWhere([], {"c": 1})
        self.assertEqual(result, None)

    def test_indexBy(self):
        # like groupBy, but each key maps to the *last* matching element
        parity = _.indexBy([1, 2, 3, 4, 5], lambda num, *args: num % 2 == 0)
        self.assertEqual(parity[True], 4)
        self.assertEqual(parity[False], 5)
        self.assertEqual(_.indexBy([1], lambda num, *args: num), 1)
        llist = ["one", "two", "three", "four", "five",
                 "six", "seven", "eight", "nine", "ten"]
        grouped = _.indexBy(llist, lambda x, *args: len(x))
        self.assertEqual(grouped[3], 'ten')
        self.assertEqual(grouped[4], 'nine')
        self.assertEqual(grouped[5], 'eight')
        array = [1, 2, 1, 2, 3]
        grouped = _.indexBy(array)
        self.assertEqual(grouped[1], 1)
        self.assertEqual(grouped[2], 2)
        self.assertEqual(grouped[3], 3)

    def test_partition(self):
        # `list` deliberately shadows the builtin here (kept from upstream)
        list = [0, 1, 2, 3, 4, 5]
        self.assertEqual(_.partition(list, lambda x, *args: x < 4),
                         [[0, 1, 2, 3], [4, 5]], 'handles bool return values')
        self.assertEqual(_.partition(list, lambda x, *args: x & 1),
                         [[1, 3, 5], [0, 2, 4]],
                         'handles 0 and 1 return values')
        self.assertEqual(_.partition(list, lambda x, *args: x - 3),
                         [[0, 1, 2, 4, 5], [3]],
                         'handles other numeric return values')
        self.assertEqual(
            _.partition(list, lambda x, *args: None if x > 1 else True),
            [[0, 1], [2, 3, 4, 5]], 'handles null return values')
        # Test an object
        result = _.partition({"a": 1, "b": 2, "c": 3}, lambda x, *args: x > 1)
        # Has to handle difference between python3 and python2
        self.assertTrue(
            (result == [[3, 2], [1]] or result == [[2, 3], [1]]),
            'handles objects')
        # Default iterator
        self.assertEqual(_.partition([1, False, True, '']),
                         [[1, True], [False, '']], 'Default iterator')
        self.assertEqual(_.partition([{"x": 1}, {"x": 0}, {"x": 1}], 'x'),
                         [[{"x": 1}, {"x": 1}], [{"x": 0}]], 'Takes a string')
# Allow direct execution; normally these tests are collected with
# `python -m unittest discover` from the unittests folder.
if __name__ == "__main__":
    print("run these tests by executing `python -m unittest"
          " discover` in unittests folder")
    unittest.main()
########NEW FILE########
__FILENAME__ = test_functions
import unittest
from unittesthelper import init
init() # will let you import modules from upper folder
from src.underscore import _
from threading import Timer
class TestStructure(unittest.TestCase):
    """Tests for the function-decorating helpers: memoize, delay, defer,
    throttle, debounce, once, wrap, compose, after and partial.

    The timing tests fire ``threading.Timer`` callbacks and report success
    asynchronously via print; a failing assertion inside such a callback
    surfaces only after the test method itself has already returned.
    """

    class Namespace:
        # mutable holder so nested callbacks can rebind state visible here
        pass

    def test_bind(self):
        pass

    def test_bindAll(self):
        pass

    def test_memoize(self):
        def fib(n):
            return n if n < 2 else fib(n - 1) + fib(n - 2)
        fastFib = _.memoize(fib)
        self.assertEqual(
            fib(10), 55, 'a memoized version of fibonacci'
            ' produces identical results')
        # repeated calls must keep hitting the cache and stay correct
        self.assertEqual(
            fastFib(10), 55, 'a memoized version of fibonacci'
            ' produces identical results')
        self.assertEqual(
            fastFib(10), 55, 'a memoized version of fibonacci'
            ' produces identical results')
        self.assertEqual(
            fastFib(10), 55, 'a memoized version of fibonacci'
            ' produces identical results')

        def o(str):
            return str
        fastO = _.memoize(o)
        self.assertEqual(o('upper'), 'upper', 'checks hasOwnProperty')
        self.assertEqual(fastO('upper'), 'upper', 'checks hasOwnProperty')

    def test_delay(self):
        ns = self.Namespace()
        ns.delayed = False

        def func():
            ns.delayed = True
        # delay argument is in milliseconds (checked at 50ms and 200ms below)
        _.delay(func, 150)

        def checkFalse():
            self.assertFalse(ns.delayed)
            print("\nASYNC: delay. OK")

        def checkTrue():
            self.assertTrue(ns.delayed)
            print("\nASYNC: delay. OK")
        Timer(0.05, checkFalse).start()
        Timer(0.20, checkTrue).start()

    def test_defer(self):
        ns = self.Namespace()
        ns.deferred = False

        def defertTest(bool):
            ns.deferred = bool
        _.defer(defertTest, True)

        def deferCheck():
            self.assertTrue(ns.deferred, "deferred the function")
            print("\nASYNC: defer. OK")
        _.delay(deferCheck, 50)

    def test_throttle(self):
        ns = self.Namespace()
        ns.counter = 0

        def incr():
            ns.counter += 1
        throttledIncr = _.throttle(incr, 100)
        # a burst of calls within one window must count only once
        throttledIncr()
        throttledIncr()
        throttledIncr()
        Timer(0.07, throttledIncr).start()
        Timer(0.12, throttledIncr).start()
        Timer(0.14, throttledIncr).start()
        Timer(0.19, throttledIncr).start()
        Timer(0.22, throttledIncr).start()
        Timer(0.34, throttledIncr).start()

        def checkCounter1():
            self.assertEqual(ns.counter, 1, "incr was called immediately")
            print("ASYNC: throttle. OK")

        def checkCounter2():
            self.assertEqual(ns.counter, 4, "incr was throttled")
            print("ASYNC: throttle. OK")
        _.delay(checkCounter1, 90)
        _.delay(checkCounter2, 400)

    def test_debounce(self):
        ns = self.Namespace()
        ns.counter = 0

        def incr():
            ns.counter += 1
        debouncedIncr = _.debounce(incr, 120)
        # calls spaced closer than 120ms keep resetting the timer,
        # so only one invocation may go through
        debouncedIncr()
        debouncedIncr()
        debouncedIncr()
        Timer(0.03, debouncedIncr).start()
        Timer(0.06, debouncedIncr).start()
        Timer(0.09, debouncedIncr).start()
        Timer(0.12, debouncedIncr).start()
        Timer(0.15, debouncedIncr).start()

        def checkCounter():
            self.assertEqual(1, ns.counter, "incr was debounced")
            print("ASYNC: debounce. OK")
        _.delay(checkCounter, 300)

    def test_once(self):
        ns = self.Namespace()
        ns.num = 0

        def add():
            ns.num += 1
        increment = _.once(add)
        increment()
        increment()
        increment()
        increment()
        self.assertEqual(ns.num, 1)

    def test_wrap(self):
        def greet(name):
            return "hi: " + name

        def wrap(func, name):
            aname = list(name)
            aname.reverse()
            reveresed = "".join(aname)
            return func(name) + ' ' + reveresed
        backwards = _.wrap(greet, wrap)
        self.assertEqual(backwards('moe'), 'hi: moe eom',
                         'wrapped the saluation function')
        inner = lambda: "Hello "
        obj = {"name": "Moe"}
        obj["hi"] = _.wrap(inner, lambda fn: fn() + obj["name"])
        self.assertEqual(obj["hi"](), "Hello Moe")

    def test_compose(self):
        def greet(name):
            return "hi: " + name

        def exclaim(sentence):
            return sentence + '!'

        def upperize(full):
            return full.upper()
        # compose applies right-to-left: upperize, then greet, then exclaim
        composed_function = _.compose(exclaim, greet, upperize)
        self.assertEqual('HI: MOE!', composed_function('moe'),
                         'can compose a function that takes another')

    def test_after(self):
        # helper: call an after(N) wrapper `timesCalled` times and report
        # how often the inner function actually fired
        def testAfter(afterAmount, timesCalled):
            ns = self.Namespace()
            ns.afterCalled = 0

            def afterFunc():
                ns.afterCalled += 1
            after = _.after(afterAmount, afterFunc)
            while (timesCalled):
                after()
                timesCalled -= 1
            return ns.afterCalled
        self.assertEqual(testAfter(5, 5), 1,
                         "after(N) should fire after being called N times")
        self.assertEqual(testAfter(5, 4), 0,
                         "after(N) should not fire unless called N times")
        self.assertEqual(testAfter(0, 0), 1,
                         "after(0) should fire immediately")

    def test_partial(self):
        def func(*args):
            return ' '.join(args)
        pfunc = _.partial(func, 'a', 'b', 'c')
        self.assertEqual(pfunc('d', 'e'), 'a b c d e')
# Allow direct execution; normally these tests are collected with
# `python -m unittest discover` from the unittests folder.
if __name__ == "__main__":
    # FIX: the two implicitly-concatenated literals were missing a space and
    # printed "...unittestdiscover..."; now matches the other test modules.
    print("run these tests by executing `python -m unittest"
          " discover` in unittests folder")
    unittest.main()
########NEW FILE########
__FILENAME__ = test_objects
import unittest
from unittesthelper import init
init() # will let you import modules from upper folder
from src.underscore import _
class TestObjects(unittest.TestCase):
    """Tests for the object/dict helpers: keys/values, extend/defaults,
    pick/omit, clone, equality/emptiness checks, tap/pairs/invert and the
    matches() predicate factory."""

    def test_keys(self):
        self.assertEqual(set(_.keys({"one": 1, "two": 2})),
                         {'two', 'one'}, 'can extract the keys from an object')

    def test_values(self):
        self.assertEqual(set(_.values({"one": 1, "two": 2})),
                         {2, 1}, 'can extract the values from an object')

    def test_functions(self):
        # only the callable-valued keys ('b' and 'd') must be reported
        obj = {"a": 'dash', "b": _.map, "c": ("/yo/"), "d": _.reduce}
        self.assertEqual(['b', 'd'], _.functions(obj),
                         'can grab the function names of any passed-in object')

    def test_extend(self):
        self.assertEqual(_.extend({}, {"a": 'b'}).get("a"), 'b',
                         'can extend an object with the attributes of another')
        self.assertEqual(_.extend({"a": 'x'}, {"a": 'b'}).get(
            "a"), 'b', 'properties in source override destination')
        self.assertEqual(_.extend({"x": 'x'}, {"a": 'b'}).get(
            "x"), 'x', 'properties not in source dont get overriden')
        result = _.extend({"x": 'x'}, {"a": 'a'}, {"b": 'b'})
        self.assertEqual(result, {"x": 'x', "a": 'a', "b": 'b'},
                         'can extend from multiple source objects')
        result = _.extend({"x": 'x'}, {"a": 'a', "x": 2}, {"a": 'b'})
        self.assertEqual(result, {"x": 2, "a": 'b'},
                         'extending from multiple source'
                         ' objects last property trumps')
        result = _.extend({}, {"a": None, "b": None})
        self.assertEqual(set(_.keys(result)),
                         {"a", "b"}, 'extend does not copy undefined values')

    def test_pick(self):
        # pick() accepts names as varargs, as a list, or mixed
        result = _.pick({"a": 1, "b": 2, "c": 3}, 'a', 'c')
        self.assertTrue(_.isEqual(result, {'a': 1, 'c': 3}),
                        'can restrict properties to those named')
        result = _.pick({"a": 1, "b": 2, "c": 3}, ['b', 'c'])
        self.assertTrue(_.isEqual(result, {"b": 2, "c": 3}),
                        'can restrict properties to those named in an array')
        result = _.pick({"a": 1, "b": 2, "c": 3}, ['a'], 'b')
        self.assertTrue(_.isEqual(result, {"a": 1, "b": 2}),
                        'can restrict properties to those named in mixed args')

    def test_omit(self):
        result = _.omit({"a": 1, "b": 2, "c": 3}, 'b')
        self.assertEqual(result, {"a": 1, "c": 3},
                         'can omit a single named property')
        result = _.omit({"a": 1, "b": 2, "c": 3}, 'a', 'c')
        self.assertEqual(result, {"b": 2}, 'can omit several named properties')
        result = _.omit({"a": 1, "b": 2, "c": 3}, ['b', 'c'])
        self.assertEqual(result, {"a": 1},
                         'can omit properties named in an array')

    def test_defaults(self):
        # defaults() fills only missing keys; falsy-but-present values
        # (0, "", None) must be kept
        options = {"zero": 0, "one": 1, "empty":
                   "", "nan": None, "string": "string"}
        _.defaults(options, {"zero": 1, "one": 10, "twenty": 20})
        self.assertEqual(options["zero"], 0, 'value exists')
        self.assertEqual(options["one"], 1, 'value exists')
        self.assertEqual(options["twenty"], 20, 'default applied')
        _.defaults(options, {"empty": "full"},
                   {"nan": "none"}, {"word": "word"}, {"word": "dog"})
        self.assertEqual(options["empty"], "", 'value exists')
        self.assertTrue(_.isNone(options["nan"]), "NaN isn't overridden")
        self.assertEqual(options["word"], "word",
                         'new value is added, first one wins')

    def test_clone(self):
        # clone() is shallow: nested containers stay shared with the original
        moe = {"name": 'moe', "lucky": [13, 27, 34]}
        clone = _.clone(moe)
        self.assertEqual(clone["name"], 'moe',
                         'the clone as the attributes of the original')
        clone["name"] = 'curly'
        self.assertTrue(clone["name"] == 'curly' and moe["name"] == 'moe',
                        'clones can change shallow attributes'
                        ' without affecting the original')
        clone["lucky"].append(101)
        self.assertEqual(_.last(moe["lucky"]), 101,
                         'changes to deep attributes are'
                         ' shared with the original')
        self.assertEqual(_.clone(1), 1,
                         'non objects should not be changed by clone')
        self.assertEqual(_.clone(None), None,
                         'non objects should not be changed by clone')

    def test_isEqual(self):
        obj = {"a": 1, "b": 2}
        self.assertTrue(_.isEqual(obj, {"a": 1, "b": 2}), "Object is equal")
        obj = {"a": 1, "b": {"c": 2, "d": 3, "e": {"f": [1, 2, 3, 4, 5]}}}
        self.assertTrue(_.isEqual(
            obj, {"a": 1, "b": {"c": 2, "d": 3, "e": {"f": [1, 2, 3, 4, 5]}}}),
            "Object is equal")
        obj = [1, 2, 3, 4, [5, 6, 7, [[[[8]]]]]]
        self.assertTrue(
            _.isEqual(obj, [1, 2, 3, 4, [5, 6, 7, [[[[8]]]]]]),
            "Object is equal")
        obj = None
        self.assertTrue(_.isEqual(obj, None), "Object is equal")
        obj = 1
        self.assertTrue(_.isEqual(obj, 1), "Object is equal")
        obj = "string"
        self.assertTrue(_.isEqual(obj, "string"), "Object is equal")

    def test_isEmpty(self):
        self.assertTrue(not _([1]).isEmpty(), '[1] is not empty')
        self.assertTrue(_.isEmpty([]), '[] is empty')
        self.assertTrue(not _.isEmpty({"one": 1}), '{one : 1} is not empty')
        self.assertTrue(_.isEmpty({}), '{} is empty')
        self.assertTrue(_.isEmpty(None), 'null is empty')
        self.assertTrue(_.isEmpty(), 'undefined is empty')
        self.assertTrue(_.isEmpty(''), 'the empty string is empty')
        self.assertTrue(not _.isEmpty('moe'), 'but other strings are not')
        obj = {"one": 1}
        obj.pop("one")
        self.assertTrue(_.isEmpty(obj),
                        'deleting all the keys from an object empties it')
        pass

    def test_isType(self):
        # put all the types here and check each for true
        pass

    class Namespace:
        # mutable holder for state set by nested callbacks
        pass

    def test_tap(self):
        # tap() passes the value to the interceptor and returns it unchanged
        ns = self.Namespace()
        ns.intercepted = None

        def interceptor(obj):
            ns.intercepted = obj
        returned = _.tap(1, interceptor)
        self.assertEqual(ns.intercepted, 1,
                         "passes tapped object to interceptor")
        self.assertEqual(returned, 1, "returns tapped object")
        returned = _([1, 2, 3]).chain().map(
            lambda n, *args: n * 2).max().tap(interceptor).value()
        self.assertTrue(returned == 6 and ns.intercepted == 6,
                        'can use tapped objects in a chain')

    def test_pairs(self):
        r = _.pairs({"one": 1, "two": 2})
        self.assertEqual(sorted(r), [["one", 1], ["two", 2]],
                         'can convert an object into pairs')

    def test_invert(self):
        obj = {"first": 'Moe', "second": 'Larry', "third": 'Curly'}
        r = _(obj).chain().invert().keys().join(' ').value()
        self.assertEqual(set(r), set('Larry Moe Curly'),
                         'can invert an object')
        # inversion is its own inverse (values must be unique here)
        self.assertEqual(_.invert(_.invert(obj)), obj,
                         "two inverts gets you back where you started")

    def test_matches(self):
        moe = {"name": 'Moe Howard', "hair": True}
        curly = {"name": 'Curly Howard', "hair": False}
        stooges = [moe, curly]
        self.assertTrue(_.find(stooges, _.matches({"hair": False})) == curly,
                        "returns a predicate that can"
                        " be used by finding functions.")
        self.assertTrue(_.find(stooges, _.matches(moe)) == moe,
                        "can be used to locate an object"
                        " exists in a collection.")
# Allow direct execution; normally these tests are collected with
# `python -m unittest discover` from the unittests folder.
if __name__ == "__main__":
    print("run these tests by executing `python -m unittest"
          " discover` in unittests folder")
    unittest.main()
########NEW FILE########
__FILENAME__ = test_structure
import unittest
from unittesthelper import init
init() # will let you import modules from upper folder
from src.underscore import _
class TestStructure(unittest.TestCase):
    """Smoke tests for the three calling styles of the underscore port:
    OO wrapper, static functions, and chained wrappers."""

    def test_oo(self):
        # instance style: _(obj).method()
        # (`min` shadows the builtin locally — kept from upstream)
        min = _([1, 2, 3, 4, 5]).min()
        self.assertEqual(1, min, "oo did not work")

    def test_static(self):
        # static style: _.method(obj)
        min = _.min([1, 2, 3, 4, 5])
        self.assertEqual(1, min, "static did not work")

    def test_chaining(self):
        # chain() keeps returning wrapped objects until value() unwraps
        array = range(1, 11)
        u = _(array).chain().filter(lambda x: x > 5).min()
        self.assertTrue(isinstance(u, _.underscore),
                        "object is not an instanse of underscore")
        self.assertEqual(6, u.value(), "value should have returned")
# Allow direct execution; normally these tests are collected with
# `python -m unittest discover` from the unittests folder.
if __name__ == "__main__":
    # FIX: the two implicitly-concatenated literals were missing a space and
    # printed "...unittestdiscover..."; now matches the other test modules.
    print("run these tests by executing `python -m unittest"
          " discover` in unittests folder")
    unittest.main()
########NEW FILE########
__FILENAME__ = test_utility
import unittest
from unittesthelper import init
init() # will let you import modules from upper folder
from src.underscore import _
import math
import time
class TestUtility(unittest.TestCase):
    """Tests for the utility helpers: identity/constant/property, random,
    now, uniqueId, times, mixin, escape, result and — mostly — the
    ``_.template`` engine with its configurable delimiter settings."""

    class Namespace():
        # mutable holder for state shared with nested callbacks
        pass

    def setUp(self):
        # every test starts from pristine (default) template delimiters
        _.templateSettings = {}

    def test_identity(self):
        moe = {"name": 'moe'}
        self.assertEqual(moe, _.identity(moe),
                         "moe is the same as his identity")

    def test_constant(self):
        moe = {"name": 'moe'}
        self.assertEqual(_.constant(moe)(), moe,
                         'should create a function that returns moe')

    def test_property(self):
        moe = {"name": 'moe'}
        self.assertEqual(_.property('name')(moe), 'moe',
                         'should return the property with the given name')

    def test_random(self):
        # sample 1000 draws and require every one to respect the bounds
        array = _.range(1000)
        mi = math.pow(2, 31)
        ma = math.pow(2, 62)

        def check(*args):
            return _.random(mi, ma) >= mi
        result = _.every(array, check)
        self.assertTrue(
            result, "should produce a random number greater than or equal"
            " to the minimum number")

        def check2(*args):
            r = _.random(ma)
            return r >= 0 and r <= ma
        result = _.every(array, check2)
        self.assertTrue(
            result, "should produce a random number when passed max_number")

    def test_now(self):
        # NOTE(review): compares against time.time() with a tolerance of 5,
        # so _.now() is effectively expected in seconds despite the message
        # saying milliseconds — confirm against the implementation.
        diff = _.now() - time.time()
        self.assertTrue(diff <= 0 and diff > -5,
                        'Produces the correct time in milliseconds')

    def test_uniqueId(self):
        ns = self.Namespace()
        ns.ids = []
        i = 0
        for i in range(0, 100):
            ns.ids.append(_.uniqueId())
        # uniq() must not collapse anything if all ids are distinct
        self.assertEqual(len(ns.ids), len(_.uniq(ns.ids)),
                         "can generate a globally-unique stream of ids")

    def test_times(self):
        vals = []
        _.times(3, lambda i: vals.append(i))
        self.assertEqual([0, 1, 2], vals, "is 0 indexed")
        vals = []
        _(3).times(lambda i: vals.append(i))
        self.assertEqual([0, 1, 2], vals, "is 0 indexed")
        pass

    def test_mixin(self):
        # mixin() must expose the new function both statically and OO-style
        _.mixin({
            "myUpper": lambda self: self.obj.upper(),
        })
        self.assertEqual('TEST', _.myUpper('test'), "mixed in a function to _")
        self.assertEqual('TEST', _('test').myUpper(),
                         "mixed in a function to _ OOP")

    def test_escape(self):
        # NOTE(review): the first expectation looks mangled (escaping
        # "Curly & Moe" should not return it unchanged, and it contradicts
        # the next line) — likely entity markup lost upstream; verify.
        self.assertEqual("Curly & Moe", _.escape("Curly & Moe"))
        self.assertEqual("Curly &amp; Moe", _.escape("Curly & Moe"))

    def test_template(self):
        # <%= %> interpolates, <% %> evaluates, <%- %> escapes
        basicTemplate = _.template("<%= thing %> is gettin' on my noives!")
        result = basicTemplate({"thing": 'This'})
        self.assertEqual(result, "This is gettin' on my noives!",
                         'can do basic attribute interpolation')
        sansSemicolonTemplate = _.template("A <% this %> B")
        self.assertEqual(sansSemicolonTemplate(), "A B")
        backslashTemplate = _.template("<%= thing %> is \ridanculous")
        self.assertEqual(
            backslashTemplate({"thing": 'This'}), "This is \ridanculous")
        escapeTemplate = _.template(
            '<%= "checked=\\"checked\\"" if a else "" %>')
        self.assertEqual(escapeTemplate({"a": True}), 'checked="checked"',
                         'can handle slash escapes in interpolations.')
        fancyTemplate = _.template(
            "<ul><% for key in people: %><li><%= key %></li><% endfor %></ul>")
        result = fancyTemplate({"people": ["Larry", "Curly", "Moe"]})
        self.assertEqual(
            result, "<ul><li>Larry</li><li>Curly</li><li>Moe</li></ul>",
            'can run arbitrary javascript in templates')
        escapedCharsInJavascriptTemplate = _.template(
            "<ul><% def by(item, *args): %><li><%= item %></li><% enddef %>"
            "<% _.each(numbers.split('\\n'), by) %></ul>")
        # print escapedCharsInJavascriptTemplate.source
        result = escapedCharsInJavascriptTemplate(
            {"numbers": "one\ntwo\nthree\nfour"})
        # print result, "####"
        self.assertEqual(
            result, "<ul><li>one</li><li>two</li>"
            "<li>three</li><li>four</li></ul>",
            'Can use escaped characters (e.g. \\n) in Javascript')
        namespaceCollisionTemplate = _.template(
            "<%= pageCount %> <%= thumbnails[pageCount] %>"
            " <% def by(p, *args): %><div class=\"thumbnail\""
            " rel=\"<%= p %>\"></div><% enddef %><% _.each(thumbnails, by) %>")
        result = namespaceCollisionTemplate({
            "pageCount": 3,
            "thumbnails": {
                1: "p1-thumbnail.gif",
                2: "p2-thumbnail.gif",
                3: "p3-thumbnail.gif"
            }
        })
        self.assertEqual(
            result, '3 p3-thumbnail.gif <div class="thumbnail"'
            ' rel="p1-thumbnail.gif"></div><div class="thumbnail"'
            ' rel="p2-thumbnail.gif"></div><div class="thumbnail"'
            ' rel="p3-thumbnail.gif"></div>')
        noInterpolateTemplate = _.template(
            "<div><p>Just some text. Hey, I know this is silly"
            " but it aids consistency.</p></div>")
        result = noInterpolateTemplate()
        self.assertEqual(
            result, "<div><p>Just some text. Hey, I know this is"
            " silly but it aids consistency.</p></div>")
        quoteTemplate = _.template("It's its, not it's")
        self.assertEqual(quoteTemplate({}), "It's its, not it's")
        # backslash line-continuations inside the template string
        quoteInStatementAndBody = _.template("<% \
if foo == 'bar': \
%>Statement quotes and 'quotes'.<% endif %>")
        self.assertEqual(
            quoteInStatementAndBody({"foo": "bar"}),
            "Statement quotes and 'quotes'.")
        withNewlinesAndTabs = _.template(
            'This\n\t\tis: <%= x %>.\n\tok.\nend.')
        self.assertEqual(
            withNewlinesAndTabs({"x": 'that'}),
            'This\n\t\tis: that.\n\tok.\nend.')
        template = _.template("<i><%- value %></i>")
        result = template({"value": "<script>"})
        self.assertEqual(result, '<i><script></i>')
        # This wouldn't work in python
        # stooge = {
        #     "name": "Moe",
        #     "template": _.template("I'm <%= this.name %>")
        # }
        # self.assertEqual(stooge.template(), "I'm Moe")
        # mustache-style {{ }} delimiters via templateSettings
        _.templateSettings = {
            "evaluate": r"\{\{([\s\S]+?)\}\}",
            "interpolate": r"\{\{=([\s\S]+?)\}\}"
        }
        custom = _.template(
            "<ul>{{ for key in people: }}<li>{{= key }}</li>{{ endfor }}</ul>")
        result = custom({"people": ["Larry", "Curly", "Moe"]})
        self.assertEqual(
            result, "<ul><li>Larry</li><li>Curly</li><li>Moe</li></ul>",
            'can run arbitrary javascript in templates')
        customQuote = _.template("It's its, not it's")
        self.assertEqual(customQuote({}), "It's its, not it's")
        quoteInStatementAndBody = _.template(
            "{{ if foo == 'bar': }}Statement quotes and 'quotes'.{{ endif }}")
        self.assertEqual(
            quoteInStatementAndBody({"foo": "bar"}),
            "Statement quotes and 'quotes'.")
        # PHP-style <? ?> delimiters (regex-special characters)
        _.templateSettings = {
            "evaluate": r"<\?([\s\S]+?)\?>",
            "interpolate": r"<\?=([\s\S]+?)\?>"
        }
        customWithSpecialChars = _.template(
            "<ul><? for key in people: ?><li><?= key ?></li><? endfor ?></ul>")
        result = customWithSpecialChars({"people": ["Larry", "Curly", "Moe"]})
        self.assertEqual(
            result, "<ul><li>Larry</li><li>Curly</li><li>Moe</li></ul>",
            'can run arbitrary javascript in templates')
        customWithSpecialCharsQuote = _.template("It's its, not it's")
        self.assertEqual(customWithSpecialCharsQuote({}), "It's its, not it's")
        quoteInStatementAndBody = _.template(
            "<? if foo == 'bar': ?>Statement quotes and 'quotes'.<? endif ?>")
        self.assertEqual(
            quoteInStatementAndBody({"foo": "bar"}),
            "Statement quotes and 'quotes'.")
        # interpolate-only settings must still work (mustache.js mimicry)
        _.templateSettings = {
            "interpolate": r"\{\{(.+?)\}\}"
        }
        mustache = _.template("Hello {{planet}}!")
        self.assertEqual(mustache({"planet": "World"}),
                         "Hello World!", "can mimic mustache.js")
        templateWithNull = _.template("a null undefined {{planet}}")
        self.assertEqual(
            templateWithNull({"planet": "world"}), "a null undefined world",
            "can handle missing escape and evaluate settings")

    def test_template_escape(self):
        # the bare \u2028/\u2029 escapes are line/paragraph separator chars
        tmpl = _.template('<p>\u2028<%= "\\u2028\\u2029" %>\u2029</p>')
        self.assertEqual(tmpl(), '<p>\u2028\u2028\u2029\u2029</p>')

    def test_result(self):
        # result() returns the attribute, calling it first if it is callable;
        # missing keys and a None object both yield None
        obj = {"w": '', "x": 'x', "y": lambda x="x": x}
        self.assertEqual(_.result(obj, 'w'), '')
        self.assertEqual(_.result(obj, 'x'), 'x')
        self.assertEqual(_.result(obj, 'y'), 'x')
        self.assertEqual(_.result(obj, 'z'), None)
        self.assertEqual(_.result(None, 'x'), None)

    def test_template_variable(self):
        # the "variable" setting scopes template data under a single name
        s = '<%=data["x"]%>'
        data = {"x": 'x'}
        self.assertEqual(_.template(s, data, {"variable": 'data'}), 'x')
        _.templateSettings = {
            "variable": 'data'
        }
        self.assertEqual(_.template(s)(data), 'x')

    def test_temp_settings_no_change(self):
        # per-call settings must not leak into the global templateSettings
        self.assertFalse("variable" in _.templateSettings)
        _.template('', {}, {"variable": 'x'})
        self.assertFalse("variable" in _.templateSettings)

    def test_template_undef(self):
        # None values interpolate/escape to the empty string
        template = _.template('<%=x%>')
        self.assertEqual(template({"x": None}), '')
        templateEscaped = _.template('<%-x%>')
        self.assertEqual(templateEscaped({"x": None}), '')
        templateWithPropertyEscaped = _.template('<%-x["foo"]%>')
        self.assertEqual(templateWithPropertyEscaped({"x": {"foo": None}}), '')

    def test_interpolate_only_once(self):
        # each interpolation expression must be evaluated exactly once
        ns = self.Namespace()
        ns.count = 0
        template = _.template('<%= f() %>')

        def test():
            self.assertTrue(not ns.count)
            ns.count += 1
        template({"f": test})
        ns.countEscaped = 0
        templateEscaped = _.template('<%- f() %>')

        def test2():
            self.assertTrue(not ns.countEscaped)
            ns.countEscaped += 1
        templateEscaped({"f": test2})
# Allow direct execution; normally these tests are collected with
# `python -m unittest discover` from the unittests folder.
if __name__ == "__main__":
    print("run these tests by executing `python -m unittest"
          " discover` in unittests folder")
    unittest.main()
########NEW FILE########
__FILENAME__ = unittesthelper
import os
import sys
import inspect
def init():
    """Make the test modules importable regardless of the caller's cwd.

    Inserts the directory containing this helper and its parent (the project
    root, so ``src`` becomes importable) at the front of ``sys.path``.
    ``inspect`` is used instead of ``__file__`` so the resolved path is
    correct even when the script is invoked through a symlink.

    Returns None; calling it repeatedly is idempotent.
    """
    # FIX: the original duplicated the whole realpath/abspath expression for
    # the folder and its parent; compute the base once instead.
    here = os.path.realpath(os.path.abspath(
        os.path.split(inspect.getfile(inspect.currentframe()))[0]))
    parent = os.path.realpath(os.path.abspath(os.path.join(here, "../")))
    # Insert the folder first, then the parent — same final ordering as the
    # original two blocks (parent ends up at sys.path[0]).
    for folder in (here, parent):
        if folder not in sys.path:
            sys.path.insert(0, folder)
########NEW FILE########
| true |
4cc5860bdc3c06945a9f85ffda1fe1ff2624e5cb | Python | Dyndyn/python | /lab4.3.py | UTF-8 | 458 | 3.15625 | 3 | [] | no_license | #!/usr/bin/python
#-*- coding: utf-8 -*-
from decimal import *
#getcontext().prec = 2
# Monetary values are quantized to whole cents, rounding half up.
cents = Decimal('0.01')
salary = Decimal(input("What's your salary? ")).quantize(cents, ROUND_HALF_UP)
# 18% income tax and 1.5% military levy (the Ukrainian output string below
# labels them as such).
tax = (salary * Decimal('0.18')).quantize(cents, ROUND_HALF_UP)
military = (salary * Decimal('0.015')).quantize(cents, ROUND_HALF_UP)
print('Податок на доходи фізичних осіб = %.2f, військовий збір = %.2f' % (tax, military))
| true |
858a5ab22cb6da559581f84ed6cc8e5348706a07 | Python | daniel-reich/ubiquitous-fiesta | /LanWAvTtQetP5xyDu_18.py | UTF-8 | 593 | 2.671875 | 3 | [] | no_license |
def coins_div(lst):
    """Return True if `lst` can be split into three subsets of equal sum.

    Backtracking search: find one subset summing to total/3, then a second
    among the remaining elements; the leftovers form the third subset.
    Note: sorts `lst` in place (descending), like the original.
    """
    total = sum(lst)
    if total % 3:
        return False
    lst.sort(reverse=True)
    taken = [False] * len(lst)
    target = total // 3

    def subsets(start, remaining):
        # Yield True once for every subset of lst[start:] (skipping taken
        # elements) whose values sum exactly to `remaining`.
        for idx in range(start, len(lst)):
            if taken[idx] or lst[idx] > remaining:
                continue
            taken[idx] = True
            if lst[idx] == remaining:
                yield True
            else:
                yield from subsets(idx + 1, remaining - lst[idx])
            taken[idx] = False

    for _ in subsets(0, target):
        for _ in subsets(1, target):
            return True
    return False
| true |
c1e8103fed58e5edddf122c4d8588cad433d8d2b | Python | zhaipro/acm | /leetcode/53.py | UTF-8 | 259 | 2.9375 | 3 | [
"MIT"
] | permissive | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
r = nums[0]
c = 0
for x in nums:
c += x
if c > r:
r = c
if c < 0:
c = 0
return r
| true |
f3208ba4f089f126586c74d659e1d28db5b54b96 | Python | ZarinaAfl/Python | /cw_13_11_17/Maps.py | UTF-8 | 189 | 2.828125 | 3 | [] | no_license | import multiprocessing
def f(lst):
    # NOTE(review): `while (True): pass` busy-spins forever, so the
    # sum-of-squares code below is unreachable. Presumably the spin is a
    # deliberate CPU-load demo for the process pool -- confirm intent.
    s = 0
    while (True):
        pass
    for i in range(1000):
        s += i*i
    print(s)
# NOTE(review): there is no `if __name__ == "__main__"` guard; on start
# methods that re-import the module (spawn, e.g. Windows) creating the Pool
# at import time recurses. Each worker busy-spins forever in f(), so this
# map() never returns.
p = multiprocessing.Pool()
p.map(f, [1,2,3,4,5])
| true |
8e01b8358e439ac6c5344ac0d456ac3d555bb7c4 | Python | ccsreenidhin/Learning_Python_Part1 | /Learning_Python_part1/python_print/print2.py | UTF-8 | 421 | 3.484375 | 3 | [] | no_license | print "day",56
# Python 2 print-statement demos of %-string formatting; the two commented
# lines record the errors their annotations describe.
print " I got", 100, "programs to write"
print "no of days %s" % 56
print "no of days %s" % "56"
print "no of days %d" % 56
#print "no of days %d" % "56" error:%d format: a number is required, not str
#print "the nos are %d %d %d and the sum is" %(1,2,3,1+2+3) error:not all arguments converted during string formatting
print "the nos are %d %d %d and the sum is %d" %(1,2,3,1+2+3)
print round(1.78963)
| true |
f8639fb4e071ecb111e2010d17c5f3ebf21a71bd | Python | ebaustria/regiaoSulTest | /lib/arrival_conversion.py | UTF-8 | 726 | 2.53125 | 3 | [
"MIT"
] | permissive | import lib.coord_conversion as cc
import json
def make_arrivals(local_coordinates: str, gps_coordinates: str):
    """Combine timestamp and GPS data (via lib.coord_conversion) and write
    the result to ``arrivals.json`` as a list of arrival records.
    """
    timestamps = cc.timestamps_list(local_coordinates)
    gps_points = cc.gps_list(gps_coordinates)
    combined = cc.final_list(timestamps, gps_points)
    # One record per stop; the trailing `messages` field is unused here.
    records = [
        {
            "name": name,
            "coordinates": gps,
            "timestamp": timestamp,
            "color": [253, 128, 93],
        }
        for name, gps, timestamp, _messages in combined
    ]
    with open("arrivals.json", "w") as out_file:
        out_file.write(json.dumps(records, indent=2))
| true |
1539e684c7ba5c46acccd93f8630faadcfc5a223 | Python | ahmadmalbzoor/python_stack | /_pyhton/python_fundamentals/forloop bassics/q3.py | UTF-8 | 115 | 3.15625 | 3 | [] | no_license | for x in range(1, 100):
if x%5==0 and x%10==0:
print("coding")
elif x%10==0:
print("ahmad") | true |
c4a6a8852009a128501e125b6226c7444bf01aa8 | Python | gummie4444/GetToVis- | /viso.py | UTF-8 | 2,586 | 2.765625 | 3 | [] | no_license | #visoscript
# -*- coding: utf-8 -*-
import mechanize
from mechanize._opener import urlopen
from mechanize._form import ParseResponse
from bs4 import BeautifulSoup
import time
import sched
#VIRKAR
def logIn(brow,name):
    ''' Connect to the website and login to the form

    name is a (username, password) pair. NOTE(review): the `brow` argument is
    immediately replaced by a fresh Browser, so whatever the caller passes in
    is never used -- confirm whether reusing it was intended.
    '''
    #visoscript
    brow = mechanize.Browser()
    brow.open('https://www.nord.is/innskra/')
    # Fill in the first form on the login page.
    brow.select_form(nr = 0)
    brow.form['username'] = name[0]
    brow.form['password'] = name[1]
    brow.submit()
    #RETURN the open browser that is loggedin
    return brow
#VIRKAR
def getTheVisos():
    ''' Scrape the next visos that is about to happen today
    '''
    browser = mechanize.Browser()
    browser.open('https://www.nord.is/atburdir/')
    html = browser.response()
    parsed_html = BeautifulSoup(html)
    '''
    search for everything that is with some class
    passed events is the events that are over
    TODO:check what the class of the not-passed events are
    TODO:only return the visos that are happening today
    '''
    templist = [] # list of all events that have not yet happened
    for link in parsed_html.find_all('div','upcoming-event'):
        print((link.a.get('href').encode('utf-8')))
        #print(link.div.string.encode('utf-8'))  check whether the date also needs to go into the list
        templist.append((link.a.get('href').encode('utf-8')))
    # return the visos that are gonna happen (UTF-8-encoded href paths)
    return templist
#VIRKAR EKKI
#ÞETTA FER Í GANG KLUKKAN 13:10
def getTheFuckersToViso(sc):
    """Register the logged-in user for today's upcoming events, then
    re-queue itself on the scheduler so it runs again in 24 hours.
    """
    #CALL THE FUNCTION NEXT AFTER 24 HOURS
    sc.enter(24*60*60, 1, getTheFuckersToViso, (sc,))
    #TODO FIX THIS FOR ADDING OTHER PEOPLE
    # BUG FIX: a stray ']' followed this list in the original (a syntax
    # error) -- most likely the remnant of a redacted multi-user list of
    # [username, password] pairs.
    gummi = ['username', 'password']
    brow1 = mechanize.Browser()
    # Events still coming up today.
    visos = getTheVisos()
    #TODO PUT A VARIABLE HERE IF THE DUDES DONT WANT TO GO TO VISO
    brow1 = logIn(brow1, gummi)
    # Only act when there is at least one event today.
    if visos[0] != 0:
        # Wait briefly, then hit the registration page of every event once.
        # (Unused helper variables bla2/counter from the original removed.)
        bla = True
        while bla:
            time.sleep(10)
            bla = False
            for viso in visos:
                visoName = 'https://www.nord.is' + viso + 'skraning'
                brow1.open(visoName)
                print("ja")
#KÖLLUM Á GETTHEFUCKERSTOVISO þegar klukkan er orðin eitthvað víst 13:10 á hverjum þriðjudegi r sum
#NOTA ÞETTA EÐA CRONJOB
# Daily driver: the first run fires 24 hours from now, and the handler
# re-enters itself each time, so s.run() effectively blocks forever.
s = sched.scheduler(time.time, time.sleep)
s.enter(24*60*60, 1,getTheFuckersToViso, (s,))
s.run()
| true |
aac268d166b1e59228ea810e2532ffd7ae076278 | Python | relientm96/simpleMLP | /script.py | UTF-8 | 466 | 3.109375 | 3 | [] | no_license | '''
Training Simple Neural Network
to make predictions for
x^2 + y^2
'''
import numpy as np
import pandas as pd
from pprint import *
import MLP as mlp
'''
Generating the input training set,
'''
# Two integer features per sample, each uniform in [0, 100); shape (100, 2).
x = np.vstack( (np.random.randint(100, size=100), np.random.randint(100, size=100)) ).T
# Target is x^2 + y^2 per row -> 1-D array of length 100 (.T is a no-op on 1-D).
y = np.array([row[0]**2 + row[1]**2 for row in x]).T
print("=======")
print(x.shape)
print(y.shape)
pprint(x[0:5])
#------------------
# Normalizing Data
#------------------
| true |
e3713a201c109113edc6377d5a6d68c61db082af | Python | Qandi430/python | /jumpToPython/part02/chapter06.py | UTF-8 | 1,424 | 4.34375 | 4 | [] | no_license | # 집합자료형
# set (supported since Python 2.3) is the type for handling mathematical
# sets conveniently.
# Create a set with the set keyword -- from a list or from a string.
s1 = set([1,2,3])
print(s1)
s2 = set("Hello")
print(s2)
# Properties of sets:
# - duplicates are not allowed
# - elements are unordered (Unordered)
# Lists and tuples are ordered, so their values can be read by indexing,
# but a set has no order and cannot be indexed directly.
# To index a set's values, convert it to a list or tuple first.
s1 = set([1,2,3])
# print(s1[0])
li = list(s1)
print(li)
print(li[0])
t1 = tuple(s1)
print(t1)
print(t1[0])
# Intersection, union and difference
s1 = set([1,2,3,4,5,6])
s2 = set([4,5,6,7,8,9])
# intersection: & or intersection()
print(s1 & s2)
print(s1.intersection(s2))
# union: | or union()
print(s1 | s2)
print(s1.union(s2))
# difference: - or difference()
print(s1 - s2)
print(s1.difference(s2))
# Set-related methods
# add a single value (add)
s1 = set([1,2,3])
s1.add(4)
print(s1)
# add several values (update)
s1 = set([1,2,3])
s1.update([4,5,6])
print(s1)
# remove a specific value (remove)
s1 = set([1,2,3])
s1.remove(2)
print(s1)
61bd561d84cb7994f5a0d500b1b2256088949623 | Python | sergevkim/KeywordSpotting | /kespo/models/attention_spotter.py | UTF-8 | 4,445 | 2.640625 | 3 | [
"MIT"
] | permissive | from collections import OrderedDict
import einops
import torch
from torch import Tensor
from torch.nn import (
CrossEntropyLoss,
GRU,
Module,
Linear,
Sequential,
Tanh,
)
from torch.optim import Adam
from torch.optim.optimizer import Optimizer
from torchaudio.transforms import MelSpectrogram
class Encoder(Module):
    """Recurrent acoustic encoder: a (not yet implemented) CNN front end
    followed by a GRU; returns the GRU output sequence.
    """

    def __init__(
        self,
        input_size: int = 40,
        hidden_size: int = 128,
        num_layers: int = 1,
    ):
        super().__init__()
        # Placeholder for a convolutional front end; currently disabled.
        self.cnn = None
        self.rnn = GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
        )

    def forward(self, x: Tensor) -> Tensor:
        # The CNN stage is disabled, so the input passes through unchanged.
        features = x
        rnn_output, _hidden = self.rnn(input=features)
        return rnn_output
class AverageAttention(Module):
    """Uniform (non-learned) attention: each of the T time steps receives
    the same weight 1/T, so pooling with it is a plain time average.
    """

    def __init__(
        self,
        T: int,
    ):
        super().__init__()
        self.T = T

    def forward(
        self,
        x: Tensor,
    ) -> Tensor:
        """Return weights of shape (batch, T, 1) for input x of shape
        (batch, T, ...)."""
        alpha = torch.full(
            size=(x.shape[0], self.T),
            fill_value=1 / self.T,
        )
        # (batch, T) -> (batch, T, 1) so the weights broadcast over the
        # feature dimension. Replaces the einops.rearrange call -- a plain
        # unsqueeze, with no extra dependency, produces the same tensor.
        return alpha.unsqueeze(-1)
class SoftAttention(Module):
    # Unfinished learned-attention stub. NOTE(review): as written this class
    # cannot be instantiated:
    #   * Linear takes in_features/out_features, not in_channels/out_channels,
    #     and None sizes are not valid;
    #   * Softmax is never imported in this module;
    #   * Sequential is built from self.blocks, but only
    #     self.blocks_ordered_dict is ever assigned (AttributeError).
    def __init__(
        self,
    ):
        super().__init__()
        self.blocks_ordered_dict = OrderedDict(
            Wb=Linear(#TODO
                in_channels=None,
                out_channels=None,
            ),
            tanh=Tanh(),
            v=Linear(
                in_features=None,
                out_features=None,
                bias=False,
            ),
            softmax=Softmax(),
        )
        self.alpher = Sequential(self.blocks)

    def forward(
        self,
        x: Tensor,
    ):
        # Intended to produce attention weights for the encoder outputs x.
        alpha = self.alpher(x)
        return alpha
class AttentionSpotter(Module):
    """Keyword spotter: log-mel front end -> GRU encoder -> uniform attention
    pooling over T steps -> linear classifier producing 3 class logits.

    Also carries Lightning-style training hooks (training_step, ...) and its
    own optimizer factory.
    """
    def __init__(
        self,
        T: int,
        in_channels: int=40,
        hidden_size: int=128,
        learning_rate: float=3e-4,
        device=torch.device('cpu'),
    ):
        super().__init__()
        self.T = T  # number of time steps pooled by the attention module
        self.device = device
        self.learning_rate = learning_rate
        self.criterion = CrossEntropyLoss()
        # in_channels mel bins at 16 kHz; other parameters left at the
        # torchaudio defaults (the commented lines record past experiments).
        self.mel_spectrogramer = MelSpectrogram(
            #n_fft=1024,
            sample_rate=16000,
            #win_length=1024,
            #hop_length=256,
            #f_min=0,
            #f_max=800,
            n_mels=in_channels,
        ).to(self.device)
        self.encoder = Encoder(
            input_size=in_channels,
            hidden_size=hidden_size,
            num_layers=1,
        )
        self.attention = AverageAttention(
            T=self.T,
        )
        # Final projection to 3 class logits; CrossEntropyLoss expects raw
        # logits, hence the Softmax stays disabled.
        self.epilog_ordered_dict = OrderedDict(
            U=Linear(
                in_features=hidden_size,
                out_features=3,
                bias=False,
            ),
            #softmax=Softmax(), #TODO remove
        )
        self.epilog = Sequential(self.epilog_ordered_dict)

    def forward(
        self,
        x: Tensor,
    ) -> Tensor:
        h = self.encoder(x)
        alpha = self.attention(h)
        c_0 = alpha * h  # NOTE(review): unused; the product is recomputed below
        # Attention-weighted sum over dim 1 (time).
        c = (alpha * h).sum(dim=1)
        p = self.epilog(c)
        return p

    def training_step(
        self,
        batch: Tensor,
        batch_idx: int,
    ) -> Tensor:
        """Cross-entropy loss for one (waveforms, targets) batch."""
        waveforms, targets = batch
        waveforms = waveforms.to(self.device)
        targets = targets.to(self.device)
        mel_spec = self.mel_spectrogramer(waveforms)
        # Swap the last two axes of the spectrogram before encoding.
        # NOTE(review): torch.log of a spectrogram containing zeros yields
        # -inf (consider log(mel + eps)); also the GRU is not batch_first --
        # confirm this layout is what it expects.
        transposed_mel_spec = einops.rearrange(mel_spec, 'bs w h -> bs h w')
        predictions = self(torch.log(transposed_mel_spec))
        loss = self.criterion(
            input=predictions,
            target=targets,
        )
        return loss

    def training_step_end(self):
        pass

    def training_epoch_end(self):
        print("Training epoch is over!")

    def validation_step(self, batch, batch_idx):
        # Validation is not implemented yet.
        pass

    def validation_step_end(self):
        pass

    def validation_epoch_end(self):
        print("Validation epoch is over!")

    def configure_optimizers(self) -> Optimizer:
        """Build an Adam optimizer over all module parameters."""
        optimizer = Adam(
            params=self.parameters(),
            lr=self.learning_rate,
        )
        return optimizer
| true |
f15de6d5b0efc6f2cae8801fc0d2619373797f6c | Python | jasonrbriggs/python-for-kids | /ch10/arcs.py | UTF-8 | 383 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | from tkinter import *
tk = Tk()
canvas = Canvas(tk, width=400, height=400)
canvas.pack()
# Five stacked arcs sweeping progressively larger angles
# (45, 90, 135, 180 and 359 degrees).
# NOTE(review): the script never calls tk.mainloop() -- confirm whether it
# is meant to be run from an environment that provides the event loop.
canvas.create_arc(10, 10, 200, 80, extent=45, style=ARC)
canvas.create_arc(10, 80, 200, 160, extent=90, style=ARC)
canvas.create_arc(10, 160, 200, 240, extent=135, style=ARC)
canvas.create_arc(10, 240, 200, 320, extent=180, style=ARC)
canvas.create_arc(10, 320, 200, 400, extent=359, style=ARC)
5d25cb182c9673be684ea14c463e7d7451fd5ab6 | Python | kquark/QA-on-ElasticSearch | /QA/build_dict.py | UTF-8 | 2,285 | 2.671875 | 3 | [
"MIT"
] | permissive | import ahocorasick
import _pickle as cPickle
from collections import defaultdict
entity_list_file = 'all_entity.txt' # all entity names, one per line
entity_out_path = 'ent_ac.pkl'
attr_list_file = 'attr_mapping.txt' # attribute synonyms
attr_out_path = 'attr_ac.pkl'
val_list_file = 'Person_val.txt' # attribute value -> attribute
def dump_ac_entity_dict(list_file, out_path):
    """Build an Aho-Corasick automaton over all entity names (one per line
    in `list_file`) and pickle it to `out_path`.

    Each word's payload is (line index, surface form) so a match can be
    traced back to its source line.
    """
    A = ahocorasick.Automaton()
    with open(list_file, 'r', encoding='utf-8') as f:
        for i, line in enumerate(f):
            word = line.strip()
            A.add_word(word, (i, word))
    A.make_automaton()
    # BUG FIX: the output handle used to be opened inline and never closed.
    with open(out_path, "wb") as out:
        cPickle.dump(A, out)
def dump_ac_attr_dict(attr_mapping_file, out_path):
    """Build an Aho-Corasick automaton over every attribute synonym and
    pickle it to `out_path`.

    Each input line holds space-separated synonyms; every non-empty token is
    added with the payload (line index, token).
    """
    A = ahocorasick.Automaton()
    with open(attr_mapping_file, 'r', encoding='utf-8') as f:
        for i, line in enumerate(f):
            for token in line.strip().split(" "):
                if token != "":
                    A.add_word(token, (i, token))
    A.make_automaton()
    # BUG FIX: the output handle used to be opened inline and never closed.
    with open(out_path, 'wb') as out:
        cPickle.dump(A, out)
def load_ac_dict(out_path):
    """Load a pickled object (the automaton written by dump_ac_*_dict)."""
    # BUG FIX: `with` guarantees the file handle is closed (it used to leak).
    with open(out_path, "rb") as f:
        return cPickle.load(f)
def load_attr_map(attr_mapping_file):
    """Map each attribute synonym to its canonical attribute names.

    Every line lists a canonical attribute followed by its synonyms; each
    non-empty token on the line (including the canonical one) maps to a list
    containing that line's first token. A token appearing on several lines
    accumulates several canonical names.
    """
    mapping = defaultdict(list)
    # `with` closes the handle even on error; iterating the file directly
    # avoids loading the whole file at once like readlines() did.
    with open(attr_mapping_file, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split(" ")
            for token in parts:
                if token != '':
                    mapping[token].append(parts[0])
    return mapping
def load_entity_dict(entity_file):
    """Load entity names (one per line) into a dict {name: 1} for O(1)
    membership checks."""
    ents = {}
    # BUG FIX: `with` closes the handle even on error; iterate lazily
    # instead of readlines().
    with open(entity_file, 'r', encoding='utf-8') as f:
        for line in f:
            ents[line.strip()] = 1
    return ents
def load_val_dict(val_file):
    """Load an attribute-value -> attribute mapping.

    Each line is "value attribute"; malformed lines (fewer than two tokens)
    are skipped, preserving the original best-effort behaviour but catching
    only the IndexError that missing tokens actually raise.
    """
    val_attr_map = {}
    with open(val_file, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                parts = line.strip().split(" ")
                val_attr_map[parts[0]] = parts[1]
            except IndexError:
                pass
    return val_attr_map
if __name__ == '__main__':
    # One-off builders / smoke test; uncomment to regenerate the pickles.
    # dump_ac_attr_dict(attr_list_file, attr_out_path)
    # dump_ac_entity_dict(entity_list_file, entity_out_path)
    # load_val_dict(val_list_file)
    print(load_attr_map(attr_list_file))
65d05536388278543f4407026091a92144e3ecca | Python | bergercookie/albert-plugins | /github.py | UTF-8 | 1,573 | 2.609375 | 3 | [] | no_license | """
Search GitHub repos
"""
from albertv0 import *
from os import path
import requests
import json
# albert plugin metadata.
__iid__ = 'PythonInterface/v0.1'
__prettyname__ = 'GitHub Repos'
__version__ = '1.0'
__trigger__ = 'gh '
__author__ = 'Angelo Gazzola'
__dependencies__ = []
__icon__ = path.dirname(__file__) + '/icons/GitHub.png'
# Browser-like User-Agent sent with every GitHub API request.
REQUEST_HEADERS = {
    'User-Agent': (
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)'
        ' Chrome/62.0.3202.62 Safari/537.36'
    )
}
# Shared HTTP session; trust_env=False ignores proxy/netrc environment config.
session = requests.Session()
session.trust_env = False
def to_item(repo):
    """Turn one GitHub repository payload into an albert result Item."""
    blurb = repo["description"]
    # Truncate long descriptions for the subtext line (None passes through).
    if blurb and len(blurb) > 40:
        blurb = blurb[:40] + "..."
    subtext = "{} ({} issues - {} forks)".format(
        blurb, repo["open_issues"], repo["forks_count"])
    actions = [
        UrlAction('View on Github', repo['html_url']),
        ClipAction('Copy clone url', repo['clone_url']),
    ]
    return Item(
        id=str(repo['id']),
        text=repo['full_name'],
        icon=__icon__,
        subtext=subtext,
        actions=actions,
    )
def search(query):
    """Query the GitHub repository search API and return albert Items,
    sorted by star count; an empty list when nothing matched.
    """
    response = session.get(
        "https://api.github.com/search/repositories",
        headers=REQUEST_HEADERS,
        params={
            "q": query,
        }
    )
    # FIX: parse the response body once instead of calling .json() twice.
    payload = response.json()
    items = payload.get('items')
    if items:
        # NOTE(review): ascending star count puts the most-starred repo
        # last; reverse=True may have been intended -- kept as-is to
        # preserve the existing ordering.
        repos = sorted(items, key=(lambda el: int(el["stargazers_count"])))
        return [to_item(repo) for repo in repos]
    return []
def handleQuery(query):
    """albert hook: search once the trigger is active with a non-empty
    argument; otherwise show the placeholder item."""
    if not query.isTriggered or len(query.string) == 0:
        return [Item(icon=__icon__, text='GitHub repos')]
    return search(query.string)
| true |
f61ae0da8cbe848d21b9ae2358a3134ec9b2061a | Python | OndreWilliams/team-stella | /app/api/review_routes.py | UTF-8 | 3,307 | 2.578125 | 3 | [] | no_license | from flask import Blueprint, request, Response
from app.models import Product, Review, db
from app.forms import ReviewForm
from flask_login import current_user
review_routes = Blueprint("reviews", __name__)
def validation_errors_to_error_messages(validation_errors):
    """
    Simple function that turns the WTForms validation errors into a simple list
    """
    return [
        f"{field} : {message}"
        for field, messages in validation_errors.items()
        for message in messages
    ]
# /api/reviews
@review_routes.route('/')
def get_all_reviews():
    """GET /api/reviews/ -- return every review as a JSON list."""
    reviews = Review.query.all()
    return {"reviews": [review.to_dict() for review in reviews]}
@review_routes.route('', methods=['POST'])
def add_new_review():
    """POST /api/reviews -- create a review for the logged-in user."""
    form = ReviewForm() #meta={'csrf': False})
    form['csrf_token'].data = request.cookies['csrf_token']
    if current_user.is_authenticated:
        userId = current_user.to_dict()
        print(userId)  # NOTE(review): leftover debug print
        # The owner is taken from the session, never from the client payload.
        form['userId'].data = userId['id']
        if form.validate_on_submit():
            review = Review(
                userId=form.data['userId'],
                productId=form.data['productId'],
                rating=form.data['rating'],
                review=form.data['review']
            )
            db.session.add(review)
            db.session.commit()
            return review.to_dict()
        return {'errors': validation_errors_to_error_messages(form.errors)}, 401
    # NOTE(review): unlike the sibling handlers this branch returns no status
    # code (defaults to 200) -- confirm whether 401 was intended.
    return {'errors': ['Unauthorized']}
@review_routes.route('/<id>')
def get_one_review(id):
    """GET /api/reviews/<id> -- fetch one review by primary key."""
    review = Review.query.get(id)
    return {"reviews": review.to_dict()}
@review_routes.route('/<id>', methods=['PUT'])
def modify_review(id):
    """PUT /api/reviews/<id> -- update a review owned by the current user."""
    form = ReviewForm() #meta={'csrf': False})
    form['csrf_token'].data = request.cookies['csrf_token']
    if current_user.is_authenticated:
        print("enters authenticated")  # NOTE(review): leftover debug print
        review = Review.query.get(id)
        review_user = str(review.user_id)
        # Only the review's owner may edit it.
        if current_user.get_id() == review_user:
            print("enters user = users")  # NOTE(review): leftover debug print
            # Owner and product are fixed server-side, not taken from the client.
            form['userId'].data = review_user
            form['productId'].data = review.product_id
            if form.validate_on_submit():
                print("enters form validated")  # NOTE(review): leftover debug print
                review.review = form.data['review']
                review.rating = form.data['rating']
                db.session.add(review)
                db.session.commit()
                return review.to_dict()
            return {'errors': validation_errors_to_error_messages(form.errors)}, 401
        return {'errors': ['Unauthorized']}, 403
    return Response("You must be logged in", 401)
@review_routes.route('/<id>', methods=['DELETE'])
def delete_review(id):
    """DELETE /api/reviews/<id> -- remove a review owned by the current user."""
    if current_user.is_authenticated:
        review = Review.query.get(id)
        review_user = str(review.user_id)
        # Only the owner may delete; the deleted review is echoed back.
        if current_user.get_id() == review_user:
            db.session.delete(review)
            db.session.commit()
            return review.to_dict()
        return Response("User is not authorized to Delete this review", 401)
        # print(f"Review user is {review_user}")
        # print(f"Current user is {current_user.get_id()}")
        # print(review_user==current_user.get_id())
    return Response("You must be logged in", 401)
| true |
a583a3711df131f074b8d1991dc2a7f7aad60a9f | Python | slavc/test | /uicc/uicc.py | UTF-8 | 2,173 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python
import serial # PySerial
import getopt
import sys
# Single-bit masks, numbered 1 (LSB) through 8 (MSB), as used by the ATR
# byte parsing in UICC._is_TA2_present below.
BIT_1 = 1 << 0
BIT_2 = 1 << 1
BIT_3 = 1 << 2
BIT_4 = 1 << 3
BIT_5 = 1 << 4
BIT_6 = 1 << 5
BIT_7 = 1 << 6
BIT_8 = 1 << 7
class UICC:
    """Minimal driver for a UICC (SIM card) behind a USB serial adapter.

    Construction opens the serial port, reads the card's Answer-to-Reset
    (ATR) and negotiates the default protocol via PPS.
    """
    def __init__(self, path='/dev/ttyUSB0'):
        # 9600 baud, 1 s read timeout, all flow control disabled.
        self._s = serial.Serial(path, 9600, timeout=1, rtscts=0, dsrdtr=0, xonxoff=0)
        self._s.rts = False
        # Toggle DTR low then high -- presumably resets/powers the adapter
        # so the card emits its ATR; confirm with the adapter's docs.
        self._s.dtr = False
        self._s.dtr = True
        self._do_ATR()
        self._do_PPS()

    def _do_ATR(self):
        # Read the Answer-to-Reset: the TS byte plus up to 32 further bytes.
        atr = self._s.read(33) # TS + up to 32 bytes
        if len(atr) == 0:
            raise Exception("failed to read UICC's Answer-to-Reset")
        print 'ATR: %s' % atr.encode("hex")
        self._atr = [ord(x) for x in atr]
        # Only the direct convention (TS == 0x3b) is handled.
        if self._atr[0] != 0x3b:
            raise Exception("FIXME: can't deal with indirect convention")

    def _do_PPS(self):
        # Only the default mode is supported; TA2 would force a specific mode.
        if self._is_TA2_present():
            raise Exception("FIXME: TA2 present, can't use default mode")
        pps = "\xff\x00\xff"
        self._s.write(pps)
        data = self._s.read(32)
        print "PPS response: %s" % data.encode("hex")
        #if data != pps:
        #    raise Exception("PPS failed, got response %s" % data.encode("hex"))

    def _is_TA2_present(self):
        # Presumably following ISO 7816 ATR layout: bit 8 of the byte after
        # TS flags the presence of a TD byte -- confirm against the spec.
        if not (self._atr[1] & BIT_8):
            return False
        # Count the interface bytes announced before TD to find its offset.
        td_pos = 0
        for bit in (BIT_5, BIT_6, BIT_7, BIT_8):
            if self._atr[1] & bit:
                td_pos += 1
        # Bit 5 of that byte advertises TA2.
        if self._atr[td_pos+1] & BIT_5:
            return True
        return False

    def STATUS(self):
        # Send the raw STATUS command APDU and return the card's response.
        self._s.write("\x80\xf2\x00\x00\x00")
        return self._s.read(0xfe)
def _usage():
    # Printed for -h and for any missing/unknown arguments.
    print "usage: %s </path/to/device>" % sys.argv[0]
    print ""
    print "Driver program for a USB UICC (GSM/LTE SIM card) adapter."
if __name__ == '__main__':
    # Only -h is recognized; anything else falls through to usage + exit(1).
    opts, args = getopt.getopt(sys.argv[1:], "h")
    for opt in opts:
        if opt[0] == '-h':
            _usage()
            sys.exit(0)
        else:
            _usage()
            sys.exit(1)
    # Exactly one positional argument is required: the serial device path.
    if len(args) == 0:
        _usage()
        sys.exit(1)
    path = args[0]
    uicc = UICC(path)
    # Issue a STATUS command and dump the raw response as hex.
    data = uicc.STATUS()
    print data.encode("hex")
| true |
9fcc3c9cba55670bf1d122e8a830447d8af679c0 | Python | nibao/webtest | /Python/gitbook/test6-2.py | GB18030 | 406 | 3.453125 | 3 | [] | no_license | #ѯһַǷһַһ&&ʶPythonؼ
import keyword
str1=raw_input('please input your string:')
str2='fdfdhabcshdshjkabcshdjsj'
length=len(str2)+1
result=str2.count(str1,0,length)
if not keyword.iskeyword(str1):
if result:
print 'Ŷ'
else:
print 'Ŷ'
else:
print 'Ǹ'+str1+'ϵͳؼ'
| true |
fe9c9d61082e3e5bb6907e5ef3d4c1d844376964 | Python | Dyrits/COMPUTER-SCIENCE-CAREER-PATH | /01 - Introduction to Programming/Create Purchasing Information and Receipts for Lovely Loveseats/script.py | UTF-8 | 1,007 | 3.125 | 3 | [] | no_license | lovely_loveseat_description = "Lovely Loveseat. Tufted polyester blend on wood. 32 inches high x 40 inches wide x 30 inches deep. Red or white."
# Catalog prices (the loveseat description is defined just above).
lovely_loveseat_price = 254.0
stylish_settee_description = "Stylish Settee. Faux leather on birch. 29.50 inches high x 54.75 inches wide x 28 inches deep. Black."
stylish_settee_price = 180.5
luxurious_lamp_description = "Luxurious Lamp. Glass and iron. 36 inches tall. Brown with cream shade."
luxurious_lamp_price = 52.15
sales_tax = 0.088  # 8.8%
# Customer one buys the loveseat and the lamp.
customer_one_total = 0
customer_one_itemization = ""
customer_one_total += lovely_loveseat_price
customer_one_itemization += "\n" + lovely_loveseat_description
customer_one_total += luxurious_lamp_price
customer_one_itemization += "\n" + luxurious_lamp_description
# Apply sales tax and round the final total to cents.
customer_one_tax = customer_one_total * sales_tax
customer_one_total += customer_one_tax
customer_one_total = round(customer_one_total, 2)
print("Customer One Items:" + customer_one_itemization)
print(f"Customer One Total: {customer_one_total}€")
9eeffc3f8af83ccba56a6ce17b198eb6dc7e95bd | Python | samcheck/Scripts | /py3/dedupe.py | UTF-8 | 1,147 | 3.265625 | 3 | [
"MIT"
] | permissive | import os
import sys
import hashSHA1
def find_dupe(folder):
    """Walk `folder` and map each SHA-1 digest to the list of file paths
    that hash to it (lists longer than one are duplicates)."""
    by_hash = {}
    for root, _subdirs, filenames in os.walk(folder):
        for filename in filenames:
            full_path = os.path.join(root, filename)
            digest = hashSHA1.hashSHA1(full_path)
            by_hash.setdefault(digest, []).append(full_path)
    return by_hash
def join_dicts(dict_1, dict_2):
    """Merge dict_2's list values into dict_1 in place.

    Shared keys get the concatenation of both lists; new keys take dict_2's
    list object directly (same aliasing as the original). Returns None.
    """
    for key, values in dict_2.items():
        if key in dict_1:
            dict_1[key] = dict_1[key] + values
        else:
            dict_1[key] = values
def print_dupes(dict_1):
    """Print each group of paths sharing a hash, separated by a rule line;
    print 'No dupes' when every hash maps to a single path."""
    groups = [paths for paths in dict_1.values() if len(paths) > 1]
    if not groups:
        print('No dupes')
        return
    print('Dupes:')
    for paths in groups:
        print('=' * 80)
        for path in paths:
            print(path)
if __name__ == '__main__':
    # Usage: dedupe.py FOLDER [FOLDER ...]; silently does nothing without
    # arguments. Non-existent paths are skipped.
    if len(sys.argv) > 1:
        dupes = {}
        folders = sys.argv[1:]
        for i in folders:
            if os.path.exists(i):
                join_dicts(dupes, find_dupe(i))
        print_dupes(dupes)
| true |
e197676ed461ff6cf89bc81cf1bc3a887263f1e1 | Python | ajayvenkat10/Competitive | /rep_cipher.py | UTF-8 | 215 | 3 | 3 | [] | no_license | n = int(input())
# `n` (read just above) is the length of the encrypted string.
encrypted = input()
count = 1
start = 0
ans = ""
# Walk the input in runs of growing length (1, 2, 3, ...): each run appears
# to encode one character of the message, repeated -- keep the run's first
# character. TODO confirm the cipher's exact spec.
while(start<n):
    end = start + count
    word = encrypted[start:end]
    ans = ans + word[0]
    count = count+1
    start = end
print(ans)
| true |
8481aabf72ff906e21db1c2d84de8a30eb365481 | Python | Sowing/Algo_Trader | /API.py | UTF-8 | 2,131 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python3
import requests
import json
import sqlite3
from datetime import timedelta, date
def daterange(start_date, end_date):
    """Yield each date from start_date up to, but excluding, end_date."""
    total_days = int((end_date - start_date).days)
    for offset in range(total_days):
        yield start_date + timedelta(offset)
def get_currency_data(URL, SOURCE, API_KEY, TYPE, CURRENCY, *args):
    """Fetch exchange-rate data from the apilayer currency API as JSON text.

    Args:
        URL: API base, e.g. 'http://apilayer.net/api/'.
        SOURCE: base currency code (e.g. 'USD').
        API_KEY: apilayer access key.
        TYPE: endpoint name; 'historical' additionally needs args[0] set to
            a 'YYYY-MM-DD' date.
        CURRENCY: list of target currency codes; [] sends no currencies
            filter at all.
        *args: optional date for the 'historical' endpoint.

    Returns:
        The raw response body (JSON text).
    """
    key_param = '?access_key=' + API_KEY
    source_param = '&source=' + SOURCE
    if CURRENCY != []:
        # BUG FIX: this read "'¤cies= '" -- the '&curren' of
        # '&currencies=' had been mangled into the HTML entity for the
        # currency sign, and a stray space corrupted the query string.
        currency_param = '&currencies=' + ','.join(CURRENCY)
    else:
        currency_param = ''
    date_param = ''
    if TYPE == 'historical':
        date_param = '&date=' + args[0]
    # BUG FIX: "'& format = 1'" contained spaces, producing an invalid
    # query parameter; also dropped the no-op "TYPE = TYPE" assignment.
    full_url = URL + TYPE + key_param + date_param + source_param + currency_param + '&format=1'
    #print(full_url)
    return requests.get(full_url).text
def insert_historical_data(SOURCE, CURRENCY, DATE, RATE):
    """Persist one exchange rate into algoforexdb.db.

    Ensures a (source, target) row exists in source_target_table, then adds
    a (st_id, timestamp, price) row to price_table.
    """
    connection = sqlite3.connect('algoforexdb.db')
    try:
        cursor = connection.cursor()
        try:
            cursor.execute(''' INSERT INTO source_target_table (source, target) values (?,?)''', (SOURCE, CURRENCY))
            connection.commit()
        except sqlite3.Error:
            # Pair already registered (e.g. unique-constraint violation):
            # reuse the existing row. FIX: was a bare `except: pass`, which
            # also swallowed KeyboardInterrupt and unrelated bugs.
            pass
        st_id = cursor.execute(''' SELECT st_id from source_target_table where source = ? and target = ? ''', (SOURCE, CURRENCY)).fetchone()[0]
        cursor.execute(''' INSERT INTO price_table(st_id, timestamp, price) values (?, ?, ?) ''', (st_id, DATE, RATE))
        connection.commit()
    finally:
        # FIX: the connection used to be leaked on every call.
        connection.close()
if __name__ == '__main__':
    # Backfill ~2 years of daily USD rates into the local SQLite database.
    USER_ID = 1
    URL = 'http://apilayer.net/api/'
    SOURCE = 'USD'
    CURRENCY = []   # empty -> no currencies filter in the request
    API_KEY = '94edd2a08332c5180f5271466a60d760' #'45d4584351c4a10188d67c228f22b2a9'
    TYPE = 'historical'
    start_date = date(2015, 7, 3)
    end_date = date(2017, 7, 4)
    for single_date in daterange(start_date, end_date):
        DATE = single_date.strftime("%Y-%m-%d")
        print(DATE)
        data = get_currency_data (URL, SOURCE, API_KEY, TYPE, CURRENCY, DATE)
        data = json.loads(data)
        # Quote keys look like 'USDEUR': first 3 chars are the source
        # currency, the rest the target.
        for key, value in data['quotes'].items():
            insert_historical_data(key[:3], key[3:], DATE, value)
            #print(key[:3], key[3:], DATE, value)
        #print(data['quotes'])
    #print(json.dumps(json.loads(data), indent=4, sort_keys=True))
| true |
30ce57c89d4e279d2ce66856b7c3692766505afe | Python | clauden/jsongraph | /graphyaml.py | UTF-8 | 4,751 | 2.96875 | 3 | [] | no_license | from __future__ import print_function
import yaml
import uuid
import argparse
import sys
import networkx as nx
import matplotlib.pyplot as mpl
def trace(*s):
    """Print the concatenated string forms of all arguments when tracing is
    enabled (module-level trace_level > 0)."""
    if trace_level > 0:
        print("".join(str(part) for part in s))
def getlabel(node):
    """Compose a display label from a node's 'type', 'key' and 'value'
    attributes; missing attributes are simply skipped.

    FIX: replaced three bare `except:` blocks (which also swallowed
    KeyboardInterrupt and unrelated bugs) with dict .get() lookups; a
    sentinel keeps the original missing-vs-present-None distinction.
    """
    trace("getlabel: {0}".format(dict(node)))
    missing = object()
    node_type = node.get('type')
    node_key = node.get('key')
    raw_value = node.get('value', missing)
    node_value = None if raw_value is missing else str(raw_value)
    label = ""
    if node_type:
        label = label + "{0} ".format(node_type)
    if node_key:
        label = label + "[{0}]".format(node_key)
    if node_value:
        label = label + node_value
    trace("label", label)
    return label
def dump_graph(g):
    """Debug-print graph g, starting from the root node 0."""
    dump(g, 0)
#
# n is a node...
#
def dump(graph, node):
    """Depth-first debug dump: print `node` and its out-edges, then recurse
    into every edge target (e[1])."""
    print( 'DUMP: ', node, graph.node[node])
    for e in graph.out_edges(node, data=True):
        print( ' EDGE: ', e)
    # Second pass over the same edges to recurse after printing them all.
    for e in graph.out_edges(node, data=True):
        dump(graph, e[1])
#
# Assume that toplevel object is always a dict
# Returns the graph
#
def toplevel_traverse(data):
    """Build a DiGraph for `data`, rooted at node 0, and return it.

    Raises:
        ValueError: if data is None.
    """
    trace("toplevel_traverse({0} [{1}])".format(data, type(data)))
    # failure conditions
    if data is None:
        # BUG FIX: the original did `raise "toplevel object is None"` --
        # raising a plain string is itself a TypeError in Python 3.
        raise ValueError("toplevel object is None")
    # seed the graph with the root node, then hang the traversal under it
    graph = nx.DiGraph()
    graph.add_node(0)
    _root = traverse(graph, data)
    graph.add_edge(0, _root)
    return graph
#
# data is a list, dict, or stringlike
#
def traverse(graph, data, name=''):
    """Recursively add `data` to `graph`; return the id of the node created.

    Dicts become a DICT node with one KEY child per key; lists become a LIST
    node with one ELEMENT child per item; anything else becomes a VALUE
    leaf. Each child subtree is built in a throwaway subgraph and merged in,
    with an edge from the key/element node to the subtree's root.
    """
    trace( "traverse({0}, {1}, {2} [{3}])".format(graph, data, type(data), name))
    # uuid1 keeps node ids unique across all merged subgraphs.
    node_id = str(uuid.uuid1())
    if type(data) is dict:
        graph.add_node(node_id, {'type':'DICT', 'value':name})
        for key in data:
            trace( "traverse key {0}".format(key))
            key_id = "{0}_DICT_{1}".format(node_id, key)
            graph.add_node(key_id, {'type':'KEY', 'key':key})
            graph.add_edge(node_id, key_id)
            trace( "added key node {0}".format(graph.node[key_id]))
            g = nx.DiGraph()
            trace( "about to traverse {0} [{1}]".format(data[key], type(data[key])))
            _root = traverse(g, data[key])
            # Merge the subtree's nodes and edges into the main graph.
            graph.add_nodes_from(g.nodes(data=True))
            graph.add_edges_from(g.edges(data=True))
            # build edge from current root to the new root
            graph.add_edge(key_id, _root)
            trace( "returning from dict: {0}".format(g))
    elif type(data) is list:
        graph.add_node(node_id, {'type':'LIST', 'value':name, 'top':'yes'})
        n = 0
        for element in data:
            list_id = "{0}_LIST_{1}".format(node_id, n)
            graph.add_node(list_id, {'type':'ELEMENT', 'value':n})
            n = n + 1
            graph.add_edge(node_id, list_id)
            g = nx.DiGraph()
            _root = traverse(g, element)
            graph.add_nodes_from(g.nodes(data=True))
            graph.add_edges_from(g.edges(data=True))
            graph.add_edge(list_id, _root)
            trace( "returning from list: {0}".format(g.nodes()))
    else:
        # assume string-like...
        graph.add_node(node_id, {'type':'VALUE', 'value':data})
        trace( "added value node {0}".format(graph.node[node_id]))
    trace( "added node {0}".format(graph.node[node_id]))
    return node_id
#
# main begins
#
# Command line: [input_file] [-o OUTPUT] [-t ...] [-g]; input/output default
# to stdin/stdout, and argparse opens the files itself via FileType.
arg_ns = None
arg_p = argparse.ArgumentParser(description='yaml to dot')
arg_p.add_argument('input_file', nargs='?', type=argparse.FileType('r'),
        default=sys.stdin)
arg_p.add_argument('-o', '--output-file', action='store', type=argparse.FileType('w'),
        default=sys.stdout, required=False)
arg_p.add_argument('-t', '--trace', action='count', required=False)
arg_p.add_argument('-g', '--graph-debug', action='store_true', required=False)
try:
    arg_ns = arg_p.parse_args()
except IOError as ioe:
    print(ioe, file=sys.stderr)
    sys.exit(1)
print("===args")
print(arg_ns.input_file)
print(arg_ns.output_file)
print(arg_ns.trace)
print(arg_ns.graph_debug)
print("===end args")
input_file = arg_ns.input_file
output_file = arg_ns.output_file
# NOTE(review): with action='count' and no -t flag, trace_level is None and
# `trace_level > 0` raises under Python 3 -- confirm the target interpreter
# or give the argument a default of 0.
trace_level = arg_ns.trace
graph_debug = arg_ns.graph_debug
data = None
# assume input_file is already open...
data = yaml.load(input_file)
# with open(input_file, "r") as f:
#     data = yaml.load(f)
# trace( data)
trace(data)
G = toplevel_traverse(data)
if graph_debug:
    dump_graph(G)
# Attach a human-readable 'label' attribute to every node for the dot output.
l = {}
for t in G.nodes(data=True):
    _node = t[0]
    _data = t[1]
    G.node[_node]['label'] = getlabel(_data)
trace( "made labels", l)
dot = str(nx.to_agraph(G))
# assume output_file is already open...
output_file.write(dot)
# with open(output_file, "w") as f:
#     f.write(dot)
# if write_file:
#     with open("out.dot", "w") as f:
#         f.write(dot)
# else:
#     print(dot)
"""
l = {}
for t in G.nodes(data=True):
_node = t[0]
_data = t[1]
G.node[_node]['label'] = getlabel(_data)
trace( "made labels", l)
pos = nx.spring_layout(G)
nx.draw(G, pos)
nx.draw_networkx_labels(G, pos, labels = l)
mpl.show()
"""
| true |
ca4075b45822533c5ba2458f3f4b58fdecd0e8a4 | Python | zkandroid/opencvlearn | /src/img_pixel.py | UTF-8 | 3,135 | 2.78125 | 3 | [] | no_license | #coding;utf-8
import cv2
import numpy as np
def print_pixel_values(img):
    """Print a few pixel values of a BGR image and show it until Esc (27)
    is pressed."""
    print('img[0:100,0:100]',img[0:100,0:100])
    px = img[100,100]
    print('img[100,100] pixel values bgr',px)
    #blue = img[100,100,0]
    blue = img.item(100,100,0)  # .item() is faster than fancy indexing
    print('img[100,100] pixel values of blue',blue)
    green = img[100,100,1]
    print('img[100,100] pixel values of green',green)
    red = img[100,100,2]
    print('img[100,100] pixel values of red',red)
    cv2.imshow('orgimg',img)
    if cv2.waitKey(0) == 27 :
        # BUG FIX: was cv2.destroyWindow(orgimg) -- `orgimg` is an undefined
        # name (NameError); the window-name string was intended.
        cv2.destroyWindow('orgimg')
def change_pixel_values(img):
    """Mutate pixels of a BGR image in place and show each stage until a
    key is pressed (Esc = 27 closes all windows). Modifies `img`."""
    px = img[10,10]
    print('img [10,10] pixel values bgr',px)
    red = img.item(10,10,2)
    print('img[100,100] pixel values of red',red)  # NOTE(review): label says [100,100] but the value read is [10,10]
    # Set pixel (10,10) to (100, 100, 100) channel by channel via itemset.
    img.itemset((10,10,2),100)
    print('img set red 100',img.item(10,10,2))
    img.itemset((10,10,0),100)
    img.itemset((10,10,1),100)
    print('img set bgr 100',img[10,10])
    # Paint a square region in solid colors (BGR order) and show each step.
    img[20:90,20:90] = [0,0,0]
    cv2.imshow('chaimgblack',img)
    img[20:90,20:90] = [255,0,0]
    cv2.imshow('chaimgred',img)  # NOTE(review): [255,0,0] is blue in BGR -- window names look swapped
    img[20:90,20:90] = [0,0,255]
    cv2.imshow('chaimgblue',img)
    # Zero whole channels one by one.
    img[:,:,2] = 0
    cv2.imshow("allred = 0",img)
    img[:,:,1] = 0
    img[:,:,0] = 0
    cv2.imshow("all = 0",img)
    if cv2.waitKey(0) == 27 :
        cv2.destroyAllWindows()
def add_img():
    """Overlay the thresholded bright parts of img2 onto a region of img1
    and display the intermediate masks/results until a key is pressed."""
    # Load two images
    img1 = cv2.imread('/home/ly/opencvtest/opencvlearn/image/back.jpg')
    img1 = cv2.resize(img1,None,fx = 1,fy = 2)
    #img2 = cv2.imread('/home/zk/opencvtest/opencvlearn/image/opencvlogo.png')
    img2 = cv2.imread('/home/ly/opencvtest/opencvlearn/image/mianju.jpg')
    img2 = cv2.resize(img2,None,fx = 0.5,fy = 0.5)
    #print(img2)
    # I want to put logo on top-left corner, So I create a ROI
    rows,cols,channels = img2.shape
    print('img2.shape',img2.shape)
    print('img.size',img2.size)
    # BUG FIX: this printed the *global* img's dtype (img is only defined in
    # the __main__ block); the neighbouring lines clearly inspect img2.
    print('img2.dtype',img2.dtype)
    roi = img1[50:rows+50, 50:cols+50 ]
    # Now create a mask of logo and create its inverse mask also
    img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
    print('img2gray,shape',img2gray.shape)
    #ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    # THRESH_BINARY: gray pixels above the 120 threshold become 255, the
    # rest 0 (translated from the original Chinese comment, which had the
    # direction reversed).
    ret, mask = cv2.threshold(img2gray, 120, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Now black-out the area of logo in ROI
    img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
    # Take only region of logo from logo image.
    img2_fg = cv2.bitwise_and(img2,img2,mask = mask)
    # Put logo in ROI and modify the main image
    dst = cv2.add(img1_bg,img2_fg)
    print('dst.shape',dst.shape)
    img1[50:rows+50, 50:cols+50 ] = dst
    cv2.imshow('res',img1)
    cv2.imshow('img1_bg',img1_bg)
    cv2.imshow('img2_fg',img2_fg)
    cv2.imshow('img2',img2)
    #cv2.imshow('dst',dst)
    #cv2.imshow('img2',img2)
    #cv2.imshow('mask',mask)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # `img` is also read as a global by the helper functions above.
    img = cv2.imread("/home/ly/opencvtest/opencvlearn/image/green.jpg")
    # The first two demos are disabled; only the overlay demo runs.
    #print_pixel_values(img)
    #change_pixel_values(img)
    add_img()
| true |
ae4b7071f044fa8ec39799f66c8d57b8a18a13d9 | Python | NicoBrun/cs433-machine-learning | /Project 1/model_4jets.py | UTF-8 | 10,697 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from help_functions import calculate_loss,standardize, logistic_regression,reg_logistic_regression
from proj1_helpers import load_csv_data,load_test_csv,predict_labels,create_csv_submission
# ---- experiment configuration ----
data_path = "train.csv"  # training-set CSV location
name_error_image = "valid_train_error_with_thresh.png"  # filename for the error plot
seed = 1  # RNG seed (unused here -- split_data has its own default; TODO confirm)
lambda_ = 0.001  # regularisation strength (unused in this script; presumably for reg_logistic_regression)
gamma = 0.00001  # gradient-descent learning rate
max_iter = 30001  # number of gradient-descent iterations
iter_step = 200 #to plot validation and training error every `iter_step` iterations
""" returns the columns according to what operations have
to be done on them in order to get the best model. operations
depend on the jet """
def get_columns(i):
col_to_delete = [22] # almost constants values
col_log = [0, 1, 2, 3, 4, 5, 8, 9, 10, 13, 16, 19, 21, 23, 26, 29]
col_sqrt = [0, 13, 16, 21, 23, 26, 29]
col_threshold = [11, 12]
col_nothing_max = [6, 14, 17, 24, 27]
col_nothing_norm = [7]
col_distance = [(15,18),(20,25),(18,28),(14,17),(15,25),(15,28),(18,20),(18,25),(18,28),(20,28)]
col_pow_2 = [3]
col_pow_3 = [19]
col_pow_5 = []
if (i == 0):
col_to_delete = [4, 5, 6, 12, 22, 23, 24, 25, 26, 27, 28, 29]
col_log = [0, 1, 2, 3, 8, 9, 10, 13, 16, 19, 21]
col_sqrt = [0, 13, 16, 21]
col_threshold = [11]
col_nothing_max = [14, 17]
col_nothing_norm = [7]
col_distance = [(15,18),(14,17),(18,20)]
col_pow_2 = []
col_pow_3 = []
col_pow_5 = []
#20
elif (i == 1):
col_to_delete = [4, 5, 6, 12, 22, 26, 27, 28]
col_log = [0, 1, 2, 3, 8, 9, 10, 13, 16, 19, 21, 23, 29]
col_sqrt = [0, 13, 16, 21, 23, 29]
col_threshold = [11]
col_nothing_max = [14, 17, 24]
col_nothing_norm = [7]
col_distance = [(15,18),(20,25),(14,17),(15,25),(18,20),(18,25)]
col_pow_2 = [3]
col_pow_3 = [19]
col_pow_5 = []
elif (i == 3):
col_pow_2 = []
col_pow_3 = [8, 19]
col_pow_5 = [3]
return col_to_delete, col_log, col_sqrt, col_threshold, col_nothing_max, col_nothing_norm, col_distance, col_pow_2, col_pow_3, col_pow_5
""" process each column according to its distribution
to transform it into a normal distribution and then
standardize it to have nice ranges """
def data_processing(data_to_process, jet, train = False, means = 0, stds = 0 ):
data_processed = data_to_process
col_to_delete, col_log, col_sqrt, col_threshold, col_nothing_max, col_nothing_norm, col_distance, col_pow_2, col_pow_3, col_pow_5 = get_columns(jet)
#set first column values to mean where it was -999
first_col = data_processed[:, 0]
flag_col = np.zeros((len(first_col), 1))
pos_value = first_col[first_col > 0]
flag_col[first_col > 0] = 1
first_col[first_col < 0] = np.mean(pos_value)
first_col = np.reshape(first_col,(len(first_col),1))
# apply square root to corresponding columns
data_sqrt = data_processed[:,col_sqrt]
data_sqrt[data_sqrt >= 0] = np.sqrt(data_sqrt[data_sqrt >= 0])
#separate corresponding columns according to a treshold of 0
data_thresh = data_processed[:,col_threshold]
data_thresh[:,0][data_thresh[:,0] > 0] = 1
data_thresh[:,0][data_thresh[:,0] <= 0] = -1
if(data_thresh.shape[1] > 1):
data_thresh[:,1][data_thresh[:,1] > 0.5] = 1
data_thresh[:,1][data_thresh[:,1] <= 0.5] = -1
# apply log to corresponding columns
data_log = data_processed[:,col_log]
data_log[data_log > 0] = np.log(data_log[data_log > 0])
data_log[data_log == 0] = np.mean(data_log[data_log > 0])
# divide by max to get in a [0, 1] range
data_max = data_processed[:,col_nothing_max]
max = np.amax(data_max,axis = 0)
data_max /= max
# get the columns where there are no operations to do
data_norm = data_processed[:,col_nothing_norm]
# process features that go together
columns_data_distance = []
for col_distance_index in range(len(col_distance)):
columns_data_distance.append(np.abs(data_processed[:,[col_distance[col_distance_index][0]]]-data_processed[:,[col_distance[col_distance_index][1]]]))
data_distance = np.concatenate(columns_data_distance,axis = 1)
# apply power
data_pow_2 = data_processed[:,col_pow_2]**2
data_pow_3 = data_processed[:,col_pow_3]**3
data_pow_5 = data_processed[:,col_pow_5]**5
# put new columns together
data_to_standardize = np.concatenate((first_col, data_sqrt, data_log, data_norm, data_distance,data_pow_2,data_pow_3,data_pow_5),axis = 1)
# standardize everything to have nice input data
mean = means
std = stds
if(train) :
mean = np.mean(data_to_standardize,axis = 0)
std = np.std(data_to_standardize,axis = 0)
data_to_standardize = standardize(data_to_standardize,mean,std)
data_processed_standardized = np.concatenate((data_to_standardize,data_thresh,data_max,flag_col,np.ones((data_to_process.shape[0], 1))), axis=1)
return data_processed_standardized, mean, std
""" returns an array of 4 datas sets splitted according to their jet """
def separate_from_jet(data):
indexes = [[], [], [], []]
for ind, item in enumerate(data):
indexes[int(item[22])].append(ind)
return indexes
""" separates data into train/test sets according to ratio """
def split_data(ratio, y_binary, input_data, index, seed = 1):
np.random.seed(seed)
#index = np.arange(len(input_data))
split = int(np.ceil(ratio*len(index)))
np.random.shuffle(index)
y_valid = y_binary[index[:split]]
y_train = y_binary[index[split:]]
x_valid = input_data[index[:split]]
x_train = input_data[index[split:]]
return y_valid, y_train, x_valid, x_train
""" returns the predicted y datas according to jet """
def prediction_solutions(test_path, ws, means, stds):
input_test, ids = load_test_csv(test_path)
#features processing
indexes_test = separate_from_jet(input_test)
sols = []
for i in range(0,4):
x_test = input_test[indexes_test[i]]
#process the first column with adding a flag
data_test, _, _ = data_processing(x_test, i, train= False, means= means[i], stds = stds[i])
#prediction
y_test = predict_labels(ws[i], data_test, threshes[i])
y_test[y_test == 0] = -1
sol = np.concatenate((y_test,np.reshape(ids[indexes_test[i]],(len(y_test),1))), axis = 1)
if(i == 0):
sols.append(sol)
else :
print(sols[0].shape)
sols[0] = np.concatenate((sols[0],sol),axis = 0)
return sols
""" add the subplot to figure. Subplot shows the train and validation
error for each jet """
def update_figure(figure, iter_val_errors, iter_train_errors, jet):
subplots =[221,222,223,224]
ax = figure.add_subplot(subplots[jet])
ax.plot(np.linspace(0,max_iter,num = np.ceil(max_iter/iter_step)),iter_val_errors, 'b', label = 'v')
ax.plot(np.linspace(0, max_iter, num=np.ceil(max_iter / iter_step)),iter_train_errors, 'g', label='t')
ax.legend(loc='upper right')
ax.set_title("jet {i}".format(i=jet))
""" determines the threshold for separating output
that gives the smallest error """
def best_threshold(w, data_train, y_train):
thresholds = np.linspace(-5,3,200)
best_thresh = 0
min_error = 1
for thresh in thresholds :
pred_thr = predict_labels(w,data_train,thresh)
err =np.count_nonzero(np.reshape(y_train, (len(y_train), 1)) - pred_thr)/len(y_train)
if(err <= min_error):
min_error = err
best_thresh = thresh
return best_thresh
print("début")
#load data and separate it into 4 according to their jet
y_binary,input_data,ids = load_csv_data(data_path)
indexes = separate_from_jet(input_data)
ws = []
means = []
stds = []
global_error = 0
fig = plt.figure()
st = fig.suptitle("Train and validation error")
threshes = []
for i in range(4):
col_to_delete, col_log, col_sqrt, col_threshold, col_nothing_max, col_nothing_norm, col_distance, col_pow_2, col_pow_3, col_pow_5 = get_columns(i)
#train/test ratio is 0.75/0.25 for now
y_valid, y_train, x_valid, x_train = split_data(0.25, y_binary, input_data, indexes[i])
data_train, mean, std = data_processing(x_train, i, train = True)
means.append(mean)
stds.append(std)
data_valid, _, _ = data_processing(x_valid, i, train = False, means = mean, stds = std)
#logistic regression
w,loss_train,iter_val_errors,iter_train_errors = logistic_regression(y_train,
data_train,
np.zeros((3+len(col_sqrt)+
len(col_log)+
len(col_nothing_max)+
len(col_threshold)+
len(col_nothing_norm)+
len(col_distance)+
len(col_pow_2)+
len(col_pow_3)+
len(col_pow_5) ,1)),
max_iter,
gamma,
data_valid,
y_valid,
iter_step)
ws.append(w)
print("end training")
update_figure(fig, iter_val_errors, iter_train_errors, i)
loss_valid = calculate_loss(y_valid,data_valid,w)
best_thresh = best_threshold(w, data_train, y_train)
threshes.append(best_thresh)
print("for jet {i} the best thresh is {t}".format(i=i,t=best_thresh))
training_error = np.count_nonzero(
predict_labels(w, data_train, best_thresh) - np.reshape(y_train, (len(y_train), 1))) / len(y_train)
pred = predict_labels(w,data_valid,best_thresh)
nnz = np.count_nonzero(np.reshape(y_valid,(len(y_valid),1))-pred)
validation_error = nnz / len(y_valid)
global_error += (len(y_valid)+len(y_train)) * validation_error #est-ce vraiment juste de compter le train ?
print("For jet {i} loss ={l} validation_error = {e} and training_error = {t}".format(i=i, l = loss_valid, e = validation_error,t=training_error))
global_error /= len(y_binary)
print("global error is {e}".format(e = global_error))
fig.tight_layout()
st.set_y(0.95)
fig.subplots_adjust(top = 0.85)
fig.savefig(name_error_image)
sols = prediction_solutions("test.csv", ws, means, std)
create_csv_submission(sols[0][:,1],sols[0][:,0],"4_models_with_thresh.csv")
| true |
6cdb2b1cd0a16779b73a6b48bca1636d93961a0e | Python | ijon9/SoftDev2 | /16_listcomp/listComp.py | UTF-8 | 1,120 | 3.46875 | 3 | [] | no_license | #Isaac Jon
#SoftDev2 pd7
#K16 -- Do You Even List?
#2019-04-12
# List Comprehension format
# [expression for expression if <statements>]
def pwChecker(pw):
    """Return True when pw contains at least one uppercase letter, one
    lowercase letter and one digit; False otherwise."""
    has_upper = any(c.isupper() for c in pw)
    has_lower = any(c.islower() for c in pw)
    has_digit = any(c.isdigit() for c in pw)
    return has_upper and has_lower and has_digit
# Manual smoke tests for pwChecker -- expected output noted on each line.
print(pwChecker("AsadSdfs32")) #True
print(pwChecker("aasdfajlkja2")) #False
print(pwChecker("ASDFAJLK2")) #False
print(pwChecker("AASDFsdfasda")) #False
SPECIAL_CHARS = ".?!&#,;:-_*"
def pwStrength(pw):
    """Score a password out of 10: start at 10, subtract 3 if it has no
    special character, and 2 each for a missing uppercase letter, lowercase
    letter, or digit (minimum possible score: 1)."""
    score = 10
    if not any(c in SPECIAL_CHARS for c in pw):
        score -= 3
    if not any(c.isupper() for c in pw):
        score -= 2
    if not any(c.islower() for c in pw):
        score -= 2
    if not any(c.isdigit() for c in pw):
        score -= 2
    return score
# Manual smoke tests for pwStrength -- expected score noted on each line.
print(pwStrength("Ab3.")) #10
print(pwStrength("Ab3")) #7
print(pwStrength("Ab")) #5
print(pwStrength("A")) #3
print(pwStrength("")) #1
| true |
c468cf97bb4deb37d1e064f9bdd570c93ed08126 | Python | danielkocher/advanced-image-processing-and-computer-vision-ps | /src/irs.py | UTF-8 | 4,212 | 2.921875 | 3 | [
"MIT"
] | permissive | ################################################################################
# IRS - Image Recognition System
# Advanced Image Processing & Computer Vision class at University of Salzburg.
#
# Author: Daniel Kocher
################################################################################
################################################################################
# Main file (entry point of the project).
################################################################################
from collections import defaultdict
import settings.settings as settings
import scaler.scaler as scaler
import kmeanspp.kmeanspp as kmeanspp
import feature_extraction.feature_extraction as fe
import bow.bow as bow
import scene_recognition.scene_recognition as scene_rec
#
def main ():
  """Run the full IRS pipeline: load attribute/image/vote data, obtain (or
  build) the scaler and k-means++ artefacts, train the per-attribute
  classifiers, then run scene recognition with them."""
  computed_feature_vectors = {} # used to avoid recomputation of SIFT features
  try:
    settings.init()
    settings.check_settings()
    settings.print_settings()
  except NameError as ne:
    print('NameError: {0}'.format(ne))
  # Load the three parallel input files (indices correspond across them).
  attributes = read_file(settings.filepaths['attributes'])
  images = read_file(settings.filepaths['images'])
  votes_tmp = read_file(settings.filepaths['votes'])
  votes = split_entries(votes_tmp, ' ')
  # check if min-max-scaler was already instantiated
  # if so, open and use it
  # otherwise, generate the min-max-scaler pickle file and use it
  try:
    used_scaler = scaler.open_if_exists()
    print('[IRS] Using existing scaler pickle file')
  except IOError as ioe:
    print('[IRS] Scaler pickle file does not exist')
    print('[IRS] Hence it will now be created (may take some time)')
    used_scaler = scaler.create(images, computed_feature_vectors)
  # check if k-means clustering was already done
  # if so, open and use it
  # otherwise, generate k-means clustering pickle file and use it
  try:
    used_kmeanspp = kmeanspp.open_if_exists()
    print('[IRS] Using existing k-means++ pickle file')
  except IOError as ioe:
    print('[IRS] k-means++ pickle file does not exist')
    print('[IRS] Hence this will now be created (may take some time)')
    used_kmeanspp = kmeanspp.create(images, computed_feature_vectors, used_scaler)
  # Map each attribute to the images where it is present / absent.
  ai_dict, aic_dict = map_images_to_attributes(attributes, images, votes)
  # generate splits and train classifiers
  classifiers = bow.learn_and_evaluate(attributes, ai_dict, aic_dict,
    'symmetric', './', computed_feature_vectors, used_scaler, used_kmeanspp
  )
  print('Trained classifiers for {} attributes (10 each; total: {})'.format(
    len(classifiers), get_total_classifier_count(classifiers)
  ))
  # recognize scenes
  scene_rec.learn_and_evaluate(used_scaler, used_kmeanspp, classifiers)
#
def get_total_classifier_count (classifiers):
  """Return the total number of classifiers across all attributes.

  `classifiers` maps attribute name -> list of trained classifiers.
  """
  total_classifier_count = 0
  # Bug fix: .iteritems() exists only on Python 2; .values() works on both
  # Python 2 and 3 (and the attribute key was unused anyway).
  for classifier_list in classifiers.values():
    total_classifier_count += len(classifier_list)
  return total_classifier_count
def read_file (path):
  """Read the file at `path` and return its lines, each stripped of
  leading/trailing whitespace."""
  with open(path, 'r') as f:
    return [line.strip() for line in f]
# Splits each element of a given list into whitespace-separated tokens and
# converts each token to a float.
# Returns a list of lists with float entries (representing the votes).
# NOTE(review): the `delimiter` parameter is silently ignored -- split() with
# no argument splits on any whitespace run.  For the single-space votes file
# this is equivalent, but honour or drop the parameter if the format changes.
def split_entries (l, delimiter):
  l_new = []
  for element in l:
    l_new.append([float(i) for i in element.split()])
  return l_new
def map_images_to_attributes (attributes, images, votes):
  """Map each attribute to the images where it is present / absent.

  Assumes corresponding indices across the three inputs: votes[i][j] is the
  vote score of attribute j for image i.  An attribute counts as present
  when its vote is at least 2/3.

  Returns [present_dict, absent_dict], both defaultdict(list) keyed by
  attribute name with image names as values.
  """
  present = defaultdict(list)
  absent = defaultdict(list)
  threshold = 2.0 / 3.0
  for image_index, image in enumerate(images):
    vote_row = votes[image_index]
    for attribute_index, attribute in enumerate(attributes):
      target = present if vote_row[attribute_index] >= threshold else absent
      target[attribute].append(image)
  return [present, absent]
if __name__ == "__main__":
main()
| true |
44cc5bc40de99e99ddd9a44ad2bcce3b9a565e2a | Python | AntonDeMeester/udacity-numerai | /models/combiner.py | UTF-8 | 3,920 | 3.265625 | 3 | [
"MIT"
] | permissive | # Python imports
from abc import ABC
import logging
from typing import Iterable, Callable, List, Collection
# Data science imports
from pandas import DataFrame, Series
# Local imports
from data_processing.data_loader import DataLoader
# Module-level logger named after this module (configure via the root logger).
LOGGER = logging.getLogger(__name__)
class Combiner(ABC):
    """
    Base class for prediction combiners.

    A combiner takes the true labels and a list of model predictions, and
    returns one weight per prediction (weights summing to 1).
    """

    def __init__(self, score_function: Callable[[DataFrame, DataFrame], float]):
        # Scoring callback (labels, combined_prediction) -> float; higher is better.
        self.score_function = score_function

    def combine(self, lables: DataFrame, predictions: List[DataFrame]) -> List[float]:
        """
        Combines predictions to provide a better aggregate prediction.
        Arguments:
            * lables: The correct data (parameter name keeps the original
              'lables' spelling so existing keyword callers are unaffected)
            * predictions: The list of predictions by the models
        Returns:
            * A list of float with the individual weights. Sum of the weights will be 1
        Raises:
            NotImplementedError: always; subclasses must override this method.
        """
        # Bug fix: this previously *returned* the NotImplemented constant
        # (intended for binary-operator protocols), which callers could
        # silently treat as a normal value.  Raising makes the abstract
        # contract explicit.
        raise NotImplementedError
class NaiveCombiner(Combiner):
    """
    This combines models in a dumb way to optimise multiple datasets.
    """

    def __init__(
        self,
        score_function: Callable[[DataFrame, DataFrame], float],
        number_of_steps: int = 10,
    ):
        """
        Initialises the Naive Combiner.
        Arguments:
            * All arguments for the BaseCombiner
            * number_of_steps: The number of steps to use. Default 10.
              Each prediction's raw weight is a multiple of 1/(steps+1),
              so the grid has (steps+1)**len(predictions) combinations.
        """
        assert number_of_steps >= 1, "Step size must be at least 1"
        super().__init__(score_function)
        self.number_of_steps = number_of_steps

    def combine(self, labels: DataFrame, predictions: List[DataFrame]) -> List[float]:
        """
        Combines a number of output predictions to provide a weighted output.
        This is a naive combiner that exhaustively grid-searches weight
        combinations (0 .. step per prediction) and keeps the best-scoring one.
        Arguments:
            * labels: The correct data
            * predictions: The list of predictions by the models
        Returns:
            * A list of float with the individual weights. Sum of the weights will be 1
        """
        LOGGER.info("Starting to combine")
        number_of_predictions = len(predictions)
        # Exhaustive grid: every base-(steps+1) number encodes one weight vector.
        total_number_of_steps = (self.number_of_steps + 1) ** (number_of_predictions)
        indexes = predictions[0].index
        columns = predictions[0].columns
        best_score: float = -1
        best_weights: List[float] = []
        best_Y: Series = None
        for i in range(1, total_number_of_steps + 1):
            weights = self._convert_number_to_weights(
                i, self.number_of_steps, number_of_predictions
            )
            # Weighted sum of all predictions for this candidate weight vector.
            Y_attempt = DataFrame(0, index=indexes, columns=columns, dtype="float64")
            for j, test in enumerate(predictions):
                Y_attempt += test * weights[j]
            score = self.score_function(labels, Y_attempt)
            # Strict '>' keeps the first-seen weight vector among equal scores.
            if score > best_score:
                best_score = score
                best_weights = weights
                best_Y = Y_attempt
                LOGGER.info(
                    f"Got a new best score for a combination: {best_score} with weights {best_weights}"
                )
        LOGGER.info(
            f"Ended the combination job: score {best_score} with weights {best_weights}"
        )
        return best_weights

    def _convert_number_to_weights(
        self, index: int, steps_per_prediction: int, number_of_predictions: int
    ):
        """Decode `index` as a base-(steps_per_prediction+1) number into one
        raw weight digit per prediction, then normalize the weights so they
        sum to 1 (unless every digit is zero)."""
        weights = []
        new_index = index
        for i in range(number_of_predictions):
            new_weight = (new_index % (steps_per_prediction + 1)) / (
                steps_per_prediction + 1
            )
            weights.append(new_weight)
            new_index = new_index // (steps_per_prediction + 1)
        total_weight = sum(weight for weight in weights)
        if total_weight != 0:
            weights = [weight / total_weight for weight in weights]
        return weights
| true |
344ec8651db90588851468dfc7438bf5944a0584 | Python | rbricheno/rassh | /rassh/api/send_commands.py | UTF-8 | 3,599 | 2.796875 | 3 | [
"MIT"
] | permissive | import time
import threading
import requests
import copy
from rassh.api.nonblocking_put_request import NonBlockingPutRequest
from rassh.config.config import Config
from rassh.datatypes import Grammar
from rassh.datatypes.well_formed_command import WellFormedCommand
class SendCommands(object):
    """This is the class you should import into your applications if you want to make requests to the rassh API in a
    standard way, without having to worry about making HTTP requests yourself. Subclasses may hook in here to do
    useful things like update your local database, e.g. to note that you are awaiting feedback from a request.
    Typically you will want to send a batch of commands all at once when putting configuration. By default, you can
    send these when instantiating a SendCommands and passing a list of request dictionaries as request_dict_list=[...].
    Alternatively, if you only want to run one command at a time, instantiate a SendCommands with no arguments, and
    call api_command to send each command individually."""
    def __init__(self, put_request_dict_list=None):
        """Set up grammar, config and base URL; when a batch of PUT request
        dicts is supplied, start a daemon thread that retries them until all
        have been delivered."""
        self.grammar = self._my_grammar()
        config_instance = Config()
        self.config = config_instance.data
        self.__outstanding_api_put_commands = put_request_dict_list
        self.url_base = "http://" + self.config['api_host'] + ":" + str(self.config['api_port']) + "/"
        if put_request_dict_list:
            thread = threading.Thread(target=self.run, args=())
            thread.daemon = True
            thread.start()
    def _my_grammar(self):
        """Return the grammar used to validate commands; subclasses override."""
        # This default Grammar is empty!
        return Grammar()
    def run(self):
        """Background worker: keep retrying every outstanding PUT (blocking
        mode), sleeping 120 s between passes, until all have succeeded."""
        outstanding_request_dict = {}
        i = 0
        for api_command_dict in self.__outstanding_api_put_commands:
            outstanding_request_dict[i] = api_command_dict
            i += 1
        # Keep trying to send request until this dictionary is empty.
        while True:
            # Iterate over a deep copy so successful entries can be removed
            # from the live dict mid-loop.
            old_outstanding_request_dict = copy.deepcopy(outstanding_request_dict)
            for key, api_command_dict in old_outstanding_request_dict.items():
                if self.api_put_command(api_command_dict, True):
                    outstanding_request_dict.pop(key, None)
            if not outstanding_request_dict:
                break
            time.sleep(120)
    def api_get_command(self, request_dict):
        """Issue a GET for the well-formed command built from request_dict.
        Returns the response body on HTTP 200, otherwise None."""
        cmd = WellFormedCommand(self.grammar, request_dict=request_dict)
        send_url = self.url_base + cmd.url
        if cmd.command_name:
            try:
                response = requests.get(send_url, data=cmd.payload)
                if response.status_code == 200:
                    return response.content
            except (requests.exceptions.HTTPError, requests.exceptions.Timeout):
                return None
        return None
    def api_put_command(self, request_dict, blocking):
        """Issue a PUT for the well-formed command built from request_dict.
        Returns True when the send succeeded (always True in non-blocking
        mode), False for malformed commands or send failures."""
        cmd = WellFormedCommand(self.grammar, request_dict=request_dict)
        send_url = self.url_base + cmd.url
        if cmd.command_name:
            try:
                if self._send_put_command(send_url, cmd.payload, blocking):
                    return True
            except KeyError:
                pass
        return False
    def _send_put_command(self, url, payload, blocking):
        """Send the PUT either synchronously (reporting success/failure) or
        via a fire-and-forget NonBlockingPutRequest (always reports True)."""
        if blocking:
            try:
                requests.put(url, data=payload)
            except (requests.exceptions.HTTPError, requests.exceptions.Timeout):
                return False
            return True
        else:
            NonBlockingPutRequest(url, payload)
            return True
| true |
85653e3d0a5e30e6a1716d196de9429ab7a6a456 | Python | didierrevelo/holbertonschool-higher_level_programming-1 | /0x07-python-test_driven_development/2-matrix_divided.py | UTF-8 | 1,417 | 3.625 | 4 | [] | no_license | #!/usr/bin/python3
""" matrix divide method """
def matrix_divided(matrix, div):
    """Divide every element of a matrix by div, rounded to 2 decimal places.

    Args:
        matrix (list): a non-empty list of equally-sized, non-empty lists
            of integers/floats
        div (int or float): the non-zero divisor

    Raises:
        TypeError: if matrix is not a list of lists of integers/floats,
            if the rows differ in size, or if div is not a number
        ZeroDivisionError: if div equals 0

    Returns:
        list: a new matrix where each element is round(element / div, 2)
    """
    Err = {
        1: "matrix must be a matrix (list of lists) of integers/floats",
        2: "Each row of the matrix must have the same size"
    }
    if not isinstance(matrix, list):
        raise TypeError(Err[1])
    if len(matrix) == 0:
        raise TypeError(Err[1])
    if div == 0:
        raise ZeroDivisionError("division by zero")
    if not isinstance(div, int) and not isinstance(div, float):
        raise TypeError("div must be a number")
    for row in matrix:
        if not isinstance(row, list):
            raise TypeError(Err[1])
        if len(row) == 0:
            raise TypeError(Err[1])
        # Bug fix: the original compared lengths with `is not`, which tests
        # object identity.  That only "works" for small interned ints and
        # silently misbehaves for rows longer than 256 elements; `!=`
        # compares the values.
        if len(matrix[0]) != len(row):
            raise TypeError(Err[2])
        for num in row:
            if not isinstance(num, int) and not isinstance(num, float):
                raise TypeError(Err[1])
    return [[round(num / div, 2) for num in row] for row in matrix]
| true |
66c0a94681bba6fe0a474f5e92778564f0890fb4 | Python | cessor/gameoflife | /config.py | UTF-8 | 2,820 | 2.90625 | 3 | [
"MIT"
] | permissive | from collections import namedtuple
# Lightweight (x, y) pair for a screen resolution.
Resolution = namedtuple('Resolution', ['x', 'y'])

class Resolutions(object):
    """Whitelist of supported window resolutions with a validating parser."""
    resolutions = [
        (1920, 1200),
        (1920, 1080),
        (1680, 1050),
        (1440, 900),
        (1360, 768),
        (1280, 800),
        (1024, 640)
    ]
    @classmethod
    def parse(cls, x, y):
        """Return Resolution(x, y) if (x, y) is supported, else raise an
        Exception listing the supported resolutions.

        (Idiom fix: the classmethod's first argument is conventionally
        named `cls`, not `self` -- PEP 8.)
        """
        if (x,y) not in cls.resolutions:
            resolutions = ', '.join(['%sx%s' % (a, b) for a,b in cls.resolutions])
            raise Exception('Resolution %s x %s not supported. Available resolutions: %s' % (x,y, resolutions) )
        return Resolution(x, y)
class Color(object):
    """Named RGBA colors (components in [0, 1]) with lookup by name."""
    gray = (0.15, 0.15, 0.13, 1.0)
    black = (0.0, 0.0, 0.0, 1.0)
    white = (1.0, 1.0, 1.0, 1.0)
    red = (1.0, 0.2, 0.0, 1.0)
    orange = (1.0, 0.4, 0.0, 1.0)
    yellow = (1.0, 0.9, 0.0, 1.0)
    light_green = (0.4, 1.0, 0.0, 1.0)
    green = (0.0, 1.0, 0.2, 1.0)
    cyan = (0.0, 1.0, 0.4, 1.0)
    light_blue = (0.0, 0.6, 1.0, 1.0)
    blue = (0.0, 0.2, 1.0, 1.0)
    purple = (0.4, 0.0, 1.0, 1.0)
    pink = (1.0, 0.0, 0.8, 1.0)

    @classmethod
    def __colors(cls):
        # All public class attributes except the lookup helper itself are
        # color names.  (Idiom fix: classmethods take `cls`, not `self`.)
        return [key for key in cls.__dict__.keys() if not key.startswith('_') and key != 'named']

    @classmethod
    def named(cls, name):
        """Return the RGBA tuple for `name`, or raise an Exception listing
        all available color names."""
        if not hasattr(cls, name):
            colors = ', '.join(cls.__colors())
            raise Exception('Unknown color %s. Available colors are: %s' % (name, colors))
        return getattr(cls, name)
def try_parse(value):
    """Parse a config value: int when possible, the booleans for
    'true'/'false' (case-insensitive), otherwise the original string."""
    try:
        return int(value)
    except (ValueError, TypeError):
        pass
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return value
def read_config():
    """Parse config.cfg (KEY = VALUE lines) into a dict, converting values
    with try_parse.  Blank lines and lines without '=' are skipped; all
    spaces inside a line are stripped before splitting."""
    with open('config.cfg', 'r') as cfg_file:
        raw_lines = cfg_file.readlines()
    pairs = [
        raw.strip().replace(' ', '').split('=')
        for raw in raw_lines
        if raw.strip() and '=' in raw
    ]
    return {key: try_parse(value) for key, value in pairs}
# Load config.cfg once at import time and derive the game's display settings.
cfg = read_config()
NUM_CELLS = cfg.get('CELLS', 100)  # grid is NUM_CELLS x NUM_CELLS
RESOLUTION = Resolutions.parse(cfg.get('WINDOW_WIDTH', 1280), cfg.get('WINDOW_HEIGHT', 800))
# Fit the square grid inside the shorter window dimension.
limit = min(RESOLUTION)
PIXEL_PER_CELL = limit / NUM_CELLS
# Center the grid: leftover pixels are split evenly on both sides.
OFFSET_X = (RESOLUTION.x - (NUM_CELLS * PIXEL_PER_CELL)) / 2
OFFSET_Y = (RESOLUTION.y - (NUM_CELLS * PIXEL_PER_CELL)) / 2
SHOW_FULLSCREEN = cfg.get('FULLSCREEN', False)
SHOW_GRID = cfg.get('SHOW_GRID', True)
# Colors resolved from their configured names (RGBA tuples).
BACKGROUND_COLOR = Color.named(cfg.get('BACKGROUND_COLOR', 'black'))
GRID_BACKDROP_COLOR = Color.named(cfg.get('GRID_BACKDROP_COLOR', 'gray'))
GRID_LINE_COLOR = Color.named(cfg.get('GRID_LINE_COLOR', 'black'))
CELL_COLOR = Color.named(cfg.get('CELL_COLOR', 'green'))
CURSOR_COLOR = Color.named(cfg.get('CURSOR_COLOR', 'red'))
019aab971ec44fecef6db3974683961cb85c3a4d | Python | diegojsk/MAP3121-Numerico-EP2-2019 | /teste_2.py | UTF-8 | 1,099 | 2.828125 | 3 | [] | no_license | import numpy as np
from main import *
import matplotlib.pyplot as plt
h = 0.01  # integration step size for Runge-Kutta
ti = 0  # start time of the integration interval
tf = 2  # end time of the integration interval
def X_gab(t):
    """Analytic reference solution of the linear ODE system at time t,
    a combination of e^-t and e^-3t oscillating modes (4 components)."""
    slow = np.exp(-t)
    fast = np.exp(-3*t)
    return np.array([
        slow*np.sin(t) + fast*np.cos(3*t),
        slow*np.cos(t) + fast*np.sin(3*t),
        -slow*np.sin(t) + fast*np.cos(3*t),
        -slow*np.cos(t) + fast*np.sin(3*t)])
if __name__ == "__main__":
np.set_printoptions(precision=3, suppress=True)
A = np.array([[-2, -1, -1, -2],
[1, -2, 2, -1],
[-1, -2, -2, -1],
[2, -1, 1, -2]]).astype(np.double)
X_0 = np.array([1, 1, 1, -1])
def F(t, x):
return np.matmul(A, x)
output, ts = runge_kutta(F, X_0, h, ti, tf)
depuracao(F, X_0, h, ti, tf, X_gab)
fig1, ax1 = plt.subplots()
ax1.plot(ts, output)
ax1.set_title("Resolução pelo método de Runge-Kutta")
output2 = [X_gab(t) for t in ts]
fig2, ax2 = plt.subplots()
ax2.plot(ts, output2)
ax2.set_title("Aplicação da solução analítica")
plt.show()
| true |
391a29cdbeb2202880cee2b77f3d2b9ef371604a | Python | AlexYangLong/Foundations-of-Python | /day016-day020/day019/fileclient.py | UTF-8 | 515 | 2.78125 | 3 | [] | no_license | from socket import socket
def main():
    """Connect to the file server, receive one file (name, length, then the
    raw bytes) and save it to disk under the received filename."""
    client = socket()
    client.connect(('10.7.152.89', 9999))
    print('连接服务器成功......')
    filename = client.recv(1024).decode('utf-8')
    print(filename)
    file_len = int(client.recv(1024).decode('utf-8'))
    print(file_len)
    with open(filename, 'wb') as fw:
        total = 0
        while total < file_len:
            # Bug fix: recv() may return fewer than 1024 bytes (or b'' once
            # the peer closes); count what was actually received instead of
            # unconditionally adding 1024, and stop on EOF.
            chunk = client.recv(1024)
            if not chunk:
                break
            fw.write(chunk)
            total += len(chunk)
    # Release the socket instead of leaking it.
    client.close()
    print('图片接收成功')
# Run the client only when executed as a script.
if __name__ == '__main__':
    main()
| true |
e78313df0c888037801b2b910e036b551d5f5415 | Python | khalop7a/TestPythonNTUCoder | /SoDep.py | UTF-8 | 127 | 3.546875 | 4 | [] | no_license | n = int(input())
# Accumulate the sum of the decimal digits of n (read from stdin above).
kq = 0
while n != 0:
    kq += n % 10
    n = n // 10
# Print YES when the digit sum ends in 9, NO otherwise.
if kq % 10 == 9:
    print("YES")
else:
    print("NO")
32a36574e942cba912ab4aa26b0fc86eb7dc5f8a | Python | wangzheng62/pythonlesson | /list.py | UTF-8 | 533 | 3.515625 | 4 | [] | no_license | #creating list
# Python 2 demo script exercising the common list methods on one list.
number=[0,1,2,3,4]
#append(): adds a single object to the end (tuples/lists are appended whole,
#not flattened)
number.append(5)
print number
number.append((6,7))
print number
l=[8,9,10]
number.append(l)
print number
#remove(): deletes the first element equal to the argument
number.remove((6,7))
print number
number.remove(l)
print number
#insert(n, object): inserts object before index n
number.insert(6,6)
print number
number.insert(8,8)
print number[7]
print number[-3:-1]
#pop(): removes and returns the last element
number.pop()
print number[-3:-1]
#index(): position of the first matching element; `in` tests membership
print number.index(5)
print 5 in number
#reverse(): reverses in place
number.reverse()
print number
#sort(): sorts in place (ascending)
number.sort()
print number
#extend(): appends each element of the argument individually
number.extend(l)
print number
296708bf265085efe61f5f901c601011b3d1c133 | Python | andrewhead/netseq-proto | /proto/kivy/popup/PopupOverApp.py | UTF-8 | 724 | 3.140625 | 3 | [] | no_license | from kivy.app import App
from kivy.uix.popup import *
from kivy.uix.label import *
from kivy.uix.button import *
class PairSeqApp(App):
    """Minimal Kivy app whose only UI is a full-screen, non-dismissable
    popup containing a single close button."""
    def build(self):
        """Create and open the popup; returns 0 as a dummy root widget."""
        close_button = Button(text="Close me!")
        # size_hint=(1, 1) stretches the popup over the whole window;
        # auto_dismiss=False prevents clicks outside it from closing it.
        dialog = Popup(title='Test popup',
                       size_hint=(1, 1),
                       auto_dismiss=False)
        dialog.content = close_button
        close_button.bind(on_press=dialog.dismiss)
        dialog.open()
        return 0
if __name__ == '__main__':
    # App.run() blocks until the window is closed, which keeps the script
    # from exiting immediately.
    PairSeqApp().run()
| true |
4a831589d84e39dbd4eea63f7cfc9c0c4d32cb66 | Python | Islandora-Image-Segmentation/dev-ops | /src/import_helper.py | UTF-8 | 4,173 | 2.578125 | 3 | [] | no_license | import os
import shutil
import sys
import tempfile
from glob import glob
from utils import mods_to_marcxml
class ImportHelper:
def __init__(self, ingest_dir: str = 'data/ingest', download_dir: str = 'data/download'):
self.ingest_dir = ingest_dir
self.download_dir = download_dir
self.papers = []
self.issues = []
self.pages = []
def load_dir(self):
items = (os.listdir(self.download_dir))
self.papers = []
self.issues = []
self.pages = []
for item in items:
col_split = item.split('_')
if len(col_split) > 1:
p_split = col_split[-1].split('-')
if item.startswith('newspapers:'):
self.papers.append(item)
elif len(p_split) > 1:
self.pages.append(item)
else:
self.issues.append(item)
def prep_papers_zip(self):
with tempfile.TemporaryDirectory() as tempDir:
self._save_papers_to_dir(tempDir)
shutil.make_archive(f'{self.ingest_dir}/newspapers', 'zip', tempDir)
def prep_papers_dir(self):
dest_dir = f'{self.ingest_dir}/newspapers'
os.mkdir(dest_dir)
self._save_papers_to_dir(dest_dir)
def _save_papers_to_dir(self, dest_dir):
for paper in self.papers:
shutil.copy(f'{self.download_dir}/{paper}/MODS.xml', f'{dest_dir}/{paper}.xml')
try:
tn_name = glob(f'{self.download_dir}/{paper}/TN.*')[0]
ext = tn_name[tn_name.rfind('.'):]
shutil.copy(tn_name, f'{dest_dir}/{paper}{ext}')
except IndexError:
print(f'Could not find thumbnail for: {paper}', file=sys.stderr)
def prep_papers_marc(self):
if not os.path.exists(f'{self.ingest_dir}/newspapers'):
os.mkdir(f'{self.ingest_dir}/newspapers')
for paper in self.papers:
marc = mods_to_marcxml(f'{self.download_dir}/{paper}/MODS.xml')
with open(f'{self.ingest_dir}/newspapers/{paper}.xml', 'bw') as f:
f.write(marc)
def prep_issues(self, method: str = 'dir'):
for issue_id in self.issues:
paper, issue = tuple(issue_id.split('_'))
dest_dir = f'{self.ingest_dir}/{paper}/{issue}'
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(f'{self.download_dir}/{issue_id}/MODS.xml', dest_dir)
for page_id in self.pages:
paper, li = tuple(page_id.split('_'))
issue, page = tuple(li.split('-'))
dest_dir = f'{self.ingest_dir}/{paper}/{issue}/{page}'
print(dest_dir)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
try:
shutil.copy(f'{self.download_dir}/{page_id}/JP2.jp2', dest_dir)
shutil.copy(f'{self.download_dir}/{page_id}/TN.jpg', dest_dir)
shutil.copy(f'{self.download_dir}/{page_id}/OCR.txt', dest_dir)
shutil.copy(f'{self.download_dir}/{page_id}/HOCR.html', dest_dir)
shutil.copy(f'{self.download_dir}/{page_id}/OBJ.tiff', dest_dir)
except FileNotFoundError:
try:
print(f'Could not find file: {self.download_dir}/{page_id}/OBJ.tiff', file=sys.stderr)
print(f'Trying {self.download_dir}/{page_id}/JP2.jp2', file=sys.stderr)
shutil.copy(f'{self.download_dir}/{page_id}/JP2.jp2', f'{dest_dir}/OBJ.jp2')
except FileNotFoundError:
print(f'Could not find file: Trying {self.download_dir}/{page_id}/JP2.jp2', file=sys.stderr)
if method == 'zip':
paper_names = set([issue_id.split('_')[0] for issue_id in self.issues])
for paper in paper_names:
paper_path = f'{self.ingest_dir}/{paper}'
if os.path.exists(paper_path):
print(f'Zipping: {paper_path}')
shutil.make_archive(paper_path, 'zip', paper_path)
shutil.rmtree(paper_path)
| true |
2c46c438f325a839b02035025f1124b2343486f5 | Python | nmrenyi/CodeDancePedia | /db-demo/ES_Evaluation/es_precision_recall.py | UTF-8 | 3,217 | 2.984375 | 3 | [] | no_license | """
Elastic Search Precision and Recall Evaluation
By RenYi
"""
import os
import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from tqdm import trange
def calc_precision(document, returned_docs, top_k):
    """Precision@top_k for a single search.

    Args:
        document: the single ground-truth (relevant) doc
        returned_docs: docs returned by Elasticsearch, best first
    Returns:
        1/top_k when the relevant doc is among the first top_k hits,
        0 otherwise (including when returned_docs is empty).
    """
    top_hits = returned_docs[:top_k]
    return 1 / top_k if document in top_hits else 0
def calc_recall(document, returned_docs, top_k):
    """Recall@k for a query with a single relevant document.

    CMRC provides exactly one relevant document per question, so recall
    is 1 when it appears among the first ``top_k`` results, else 0.

    Args:
        document: the relevant document (str).
        returned_docs: documents returned by Elasticsearch, best first.
        top_k: number of leading results considered.

    Returns:
        int: 1 on a hit within the cut-off, otherwise 0.
    """
    return 1 if document in returned_docs[:top_k] else 0
def get_precision_recall(search, question_list, document_list, top_k):
    """Query Elasticsearch with every question and score the results.

    Args:
        search: elasticsearch_dsl ``Search`` bound to the target index.
        question_list: question strings.
        document_list: the single relevant document for each question.
        top_k: cut-off used for precision@k / recall@k.

    Returns:
        (precision_list, recall_list): per-question scores, in order.
    """
    precisions = []
    recalls = []
    # trange shows a progress bar over the questions.
    for idx in trange(len(question_list)):
        question = question_list[idx]
        document = document_list[idx]
        response = (search.query("multi_match", query=question,
                                 fields=['title', 'content'])
                    .filter("term", status=0)
                    .execute())
        returned = [hit.content for hit in response]
        precisions.append(calc_precision(document, returned, top_k))
        recalls.append(calc_recall(document, returned, top_k))
    return precisions, recalls
def main():
    """Evaluate Elasticsearch retrieval quality on the CMRC data set.

    Loads the reformatted question/document pairs, computes per-question
    precision@k and recall@k, prints their averages and writes a copy of
    the data frame (with the new score columns) next to the input file.
    """
    client = Elasticsearch(['152.136.231.113:32000'])
    search = Search(using=client, index="mydocument")
    top_k = 1
    df_path = './cmrc_reformatted/cmrc_reformatted.csv'
    # dropna(): drops 2 nan questions and 8 nan titles.
    df = pd.read_csv(df_path, sep='\t', index_col=0).dropna()
    precision_list, recall_list = get_precision_recall(
        search,
        df['question'].tolist(),
        df['paragraph_context'].tolist(),
        top_k)
    print('average precision:', np.mean(precision_list))
    print('average recall: ', np.mean(recall_list))
    df['precision'] = precision_list
    df['recall'] = recall_list
    out_path = os.path.splitext(df_path)[0] + '-precision-recall@' + str(top_k) + '.csv'
    df.to_csv(out_path, sep='\t')
    print('file successfully saved to ', out_path)


if __name__ == '__main__':
    main()
| true |
b9a459531c1022475b77bb8d1ba19aaaa5ed8821 | Python | CarlosGreene/ProgramacionEstructurada | /Unidad 2-Estructuras de Control/Ejercicio19.py | UTF-8 | 1,211 | 4.78125 | 5 | [] | no_license | #Ejercicio 19
# Exercise 19: read three numbers and print the largest of the three.
# (Comments translated to English; original author: Pamela Citlali Canul
# Chacon, team 'about:blank'.)
# Input: three numbers, one per line.  Output: the largest of the three.

def numero_mayor(num1, num2, num3):
    """Return the largest of three numbers.

    Bug fix: the original chained strict ``>`` comparisons, so on ties
    (e.g. 5, 5, 1) neither branch matched and the smallest value was
    printed. ``max`` handles ties correctly.
    """
    return max(num1, num2, num3)


if __name__ == "__main__":
    # Read the three numbers from standard input and print the largest.
    print(numero_mayor(float(input()), float(input()), float(input())))
| true |
1b92a7f89d23decff84aea90b0cc61d8318cb0ee | Python | raymondmar61/pythonraymondmarbooks | /advancedguidepythonprogrammingreadwritefiles.py | UTF-8 | 3,289 | 3.65625 | 4 | [] | no_license | #Advanced Guide To Python 3 Programming by John Hunt Chapter 18 Reading And Writing Files
#Read file
# Open/close basics: name/mode stay readable even after close().
fileobjectvariable = open("temp.txt", "r")
print("file name: " + fileobjectvariable.name) #print file name: temp.txt
print("file mode file is opened: " + fileobjectvariable.mode) #print file mode file is opened: r
fileobjectvariable.close()
print("file closed method returns a boolean:", fileobjectvariable.closed) #print True
print("file mode file is closed: " + fileobjectvariable.mode) #print file mode file is closed: r
# read() returns the whole file as a single string.
fileobjectvariable = open("temp.txt", "r")
fileallinesprinted = fileobjectvariable.read()
print(fileallinesprinted) #print *all the file contents*
fileobjectvariable.close()
#Note that once you have read some text from a file using read(), readline(), or readlines(), then that line is not read again.
# readlines() returns the whole file as a list of lines (newlines kept).
fileobjectvariable = open("temp.txt", "r")
eachlineinfile = fileobjectvariable.readlines()
for x in eachlineinfile:
    print(x, end="") #print *all the file contents*
fileobjectvariable.close()
# A file object is itself an iterator over its lines.
fileobjectvariable = open("temp.txt", "r")
for noreadlinesmethod in fileobjectvariable:
    print(noreadlinesmethod, end="") #print *all the file contents*
fileobjectvariable.close()
# A list comprehension can consume the file iterator directly.
fileobjectvariable = open("temp.txt", "r")
listcomprehension = [noreadlinesmethod.upper() for noreadlinesmethod in fileobjectvariable]
fileobjectvariable.close()
print(listcomprehension) #print ['\n', '200601BLOG.HTML\n', ...]
# 'with' closes the file automatically, even if an exception is raised.
with open("temp.txt", "r") as fileobjectvariable:
    eachline = fileobjectvariable.readlines()
    for x in eachline:
        print(x, end="") #print *all the file contents*
#Write file
# Mode "w" truncates/creates the file; write() does not append newlines.
newfileobjectvariable = open("mynewfile.txt", "w")
newfileobjectvariable.write("Line 1 Hello from Python\n")
newfileobjectvariable.write("Line 2 Working with files is easy\n")
newfileobjectvariable.write("Line 3 It is cool no need for \\n because it's the last line")
newfileobjectvariable.close()
# fileinput chains several files into one line-oriented stream and
# tracks which file / line number is currently being read.
import fileinput
# with fileinput.input(files=("temp.txt", "mynewfile.txt")) as fileobjectvariable:
#     for x in fileobjectvariable:
#         process(x) #return NameError: name 'process' is not defined
with fileinput.input(files=("temp.txt", "mynewfile.txt")) as fileobjectvariable:
    eachline = fileobjectvariable.readline()
    print("filename: " + fileobjectvariable.filename()) #print filename: temp.txt
    print("The first line:", fileobjectvariable.isfirstline()) #print The first line: True
    print("The first line number:", fileobjectvariable.lineno()) #print The first line number: 1
    print("The first file line number:", fileobjectvariable.filelineno()) #print The first file line number: 1
    for x in fileobjectvariable:
        print(x, end="")
'''
*file contexts from temp.txt and mynewfile.txt*
...
Line 1 Hello from Python
Line 2 Working with files is easy
Line 3 It is cool no need for \n because it's the last line
'''
#rename files
import os
os.rename("mynewfile.txt", "new name for mynewfile.txt")
#delete files
import os
os.remove("new name for mynewfile.txt")
| true |
df0729c1f46bf79d348c9cb07cce88428fa7f243 | Python | rajathhalgi/computer-vision | /smoothing.py | UTF-8 | 532 | 3.046875 | 3 | [] | no_license | import cv2 as cv
import numpy as np
# Load the sample image and display progressively different smoothers.
img = cv.imread('photos/cats.jpg')
cv.imshow('Cats', img)
# averaging: plain box filter over a 3x3 neighbourhood
average = cv.blur(img, (3,3), )
cv.imshow('Average blur', average)
# Gaussian blur: weighted average, softer than the box filter
gauss = cv.GaussianBlur(img, (3,3), 0)
cv.imshow('GBLUR', gauss)
# median blur (more effective in removing salt-and-pepper noise)
median = cv.medianBlur(img, 3)
cv.imshow('Median', median)
# bilateral blur (most effective: smooths while preserving edges)
bilateral = cv.bilateralFilter(img, 10, 35, 25)
cv.imshow('bilateral', bilateral)
cv.waitKey(0)
69bc328ab014cc5562e22d2cb4272adbd90f13f5 | Python | zhhehao/Hyixiaohan | /0009/0009.py | UTF-8 | 406 | 2.734375 | 3 | [] | no_license | import logging; logging.basicConfig(level=logging.INFO)
from html.parser import HTMLParser
class _HTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag == 'a':
for link in attrs:
if link[0] == 'href':
logging.info('Found a link: %s' % link[1])
# Parse the local page, logging every hyperlink target found in it.
parser = _HTMLParser()
with open('index.html') as f:
    parser.feed(f.read())
| true |
93b6899064c51ec6c15a0f35c84c8009f50b9b2d | Python | ErikAtKU/TLOZ-TP | /TwilightPrincessWii.py | UTF-8 | 1,494 | 2.625 | 3 | [] | no_license | from PIL import ImageDraw, Image
import glob, os, sys, re
def convert(a):
    """Map a character to the name of its glyph bitmap file.

    The space character and common punctuation get explicit names; any
    other input is stripped of non-letters, and "null" is returned when
    nothing remains.
    """
    special = {
        " ": "space",
        ".": "period",
        ",": "comma",
        ";": "semicolon",
        ":": "colon",
        "!": "exclamation",
    }
    if a in special:
        return special[a]
    letters_only = re.sub("[^a-zA-Z]", '', a)
    return letters_only if letters_only else "null"
def translate(name,text):
    """Render ``text`` with the TP glyph bitmaps and save it as <name>.png.

    '@' in ``text`` separates lines of the message.
    """
    # Glyph bitmaps live next to this script under TP\.
    # NOTE(review): the backslash path is Windows-specific; os.path.join
    # would be portable — confirm the target platform.
    path = sys.path[0]+"\TP\\"
    im = Image.open(path+"space.bmp")
    line = text.split("@")
    # Find the longest line (in characters) to size the canvas.
    length = 0
    for i in line:
        if len(i) > length:
            length = len(i)
    height = len(line)
    # Worst-case canvas: 42px per character, 40px per text line.
    length *= 42
    height *= 40
    diagram = Image.new("RGBA",(length,height),(255,255,255))
    longest = 0
    for i in range(0,len(line)):
        letters = []
        pos = 0
        # Collect the glyph name for every renderable character.
        for j in range(0,len(line[i])):
            temp = convert(line[i][j])
            if(temp != "null"):
                letters.append(temp)
        # Glyphs are pasted in reverse order — presumably because the
        # script is mirrored; confirm against the reference alphabet.
        for j in range(0,len(letters)):
            k = len(letters)-j-1
            im = Image.open(path+letters[k]+".bmp")
            (le,up,ri,bo) = im.getbbox()
            diagram.paste(im,(pos,i*40,pos+ri,(i+1)*40))
            pos+=ri+1
            if(pos > longest):
                longest = pos
    # Trim the canvas to the widest rendered line, then save and display.
    diagram = diagram.crop((0,0,longest-1,len(line)*40))
    diagram.save(path+name+".png")
    diagram.show()
translate("lol","if you can read this, then you are@a massive nerd, and i love you.@long live the twilight princess!")
3f35131f779f2e21da4d07c12d76e7becb5f1be5 | Python | andrewjong/Transfer-Learning-Suite | /utils.py | UTF-8 | 3,721 | 2.671875 | 3 | [] | no_license | # Layers
from keras.layers import Dense, Activation, Flatten, Dropout, Add, BatchNormalization
from keras import backend as K
# Other
import keras
from keras import optimizers
from keras import losses
from keras.optimizers import SGD, Adam
from keras.models import Sequential, Model
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.models import load_model
# Utils
import matplotlib.pyplot as plt
import numpy as np
import argparse
import random, glob
import os, sys, csv
import cv2
import time, datetime
class FixedThenFinetune(keras.callbacks.Callback):
    """
    Don't use this. Need to compile model

    Intended behaviour: train with frozen layers until ``switch_epoch``,
    then unfreeze the whole model and recompile. NOTE(review): it relies
    on the private ``model._compile_metrics`` attribute, which is not a
    stable Keras API — verify against the installed Keras version.
    """
    def __init__(self, switch_epoch):
        """
        switch_epoch: the epoch to unfreeze all model layers
        """
        self.switch_epoch = switch_epoch
        self.switched = False
    def on_epoch_begin(self, epoch, logs):
        # Unfreeze exactly once, the first epoch past switch_epoch.
        if not self.switched and epoch > self.switch_epoch:
            print("Switching from fixed to finetune. Setting all model params as trainable.")
            set_trainable(self.model, True)
            # Recompile so the new trainable flags take effect.
            self.model.compile(self.model.optimizer, self.model.loss, metrics=self.model._compile_metrics)
            self.switched = True
def save_class_list(OUT_DIR, class_list, model_name, dataset_name):
    """Write the class names to '<model>_<dataset>_class_list.txt', one per line.

    Note: ``class_list`` is sorted in place, exactly like the original.
    """
    class_list.sort()
    out_path = os.path.join(OUT_DIR, model_name + "_" + dataset_name + "_class_list.txt")
    with open(out_path, 'w') as target:
        target.write("".join(name + "\n" for name in class_list))
def load_class_list(class_list_file):
    """Read class names back from a class-list file.

    Returns a sorted list of csv rows. Each row is itself a list of
    strings (mirroring the original implementation's return shape).
    """
    with open(class_list_file, 'r') as csvfile:
        rows = [row for row in csv.reader(csvfile)]
    rows.sort()
    return rows
# Get a list of subfolders in the directory
def get_subfolders(directory):
    """Return the directory's entries sorted by name.

    Note: like the original, this lists *all* entries returned by
    os.listdir (files included), not only sub-folders.
    """
    return sorted(os.listdir(directory))
# Get number of files by searching directory recursively
def get_num_files(directory):
    """Count entries by globbing '<subdir>/*' for every directory met
    while recursively walking ``directory``.

    Returns 0 when the directory does not exist.

    NOTE(review): this mirrors the original behaviour exactly — entries
    sitting directly in ``directory`` itself are NOT counted, and the
    glob tallies sub-directory names as well as files.
    """
    if not os.path.exists(directory):
        return 0
    total = 0
    for root, dirs, _files in os.walk(directory):
        total += sum(len(glob.glob(os.path.join(root, d + "/*"))) for d in dirs)
    return total
def set_trainable(model, is_trainable):
    """Set the ``trainable`` flag on every layer of ``model``."""
    for single_layer in model.layers:
        single_layer.trainable = is_trainable
# Add on new FC layers with dropout for fine tuning
def build_finetune_model(base_model, dropout, fc_layers, num_classes, as_fixed_feature_extractor=True, skip_interval=0):
    """Attach a new fully connected head (Dense + Dropout + BatchNorm)
    to ``base_model`` for transfer learning.

    base_model: pre-trained backbone; its output is flattened.
    dropout: dropout rate applied after every new Dense layer.
    fc_layers: widths of the new hidden Dense layers, in order.
    num_classes: size of the final softmax layer.
    as_fixed_feature_extractor: if True, freeze the backbone's weights.
    skip_interval: if non-zero, add a residual Add() every
        ``skip_interval`` hidden layers. NOTE(review): Add() requires the
        two joined tensors to have equal width — confirm the fc_layers
        values are compatible before enabling this.
    """
    if as_fixed_feature_extractor:
        set_trainable(base_model, False)
    x = base_model.output
    x = Flatten()(x)
    for i, fc in enumerate(fc_layers):
        x = Dense(fc, activation='relu')(x) # New FC layer, random init
        x = Dropout(dropout)(x)
        x = BatchNormalization()(x)
        if skip_interval and i % skip_interval == 0:
            if i > 0:
                x = Add()([x, previous])
            previous = x
    predictions = Dense(num_classes, activation='softmax')(x) # New softmax layer
    finetune_model = Model(inputs=base_model.input, outputs=predictions)
    return finetune_model
# Plot the training and validation loss + accuracy
def plot_training(history):
    """Plot training vs validation accuracy from a Keras History object
    and save the figure to 'acc_vs_epochs.png'.

    Bug fix: savefig() is now called *before* show() — pyplot clears the
    current figure once the show() window is closed, so the original
    call order saved an empty image.
    """
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    # Loss curves are extracted but only plotted by the commented block.
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    plt.plot(epochs, acc, 'r.')
    plt.plot(epochs, val_acc, 'r')
    plt.title('Training and validation accuracy')
    # plt.figure()
    # plt.plot(epochs, loss, 'r.')
    # plt.plot(epochs, val_loss, 'r-')
    # plt.title('Training and validation loss')
    plt.savefig('acc_vs_epochs.png')
    plt.show()
| true |
e7a23393ae84b266e959f11c4a0c98bec2bc37bc | Python | fietensen/raytracer | /utilities/engine.py | UTF-8 | 1,944 | 2.984375 | 3 | [
"MIT"
] | permissive | from utilities import image, ray, vector, color
class RenderEngine:
    """Traces one camera ray per pixel through a scene to build an image.

    Works with the duck-typed scene/vector/colour classes from the
    ``utilities`` package (Vec3 arithmetic, Color addition, etc.).
    """
    def __init__(self):
        pass
    def render(self, scene):
        # Map every pixel onto a view plane spanning x in [-1, 1];
        # y is scaled by the aspect ratio so pixels stay square.
        width = scene.width
        height = scene.height
        aspect_ratio = float(width) / height
        x0 = -1.0
        x1 = 1.0
        xstep = (x1-x0) / (width-1)
        y0 = -1.0 / aspect_ratio
        y1 = 1.0 / aspect_ratio
        ystep = (y1 - y0) / (height-1)
        camera = scene.camera
        pixels = image.Image(width, height)
        lastpercent = .0
        for i in range(height):
            y = y0+i*ystep
            for j in range(width):
                x = x0+j*xstep
                # Shoot a ray from the camera through the view-plane point.
                r = ray.Ray(camera, vector.Vec3(x,y)-camera)
                pixels.set_pixel(j,i, self.ray_trace(r, scene))
            # NOTE(review): the percentage changes on every scanline, so
            # this guard effectively prints progress once per row.
            if (float((i+1)*100) / float(height)) != lastpercent:
                lastpercent = float((i+1)*100) / float(height)
                print("Rendered %.4f%%" % lastpercent)
        return pixels
    def ray_trace(self, r, scene):
        # Black background colour when the ray hits nothing.
        color_ = color.Color(.0,.0,.0)
        dist_hit, obj_hit = self.find_nearest(r, scene)
        if obj_hit is None:
            return color_
        hit_pos = r.origin + r.direction * dist_hit
        color_ += self.color_at(obj_hit, hit_pos, scene)
        return color_
    def find_nearest(self, r, scene, exclude=None):
        # Linear scan for the closest intersection; ``exclude`` lets a
        # caller skip the object the ray originates from.
        dist_min = None
        obj_hit = None
        for obj in scene.objects:
            if not obj == exclude:
                dist = obj.intersects(r)
                if dist is not None and (obj_hit is None or dist < dist_min):
                    dist_min = dist
                    obj_hit = obj
        return (dist_min, obj_hit)
    def color_at(self, obj_hit, hit_pos, scene):
        # Start from the object's base colour and let each attached
        # effect transform it in turn.
        color_ = obj_hit.color
        for effect in obj_hit.effects:
            color_ = effect(self, color_, hit_pos, obj_hit, scene)
        return color_
| true |
08151311360bcfd3c72909cc7e554556cc034eef | Python | tianxing1994/TensorFlow | /神经网络练习/反馈神经网络原理 (Python 实现)/反馈神经网络二 (Python 实现).py | UTF-8 | 5,031 | 3.5625 | 4 | [] | no_license | """
我根据 <OpenCV + TensorFlow 深度学习与计算机视觉实战> 的演示代码, 改写的基于 numpy 的实现.
反馈神经网络原理实现.
输入层, 两个神经元
AX.T + B = Y
X.shape = (1, 3)
A.shape = (6, 3)
B.shape = (6, 1)
Y.shape = (6, 1)
隐藏层, 四个神经元
AX + B = Y
A.shape = (2, 6)
X.shape = (6, 1)
B.shape = (2, 1)
Y.shape = (2, 1)
输出层, 两个神经元(二分类)
矩阵求导: (虽然名为矩阵求导, 但实际只需要注意矩阵运算具体步骤. )
AX = Y
δY / δX = A.T
对 Y 对 X 的导数, 即求, Y 中的对 X 中的每一个值的导数.
示例:
[a1, a2, a3] * [x1, x2, x3].T = y
则:
δy / δx = [δy / δx1, δy / δx2, δy / δx3] = [a1, a2, a3]
δy / δa = [δy / δa1, δy / δa2, δy / δa3] = [x1, x2, x3]
"""
import numpy as np
def sigmoid(x):
    """Logistic activation: 1 / (1 + e^-x), element-wise for arrays."""
    return 1.0 / (1.0 + np.exp(-x))
def sigmoid_derivate(x):
    """Derivative of the logistic function: s(x) * (1 - s(x)).

    The sigmoid is inlined here so the value is computed once.
    """
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1 - s)
class BPNeuralNetwork(object):
    """A tiny 3-6-2 feed-forward network trained with back-propagation.

    Input layer: 3 values; hidden layer: 6 sigmoid neurons; output
    layer: 2 sigmoid neurons (one-hot binary classification).
    (Comments translated from Chinese to English.)
    """
    def __init__(self):
        # Layer activations are column vectors; weights/biases start random.
        self.input_array = np.ones(shape=(3, 1), dtype=np.float64)
        self.input_weights = np.random.randn(6, 3)
        self.input_bias = np.random.randn(6, 1)
        self.hidden_array = np.ones(shape=(6, 1), dtype=np.float64)
        self.hidden_array_activated = sigmoid(self.hidden_array)
        self.hidden_weights = np.random.randn(2, 6)
        self.hidden_bias = np.random.randn(2, 1)
        self.output_array = np.ones(shape=(2, 1), dtype=np.float64)
        self.output_array_activated = sigmoid(self.output_array)
    def predict(self, input_array):
        """
        :param input_array: array of shape (1, 3).
        :return: activated output, shape (2, 1).
        """
        self.input_array = input_array.T
        self.hidden_array = np.dot(self.input_weights, self.input_array) + self.input_bias
        self.hidden_array_activated = sigmoid(self.hidden_array)
        self.output_array = np.dot(self.hidden_weights, self.hidden_array_activated) + self.hidden_bias
        self.output_array_activated = sigmoid(self.output_array)
        return self.output_array_activated
    def back_propagate(self, input_array, label, learning_rate):
        """
        :param input_array: array of shape (1, 3)
        :param label: array of shape (1, 2)
        :param learning_rate: gradient step scale
        :return: mean squared error of this sample
        """
        result = self.predict(input_array)
        label = label.T
        # (2, 1) = (2, 1) - (2, 1)
        output_array_activated_error = label - result
        # Error at the output layer (through the sigmoid derivative).
        # (2, 1) = (2, 1) * (2, 1)
        output_array_error = output_array_activated_error * sigmoid_derivate(self.output_array)
        # (2, 1) * (2, 6) = (2, 6) => sum => (1, 6) => transpose => (6, 1)
        hidden_array_activated_error = np.sum(output_array_error * self.hidden_weights, axis=0, keepdims=True).T
        # Error at the hidden layer.
        # (6, 1) * (6, 1) => (6, 1)
        hidden_array_error = hidden_array_activated_error * sigmoid_derivate(self.hidden_array)
        # Gradient descent: move each weight along the derivative of the
        # error w.r.t. that weight. Because the error was computed as
        # label - result, updates use += rather than -=. The error
        # magnitude also scales the step, acting like a larger learning
        # rate when the error is large.
        # Update self.hidden_weights.
        # NOTE(review): this uses self.hidden_array.T; the textbook
        # gradient uses the *activated* hidden output — confirm intent.
        # (2, 6) += (2, 1) * (1, 6) => (2, 6)
        self.hidden_weights += output_array_error * self.hidden_array.T * learning_rate
        self.hidden_bias += output_array_error * learning_rate
        # Update self.input_weights.
        # (6, 3) += (6, 1) * (1, 3) => (6, 3)
        self.input_weights += hidden_array_error * self.input_array.T * learning_rate
        self.input_bias += hidden_array_error * learning_rate
        # Compute the loss (mean of squared output errors).
        cost = np.sum(np.power(output_array_activated_error, 2)) / output_array_activated_error.shape[0]
        return cost
    def train(self, x_train, y_train, limit=100, learning_rate=0.05):
        # One back-propagation pass per sample, repeated ``limit`` times.
        for i in range(limit):
            for j in range(len(x_train)):
                input_array = np.reshape(x_train[j], (1, 3))
                label = np.reshape(y_train[j], (1, 2))
                self.back_propagate(input_array, label, learning_rate)
        return
if __name__ == '__main__':
    # Four training samples: first feature active -> class [1, 0],
    # third feature active -> class [0, 1].
    x_train = np.array([[1, 1, 0],
                        [2, 1, 0],
                        [0, 1, 1],
                        [0, 1, 2]], dtype=np.float64)
    y_train = np.array([[1, 0],
                        [1, 0],
                        [0, 1],
                        [0, 1]], dtype=np.float64)
    nn = BPNeuralNetwork()
    nn.train(x_train, y_train, 10000, 0.005)
    # Print each prediction next to its label for visual inspection.
    for i in range(len(x_train)):
        input_array = np.reshape(x_train[i], (1, 3))
        # NOTE(review): labels are reshaped to (2, 1) here but to (1, 2)
        # inside train(); this reshape only serves printing.
        label = np.reshape(y_train[i], (2, 1))
        pred = nn.predict(input_array)
        print(pred)
        print(label)
| true |
90ffb315d4f65888decd8af42060c7b8622bd667 | Python | Aasthaengg/IBMdataset | /Python_codes/p03673/s455623423.py | UTF-8 | 272 | 2.859375 | 3 | [] | no_license | n = int(input())
# Reconstruct the answer sequence: walk a from its last element to its
# first, placing elements alternately at the next free slot from the
# left (odd index i) or from the right (even index i).
# Note: n was read from stdin on the preceding line.
a = list(map(int,input().split()))
b = [-1] * n
l = 0
r = n-1
for i in range(n-1, -1, -1):
    if i % 2:
        b[l] = a[i]
        l += 1
    else:
        b[r] = a[i]
        r -= 1
# For odd n the arrangement comes out mirrored, so flip it once.
if n % 2:
    b.reverse()
b = [str(i) for i in b]
print(' '.join(b))
69605ffb8d561c8c92b6e777b541fb1a150c332d | Python | maxime915/info8010-deep-learning | /homeworks/homework2/mlp_2.py | UTF-8 | 3,179 | 2.875 | 3 | [
"BSD-3-Clause"
] | permissive | import numpy as np
import torch, torchvision
import torch.nn as nn
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from PIL import Image
# Hyper-parameters: CIFAR-10 images are 32x32 RGB, flattened to 3072 inputs.
input_dim = 3*32*32 # flattened 32x32x3 image
hidden_dim = 300
output_dim = 10
learning_rate = 1e-4
num_epochs = 100
class net(nn.Module):
    """Four-layer fully connected classifier.

    Each input sample is flattened to ``input_dim`` features and pushed
    through three ReLU hidden layers of width ``hidden_dim``, followed
    by a linear output layer of width ``output_dim``.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(net, self).__init__()
        self.input_dim = input_dim
        # Build the same Linear/ReLU stack as before, constructed in order
        # so parameter initialisation order is unchanged.
        layers = [nn.Linear(input_dim, hidden_dim), nn.ReLU()]
        for _ in range(2):
            layers += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU()]
        layers.append(nn.Linear(hidden_dim, output_dim))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        flat = x.view(x.size(0), self.input_dim)
        return self.net(flat)
# Build the model and optimizer on the GPU, plus the CIFAR-10 pipeline.
device = 'cuda:0'
model = net(input_dim, hidden_dim, output_dim).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Resize, convert to tensors, and normalise each channel to mean 0.5 / std 0.25.
transform = transforms.Compose([transforms.Resize((32, 32)),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.25, 0.25, 0.25))])
trainset = datasets.CIFAR10(root = "./data", train=True, download=True, transform=transform)
testset = datasets.CIFAR10(root = "./data", train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=256, shuffle=True, num_workers=4)
def train(num_epochs):
    """Train the module-level ``model`` on ``trainloader`` and evaluate
    it on ``testloader`` every 10th epoch.

    Bug fix: the per-sample loss denominators were swapped — training
    loss was divided by the *test* loader size and test loss by the
    *train* loader size. Each loss is now normalised by its own
    loader's size (len(loader) * batch_size; this approximation
    ignores the smaller final batch).

    Returns:
        (epochs_train_loss, epochs_test_loss): train loss per epoch,
        and test loss for every 10th epoch.
    """
    epochs_train_loss = []
    epochs_test_loss = []
    for i in range(num_epochs):
        tmp_loss = 0
        for (x, y) in trainloader:
            outputs = model(x.to(device))
            loss = criterion(outputs, y.to(device))
            tmp_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Normalise by the *training* set size (was: test set size).
        epochs_train_loss.append(tmp_loss / (len(trainloader) * trainloader.batch_size))
        if i % 10 == 0:
            print('evaluating...', end='\r')
            with torch.no_grad():
                correct = 0
                total = 0
                tmp_loss = 0
                for inputs, targets in testloader:
                    outputs = model(inputs.to(device))
                    loss = criterion(outputs, targets.to(device))
                    tmp_loss += loss.item()
                    _, predicted = outputs.max(1)
                    total += targets.size(0)
                    correct += predicted.eq(targets.to(device)).sum().item()
                # Normalise by the *test* set size (was: train set size).
                epochs_test_loss.append(tmp_loss / (len(testloader) * testloader.batch_size))
            print(f'accuracy of the model on the testing images: {correct}/{total}={100*correct/total}%')
    return epochs_train_loss, epochs_test_loss
# Train, then plot both curves. The test loss is recorded only every
# 10th epoch, hence the stride-10 x axis for the second series.
epochs_train_loss, epochs_test_loss = train(num_epochs)
plt.plot(np.arange(0, num_epochs, 1), epochs_train_loss, label='training loss')
plt.plot(np.arange(0, num_epochs, 10), epochs_test_loss, label='testing loss')
plt.legend()
plt.show()
07276d730fe747540b34c24d448d1edb4d0b44f8 | Python | ntrrgc/webkit-remote-build | /serial-listener.py | UTF-8 | 2,375 | 3 | 3 | [] | no_license | #!/usr/bin/python3
# Listen on the UNIX domain socket specified as parameter, one connection at
# a time. The data sent from each client is written to stdout.
#
# The program exits if an 'end' packet is received, identified by being a
# connection whose first 4 bytes transferred are 'end\n'.
#
import fcntl
import os
import socket
import sys
import select
# Replace text-mode stdout with an unbuffered binary stream on fd 1 so
# forwarded socket bytes are written through verbatim.
sys.stdout = os.fdopen(1, 'wb', 0)
def recv_exact_bytes(socket, size):
    """Read exactly ``size`` bytes from ``socket``.

    Returns a shorter buffer if the peer closes the connection before
    ``size`` bytes have arrived.
    """
    received = b""
    while len(received) < size:
        chunk = socket.recv(size - len(received))
        if not chunk:
            # Peer closed the connection: return what we have so far.
            break
        received += chunk
    return received
if __name__ == '__main__':
    try:
        # Listen on the UNIX domain socket path given on the command line,
        # one client at a time.
        s = socket.socket(socket.AF_UNIX)
        s.bind(sys.argv[1])
        s.listen(1000)
        poll = select.poll()
        poll.register(s.fileno())
        # Also watch stdout for errors so we exit once the reader is gone.
        poll.register(sys.stdout.fileno(), select.POLLERR)
        while True:
            for (fd, event_type) in poll.poll():
                if fd == sys.stdout.fileno() and event_type == select.POLLERR:
                    # stdout closed, exit
                    raise SystemExit(0)
                elif fd == s.fileno() and event_type == select.POLLIN:
                    # Connection received: the first 4 bytes decide whether
                    # this is the 'end' sentinel packet or payload data.
                    ss, address = s.accept()
                    possible_end_block = recv_exact_bytes(ss, 4)
                    if possible_end_block == b"end\n":
                        # exit due to 'end' packet
                        raise SystemExit(0)
                    else:
                        # Not an 'end' packet, but regular data
                        sys.stdout.write(possible_end_block)
                        # Keep receiving and forwarding to stdout until the
                        # connection is closed.
                        while True:
                            block = ss.recv(2048)
                            if len(block) == 0:
                                ss.close()
                                break
                            sys.stdout.write(block)
                else:
                    # Bug fix: AssertionError takes no ``file`` keyword —
                    # the original raised TypeError here instead of the
                    # intended assertion.
                    raise AssertionError("Unhandled event: fd={} "
                                         "event_type={}".format(fd, event_type))
    except KeyboardInterrupt:
        pass
| true |
c7cfc02fc2d9af5ba4839a4f3f63303752939a9c | Python | lbonn041/Search-Engine | /app/bigram_language_model/bigram.py | UTF-8 | 1,250 | 3.078125 | 3 | [] | no_license | from nltk import bigrams
import json
from collections import Counter, defaultdict
from nltk.corpus import stopwords
def remove_stopwords(token_array):
    """Drop English stop words and common punctuation tokens.

    Returns a new list; the input list is left untouched.
    """
    punctuation = {",", ".", "'", "'s", ":", ";",
                   "(", ")", "..", '’', '®', '&', '-', '--', '/'}
    stop_words = set(stopwords.words('english'))
    blocked = stop_words | punctuation
    return [word for word in token_array if word not in blocked]
def bigram(corpus):
    """Build a bigram language model over document titles and
    descriptions and dump it to app/corpora/bigram_model.txt as JSON.

    Counts are normalised per first word, so each inner mapping is the
    conditional distribution P(w2 | w1). Bigrams are padded with None
    markers on both sides, exactly as before.
    """
    model = defaultdict(lambda: defaultdict(lambda: 0))

    def _tally(text):
        # Count padded bigrams of the stop-word-filtered tokens.
        tokens = remove_stopwords(text.split())
        for first, second in bigrams(tokens, pad_right=True, pad_left=True):
            model[first][second] += 1

    for document in corpus:
        _tally(document['title'])
        _tally(document['description'])

    # Turn raw counts into conditional probabilities.
    for first in model:
        total = float(sum(model[first].values()))
        for second in model[first]:
            model[first][second] /= total

    with open('app/corpora/bigram_model.txt', 'w') as outfile:
        json.dump(model, outfile, indent=4, separators=(',', ': '))
| true |
bca31c208274a232f0e88a8df890be061884fd36 | Python | sumitasok/CarND-Behavioral-Cloning-P3 | /preprocessing.py | UTF-8 | 6,469 | 2.59375 | 3 | [] | no_license | from PIL import Image
# from cv2 import getPerspectiveTransform, warpPerspective
import cv2
import numpy as np
import time
def AutoCannyGaussianBlurSobelYRGB(image):
    """Crop the sky/hood, apply a y-direction Sobel threshold, Gaussian
    blur and auto-Canny, then stack the mask into a 3-channel image."""
    cropped = cv2.resize(image[60:140, :], (320, 80))
    edges_y = abs_sobel_thresh(cropped, orient='y', sobel_kernel=15,
                               thresh_min=100, thresh_max=200)
    blurred = cv2.GaussianBlur(edges_y, (3, 3), 0)
    edges = auto_canny(blurred)
    # Promote (H, W) to (H, W, 1), then replicate across 3 channels.
    edges = np.reshape(edges, edges.shape + (1,))
    return np.concatenate((edges, edges, edges), axis=2)
def CropSky(image):
    """Keep only rows 60..139 (drops sky and hood) and resize to 320x80."""
    horizon_strip = image[60:140, :]
    return cv2.resize(horizon_strip, (320, 80))
def SobelYRGB(image):
    """Crop the sky/hood and return the y-direction Sobel threshold mask.

    Fix: the original also built a 3-channel copy of the mask (reshape +
    concatenate) that was never used because the function returns the
    single-channel ``sobely`` — that dead copy-paste work is removed.
    """
    cropped = cv2.resize(image[60:140, :], (320, 80))
    sobely = abs_sobel_thresh(cropped, orient='y', sobel_kernel=15,
                              thresh_min=100, thresh_max=200)
    return sobely
def auto_canny(image, sigma=0.33):
    """Canny edge detection with thresholds derived from the median
    pixel intensity: [(1-sigma)*m, (1+sigma)*m], clamped to 0..255."""
    median_value = np.median(image)
    lower = int(max(0, (1.0 - sigma) * median_value))
    upper = int(min(255, (1.0 + sigma) * median_value))
    return cv2.Canny(image, lower, upper)
def image_process(current_path):
    """Load an image from disk and return a binary stack where the first
    channel marks red values in (200, 255] and the other two are zero.

    Bug fix: the original called ``mpimg.imread`` but ``mpimg`` was
    never imported, so every call raised NameError. PIL (already
    imported in this module) now loads the pixels; for 8-bit images
    this yields the same uint8 RGB array — confirm inputs are JPEG/8-bit.

    NOTE(review): a second ``image_process(img)`` defined later in this
    module shadows this one at import time — rename one of the two.
    """
    image = np.asarray(Image.open(current_path))
    cropped = cv2.resize(image[60:140, :], (320, 80))
    R = cropped[:, :, 0]
    G = cropped[:, :, 1]
    thresh = (200, 255)
    rbinary = np.zeros_like(R)
    gbinary = np.zeros_like(G)  # stays all zero: only red is thresholded
    rbinary[(R > thresh[0]) & (R <= thresh[1])] = 1
    return np.dstack((rbinary, gbinary, gbinary))
# crop the image using the margin format that keras.cropping2D uses.
# makes it simpler to port the cropping configurations.
# https://keras.io/layers/convolutional/#cropping2d
# http://matthiaseisen.com/pp/patterns/p0202/
def crop_like_keras_crop2D(input_filename, output_filename, top_crop, bottom_crop, left_crop, right_crop):
    """Crop an image file using keras.layers.Cropping2D margin semantics.

    Each margin is the number of pixels removed from that edge, matching
    https://keras.io/layers/convolutional/#cropping2d so cropping
    configurations can be ported directly.

    Returns:
        output_filename, after the cropped image has been saved there.
    """
    # ``with`` guarantees the source file handle is closed even if
    # crop()/save() raises (the original leaked the handle on error).
    with Image.open(input_filename) as img:
        x_length, y_length = img.size
        cropped_image = img.crop((left_crop, top_crop,
                                  x_length - right_crop, y_length - bottom_crop))
        cropped_image.save(output_filename)
    return output_filename
# src = np.float32([
# [850, 320],
# [865, 450],
# [533, 350],
# [535, 210]
# ])
# src = np.float32([
# [870, 240],
# [870, 370],
# [520, 370],
# [520, 240]
# ])
def warp(img, src_points, dst_points, img_size=None):
    """Apply a perspective transform mapping src_points onto dst_points.

    Args:
        img: source image.
        src_points, dst_points: four corresponding points (np.float32).
        img_size: output (width, height); defaults to the input size.

    Fixes: ``is None`` instead of ``== None``; removed the inverse
    matrix that was computed and immediately discarded.
    """
    if img_size is None:
        img_size = (img.shape[1], img.shape[0])
    M = cv2.getPerspectiveTransform(src_points, dst_points)
    return cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
def dir_threshold(gray, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient *direction* (radians, from
    absolute x/y Sobel responses) falls within ``thresh``."""
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    mask = np.zeros_like(direction)
    mask[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return mask
def mag_thresh(gray, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose gradient *magnitude*, rescaled to
    0..255, falls within ``mag_thresh``."""
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(gx**2 + gy**2)
    # Rescale so the strongest gradient maps to 255, then cast to uint8.
    scale = np.max(magnitude)/255
    magnitude = (magnitude/scale).astype(np.uint8)
    mask = np.zeros_like(magnitude)
    mask[(magnitude >= mag_thresh[0]) & (magnitude <= mag_thresh[1])] = 1
    return mask
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh_min=0, thresh_max=255):
    """Binary mask of pixels whose scaled |Sobel| gradient lies in
    [thresh_min, thresh_max].

    Bug fix: the original unconditionally overwrote ``thresh_min`` with
    20 and ``thresh_max`` with 100, silently ignoring the caller's
    thresholds (e.g. the 100/200 passed by the SobelY helpers above).
    The parameters are now honoured.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    abs_sobel = np.absolute(sobelx if orient == 'x' else sobely)
    # Scale to 8-bit so the thresholds are comparable across images.
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    return sxbinary
# NOTE(review): this re-definition shadows the path-based image_process()
# declared earlier in this module; rename one of the two.
def image_process(img):
    # Convert the PIL image's pixel data to a float32 (width, height, 3) array.
    a = np.array(img.getdata()).astype(np.float32).reshape( (img.size[0],img.size[1],3) )
    # Keep rows 60..139 (drops sky/hood) and resize to 320x80.
    cropped = cv2.resize(a[60:140,:], (320, 80))
    R = cropped[:,:,0]
    G = cropped[:,:,1]
    B = cropped[:,:,2]  # unused
    thresh = (200, 255)
    rbinary = np.zeros_like(R)
    gbinary = np.zeros_like(G)  # stays all zero: only red is thresholded
    rbinary[(R > thresh[0]) & (R <= thresh[1])] = 1
    return np.dstack((rbinary, gbinary, gbinary))
3ff4f03d7389f56331f0587746d9d4d62860ff1e | Python | Lukasranee/Self-Avoiding-Random-Walk | /SelfAvoidingRandomWalk1D.py | UTF-8 | 1,143 | 3.765625 | 4 | [] | no_license | #Self Avoiding Random Walk in 1 dimensional space @Lukas Rane
import random
import numpy as np
import matplotlib.pyplot as plt
import math
def random_walk(n):
    #this function computes the random walk in 1 dimension
    # NOTE(review): the guard below compares the walker's last POSITION
    # (xdistance[-1]) with the proposed STEP dx (+/-1); it skips the move
    # whenever they happen to be equal, which is not true self-avoidance
    # (revisiting older sites is still possible). Confirm the intended
    # rule before relying on this.
    x = 0
    xdistance = [ ]
    xdistance.append(x)  # the walk starts at the origin
    for i in range(n):
        dx = 0
        dx = random.choice([-1,1])  # unbiased unit step
        if xdistance[-1] != dx:
            x += dx
            xdistance.append(x)
    return xdistance
kb = 1.38065 * (10**-23) #boltzman constant
entropyvec = [ ] # entropy
x = 0 #starts at the origin
number_of_steps_vec = [ ] #all different number of steps used
number_of_walks = 1000
number_of_steps_vec.append(0)
for walk_lengths in range(number_of_walks):
    xdistance = random_walk(walk_lengths)
    number_of_steps_vec.append(walk_lengths)
    # NOTE(review): S depends only on the constant number_of_walks, so
    # the same entropy value is appended every iteration — it likely
    # should depend on walk_lengths; confirm the intended formula.
    S = kb * math.log(2**(number_of_walks-2))/((number_of_walks -2) **2)
    entropyvec.append(S)
# Absolute value of the mean position, computed over the LAST walk only
# (xdistance holds the result of the final loop iteration).
averagedistance = (abs(sum(xdistance))/len(xdistance))
print('The average distance is: ',averagedistance)
plt.figure(1)
plt.plot(xdistance)
plt.xlabel('Steps')
plt.ylabel('Distance')
plt.title('1-d random walk')
plt.show()
99d08d18e2341a2efb0b34bf121af2814f457c2d | Python | J216/simple_tag_replace | /tagReplaceGUI.py | UTF-8 | 4,680 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python3
import sys
import os
import re
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
class Window(Frame):
    """Main Tk frame: loads a template file, shows one entry per <tag>,
    and saves a copy of the template with each tag replaced by user input.
    """

    # NOTE(review): class-level (shared) attributes; fine for this
    # single-window app, but they would be shared across instances.
    tags = []        # tag names found in the loaded template
    meta = {}        # <key:value> metadata stripped from the template
    template = ""    # raw template text
    replace = []     # replacement strings collected from the entries
    E1 = []          # Entry widgets, one per tag
    L1 = []          # Label widgets, one per tag
    winx = 500
    winy = 400

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.master = master
        self.init_window()

    def init_window(self):
        """Build the menus, the logo and the Clear/Quit buttons."""
        self.master.title("JSI - JReplace Template")
        # Let the frame fill the root window.
        self.pack(fill=BOTH, expand=1)
        menu = Menu(self.master)
        self.master.config(menu=menu)
        # File menu (renamed local: `file` shadowed the builtin).
        file_menu = Menu(menu)
        file_menu.add_command(label="Open", command=self.openFile)
        file_menu.add_command(label="Save", command=self.saveFile)
        file_menu.add_command(label="Exit", command=self.space_rats, accelerator="Ctrl+Q")
        menu.add_cascade(label="File", underline=0, menu=file_menu)
        edit = Menu(menu)
        self.bind_all("<Control-q>", self.space_rats)
        edit.add_command(label="Clear", command=self.clearWindow)
        menu.add_cascade(label="View", menu=edit)
        self.showImg()
        clearButton = Button(self, text="Clear", command=self.clearWindow)
        clearButton.place(x=325, y=460)
        quitButton = Button(self, text="Quit", command=self.space_rats)
        quitButton.place(x=385, y=460)

    def drawEntry(self):
        """Create one label + entry row per discovered tag."""
        tag_count = 0
        for tag in self.tags:
            self.L1.append(Label(self, text=tag))
            self.L1[tag_count].place(x=10, y=tag_count * 28)
            self.E1.append(Entry(self, bd=5))
            self.E1[tag_count].place(x=178, y=tag_count * 28)
            tag_count += 1
        self.resizeWindow()

    def showImg(self):
        """Render the JSI logo in the middle of the window."""
        load = Image.open("jsi-logo-256.png")
        render = ImageTk.PhotoImage(load)
        img = Label(self, image=render)
        img.image = render  # keep a reference so Tk does not GC the image
        img.place(x=190, y=200)

    def openFile(self):
        """Ask for a template file, parse its tags and build the form."""
        self.filename = filedialog.askopenfilename(
            initialdir="~/", title="Select file",
            filetypes=(("ini files", "*.ini"), ("all files", "*.*")))
        self.loadTemplate()
        self.drawEntry()

    def saveFile(self):
        """Collect the entry values, substitute them and write the result."""
        self.filename = filedialog.asksaveasfilename(
            initialdir="~/", title="Select file",
            filetypes=(("ini files", "*.ini"), ("all files", "*.*")))
        # Bug fix: reset first so a second save does not append duplicates.
        self.replace = []
        for t in self.E1:
            self.replace.append(t.get())
        self.replaceTags()
        with open(self.filename, "w") as f:
            f.write(self.template)

    def clearWindow(self):
        """Destroy the form widgets and forget all loaded state."""
        for i in range(len(self.E1)):
            self.E1[i].destroy()
            self.L1[i].destroy()
        self.E1 = []
        self.L1 = []
        self.tags = []
        self.meta = {}
        self.template = ""
        self.replace = []

    def loadTemplate(self):
        """Read the template and split <name> tags from <key:value> metadata."""
        if os.path.isfile(self.filename):
            with open(self.filename) as file:
                self.template = file.read()
            file_tags = re.findall(re.escape('<') + "(.*)" + re.escape('>'), self.template)
            for i in file_tags:
                if ':' in i:
                    # Metadata tag: record it and strip it from the template.
                    self.meta[i.split(':')[0]] = i.split(':')[1]
                    # Bug fix: was `template.replace(...)`, which raised
                    # NameError -- the text lives in self.template.
                    self.template = self.template.replace('<' + i + '>', '')
                else:
                    if not i in self.tags:
                        self.tags.append(i)

    def replaceTags(self):
        """Substitute every <tag> in the template with its entry value."""
        tag_count = 0
        for t in self.tags:
            self.template = self.template.replace("<" + t + ">", self.replace[tag_count])
            tag_count += 1
        print(self.tags)
        print(self.replace)
        print(self.template)

    def space_rats(self, event=""):
        """Quit-button / Ctrl+Q handler."""
        sys.exit(0)

    def resizeWindow(self):
        """Grow the frame so every tag row fits."""
        self.winy = (len(self.tags) + 1) * 30 + 100
        self.configure(width=self.winy)
# Build the Tk root, install the JSI logo as the window icon, fix the
# initial geometry, attach the Window frame and enter the event loop.
root = Tk()
img = PhotoImage(file='jsi-logo-256.png')
root.tk.call('wm', 'iconphoto', root._w, img)
#size of the window
root.geometry("450x500")
app = Window(root)
root.mainloop()
| true |
8b292f9dc0337c98d5a36a1fdcfebc5b6dafdaf8 | Python | ilker07/pythonAltKumeler | /altKumeFonksiyonlar.py | UTF-8 | 1,275 | 2.890625 | 3 | [] | no_license |
import math
en_buyuk_liste=[]  # best (largest-sum) subset(s) found so far ("en buyuk" = largest)
liste2=[]  # every qualifying subset produced by altKumeler
en_buyuk_toplam=0  # largest subset sum seen so far
def dogruMu(sayi):
    """Return True when the binary representation of ``sayi`` (a
    non-negative subset mask) contains no two adjacent 1 bits.

    Equivalent to the original pairwise scan of ``bin(sayi)``, without the
    redundant ``index != len(d)`` guard (``index`` never reaches ``len(d)``
    inside ``range(len(d) - 1)``).
    """
    # bin() yields e.g. '0b101'; drop the prefix and look for adjacent 1s.
    return '11' not in bin(sayi)[2:]
def altKumeler(arr, n):
    """Append to the global ``liste2`` every subset of ``arr`` (size >= 2)
    whose index bitmask passes ``dogruMu`` (no adjacent set bits)."""
    for mask in range(1, 1 << n):
        secilen = [arr[bit] for bit in range(n)
                   if mask & (1 << bit) and dogruMu(mask)]
        if len(secilen) > 1:
            liste2.append(secilen)
def karsilastir(gelen_liste, gelen_toplam):
    """Update the module-level best-subset record with a new candidate.

    A strictly larger sum replaces the winners; an equal sum joins them;
    a smaller sum is ignored.
    """
    global en_buyuk_toplam
    global en_buyuk_liste
    if gelen_toplam < en_buyuk_toplam:
        return  # worse than the best seen; nothing to record
    if gelen_toplam > en_buyuk_toplam:
        # Strictly better: start a fresh list of winners.
        en_buyuk_toplam = gelen_toplam
        en_buyuk_liste = []
    en_buyuk_liste.append(gelen_liste)
def fonksiyon():
    """Print each stored subset with its sum and feed it to karsilastir."""
    for kume in liste2:
        ara_toplam = sum(int(eleman) for eleman in kume)
        print(kume, ":", ara_toplam)
        karsilastir(kume, ara_toplam)
| true |
ace37dcb0072c2ea74feba86d11724ae90a170ad | Python | sharma-abhishek/splitexpense | /test.py | UTF-8 | 3,113 | 3 | 3 | [] | no_license | import unittest
from collections import OrderedDict
from main import parse_input_and_simplify_expenses, get_individual_share
'''
This test case validates the core logic of simplifying common expenses
'''
class TestSplitExpense(unittest.TestCase):
    """Unit tests for parse_input_and_simplify_expenses / get_individual_share.

    Shared fixture data is declared as class attributes below the test
    methods.
    """
    #test should pass as all the inputs are correct
    def test_1_should_pass_parse_input_and_simplify_expenses(self):
        simplified_debts = parse_input_and_simplify_expenses(self.names, self.correct_expenses_list, \
                                                             self.currency)
        # After settling, everyone's balance should be zero.
        self.assertDictEqual(simplified_debts, self.expected_simplified_dict
                             )
    '''test will fail with SystemExit as currency value is not passed and hence default is '$'
    but expense list has INR. '''
    def test_2_should_fail_parse_input_and_simplify_expenses_for_incorrect_currency(self):
        with self.assertRaises(SystemExit):
            simplified_debts = parse_input_and_simplify_expenses(self.names, self.correct_expenses_list)
    # test will fail with SystemExit as expense records has 'E' which is not there in names list
    def test_3_should_fail_parse_input_and_simplify_expenses(self):
        with self.assertRaises(SystemExit):
            simplified_debts = parse_input_and_simplify_expenses(self.names, self.incorrect_expenses_list, \
                                                                 self.currency)
    #test should pass to get individual share for each person
    def test_4_should_pass_get_expected_individual_share(self):
        share = get_individual_share(self.names, self.user_common_expenses_map)
        self.assertDictEqual(share, self.expected_individual_share)
    ## Variables to hold test data
    names = ['A', 'B', 'C', 'D']
    # Valid expense list for test case 1
    correct_expenses_list = [
        'A paid INR 100',
        'B paid INR 50',
        'C paid INR 30',
        'D paid INR 20'
    ]
    # user_common_expenses_map created to calculate individual share
    user_common_expenses_map = dict()
    user_common_expenses_map['A'] = 100
    user_common_expenses_map['B'] = 50
    user_common_expenses_map['C'] = 30
    user_common_expenses_map['D'] = 20
    # expected individual share based on 'user_common_expenses_map'
    expected_individual_share = dict()
    expected_individual_share['A'] = 50
    expected_individual_share['B'] = 0
    expected_individual_share['C'] = -20
    expected_individual_share['D'] = -30
    # This is an example of incorrect expense list as it has additional person data 'E' which is not in names
    incorrect_expenses_list = [
        'A paid INR 100',
        'B paid INR 50',
        'C paid INR 30',
        'D paid INR 20',
        'E paid INR 20'
    ]
    # currency to use for this test
    currency = 'INR'
    # expected simplified share of each person to be zero after settling down all expenses
    expected_simplified_dict = OrderedDict()
    expected_simplified_dict['A'] = expected_simplified_dict['B'] = expected_simplified_dict['C'] = expected_simplified_dict['D'] = 0.0
if __name__ == '__main__':
    # Run the suite when this module is executed directly.
    unittest.main()
| true |
74f2927145c56a929fb4f78297caa3e64deec55e | Python | jackiegitari1234/homestudy | /app/api/v2/models/auth_model.py | UTF-8 | 1,468 | 2.6875 | 3 | [] | no_license | from datetime import datetime
import psycopg2
from app.api.v2.utils.database import init_db
class User(object):
    """Signup model: holds a new member's details and inserts them into the
    ``member`` table via a psycopg2 connection obtained from init_db()."""
    def __init__(self, *args):
        # Positional order is fixed; see the indices below.
        self.firstname = args[0]
        self.lastname = args[1]
        self.othername = args[2]
        self.username = args[3]
        self.email = args[4]
        self.phone_number = args[5]
        self.password = args[6]
        self.db = init_db()  # open database connection
    def register_user(self):
        """Insert this user into the member table.

        Returns the parameter dict on success.  On a database error the
        error is printed and the method implicitly returns None.
        """
        new_user = {
            'firstname': self.firstname,
            'lastname': self.lastname,
            'isAdmin': False,
            "username": self.username,
            "phone_number": self.phone_number,
            "othername": self.othername,
            'registered': datetime.now(),
            "email": self.email,
            "password": self.password
        }
        try:
            # NOTE(review): public_id is hard-coded to 1 for every inserted
            # row -- confirm whether it should be generated per user.
            query = """
            INSERT INTO member(public_id, firstname, lastname,
            othername, PhoneNumber, isAdmin, registered, username,
            email, password)
            VALUES (1, %(firstname)s, %(lastname)s, %(othername)s,
            %(phone_number)s, %(isAdmin)s,%(registered)s, %(username)s,
            %(email)s,%(password)s) ;
            """
            cur = self.db.cursor()
            cur.execute(query, new_user)
            self.db.commit()
            return new_user
        except (Exception, psycopg2.Error) as error:
            print(error)
| true |
a53e8435feed1ccc3985d8cd9b85109ea484b069 | Python | ShahriarXD/Junks | /04_input_function.py | UTF-8 | 118 | 3.78125 | 4 | [] | no_license | a = input("Enter a number: ")
a = int(a) # Convert a to an Integer(if possible); raises ValueError otherwise
a += 55
print(type(a))  # <class 'int'> after the conversion above
print (a)
| true |
3d855feae7b1f65092f0c51d682139a35944dfa3 | Python | hakkeroid/python-colorlog | /tests/test_colorlog.py | UTF-8 | 4,515 | 3.046875 | 3 | [
"MIT"
] | permissive | """
Tests for the colorlog library
Some tests are only loaded on Python 2.7 and above.
"""
from __future__ import absolute_import, print_function
from os.path import join, dirname, realpath
from sys import version_info
from unittest import TestCase, TextTestRunner, main
from logging import StreamHandler, DEBUG, getLogger, root
from logging.config import fileConfig
from colorlog import ColoredFormatter
class TestColoredFormatter(TestCase):
    """Exercise ColoredFormatter configured three different ways:
    module-level logger, hand-built logger, and fileConfig."""
    # Format string shared by all configuration styles below.
    LOGFORMAT = (
        "%(log_color)s%(levelname)s%(reset)s:"
        "%(bold_black)s%(name)s:%(reset)s%(message)s"
    )
    def setUp(self):
        """Clear the handlers on the root logger before each test"""
        root.handlers = list()
        root.setLevel(DEBUG)
    def example_log_messages(self, logger):
        """Passes if the code does not throw an exception"""
        logger.debug('a debug message')
        logger.info('an info message')
        logger.warning('a warning message')
        logger.error('an error message')
        logger.critical('a critical message')
    def test_colorlog_module(self):
        """Use the default module level logger"""
        import colorlog
        self.example_log_messages(colorlog)
    def test_python(self):
        """Manually build the logger"""
        formatter = ColoredFormatter(self.LOGFORMAT)
        stream = StreamHandler()
        stream.setLevel(DEBUG)
        stream.setFormatter(formatter)
        logger = getLogger('pythonConfig')
        logger.setLevel(DEBUG)
        logger.addHandler(stream)
        self.example_log_messages(logger)
    def test_file(self):
        """Build the logger from a config file"""
        # The .ini file lives next to this test module.
        filename = join(dirname(realpath(__file__)), "test_config.ini")
        with open(filename, 'r') as f:
            fileConfig(f.name)
        self.example_log_messages(getLogger('fileConfig'))
class TestRainbow(TestCase):
    """Render every supported color escape in a single format string."""
    # Plain, fg_-prefixed and bg_-prefixed variants of each color name.
    RAINBOW = (
        "%(log_color)s%(levelname)s%(reset)s:%(bold_black)s%(name)s:%(reset)s"
        "%(bold_red)sr%(red)sa%(yellow)si%(green)sn%(bold_blue)sb"
        "%(blue)so%(purple)sw%(reset)s "
        "%(fg_bold_red)sr%(fg_red)sa%(fg_yellow)si%(fg_green)sn"
        "%(fg_bold_blue)sb%(fg_blue)so%(fg_purple)sw%(reset)s "
        "%(bg_red)sr%(bg_bold_red)sa%(bg_yellow)si%(bg_green)sn"
        "%(bg_bold_blue)sb%(bg_blue)so%(bg_purple)sw%(reset)s "
    )
    def test_rainbow(self):
        # Passes if formatting with every color escape raises nothing.
        formatter = ColoredFormatter(self.RAINBOW)
        stream = StreamHandler()
        stream.setLevel(DEBUG)
        stream.setFormatter(formatter)
        logger = getLogger('rainbow')
        logger.setLevel(DEBUG)
        logger.addHandler(stream)
        logger.critical(None)
if version_info > (2, 7):
    # Extra tests that need features introduced after Python 2.7.
    from unittest import skipUnless
    from logging.config import dictConfig
    # Re-binds the name defined above: this subclasses the earlier
    # TestColoredFormatter, inheriting its tests and adding two more.
    class TestColoredFormatter(TestColoredFormatter):
        @skipUnless(version_info > (2, 7), "requires python 2.7 or above")
        def test_dict_config(self):
            """Build the logger from a dictionary"""
            dictConfig({
                'version': 1,
                'formatters': {
                    'colored': {
                        '()': 'colorlog.ColoredFormatter',
                        'format': self.LOGFORMAT,
                    }
                },
                'handlers': {
                    'stream': {
                        'class': 'logging.StreamHandler',
                        'formatter': 'colored',
                    },
                },
                'loggers': {
                    'dictConfig': {
                        'handlers': ['stream'],
                        'level': 'DEBUG',
                    },
                },
            })
            self.example_log_messages(getLogger('dictConfig'))
        # Same layout as LOGFORMAT, but using str.format-style braces.
        BRACES_LOGFORMAT = (
            "{log_color}{levelname}{reset}:"
            "{bold_black}{name}:{reset}{message}"
        )
        @skipUnless(version_info > (3, 2), "requires python 3.2 or above")
        def test_py3(self):
            """Manually build the logger using {} style formatting"""
            formatter = ColoredFormatter(self.BRACES_LOGFORMAT, style="{")
            stream = StreamHandler()
            stream.setLevel(DEBUG)
            stream.setFormatter(formatter)
            logger = getLogger('py3-formatting')
            logger.setLevel(DEBUG)
            logger.addHandler(stream)
            self.example_log_messages(logger)
if __name__ == '__main__':
    # Run with a quiet test runner when executed directly.
    main(testRunner=TextTestRunner(verbosity=0))
| true |
ceff6478e39abb939490e9d36ed13ce9a96c34dc | Python | rhyoharianja/social_media_crawler | /smc_no_gui/smc_no_gui.py | UTF-8 | 951 | 2.53125 | 3 | [
"MIT"
] | permissive | import argparse
# Command-line interface: which stored credentials to use and what to crawl.
parser = argparse.ArgumentParser(description='Description of what the program does here')
parser.add_argument('-creds_id', type=int, nargs=1, required=True,
                    help='ID (number) of the set of credentials to use. You can update this list in the file creds.py')
parser.add_argument('-keyword', type=str, nargs=1, required=True,
                    help='Keyword to crawl')
args = parser.parse_args()
# nargs=1 makes each value a one-element list, hence the [0].
keyword = args.keyword[0].strip()
kw_number = args.creds_id[0]
import os
# Run relative to this script's directory -- presumably so the src package
# and its data files resolve regardless of the caller's cwd (confirm).
os.chdir(os.path.dirname(os.path.realpath(__file__)))
print('#---------------------------------------------------------------------------------------------------#')
print('#----------------------------------------- Initializing... -----------------------------------------#')
print('#---------------------------------------------------------------------------------------------------#')
import src as wc
wc.auto_crawler(keyword, kw_number)
| true |
64c1df6e200c173f5613fbe01ca2426b22b9d855 | Python | tmsquill/object-tracking | /yolo_for_tracking.py | UTF-8 | 5,328 | 3 | 3 | [] | no_license | import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
class YOLO():
    """Wrapper around an OpenCV-DNN YOLOv3 network: loads the model in
    __init__ and exposes inference() to detect and draw bounding boxes."""
    def __init__(self):
        """
        - YOLO takes an image as input. We should set the dimension of the image to a fixed number.
        - The default choice is often 416x416.
        - YOLO applies thresholding and non maxima suppression, define a value for both
        - Load the classes, model configuration (cfg file) and pretrained weights (weights file) into variables
        - If the image is 416x416, the weights must be corresponding to that image
        - Load the network with OpenCV.dnn function
        """
        self.conf_threshold = 0.5   # minimum detection confidence kept
        self.nms_threshold = 0.4    # non-maximum-suppression overlap threshold
        self.inp_width = 320        # network input size fed to blobFromImage
        self.inp_height = 320
        # NOTE(review): "yolov3/..." vs "Yolov3/..." differ in case -- this
        # breaks on case-sensitive filesystems; confirm the intended paths.
        with open("yolov3/coco.names", "rt") as f:
            self.classes = f.read().rstrip('\n').split('\n')
        model_configuration = "Yolov3/yolov3.cfg";
        model_weights = "Yolov3/yolov3.weights";
        self.net = cv2.dnn.readNetFromDarknet(model_configuration, model_weights)
        self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
    def get_outputs_names(self):
        """
        Get the names of the output layers.
        """
        # Get the names of all the layers in the network.
        layers_names = self.net.getLayerNames()
        # Get the names of the output layers, i.e. the layers with unconnected outputs.
        # NOTE(review): i[0] indexing assumes getUnconnectedOutLayers()
        # returns nested arrays (older OpenCV); newer versions return a flat
        # array -- confirm the cv2 version in use.
        return [layers_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]
    def draw_pred(self, frame, class_id, conf, left, top, right, bottom):
        """
        Draw a bounding box around a detected object given the box coordinates.
        """
        # Draw a bounding box.
        cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), thickness=5)
        label = '%.2f' % conf
        # Get the label for the class name and its confidence.
        if self.classes:
            assert(class_id < len(self.classes))
            label = f"{self.classes[class_id]}:{label}"
        # Display the label at the top of the bounding box.
        label_size, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        top = max(top, label_size[1])
        cv2.putText(frame, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=3)
        return frame
    def post_process(self,frame, outs):
        """
        Take the output out of the neural network and interpret it; use the output to
        apply NMS thresholding and confidence thresholding. Also use the output to
        draw the bounding boxes using the draw_pred method.

        Returns the annotated frame and the full (pre-NMS) list of boxes.
        """
        frame_height = frame.shape[0]
        frame_width = frame.shape[1]
        class_ids = []
        confidences = []
        boxes = []
        # Scan through all the bounding boxes output from the network and keep only the
        # ones with high confidence scores. Assign the box's class label as the class
        # with the highest score.
        for out in outs:
            for detection in out:
                # detection[5:] holds the per-class scores; [0:4] the box.
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > self.conf_threshold:
                    # Box coordinates are normalized; scale to frame size.
                    center_x = int(detection[0] * frame_width)
                    center_y = int(detection[1] * frame_height)
                    width = int(detection[2] * frame_width)
                    height = int(detection[3] * frame_height)
                    left = int(center_x - width / 2)
                    top = int(center_y - height / 2)
                    class_ids.append(class_id)
                    confidences.append(float(confidence))
                    boxes.append([left, top, width, height])
        # Perform non maximum suppression to eliminate redundant overlapping boxes with
        # lower confidences.
        indices = cv2.dnn.NMSBoxes(
            boxes,
            confidences,
            self.conf_threshold,
            self.nms_threshold
        )
        for i in indices:
            # NOTE(review): i[0] assumes nested index arrays from NMSBoxes;
            # newer OpenCV returns flat indices -- confirm cv2 version.
            i = i[0]
            box = boxes[i]
            left = box[0]
            top = box[1]
            width = box[2]
            height = box[3]
            output_image = self.draw_pred(
                frame, class_ids[i],
                confidences[i],
                left,
                top,
                left + width,
                top + height
            )
        return frame, boxes
    def inference(self, image):
        """
        Takes an image as input, then runs inference to generate a set of
        bounding boxes as output.
        """
        # Create a 4D blob from a frame.
        blob = cv2.dnn.blobFromImage(
            image,
            1/255,
            (self.inp_width, self.inp_height),
            [0,0,0],
            1,
            crop=False
        )
        # Assign the image as the input to the network.
        self.net.setInput(blob)
        # Runs the forward pass to get output of the output layers.
        outs = self.net.forward(self.get_outputs_names())
        # Remove the bounding boxes with low confidence and markup the frame.
        final_frame, boxes = self.post_process(image, outs)
        return final_frame, boxes
| true |
0e50b341ca0353cf990cda41df547a3dfb08c574 | Python | yetanothersegfault/Python-Bootcamp | /Introduction/NumbersPython.py | UTF-8 | 927 | 4.90625 | 5 | [] | no_license | # Addition is +
# Tutorial script: demonstrates each arithmetic operator with a printed example.
addition = 2 + 1
print('2 + 1 = ' + str(addition))
# Subtraction is -
subtraction = 2 - 1
print('2 - 1 = ' + str(subtraction))
# Multiplication is *
multiply = 2 * 2
print('2 * 2 = ' + str(multiply))
# Division is /
division = 3 / 2
print('3 / 2 = ' + str(division))
# Note that a result from division will be a floating point number
# Modulo Operator is %
# Also known as the remainder operator
modulo = 7 % 4  # 7 % 4 == 3
print('The remainder of 7 / 4 is ' + str(modulo))
# Exponents are done with **
exponent = 2 ** 3
print('2 ^ 3 = ' + str(exponent))
# Order of operations exists
order = 2 + 10 * 10 + 3
print('2 + 10 * 10 + 3 = ' + str(order))
# Is different than
order = (2 + 10) * (10 + 3)
print('(2 + 10) * (10 + 3) = ' + str(order))
# We can also use python to tell us what type a variable is by using type()
print(type(order))     # int
print(type(division))  # float
print(type('Hello'))   # str
fd82f49b6ad4e815800ee9a2f0a1c806c13c0ab6 | Python | Sun-Zhen/leetcode | /0601-0650/0605-CanPlaceFlowers/CanPlaceFlowers.py | UTF-8 | 1,075 | 3.625 | 4 | [
"Apache-2.0"
] | permissive | # -*- coding:utf-8 -*-
"""
@author: Alden
@email: sunzhenhy@gmail.com
@date: 2018/4/2
@version: 1.0.0.0
"""
class Solution(object):
    def canPlaceFlowers(self, flowerbed, n):
        """
        :type flowerbed: List[int]
        :type n: int
        :rtype: bool

        Return True when ``n`` new flowers can be planted without any two
        flowers being adjacent.  Greedy: walk the bed left to right and
        plant in every empty plot whose neighbours are also empty; planting
        as early as possible never blocks a better arrangement.

        The original body was fully commented out and implicitly returned
        None; this implements the intended solution.  The input list is not
        mutated (we plant into a copy).
        """
        remaining = n
        beds = list(flowerbed)  # work on a copy so the caller's list is untouched
        size = len(beds)
        for i in range(size):
            if remaining <= 0:
                break  # already placed everything we need
            if beds[i] != 0:
                continue
            # Edges count as empty neighbours.
            left_empty = i == 0 or beds[i - 1] == 0
            right_empty = i == size - 1 or beds[i + 1] == 0
            if left_empty and right_empty:
                beds[i] = 1
                remaining -= 1
        return remaining <= 0
if __name__ == "__main__":
    # Smoke-test the solver on sample flowerbeds (Python 2 print statements).
    s = Solution()
    print s.canPlaceFlowers([1, 0, 0, 0, 1], 1)
    print s.canPlaceFlowers([1, 0, 0, 0, 1], 2)
    print s.canPlaceFlowers([0, 1, 0, 0, 0, 1], 2)
    print s.canPlaceFlowers([0, 0, 1, 0, 0, 0, 1], 2)
    print s.canPlaceFlowers([0, 0, 0, 1, 0, 0, 0, 1], 3)
    print s.canPlaceFlowers([0, 0, 0, 0, 1, 0, 0, 0, 1], 3)
| true |
b10556f684d42eacdd2a6fd5d24ecfc85fa1345c | Python | HennyJie/CS572-InformationRetrieval | /divide_semi_dataset.py | UTF-8 | 706 | 2.765625 | 3 | [] | no_license | import pandas as pd
# For each dataset and each of its five folds, split train.txt into labeled
# rows (column 0 != -1) and unlabeled rows (column 0 == -1).
for dataset in ['MQ2007semi', 'MQ2008semi']:
    for i in range(1, 6):
        # NOTE(review): absolute, machine-specific path -- parameterize before reuse.
        folder_path = f'/home/xuankan/Documents/CS572-InformationRetrieval/{dataset}/Fold{i}'
        data = pd.read_csv(
            folder_path + '/train.txt', header=None, sep='\s+')
        # Column 0 holds the relevance label; -1 marks an unlabeled row.
        filter_bool = data.iloc[:, 0] == -1
        labeled = data[~filter_bool]
        labeled.to_csv(folder_path + '/train_labeled.txt',
                       sep=' ', header=False, index=False)
        print(labeled.describe())
        unlabel = data[filter_bool]
        print(unlabel.describe())
        unlabel.to_csv(folder_path + '/train_unlabel.txt',
                       sep=' ', header=False, index=False)
| true |
e99b70e32c1fa2fbe5b65072b412ac132ef2daef | Python | akaptur/yaca | /yaca.py | UTF-8 | 936 | 2.96875 | 3 | [] | no_license | from flask import Flask, render_template
from flask_sockets import Sockets
import time
app = Flask(__name__)
app.debug = True  # verbose errors / auto-reload; disable in production
sockets = Sockets(app)  # websocket routing layered over the Flask app
app.messages = []  # chat history shared by the /chat socket and /chatter page
@sockets.route('/echo')
def echo(sock):
    # Websocket endpoint: echo each received message back reversed.
    while True:
        msg = sock.receive()
        print msg
        sock.send(msg[::-1])
@sockets.route('/chat')
def chat(sock):
    # Websocket endpoint: append each message to the shared history and send
    # back the whole history, one "<br>"-terminated entry per message.
    while True:
        msg = sock.receive()
        # NOTE(review): the 10 s sleep delays every message -- looks like a
        # demo/testing artifact; confirm before shipping.
        time.sleep(10)
        app.messages.append(msg)
        sock.send("".join([msg + "<br>" for msg in app.messages]))
@app.route('/')
def home():
    """Serve the echo demo page.

    Hitting / renders echo.html, whose client-side web socket points at the
    /echo route above.
    """
    return render_template('echo.html')
@app.route('/chatter')
def chatter():
    # Render the chat page with the history accumulated so far.
    return render_template('chat.html', messages = app.messages)
if __name__ == '__main__':
    # Flask's built-in server cannot serve websockets; run under gevent.
    print "You must run the app with gevent for socket support"
    app.run(debug = True)
| true |
9b147f2625dfdbee1b720d46cf9c766c99d1b53e | Python | Jie-Knots/Jie | /jie/controller.py | UTF-8 | 1,940 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive | import types
from functools import wraps
from inspect import isawaitable
from sanic.views import HTTPMethodView
from sanic.request import Request
class ViewRoute:
    """Decorate a HTTPMethodView to be registered as a route.
    """
    def __init__(self, app, url, *args, **kwargs):
        """
        :param app: sanic app or blueprint
        :param url: path of the URL
        """
        self.app = app
        self.url = url
    def __call__(self, instance):
        """Add instance's view to app.
        :param instance: an instance of HTTPMethodView
        :return instance
        """
        def decorate(instance):
            # Adopt the decorated object's metadata only once; the __name__
            # guard makes a second application a no-op for wraps().
            if not getattr(self, '__name__', None):
                wraps(instance)(self)
            self.app.add_route(instance.as_view(), self.url)
        decorate(instance)
        return instance
    def __get__(self, instance, cls):
        # Descriptor protocol: when accessed through an instance, behave
        # like a bound method; through the class, return the raw decorator.
        if instance is None:
            return self
        else:
            return types.MethodType(self, instance)
def db_transaction(func):
    """Decorator: run the wrapped handler inside a database transaction.

    Acquires a connection from the app's pool, starts a transaction, passes
    the connection in as the ``db_connection`` keyword argument, commits on
    success and rolls back (re-raising) on failure.  The connection is
    always closed and released.
    """
    @wraps(func)
    async def wrapper(*args, **kwargs):
        # Handlers may be plain functions (request first) or methods
        # (self first, request second).
        request = args[0] if isinstance(args[0], Request) else args[1]
        pool = request.app.env.db_pool
        db_connection = await pool.acquire()
        tr = db_connection.transaction()
        await tr.start()
        try:
            result = func(*args, **dict(kwargs, db_connection=db_connection))
            if isawaitable(result):
                result = await result
        except:
            # Bare except: roll back on *any* failure (including
            # BaseException), then re-raise to the caller.
            await tr.rollback()
            raise
        else:
            await tr.commit()
        finally:
            await db_connection.close()
            await pool.release(db_connection)
        return result
    return wrapper
class DBTransactionView(HTTPMethodView):
    """Base view whose handlers all run inside a database transaction,
    applied via the db_transaction decorator above.
    """
    decorators = [db_transaction]
| true |
90449561f4d7f052e2d94e6468277692be050eb5 | Python | tangyi1989/swift-op | /bench/utils.py | UTF-8 | 1,614 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
#*_* coding=utf8 *_*
import random
import eventlet
import eventlet.pools
from time import time
from cStringIO import StringIO
import swiftclient as client
# 请修改此变量
DATADIR = 'objects'
DEVICE_PATH = '/srv/node/'
PROXY_IP = '127.0.0.1'
ACCOUNT = 'test'
USER = 'testadmin'
KEY = 'testing'
def gen_text(length=1024):
""" Generate random string of given length """
plain_text = "QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm1234567890"
text_length = len(plain_text)
buf = StringIO()
while length > 0:
c = plain_text[random.randint(0, text_length - 1)]
buf.write(c)
length -= 1
buf.seek(0)
return buf.read()
def get_auth_token():
    """
    Get Authenticate token and Storage URL
    Returns:
        (token, storage_url)
    """
    # v1 auth endpoint on the proxy; credentials come from the module
    # constants above (ACCOUNT:USER / KEY).
    auth_url = "http://%s:8080/auth/v1.0" % PROXY_IP
    url, token = client.get_auth(auth_url,
                                 ':'.join((ACCOUNT, USER)),
                                 KEY)
    return (token, url)
def timing_stats(func):
""" Stats function call's time cost """
def wrapped(*args, **kwargs):
start_time = time()
func(*args, **kwargs)
end_time = time()
print 'Cost seconds' % (end_time - start_time)
print 'Function : %s, args : %s, kwargs : %s' % (func, args, kwargs)
return wrapped
class ConnectionPool(eventlet.pools.Pool):
    """Eventlet pool of HTTP connections to a fixed storage URL."""
    def __init__(self, url, size):
        self.url = url
        # min_size == max_size == size: the pool is created at full capacity.
        eventlet.pools.Pool.__init__(self, size, size)
    def create(self):
        # Called by the pool whenever a new connection object is needed.
        return client.http_connection(self.url)
| true |
7c06fc4843eff6d8569bc900ce57e90ea09e03f4 | Python | yeonjudkwl/Algorithm | /swea/Dijkstra_최소이동거리.py | UTF-8 | 859 | 3.03125 | 3 | [] | no_license | import sys
# Dijkstra's shortest path over vertices 0..V; input redirected from a file.
sys.stdin = open("Dijkstra_최소이동거리.txt")
for tc in range(int(input())):
    V, E = map(int, input().split())
    # Adjacency list: adj[u] holds [destination, cost] pairs.
    adj = {i: [] for i in range(V+1)}
    for i in range(E):
        s,e,c = map(int, input().split())
        adj[s].append([e,c])
    INF = float('inf')
    dist = [INF] * (V+1)
    selected = [False] * (V+1)
    dist[0] = 0  # source is vertex 0
    cnt = 0
    while cnt < (V+1):
        # Pick the unselected vertex with the smallest dist value.
        # NOTE(review): `min` shadows the builtin inside this loop.
        min = INF
        u = -1
        for i in range((V+1)):
            if not selected[i] and dist[i] < min:
                min = dist[i]
                u = i
        # Settle (finalize) this vertex.
        selected[u] = True
        cnt += 1
        # Edge relaxation from the settled vertex.
        for w, cost in adj[u]:
            if dist[w] > dist[u] + cost:
                dist[w] = dist[u] + cost
    # Answer: shortest distance from vertex 0 to the last vertex V.
    print("#{} {}".format(tc+1, dist[-1]))
09b7538bcc9f752b8c2060f2d817096826e6f72b | Python | rg3915/py-net-coders | /exemplos/function_args01.py | UTF-8 | 90 | 2.671875 | 3 | [] | no_license | def func(a, b, c):
print(a, b, c)
if __name__ == '__main__':
func(a=1, c=2, b=3)
| true |
054fea92a2e2fb1f4f6887e9f70ac7a1f61daebe | Python | kumopro/pro-tech | /lesson11/sample1.py | UTF-8 | 2,326 | 3.109375 | 3 | [] | no_license | import requests
import json
import wiringpi
import time
from watson_developer_cloud import TextToSpeechV1
import pygame.mixer
def get_forecast():
    """Fetch tomorrow's forecast for city 130010 from the Livedoor weather
    API and return it as a Japanese sentence."""
    url = 'http://weather.livedoor.com/forecast/webservice/json/v1?city=130010'
    data = requests.get(url).json()
    location = data['location']['city']
    forecasts = data['forecasts']
    # forecasts[1]: presumably tomorrow's entry -- confirm the API ordering.
    tomorrow_forecast = forecasts[1]
    tomorrow_weather = tomorrow_forecast['telop']
    # Runtime string kept in Japanese: "Tomorrow's weather in {0} is {1}".
    forecast = '明日の{0}の天気は{1}です'.format(location, tomorrow_weather)
    return forecast
def text2speech(text, filename):
    """Synthesize `text` with IBM Watson TTS and write the MP3 to `filename`."""
    api_key = ''  # NOTE(review): empty credential placeholder -- must be filled in
    url = 'https://stream.watsonplatform.net/text-to-speech/api'
    text_to_speech = TextToSpeechV1(iam_apikey=api_key, url=url)
    # Japanese voice, MP3 output; .content is the raw audio bytes.
    r = text_to_speech.synthesize(text, 'audio/mp3', 'ja-JP_EmiVoice').get_result().content
    with open(filename, 'wb') as audio_file:
        audio_file.write(r)
def play(filename):
    """Play `filename` through pygame for up to 5 seconds, then stop."""
    pygame.mixer.init()
    pygame.mixer.music.load(filename)
    pygame.mixer.music.play()
    time.sleep(5)  # playback is asynchronous; wait before stopping it
    pygame.mixer.music.stop()
    pygame.mixer.quit()
def getDistance(trig_pin, echo_pin):
    """Measure distance (cm) with a trigger/echo ultrasonic sensor."""
    ### trigger
    wiringpi.digitalWrite(trig_pin, wiringpi.HIGH)
    time.sleep(0.00001) # [sec]
    wiringpi.digitalWrite(trig_pin, wiringpi.LOW)
    ### response time
    # Busy-wait for the echo pulse edges and timestamp them.
    while wiringpi.digitalRead(echo_pin) == 0:
        time_begin = time.time()
    while wiringpi.digitalRead(echo_pin) == 1:
        time_end = time.time()
    t = time_end - time_begin
    ### calculate distance
    # t * (speed of sound in cm/s) / 2 -- the echo travels the distance twice.
    distance = t * 17000
    return distance
def save_forecast_audio(forecast_filename):
    """Fetch the forecast text and save it as synthesized speech."""
    forecast = get_forecast() # 'forecast' is the English word for Japanese "予報"
    print(forecast)
    text2speech(forecast, forecast_filename)
def main():
    """Poll the distance sensor: something within 15 cm triggers the spoken
    forecast; within 8 cm the loop exits."""
    trig_pin = 17
    echo_pin = 27
    wiringpi.wiringPiSetupGpio()
    wiringpi.pinMode(trig_pin, wiringpi.OUTPUT)
    wiringpi.pinMode(echo_pin, wiringpi.INPUT)
    wiringpi.digitalWrite(trig_pin, wiringpi.LOW)
    forecast_filename = 'forecast.mp3'
    while True:
        distance = getDistance(trig_pin, echo_pin)
        print(distance)
        time.sleep(0.5) # [sec]
        if distance < 15:
            # Generate fresh audio, give the file a moment, then play it.
            save_forecast_audio(forecast_filename)
            time.sleep(1)
            play(forecast_filename)
        if distance < 8:
            break
main()
| true |
f32e4939789f98726ca04a082fbf112c33973b1b | Python | drakenclimber/hookster | /checks/CheckCopyright.py | UTF-8 | 4,580 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
#****************************************************************************
# ©
# Copyright 2014-2015 Tom Hromatka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See this page for more info on abstract python classes:
# https://julien.danjou.info/blog/2013/guide-python-static-class-abstract-methods
#
#****************************************************************************
#****************************************************************************
# Imports
#****************************************************************************
# python standard imports
import time

# project-specific imports
from abstractCheck import AbstractCheck
from hooksterExceptions import *
from logManager import *
#****************************************************************************
# Constants
#****************************************************************************
KEY_FIRST_LINE = "first_line"            # config key: first line to scan (0-based)
KEY_LAST_LINE = "last_line"              # config key: last line to scan
KEY_COPYRIGHT_OWNER = "copyright_owner"  # config key: required owner string
CHECK_TO_END_OF_FILE = -1                # last_line sentinel: scan to end of file
NOT_FOUND = -1                           # str.find() failure value
#****************************************************************************
# Functions
#****************************************************************************
#****************************************************************************
# Classes
#****************************************************************************
class CheckCopyright(AbstractCheck):
    """Check that a file carries an up-to-date copyright line.

    The scanned line range and the required owner string come from the
    check's config dict (KEY_FIRST_LINE / KEY_LAST_LINE /
    KEY_COPYRIGHT_OWNER).
    """

    def __init__(self, config, check_name, check_config_dict):
        """
        Initialize this check
        """
        # Bug fix: super(type(self), self) recurses infinitely if this class
        # is ever subclassed; name the class explicitly instead.
        super(CheckCopyright, self).__init__(config, check_name, check_config_dict)

    def check_file(self, file_obj):
        """
        Run this check against the file_obj parameter

        Raises CheckException when no line in the configured range contains
        'copyright'/'Copyright' together with the current year and the
        configured copyright owner.
        """
        log("Running " + self.check_name + " on " + file_obj.filename)

        current_year = time.strftime("%Y", time.localtime())
        hint = None

        if file_obj.contents is None:
            # This file is empty (likely being deleted); nothing to check.
            return

        # Hoist config reads out of the scan loop.
        first_line = int(self.check_config_dict[KEY_FIRST_LINE])
        last_line = int(self.check_config_dict[KEY_LAST_LINE])
        owner = self.check_config_dict[KEY_COPYRIGHT_OWNER]

        found_copyright = False
        for line_number, line in enumerate(file_obj.contents.splitlines()):
            if last_line != CHECK_TO_END_OF_FILE and line_number > last_line:
                # Past the configured range; stop scanning.
                break
            if line_number < first_line:
                continue

            # `x in line` replaces the convoluted `line.find(x) != NOT_FOUND`.
            if "copyright" not in line and "Copyright" not in line:
                continue

            # Likely the copyright line: require the current year ...
            if current_year not in line:
                hint = "Line %d contains 'copyright' but does not contain the current year" % (line_number + 1)
                continue
            # ... and the configured owner.
            if owner not in line:
                hint = "Line %d contains 'copyright' but does not contain the correct copyright information\n" \
                       "Expected to find the copyright owner: %s" % \
                       (line_number + 1, owner)
                continue

            # All checks passed: a valid copyright line was found.
            found_copyright = True
            break

        if not found_copyright:
            # No valid copyright string; fail this check with the best hint.
            exception_string = "Failed to find the copyright line in %s" % file_obj.filename
            if hint is not None:
                exception_string += "\n%s" % hint
            raise CheckException(exception_string)
| true |
ebbded9c9c3266393af4272646534fb60d4a187b | Python | charry07/MisionTic2022 | /Mi Cliclo 1 - py/ColasPormii.py | UTF-8 | 195 | 2.5625 | 3 | [] | no_license | from claseCola import Cola
import random
# Demo script: exercise the Cola class (Spanish for "queue") from claseCola.
# Enqueue four random integers in [0, 10] (randint is inclusive on both ends).
cola = Cola()
for i in range(4):
    cola.encolar(random.randint(0,10))
# Print the queue contents (presumably to stdout — defined in claseCola).
cola.imprimirCola()
# Enqueue one more, fixed value and print again.
cola.encolar(6)
cola.imprimirCola()
# Remove one element; 'd' holds the removed value.
# NOTE(review): assumes Cola is FIFO (encolar=enqueue, desencolar=dequeue) —
# confirm against claseCola.
d = cola.desencolar()
| true |
4220409b7f8b8b38a92abe5ffb5c48c056dcc332 | Python | Yurikimkrk/Alg | /les2/2.1.py | UTF-8 | 1,919 | 4.15625 | 4 | [] | no_license | # # https://drive.google.com/file/d/1xm-VHEQfWt9csqacq6HPm3QLOxXXuiG3/view?usp=sharing
# 1. Написать программу, которая будет складывать, вычитать, умножать или делить
# два числа. Числа и знак операции вводятся пользователем. После выполнения вычисления
# программа не завершается, а запрашивает новые данные для вычислений.
# Завершение программы должно выполняться при вводе символа '0' в качестве
# знака операции. Если пользователь вводит неверный знак (не '0', '+', '-', '*', '/'),
# программа должна сообщать об ошибке и снова запрашивать знак операции.
# Также она должна сообщать пользователю о невозможности деления на ноль,
# если он ввел его в качестве делителя.
sign = '+'
while sign != '0':
num1 = float(input("input any number (1): "))
num2 = float(input("input any number (2): "))
sign = input("input the operation sign (0 - end of the program): ")
if sign == '+':
answer = num1 + num2
print(f'{num1} + {num2} = {answer}')
elif sign == '-':
answer = num1 - num2
print(f'{num1} - {num2} = {answer}')
elif sign == '*':
answer = num1 * num2
print(f'{num1} * {num2} = {answer}')
elif sign == '/':
if num2 == 0:
print("you can't divide by zero")
else:
answer = num1 / num2
print(f'{num1} / {num2} = {answer}')
else:
if sign != "0":
print("wrong sign")
else:
print("end of program") | true |
1cec80e460bc004507ed676557ce839c35c2add8 | Python | gummoe/nava-quality-measure-reader | /tests/test_record.py | UTF-8 | 1,352 | 3.15625 | 3 | [] | no_license | import unittest
from domain.record import Record
from domain.record_item import RecordItem
from domain.schema_field import SchemaField
class RecordTest(unittest.TestCase):
def test_init(self):
record = Record()
self.assertEqual([], record.record_items)
def test_add_record_item(self):
schema_field = SchemaField('', 0, 'TEXT')
record_item = RecordItem(1, schema_field)
record = Record()
record.add_record_item(record_item)
record.add_record_item(record_item)
self.assertEqual(2, len(record.record_items))
def test_to_dict(self):
record = Record()
schema_field_text = SchemaField('field1', 10, 'TEXT')
record_item_text = RecordItem('hello', schema_field_text)
record.add_record_item(record_item_text)
schema_field_int = SchemaField('field2', 11, 'INTEGER')
record_item_int = RecordItem(18, schema_field_int)
record.add_record_item(record_item_int)
schema_field_bool = SchemaField('field3', 1, 'BOOLEAN')
record_item_bool = RecordItem(0, schema_field_bool)
record.add_record_item(record_item_bool)
expected_output = {
'field1': 'hello',
'field2': 18,
'field3': False
}
self.assertEqual(expected_output, record.to_dict())
| true |
b01593dc4c523f224ba9b32a09612ef3a766314b | Python | ravichalla/wallbreaker | /week2/candies.py | UTF-8 | 573 | 3.28125 | 3 | [
"MIT"
] | permissive | class Solution(object):
def distributeCandies(self, candies):
total_candy = len(candies)
candy_set = set(candies)
candy_list = list(candy_set)
if len(candy_set) >= total_candy / 2:
return total_candy / 2
else:
# return len(candy_list+ candies[:(total_candy/2)-len(candy_list)])
return len(candy_list)
'''
Ideas/thoughts:
Create a set and the sister will get half the len of candy list, if there are completely different varieties
If not , then sister will get len of different candies.
''' | true |