blob_id stringlengths 40 40 | repo_name stringlengths 5 119 | path stringlengths 2 424 | length_bytes int64 36 888k | score float64 3.5 5.22 | int_score int64 4 5 | text stringlengths 27 888k |
|---|---|---|---|---|---|---|
1426dfb22dc5166aa8ba57adee8b94ed586a03ed | Sat0shi/Cp1404_Pracs | /Prac 5/colour_names.py | 649 | 4.40625 | 4 | colours = {'rebeccapurple': '#663399', 'darkorange': '#ff8c00', 'cyan2': '#00eeee',
'cornflowerblue': '#6495ed', 'darkseagreen': '#8fbc8f', 'deeppink1': '#ff1493',
'darkslateblue': '#483d8b', 'dodgerblue1': '#1e90ff', 'goldenrod': '#daa520',
'lightslateblue': '#8470ff'}
colour_name = input('Please enter the name of a colour: ').lower()
while colour_name != '':
if colour_name in colours:
print(colours[colour_name])
else:
print('Please choose a valid colour from this list: ')
for key in colours:
print(key)
colour_name = input('Please enter the name of a colour: ') |
47097ab6612e30df6d93ebf52fa0caa14d2d7110 | brittany-morris/Bootcamp-Python-and-Bash-Scripts | /python fullstackVM/lines_lines_lines.py | 259 | 3.65625 | 4 | #!/usr/bin/env python3
import sys
def main():
    """Collapse a file's non-blank lines into one space-separated line on stdout.

    The path of the file to read is taken from the first command-line argument.
    Each non-empty (after stripping) line contributes its text plus one trailing
    space to the printed result.
    """
    source_path = sys.argv[1]
    pieces = []
    with open(source_path, 'r') as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if stripped:
                pieces.append(stripped + ' ')
    print(''.join(pieces))


if __name__ == '__main__':
    main()
|
c4b40b5611a14f302ef0fe9993526cbdecaf2f90 | AMGitsKriss/GWI | /src/preprocess.py | 1,824 | 3.609375 | 4 | import pandas as pd
import numpy as np
# Loading the file. If not csv, assumes hdf
def load_data(filename):
    """Load a dataframe from *filename*: CSV when it ends in '.csv', HDF otherwise.

    The first CSV column becomes the index; HDF files are read under key 'df'.
    """
    if not filename.endswith('.csv'):
        return pd.read_hdf(filename, key='df')
    return pd.read_csv(filename, index_col=0)
# Load the specified file as a series (squeeze), then name it q3 so it's easy to merge.
def load_series(filename):
    """Load *filename* as a pandas Series named ``q3`` (easy to merge later).

    Fixed: ``pd.read_csv(..., squeeze=True)`` was deprecated in pandas 1.4 and
    removed in pandas 2.0; ``DataFrame.squeeze("columns")`` is the supported
    equivalent and produces the same single-column Series.
    """
    return pd.read_csv(filename, index_col=0, names=["q3"]).squeeze("columns")
# Return a version of the dataset with only the first occurence of each row
def drop_duplicates(dataset):
    """Keep only the first occurrence of each index value in *dataset*."""
    first_per_index = dataset.groupby(dataset.index).nth(0)
    return first_per_index
# Save the dataframe. duh.
def save_dataframe(dataset, filename):
    """Write *dataset* to CSV; warn (don't raise) when the target can't be written.

    A locked file (e.g. open in Excel) triggers the warning branch.
    """
    try:
        dataset.to_csv(filename)
    except IOError:
        print("<Error writing file> Is it already open?")
# Merge new data into the existing dataframe by appending columns, not rows.
def merge_new(dataset, new_column):
    """Append *new_column* to *dataset* column-wise (index-aligned concat)."""
    combined = pd.concat([dataset, new_column], axis=1)
    return combined
def main():
    """CLI entry point: clean the dataframe, merge in the q3 series, save the result."""
    # Borrowing this from the code you sent me
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('dfname', help='Path to a dataframe CSV or HDF file')
    parser.add_argument('seriesname', help='Path to a series CSV file')
    args = parser.parse_args()

    # Load the main table, drop duplicate-index rows, then rows missing q2/q4.
    df = drop_duplicates(load_data(args.dfname))
    df = df.dropna(subset=['q2', 'q4'])

    # Attach the q3 series as an extra column.
    q3 = load_series(args.seriesname)
    df = merge_new(df, q3)

    # Only persist the merge when every q3 value survived it.
    if df.q3.count() == q3.count():
        save_dataframe(df, "complete.csv")
        print("Done!")
    else:
        print("The q3 values weren't copied correctly.")


if __name__ == '__main__':
    main()
|
70908d4b96b50f04a2e0df5a6ed4e1158b6e8936 | vpetrigo/courses | /programming/adaptive-python/group_2_1/check_graph_matrix.py | 523 | 3.8125 | 4 | #!/usr/bin/env python3
# coding=utf-8
import sys
def naive_adj_matrix_check(matrix):
    """Return True iff *matrix* is a symmetric (undirected) adjacency matrix.

    Compares every entry [i][j] against its mirror [j][i] in O(n^2).

    Fixed: the original attached an ``else`` clause to the inner ``for`` loop;
    since the loop never ``break``s, the ``else`` ran as soon as row 0 passed,
    returning True without ever inspecting rows 1..n-1. Success must only be
    reported after the full double loop completes.
    """
    n = len(matrix)
    for i in range(n):
        for j in range(n):
            if matrix[i][j] != matrix[j][i]:
                return False
    return True
def main():
    """Read an adjacency matrix from stdin and print YES/NO for symmetry.

    Input format: first line is "n m..." (only n is used), followed by n rows
    of the matrix.
    """
    rows = (line.split() for line in sys.stdin)
    header = [int(tok) for tok in next(rows)]
    n = header[0]
    matrix = tuple(tuple(int(tok) for tok in next(rows)) for _ in range(n))
    answer = "YES" if naive_adj_matrix_check(matrix) else "NO"
    print(answer)


if __name__ == "__main__":
    main()
|
d27869c33e6ba5a175e74d01c120815a914317cb | Training-and-Learning-Skills/alien_invasion | /ship.py | 1,543 | 3.8125 | 4 | import pygame
class Ship():
    def __init__(self, screen, ai_settings):
        """Initialize the ship and place it at the bottom center of the screen.

        :param screen: pygame display surface the ship is drawn onto.
        :param ai_settings: settings object; ``ship_speed_factor`` is read
            each frame by ``update``.
        """
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the ship sprite and grab its bounding rectangle plus the screen's.
        self.image = pygame.image.load('images/ship.bmp')
        #self.image = pygame.image.load('images/mordomo.bmp')  # alternate sprite, kept for reference
        self.rect = self.image.get_rect()
        self.screen_rect = self.screen.get_rect()
        # Start each new ship at the bottom center of the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # Keep the x position as a float so fractional speed factors
        # accumulate (rect coordinates are integral in pygame).
        self.center = float(self.rect.centerx)
        # Movement flags, toggled elsewhere by the key-event handlers.
        self.moving_right = False
        self.moving_left = False
def update(self):
"""Upgrades position of the spaceship according to
Flag of movement."""
#upgrades the value of the center of the ship, not the rectangle
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor
#Upgrades object according with self.center
self.rect.centerx = self.center
def blitme(self):
"""Draw the spaceship in your current position."""
self.screen.blit(self.image, self.rect) |
8c9ddb9b05a08989c45b160d2a2e325ce750c9e7 | PeterPanonGit/ir_group_project | /preprocessing/_test.py | 843 | 3.53125 | 4 | import calendar
#set firstweekday=0
cal= calendar.Calendar(firstweekday=0)
# for x in cal.iterweekdays():
# print(x)
data_week = []
#for year in range(2004,2009):
# for month in range(1,12):
# year = 2004
# month = 1
# for week in cal.monthdayscalendar(2004, 1):
# for (weekday,monthday) in enumerate(week):
# if monthday == 0:
# continue
#print cal.iterweekdays(2004)
from dateutil import rrule
from datetime import datetime, timedelta

now = datetime.now()
hundredDaysLater = now + timedelta(days=100)

start = datetime(2004, 1, 5)
end = datetime(2004, 3, 5)
# Walk the range week by week, printing every day of each week.
# Fixed: the original used Python 2 `print` statements, which are syntax
# errors under Python 3; the function form below prints the same fields.
for dt in rrule.rrule(rrule.WEEKLY, dtstart=start, until=end):
    print(dt, dt.weekday())
    for dt2 in rrule.rrule(rrule.DAILY, dtstart=dt, until=dt + timedelta(days=6)):
        print("    ", dt2, dt2.weekday())
#print calendar.weekday(2004, 1, 5) |
5c999338ad00f46b88246da44db9aeab4a327df2 | AyoKun/CodeKata | /Great.py | 227 | 3.984375 | 4 | n1 = int(input("Enter the first number : "))
# Read the remaining two integers (the first, n1, is read above).
n2 = int(input("Enter the second number : "))
n3 = int(input("Enter the third number : "))
# Track the running maximum, seeded with the first value.
grt = n1
# Promote any later value that beats the current candidate.
if(n2>grt):
    grt = n2
if(n3>grt):
    grt = n3
# Report the largest of the three inputs.
print("The Greatest number : ",grt)
|
43c1a57c017425e7039918dd253272253712841b | ColinFendrick/cleaning-data-in-python | /exploring-data/loading-viewing-data.py | 334 | 3.5 | 4 | import pandas as pd
# Read the file into a DataFrame: df
df = pd.read_csv('../_datasets/dob_job_application_filings_subset.csv')

# Print the head of df
print(df.head())

# Print the tail of df
print(df.tail())

# Print the shape of df
print(df.shape)

# Print the columns of df
print(df.columns)

# Fixed: `print(df.info)` printed the bound-method object, not the summary.
# DataFrame.info() writes its report to stdout itself, so call it directly.
df.info()

print(df.describe())
|
4de15391ad0a0c7d9cdb4f8906d748fee9efc8a0 | anubhav-shukla/Learnpyhton | /dunc_as_arg.py | 616 | 4.25 | 4 | # sorry not dunc
# it is function
# def square(a):
# return a**2
# map
l=[1,2,3,4]
sq=map(lambda a:a**2,l)
print(list(sq)) #you get the output
# today make a function that take as a input a function\\
def my_map(func, l):
    """Hand-rolled version of map(): apply *func* to every element of *l*.

    Returns the transformed values as a new list; *l* is left untouched.
    """
    transformed = []
    for element in l:
        transformed.append(func(element))
    return transformed
print(my_map(lambda a:a**3,l))#Hope now you able to do it...ArithmeticError
# /now you make a func like map
# Hope it help you
# cAN we do it with list comprehension
def map_fun2(func, l):
    """Same idea as my_map, written as a single list comprehension."""
    return [func(element) for element in l]
print(map_fun2(lambda a:a**3,l))
|
a9b7014aaeefabb5c62012d7f8e21637592abfdd | TetianaSob/Python-Projects | /access_value_list.py | 877 | 4 | 4 | # Indexes
friends = ["Ashley", "Matt", "Michael"]
print(friends[0]) # Ashley
print(friends[1]) # Matt
print(friends[2]) # Michael
print("\n")
colors = ["purple", "teal", "orange"]
print(colors[0]) # purple
print(colors[1]) # teal
print(colors[2]) # orange
print("\n")
the_best = colors[0]
print(the_best) # purple
print("\n")
friends = ["Ashley", "Matt", "Michael"]
print(friends[-1]) # Michael
print("\n")
colors = ["purple", "teal", "orange"]
print(colors[-1]) # orange
print("\n")
# is in the list
friends = ["Ashley", "Matt", "Michael"]
print("Ashley" in friends) # True
print("Colt" in friends) # False
print("\n")
###
colors = ["purple", "teal", "orange"]
print("purple" in colors) # True
print("Purple" in colors) # False
if "teal" in colors:
print("YOU HAVE A GOOD TASTE IN COLORS!") # YOU HAVE A GOOD TASTE IN COLORS! |
4c86b589c25f013ae5f1f06bb2d7a75e079322e1 | gwavre/ML_project1 | /scripts/preprocessing.py | 18,365 | 3.875 | 4 | import numpy as np
import matplotlib.pyplot as plt
"""Implements functions that can modify data in any way.
In particular, "utility functions" will help with sampling, shuffling, splitting
without actually pre-processing the data.
"Pre-processing" functions will be able to modify or add features, filter,
remove corrupted data such as entries with -999 values, etc.
Main author (90%+) : Richie Yat-tsai Wan (258934)
"""
#=========================================================================#
#======== Utility functions ========#
#=========================================================================#
def batch_iter(y, tx, batch_size=1, num_batches=1, shuffle=True):
    """Yield up to `num_batches` mini-batches of matching (y, tx) slices.

    When `shuffle` is True the samples are randomly permuted first, so the
    batches are not biased by the original ordering. Batches that would be
    empty (start past the end of the data) are skipped.

    Example of use :
    for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):
        <DO-SOMETHING>
    """
    n_samples = len(y)
    if shuffle:
        order = np.random.permutation(np.arange(n_samples))
        labels, features = y[order], tx[order]
    else:
        labels, features = y, tx
    for batch_idx in range(num_batches):
        lo = batch_idx * batch_size
        hi = min(lo + batch_size, n_samples)
        if lo != hi:
            yield labels[lo:hi], features[lo:hi]
def split_data(x, y, ratio, myseed=None):
    """Randomly split (x, y) into train/test parts.

    :param x: feature array, indexed per sample along the first axis.
    :param y: target array, same length as x.
    :param ratio: fraction of samples assigned to the training split.
    :param myseed: optional seed forwarded to np.random.seed (None by default).
    :returns: (x_train, x_test, y_train, y_test)
    """
    np.random.seed(myseed)
    sample_count = len(y)
    shuffled = np.random.permutation(sample_count)
    cut = int(np.floor(ratio * sample_count))
    train_idx, test_idx = shuffled[:cut], shuffled[cut:]
    return x[train_idx], x[test_idx], y[train_idx], y[test_idx]
def sample_data(y, x, size_samples):
    """Draw `size_samples` random (y, x) pairs, keeping rows aligned."""
    shuffle_idx = np.random.permutation(y.shape[0])
    return y[shuffle_idx][:size_samples], x[shuffle_idx][:size_samples]
#=========================================================================#
#======== Pre-processing functions ========#
#=========================================================================#
def standardize(x):
    """Center *x* to zero mean and scale it to unit variance.

    NOTE: statistics are computed over the ENTIRE array (no axis argument),
    not per feature column.
    """
    centered = x - np.mean(x)
    return centered / np.std(centered)
def add_bias(tx):
    """Prepend a column of ones (the bias/intercept term) to *tx*.

    Input : tx, np.array of dim N x D
    Output : np.array of dim N x (D+1)
    """
    ones = np.ones((tx.shape[0], 1))
    return np.hstack((ones, tx))
def build_poly(x, degree):
    """Polynomial basis expansion of *x* for powers j=0 (bias) up to j=degree."""
    rows = len(x)
    columns = [np.ones((rows, 1))]
    for power in range(1, degree + 1):
        columns.append(np.power(x, power).reshape(rows, -1))
    return np.concatenate(columns, axis=1)
def convert_label(y):
    """Map labels from {-1, 1} to {0, 1} for logistic regression.

    The input array is left untouched; a converted copy is returned.
    """
    return np.where(y == -1, 0, y)
def replace_999_nan(tx):
    """Return a copy of *tx* with every -999 sentinel replaced by NaN.

    Working on NaNs lets the nan-aware numpy reductions ignore the
    corrupted entries downstream.
    """
    cleaned = tx.copy()
    sentinel_mask = (cleaned == -999)
    cleaned[sentinel_mask] = np.nan
    return cleaned
def replace_999_mean(tx):
    """Return a copy of *tx* with -999 entries replaced by their column *mean*.

    The mean of each column is computed while ignoring the -999 entries
    (they are temporarily turned into NaN), then written back in their place.
    """
    cleaned = tx.copy()
    cleaned[cleaned == -999] = np.nan
    col_means = np.nanmean(cleaned, axis=0)
    rows, cols = np.where(np.isnan(cleaned))
    cleaned[rows, cols] = col_means[cols]
    return cleaned
def replace_999_median(tx):
    """Return a copy of *tx* with -999 entries replaced by their column *median*.

    The median of each column is computed while ignoring the -999 entries
    (they are temporarily turned into NaN), then written back in their place.
    """
    cleaned = tx.copy()
    cleaned[cleaned == -999] = np.nan
    col_medians = np.nanmedian(cleaned, axis=0)
    rows, cols = np.where(np.isnan(cleaned))
    cleaned[rows, cols] = col_medians[cols]
    return cleaned
def replace_outliers(tx, conf_level = 1):
    """Replaces outliers that aren't in the defined confidence interval by the median
    Input : tx (np.array),
            conf_level (int), takes values : 0 (68%), 1 (95%), 2 (99.7%)
    Output : tx (np.array), without outliers

    NOTE(review): see the inline review notes below -- both the row/column
    indexing and the way the result is accumulated look suspicious and
    should be double-checked before trusting this function.
    """
    if conf_level is None:
        conf_level = 1;
    #Computing mean, standard deviation, median of all features column-wise
    mean_of_feat = np.nanmean( tx, axis = 0)
    std_of_feat = np.nanstd( tx, axis = 0)
    med_of_feat = np.nanmedian( tx, axis = 0)
    #Getting the boundaries of the confidence interval
    # (standard error of the mean scaled by (conf_level+1) sigmas).
    # NOTE(review): len(tx[0]) is the number of FEATURES, not the sample
    # count -- confirm sqrt(len(tx[0])) is intended rather than sqrt(len(tx)).
    max_conf_int = mean_of_feat + (conf_level+1) * std_of_feat / np.sqrt( len( tx[0] ) )
    min_conf_int = mean_of_feat - (conf_level+1) * std_of_feat / np.sqrt( len( tx[0] ) )
    for i in range( len( tx[0] ) ):
        # NOTE(review): `tx[i]` selects ROW i although the loop runs over
        # feature indices; a per-feature test would use tx[:, i].
        # NOTE(review): the result is recomputed from the ORIGINAL tx on
        # every iteration, so only the last iteration's replacement survives.
        #print('in feature index', i, np.count_nonzero( ( tx[i] > max_conf_int[i]) | (tx[i] < min_conf_int[i] ) ), 'outliers, with confidence intervalle', conf_level) #can be put in comment
        tx_train_without_out = np.where( (tx[i] > max_conf_int[i]) | (tx[i] < min_conf_int[i]) , med_of_feat[i], tx) #replace values if it isn't in Confidence intervalle
    return tx_train_without_out
def prijetnum_indexing(tx, jetcol=22):
    """Boolean masks selecting the four PRI_jet_num categories (values 0..3).

    :param tx: 2-D dataset; column *jetcol* holds the PRI_jet_num value.
    :returns: tuple of four boolean masks, one per jet number.
    """
    jet_values = tx[:, jetcol]
    return tuple((jet_values == jet) for jet in range(4))
def prijetnum_clustering(tx, y=None, jetcol=22):
    """Split the data into four clusters, one per PRI_jet_num value (0..3).

    PRI_jet_num lives in column *jetcol* (22 by default).
    Input : tx and optionally y (training targets).
    Output: with y -> the four clusters interleaved with their targets;
            without y (test set) -> the four clusters interleaved with
            their boolean index masks, needed later for prediction.
    """
    jets = tx[:, jetcol]
    masks = [(jets == jet) for jet in range(4)]
    clusters = [tx[mask] for mask in masks]
    if y is not None:
        print("Prediction targets detected. Using a training set. \n Returning clusterized dataset and targets. \n")
        targets = [y[mask] for mask in masks]
        return (clusters[0], targets[0], clusters[1], targets[1],
                clusters[2], targets[2], clusters[3], targets[3])
    print("No targets detected. Using a test-set. \n Returning clusterized dataset and indices. \n")
    return (clusters[0], masks[0], clusters[1], masks[1],
            clusters[2], masks[2], clusters[3], masks[3])
def delete_features(tx):
    """Drop every column of *tx* whose entries are ALL -999.

    Returns the reduced copy plus the list of removed column indices,
    which is needed later to preprocess the test set / re-expand weights.
    """
    data = tx.copy()
    dropped = [col for col in range(data.shape[1]) if np.all(data[:, col] == -999)]
    data = np.delete(data, dropped, 1)
    print(len(dropped), 'features deleted')
    return data, dropped
def reexpand_w(w, idx):
    """Re-insert zeros into the weight vector at previously deleted positions.

    *idx* is the list of deleted feature indices produced by delete_features;
    the returned vector has the original (pre-deletion) dimension, so it can
    be used for prediction on unreduced data.
    """
    expanded = w.copy()
    for position in idx:
        expanded = np.insert(expanded, position, 0)
    return expanded
#=========================================================================#
#======== Cluster-processing functions ========#
#=========================================================================#
def cluster_log(tx0, tx1, tx2, tx3, feat):
    """Apply ln(x + 0.01) to the selected feature columns of each cluster.

    The 0.01 shift avoids log(0); the inputs are left untouched and fresh
    copies are returned.
    """
    results = []
    for cluster in (tx0, tx1, tx2, tx3):
        shifted = np.copy(cluster)
        shifted[:, feat] = np.log(shifted[:, feat] + 0.01)
        results.append(shifted)
    return tuple(results)
def cluster_std(t0, t1, t2, t3):
    """Standardize each clusterized dataset to zero mean and unit variance.

    Statistics are taken over each whole array (same behaviour as the
    module-level standardize helper, inlined here).
    """
    normalized = []
    for cluster in (t0, t1, t2, t3):
        centered = cluster - np.mean(cluster)
        normalized.append(centered / np.std(cluster))
    return tuple(normalized)
def cluster_replace(t0, t1, t2, t3, f="mean"):
    """Replace leftover -999 sentinels in all four clusters.

    *f* selects the statistic ("mean" or "median"); any other value leaves
    the clusters untouched (with a warning). Meant to run after
    delete_features has removed the all--999 columns.
    """
    if f == "mean":
        print("Replacing -999 values with mean")
        t0, t1, t2, t3 = (replace_999_mean(t) for t in (t0, t1, t2, t3))
    if f == "median":
        print("Replacing -999 values with median")
        t0, t1, t2, t3 = (replace_999_median(t) for t in (t0, t1, t2, t3))
    if f != "mean" and f != "median":
        print("Invalid f detected. Returning un-processed datasets")
    return t0, t1, t2, t3
def cluster_buildpoly(t0, t1, t2, t3, degs):
    """Polynomially expand each cluster with its own optimal degree.

    degs[k] is the degree found for cluster k during cross-validation.
    """
    return tuple(build_poly(cluster, degree)
                 for cluster, degree in zip((t0, t1, t2, t3), degs))
def cluster_preprocessing_train(tx_train,y,num2name, f="mean"):
    """Full preprocessing pipeline for the whole TRAINING set.

    input : tx_train (np.array), whole training set
            y (np.array), whole training target
            f (str), = "mean" or "median" or write anything else to ignore
            num2name (dict), the keys mapping feature numbers to their name. (See proj1_helpers: mapping)
    Clusters the data w.r.t. PRI_jet_num, applies log to long-tailed features,
    removes all--999 columns, replaces remaining -999 values with f (mean by
    default), standardizes, and returns the clusters, their targets, and the
    deleted column indices (needed to preprocess the test set the same way).
    """
    print("PREPROCESSING TRAIN DATA \n Clustering w.r.t. to PRI_jet_num numbers")
    tx0, y0, tx1, y1, tx2, y2, tx3, y3 = prijetnum_clustering(tx_train,y)
    # NOTE(review): cluster 0's last column is dropped here -- presumably it
    # is degenerate when PRI_jet_num == 0; confirm against the feature list.
    print("REMOVING LAST COL for TX0")
    tx0 = np.delete(tx0,-1,1)
    # Natural log of the features with a long-tail distribution.
    log_features = [1,2,3,8,9,10,13,16,19,21]
    print("Taking the log of the following features : \n",[num2name.get(key) for key in log_features])
    tx_df0, tx_df1, tx_df2, tx_df3 = cluster_log(tx0,tx1,tx2,tx3,log_features)
    # Drop the columns that are entirely -999; keep the indices so the test
    # set can be reduced identically later.
    print("Removing features with all -999 rows. Returning indices for later")
    tx_df0, id_del0 = delete_features(tx_df0)
    tx_df1, id_del1 = delete_features(tx_df1)
    tx_df2, id_del2 = delete_features(tx_df2)
    tx_df3, id_del3 = delete_features(tx_df3)
    # Replace any remaining -999 values with the mean/median of their column.
    tx_df0, tx_df1, tx_df2, tx_df3 = cluster_replace(tx_df0, tx_df1, tx_df2, tx_df3,f)
    # Standardize each cluster.
    print("Standardizing : Setting mean to 0 and variance to 1")
    tx_df0, tx_df1, tx_df2, tx_df3 = cluster_std(tx_df0, tx_df1, tx_df2, tx_df3)
    print("Preprocessing done")
    return tx_df0, y0, tx_df1, y1, tx_df2, y2, tx_df3, y3, id_del0, id_del1, id_del2, id_del3
def cluster_preprocessing_test(tX_test, id_del0, id_del1, id_del2, id_del3, degs, num2name,f="mean"):
    """Full preprocessing pipeline for the whole TEST set.

    input : tX_test (np.array), whole test set
            id_del0, ..., id_del3, indices of the columns deleted by
                cluster_preprocessing_train (applied here for consistency)
            degs (list), degrees for build_poly found during crossvalidation gridsearch
            num2name (dict), the keys mapping feature numbers to their name. (See proj1_helpers: mapping)
            f (str), = "mean" or "median" or write anything else to ignore.
    Mirrors the training pipeline (clustering, log, column removal, -999
    replacement, standardization) and additionally expands the features
    polynomially, returning the clusters and their index masks.
    """
    print("PREPROCESSING TEST DATA \n Clustering w.r.t. to PRI_jet_num numbers")
    test0, i0, test1, i1, test2, i2, test3, i3, = prijetnum_clustering(tX_test)
    # Same cluster-0 column removal as in the training pipeline.
    print("REMOVING LAST COL for TX0")
    test0 = np.delete(test0,-1,1)
    # Natural log of the features with a long-tail distribution.
    log_features = [1,2,3,8,9,10,13,16,19,21]
    print("Taking the log of the following features : \n",[num2name.get(key) for key in log_features])
    test0,test1,test2,test3 = cluster_log(test0,test1,test2,test3,log_features)
    # Delete the columns dropped during training (ids come from the train run).
    print("deleting corresponding columns")
    test0 = np.delete(test0,id_del0,1)
    test1 = np.delete(test1,id_del1,1)
    test2 = np.delete(test2,id_del2,1)
    test3 = np.delete(test3,id_del3,1)
    # Replace any remaining -999 values with the mean/median of their column.
    test0, test1, test2, test3 = cluster_replace(test0, test1, test2, test3, f)
    # Standardize each cluster.
    print("Standardizing : Setting mean to 0 and variance to 1")
    test0, test1, test2, test3 = cluster_std(test0, test1, test2, test3)
    # Expand features with the per-cluster optimal degrees from CV.
    print("Augmenting features")
    test0, test1, test2, test3 = cluster_buildpoly(test0,test1,test2,test3,degs)
    print("Preprocessing done, returning clusterized test set and indices")
    return test0, i0, test1, i1, test2, i2, test3, i3
#####################################################
def cluster_preprocessing_train_alt(tx_train,y,num2name, f="median"):
    """Alternative training pipeline: preprocess BEFORE clustering.

    input : tx_train (np.array), whole training set
            y (np.array), whole training target
            f (str), = "mean" or "median" or write anything else to ignore
            num2name (dict), the keys mapping feature numbers to their name. (See proj1_helpers: mapping)
    Replaces -999 values, logs long-tailed features and standardizes on the
    FULL dataset, then clusters by PRI_jet_num and drops the all--999
    columns of clusters 0 and 1.
    """
    tx = tx_train.copy()
    print("PREPROCESSING TRAIN DATA \n Clustering w.r.t. to PRI_jet_num numbers")
    clustid0, clustid1, clustid2, clustid3= prijetnum_indexing(tx)
    # Record (without applying yet) which columns are entirely -999 for
    # clusters 0 and 1, so they can be dropped after the global processing.
    print("Removing features with all -999 rows for cluster 0 and 1. Returning indices for later")
    _, id_del0 = delete_features(tx[clustid0])
    _, id_del1 = delete_features(tx[clustid1])
    # Fixed: the original called `replace_by_median`, which is not defined
    # anywhere in this module (NameError at runtime). The helper that
    # implements this behaviour is replace_999_median.
    tx = replace_999_median(tx)
    # Natural log of the features with a long-tail distribution.
    log_features = [0,1,2,3,7,8,9,10,13,16,19,21]
    print("Taking the log of the following features : \n",[num2name.get(key) for key in log_features])
    # Shift to avoid log(0).
    tx[:,log_features] = np.log(tx[:,log_features]+.5)
    print("Standardizing : Setting mean to 0 and variance to 1")
    tx = standardize(tx)
    print("CLUSTERING")
    tx0,tx1,tx2,tx3 = tx[clustid0], tx[clustid1], tx[clustid2], tx[clustid3]
    y0, y1, y2, y3 = y[clustid0], y[clustid1], y[clustid2], y[clustid3]
    print("deleting useless feats")
    tx0 = np.delete(tx0,id_del0,1)
    tx1 = np.delete(tx1,id_del1,1)
    print("Preprocessing done")
    return tx0, y0, tx1, y1, tx2, y2, tx3, y3, id_del0, id_del1
def cluster_preprocessing_test_alt(tX_test, id_del0, id_del1, degs, num2name,f="mean"):
    """Alternative test pipeline: preprocess BEFORE clustering (mirror of
    cluster_preprocessing_train_alt).

    input : tX_test (np.array), whole test set
            id_del0, id_del1, indices of the columns deleted during training
            degs (list), degrees for build_poly found during crossvalidation gridsearch
            num2name (dict), the keys mapping feature numbers to their name. (See proj1_helpers: mapping)
            f (str), = "mean" or "median" or write anything else to ignore.
    Returns the four clusters (feature-augmented) and their index masks.
    """
    print("replace by median")
    i0, i1, i2, i3 = prijetnum_indexing(tX_test)
    # Fixed: the original called `replace_by_median`, which is not defined
    # anywhere in this module (NameError at runtime). The helper that
    # implements this behaviour is replace_999_median.
    tx_t = replace_999_median(tX_test)
    # Natural log of the features with a long-tail distribution.
    log_features = [0,1,2,3,7,8,9,10,13,16,19,21]
    print("Taking the log of the following features : \n",[num2name.get(key) for key in log_features])
    # Shift to avoid log(0).
    tx_t[:,log_features] = np.log(tx_t[:,log_features]+.5)
    print("Standardizing")
    tx_t = standardize(tx_t)
    print("CLUSTERING")
    test0, test1, test2, test3 = tx_t[i0], tx_t[i1], tx_t[i2], tx_t[i3]
    # Delete the columns dropped during training (ids from the train run).
    print("deleting corresponding columns")
    test0 = np.delete(test0,id_del0,1)
    test1 = np.delete(test1,id_del1,1)
    print("Augmenting features")
    test0, test1, test2, test3 = cluster_buildpoly(test0,test1,test2,test3,degs)
    print("Preprocessing done, returning clusterized test set and indices")
    return test0, i0, test1, i1, test2, i2, test3, i3
|
a8c968c0963c6a8e1ccba182dd5ddb6285aab422 | MPI-IS/CityGraph | /city_graph/utils.py | 3,456 | 3.984375 | 4 | """
Utils
=====
Module with different utilities needed for the package.
"""
import collections
import string
import time
from math import sin, cos, sqrt, atan2, radians
from numpy.random import RandomState
# Mean Earth radius in meters.
EARTH_RADIUS_METERS = 6.371 * 1e6


def get_current_time_in_ms():
    """
    Returns the current wall-clock time in milliseconds.

    :note: Used for seeding the pseudo random number generator.
    """
    millis = time.time() * 1000
    return int(millis)


def distance(long1, lat1, long2, lat2):
    """
    Great-circle distance between two points on the Earth.

    :param float long1: longitude of the first point in degrees.
    :param float lat1: latitude of the first point in degrees.
    :param float long2: longitude of the second point in degrees.
    :param float lat2: latitude of the second point in degrees.
    :returns: distance in meters.
    :rtype: float

    :note: The Earth is approximated as a sphere (haversine formula).
    """
    phi1, phi2 = radians(lat1), radians(lat2)
    lam1, lam2 = radians(long1), radians(long2)

    # Haversine of the central angle between the two points.
    hav = sin((phi1 - phi2) / 2) ** 2 + cos(phi1) * cos(phi2) * sin((lam1 - lam2) / 2) ** 2
    central_angle = 2 * atan2(sqrt(hav), sqrt(1 - hav))
    return central_angle * EARTH_RADIUS_METERS
def group_locations_by(locations, attribute):
    """
    Bucket *locations* into lists keyed by the value of one attribute.

    :param list locations: A list of locations.
    :param str attribute: Attribute name to read from each location.
    :returns: defaultdict mapping attribute value -> list of locations.
    """
    buckets = collections.defaultdict(list)
    for loc in locations:
        buckets[getattr(loc, attribute)].append(loc)
    return buckets
def reverse_mapping(mapping):
    """
    Invert a dict-of-iterables: each contained value maps back to its key.

    :param dict mapping: the original dictionary (key -> iterable of values).
    :note: if a value occurs under several keys, the last key wins.
    """
    inverted = {value: key
                for key, values in mapping.items()
                for value in values}
    return inverted
class RandomGenerator(RandomState):
    """
    Pseudo-random number generator based on the MT19937.

    Used for the tests and generating random data.

    :param float seed: Seed for the PRNG (default: current time)
    """

    # MT19937: seed should be between 0 and 2**32 - 1
    MAX_SEED = 2**32

    def __init__(self, seed=None):
        # Fixed: `seed or get_current_time_in_ms()` discarded a valid seed
        # of 0 (falsy) and silently replaced it with a time-based one,
        # making RandomGenerator(seed=0) non-reproducible. Test for None
        # explicitly instead.
        if seed is None:
            seed = get_current_time_in_ms()
        self._seed = seed % self.MAX_SEED
        super().__init__(self._seed)

    @property
    def rng_seed(self):
        """Returns the seed actually used to initialize the generator."""
        return self._seed

    def seed(self, _seed):
        """Reseeds the generator and remembers the new seed."""
        self._seed = _seed
        super().seed(self.rng_seed)

    def __call__(self):
        """Returns a random float in [0.0, 1.0)."""
        return self.random_sample()

    def rand_int(self, max_value=MAX_SEED):
        """
        Returns a random integer in [0, max_value).

        :param int max_value: Exclusive upper bound (must be >= 1).
        :raises ValueError: if max_value < 1.
        """
        if max_value < 1:
            raise ValueError("Maximum value should be at least 1, instead got %s" % max_value)
        return super().randint(0, max_value)

    def rand_str(self, length=None):
        """Returns a random alphanumeric string (random length if not given)."""
        length = length or (5 + self.randint(60))
        characters = string.ascii_letters + string.digits
        size = len(characters)
        return ''.join(characters[self.randint(size)] for _ in range(length))
|
6b8111971500612b1c6010ae73c49d35e24a3ba9 | donyu/euler_project | /p12.py | 307 | 3.890625 | 4 | def factors_count(x):
count = 0
for i in xrange(1, x + 1):
if x % i == 0:
count += 1
return count
def max_divisible_num(max):
    """Return the first triangular number having at least *max* divisors.

    Triangular numbers are built incrementally: T(k) = T(k-1) + k.
    Divisor counting is delegated to factors_count (defined above).
    """
    tri_num = 1
    i = 2
    while factors_count(tri_num) < max:
        tri_num += i
        i += 1
    return tri_num


if __name__ == "__main__":
    # Fixed: the bare `print expr` statement is a syntax error on Python 3;
    # the parenthesized single-argument form behaves identically on 2 and 3.
    print(max_divisible_num(500))
|
bbef15db3e9d0a69178bbe75bce99b0e5215adad | Environmental-Informatics/building-more-complex-programs-with-python-gargeyavunnava | /program_4.2.py | 3,205 | 4.78125 | 5 |
"""
Created on Fri Jan 24 13:50:40 2020
Purdue account name: vvunnava
Github account name: gargeyavunnava
Exercise 4.2, thinkpython 2e
Program description:
Multiple functions are defined to make it easy to draw the three flowers
as shown in fig 4.1, thinkpython 2e
As discussed in the chapter, a 'polyline' funtion is defined here to make it
easier compared to a polygon function to draw geometrical shapes.
The polyline function defined is then used in the definition of another
function 'arc' to help draw arcs.
"""
import turtle #to work with turtle
import math #to use math constants like pi
"""
polyline function: it takes 4 inputs: t=turtle object, n = no of lines/sides,
length and angle.The polyline is created using a for loop where in each iteration
the trutle moves forward as directed by the length variable value and then turns
at an angle as directed by angle variable. The next iteration of for loop begins
at the end location of the turtle in the previous iteration and this creates a
continous polyline.
"""
def polyline(t, n, length, angle):
    """Draw *n* connected segments of *length*, turning left by *angle* after each.

    *t* is a turtle-like object exposing fd() and lt(); successive segments
    start where the previous one ended, producing one continuous polyline.
    """
    for _ in range(n):
        t.fd(length)
        t.lt(angle)


def arc(t, r, angle):
    """Approximate a circular arc of radius *r* spanning *angle* degrees.

    The circumference piece (2*pi*r*angle/360) is drawn as a polyline whose
    segments are roughly 3 units long; more segments give a smoother curve.
    """
    arc_length = 2 * math.pi * r * angle / 360
    n = int(arc_length / 3) + 1
    step_length = arc_length / n
    step_angle = float(angle) / n
    polyline(t, n, step_length, step_angle)


def circle(t, r):
    """Draw a full circle of radius *r* (an arc spanning 360 degrees)."""
    arc(t, r, 360)


def petal(t, r, angle):
    """Draw one petal: two mirrored arcs of radius *r* spanning *angle* degrees.

    The (180 - angle) turn between the arcs closes the petal shape.
    """
    for _ in range(2):
        arc(t, r, angle)
        t.lt(180 - angle)


def flower(t, r, angle, n):
    """Draw a flower with *n* petals spread evenly around the current heading.

    The petal arrangement (how spread out they are) is set by *angle*.
    """
    for _ in range(n):
        petal(t, r, angle)
        t.lt(360 / n)
"""
offset function is defined to moce the turtle in the turtle window.
pu = pen up, turtle moving path not displayed
pd = pen down, turtle moving path displayed.
"""
def offset(t, offset_length):
    """Reposition the turtle forward by *offset_length* without drawing.

    pu (pen up) hides the movement; pd (pen down) resumes drawing afterwards.
    """
    t.pu()
    t.fd(offset_length)
    t.pd()
bob=turtle.Turtle()
offset(bob,-200)
flower(bob,80,60,6)
offset(bob,200)
flower(bob,60,80,10)
offset(bob,200)
flower(bob,180,15,20)
"""
reset function used to keep the turtle window open and modify the code without the
need to restart the kernel
"""
#bob.reset()
turtle.mainloop()
|
a02ec46140ef6cf6ebc0b5bae91959eaf473c102 | Joshuabhad/mypackage | /mypackage/recursion.py | 1,699 | 4.65625 | 5 | def sum_array(array):
"""
Calculate the sum of a list of arrays
Args:
(array): Numbers in a list to be added together
Returns:
int: sum of all numbers in a array added together
Examples:
>>> sum_array([1,2,3,4,5])
15
>> sum_array([1,5,7,3,4])
20
>> sum_array([10,10,20,10])
50
"""
if len(array)==0:
return 0
else:
return array[0] + sum_array(array[1:])
def fibonacci(n):
    """
    Calculate the nth term in the Fibonacci sequence.

    Args:
        n (int): nth term in the Fibonacci sequence to calculate.

    Returns:
        int: nth term of the Fibonacci sequence, equal to the sum of the
            previous two terms (``fibonacci(0) == 0``, ``fibonacci(1) == 1``).

    Note:
        Naive double recursion: runs in exponential time, so large ``n``
        is slow.  Negative ``n`` is returned unchanged (``n <= 1`` base case).

    Examples:
        >>> fibonacci(1)
        1
        >>> fibonacci(2)
        1
        >>> fibonacci(3)
        2
    """
    if n <= 1:
        return n
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)
def factorial(n):
    """
    Calculate the factorial of a given number.

    Args:
        n (int): the input number.

    Returns:
        int: the factorial of ``n``, e.g. 5! = 5*4*3*2*1 = 120.

    Note:
        Any ``n < 1`` -- including negative inputs -- returns 1; no
        error is raised for invalid arguments.

    Examples:
        >>> factorial(6)
        720
        >>> factorial(4)
        24
    """
    # Base case: 0! == 1 (and all n < 1 fall through to 1).
    if n < 1:
        return 1
    else:
        fac = n * factorial( n - 1 )
        return fac
def reverse(word):
    """
    Return ``word`` with its characters in reverse order.

    Args:
        word (str): the string to reverse.

    Returns:
        str: the reversed string ('' for an empty input).

    Examples:
        >>> reverse('apple')
        'elppa'
        >>> reverse('friend')
        'dneirf'
    """
    # Base case: the empty string is its own reverse.
    if not word:
        return word
    # Move the first character to the end of the reversed remainder.
    return reverse(word[1:]) + word[0]
|
def string_list_editor(strings):
    """Apply a series of edits to ``strings`` in place, printing each result.

    Edits applied to each entry, in order:
      * length divisible by 4 -> the string is reversed
      * length divisible by 5 -> the string is truncated to its first 5 chars
      * >= 3 "uppercase" characters (``char == char.upper()``, so digits and
        symbols count too) among the first 5 -> the whole string upper-cased
      * trailing '-' -> hyphen removed and the next entry appended

    Finally prints the starting/ending character counts and the median of the
    original string lengths.  Returns None; ``strings`` is modified in place.

    Fixes vs. the original: the header line was corrupted; ``sorted()``'s
    return value was discarded so the median came from an unsorted list;
    ``len(arranged)/2`` produced a float index (TypeError on Python 3 for
    even-length input); the last-index hyphen branch sliced with
    ``len(strings)`` instead of ``len(strings[index])``.
    """
    start_chars = sum(len(s) for s in strings)
    # Median of the *original* string lengths, from a sorted copy.
    lengths = sorted(len(s) for s in strings)
    if len(lengths) % 2 == 0:
        median = (lengths[len(lengths) // 2] + lengths[len(lengths) // 2 - 1]) / 2
    else:
        median = lengths[len(lengths) // 2]
    for index in range(len(strings)):
        if len(strings[index]) % 4 == 0:    # length divisible by 4: reverse
            strings[index] = strings[index][::-1]
        if len(strings[index]) % 5 == 0:    # length divisible by 5: keep first 5
            strings[index] = strings[index][:5]
        # Count "uppercase" characters in the first five positions.
        count = sum(1 for char in strings[index][:5] if char == char.upper())
        if count >= 3:
            strings[index] = strings[index].upper()
        if strings[index].endswith("-"):    # hyphenated: merge with next word
            strings[index] = strings[index][:-1]
            if index < len(strings) - 1:
                strings[index] += strings[index + 1]
        print(strings[index])
    end_chars = sum(len(s) for s in strings)
    print("Starting chars: " + str(start_chars) + "\nEnding Chars: " + str(end_chars) + "\nMedian length of input: " + str(median))
def count_chars(strings): ##counts chars in a list
    """Return the total number of characters across all strings in the list."""
    return sum(len(item) for item in strings)
string_list_editor(["hellothere","name","YELling","hyphen-","ate"]) ## demo call: one sample string per editing rule (truncate, reverse, uppercase, hyphen-merge)
def insertion_sort(array):
    """Sort ``array`` in place using insertion sort and return it.

    Stable, O(n^2) worst case.  The original started the outer loop at
    index 2, which never inserted ``array[1]`` relative to ``array[0]``
    (e.g. ``[2, 1, 3]`` came back unchanged); the loop must start at 1.
    The original header line was also corrupted by dataset concatenation.
    """
    for j in range(1, len(array)):          # was range(2, ...): skipped array[1]
        key = array[j]
        i = j - 1
        # Shift larger elements one slot right to open a gap for key.
        while i > -1 and array[i] > key:
            array[i + 1] = array[i]
            i = i - 1
        array[i + 1] = key
    return array
# Demo driver: print the list before and after sorting.
# NOTE: uses Python 2 print statements -- this driver does not run on Python 3.
unsorted_array = [9, 5, 3, 8, 2, 1, 4, 7, 6, 13, 10, 12, 11];
for i in range(len(unsorted_array)):
    print unsorted_array[i];
print "\n";
sorted_array = insertion_sort(unsorted_array);
for i in range(len(sorted_array)):
    print sorted_array[i];
|
86a8dc74a19b7d6ec74ee97dbba1d5268d32f556 | achenachena/leetcode | /jianzhi/jump_floor.py | 686 | 3.75 | 4 | """
A frog can jump up either 1 or 2 steps at a time.  How many distinct ways
can it jump to the top of an n-step staircase
"""
# -*- coding:utf-8 -*-
# 递归法
# class Solution:
# def jumpFloor(self, number):
# # write code here
# if number == 1:
# return 1
# if number == 0:
# return 0
# return self.jumpFloor(number - 1) + self.jumpFloor(number - 2)
# 非递归法
class Solution:
    def jumpFloor(self, number):
        """Return the number of ways to climb ``number`` steps taking 1 or 2
        steps at a time -- an iterative Fibonacci-style walk (O(n) time,
        O(1) space)."""
        a, b = 0, 1
        for _ in range(number):
            a, b = b, a + b
        return b
|
788e304bfe3dccdd75e95cad83d058b57aa921bc | alexandraback/datacollection | /solutions_2463486_1/Python/sealion/Fair-and-Square.py | 588 | 3.515625 | 4 | import math
import sys
def is_palindrome(n):
    """Return True if the decimal representation of ``n`` reads the same
    forwards and backwards."""
    text = str(n)
    return text == text[::-1]
def find_numbers(A, B):
    """Yield each perfect square in [A, B] whose square root and value are
    both palindromes ("fair and square" numbers), in increasing order."""
    lo = int(math.ceil(math.sqrt(A)))
    hi = int(math.floor(math.sqrt(B)))
    for root in range(lo, hi + 1):
        square = root * root
        if is_palindrome(root) and is_palindrome(square):
            yield square
# Driver: read T test cases from stdin, answer each from one precomputed
# list of all fair-and-square numbers up to 10**14.
# NOTE: Python 2 print statement -- this driver does not run on Python 3.
inp = sys.stdin
T = int(inp.readline())
numbers = list(find_numbers(1, 10 ** 14))
for case_number in range(1, T + 1):
    A, B = map(int, inp.readline().split())
    c = sum(1 for n in numbers if A <= n <= B)
    print 'Case #%d: %d' % (case_number, c)
|
bd9e8658c2a7d7928516d8b1de0b7d77ea16aa18 | jbhennes/CSCI-220-Programming-1 | /Chapter 5 Strings/readNumericData.py | 2,836 | 4.28125 | 4 | # readNumericData.py
# illustrates how to retrieve numeric data from a file
def main():
    """Teaching demo of reading numeric data from text files.

    Earlier variants (integer-per-line, multiple values per line, fixed
    column count) are kept as commented-out sections; only the last
    section -- variable number of grades per student -- is active.  The
    active section reads ``dataStudentInfoMultipleGrades.txt`` (lines of
    ``first last grade1 ... gradeN``) and prints each student's GPA.
    """
    #reading data from a file for calculations - fine for integer values
##    infileName = "data.txt"
##    infileName = "dataWithFloat.txt"
##    infile = open(infileName,"r")
##    print ("\n*** Integer values ***")
##    for line in infile:
##        print ("Value read: " + line[:-1])
##        value = int(eval(line)) + 1
##        print ("Value plus 1: " + str(value))
##        print ()
##    infile.close()
    # Reading multiple pieces of numeric data per line
##    infileName = "dataMultiple.txt"
##    infile = open(infileName,"r")
##    print ("\n*** Multiple data per line ***")
##    for line in infile:
##        values = line.split()
##        for valueStr in values:
##            print ("Value read: " + valueStr)
##            value = eval(valueStr) + 1
##            print ("Value plus 1: " + str(value))
##        print ()
##    infile.close()
##    # Reading multiple pieces of data per line - fixed length of data
    # This code segment reads in from files with the format
    # studentFirstName studentLastName grade1 grade2 grade3
    # and computes and outputs the student's gpa
##    print ("Outputs student data to a file")
##    infileName = "dataStudentInfo3Grades.txt"
##    outfileName = "studentInfoOutput.txt"
##    numGrades = 3
##    startPos = 2
##    infile = open(infileName,"r")
##    outfile = open(outfileName, "w")
##    print ("\n*** Student Info with 3 grades ***", file=outfile)
##    for line in infile:
##        values = line.split()
##        total = 0
##        for i in range(startPos, startPos + numGrades):
##            total = total + float(values[i])
##        average = total / numGrades
##        message = values[0] + " " + values[1]
##        message = message + "'s GPA: {0:.3f}".format(average)
##        print (message, file=outfile)
##    infile.close()
##    print()
##    print("Your data has been written to " + outfileName)
    # Reading multiple pieces of data per line - unknown length of data
    # This code segment reads in from files with the format
    # studentFirstName studentLastName grade1 grade2 grade3 ... gradeN
    # and computes and outputs the student's gpa
    infileName = "dataStudentInfoMultipleGrades.txt"
    infile = open(infileName,"r")
    print ("\n*** Student Info with Multiple grades ***")
    for line in infile:
        values = line.split()
        total = 0
        count = 0
        # Grades start at column 2; count them since each line may differ.
        for i in range(2,len(values)):
            total = total + float(values[i])
            count = count + 1
        # NOTE(review): a line with no grades raises ZeroDivisionError here.
        average = total / count
        print (values[0] + " " + values[1] + "'s GPA: {0:.3f}".format(average))
    infile.close()
main()
|
53934f1b1645d147841985e09f7ee40c80d2cf94 | vedadeepta/Tic-Tac-Toe | /LookAhead.py | 1,857 | 3.734375 | 4 | from random import randint
pos = -1
def checkWin(board):
    """Return True if any row, column, or diagonal of the 3x3 ``board``
    (a flat list of 9 cells, 'n' meaning empty) holds three identical
    non-'n' marks."""
    # All eight winning index triples: 3 rows, 3 columns, 2 diagonals.
    winning_lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),
        (0, 3, 6), (1, 4, 7), (2, 5, 8),
        (0, 4, 8), (2, 4, 6),
    )
    for a, b, c in winning_lines:
        if board[a] != 'n' and board[a] == board[b] == board[c]:
            return True
    return False
def checkScore(board, player):
    """Score the position after ``player`` moved: +1 if player 0 just won,
    -1 if player 1 just won, 10 while the game is still open, 0 for a draw."""
    if checkWin(board):
        return 1 if player == 0 else -1
    if 'n' in board:
        return 10   # at least one empty cell: game not finished
    return 0        # full board, no winner: draw
def minimax(board,player):
    """Full-depth minimax over the tic-tac-toe ``board``.

    Player 0 ('o') maximizes, player 1 ('x') minimizes.  Returns the best
    reachable score (+1/-1/0) and, as a side effect, stores the index of
    the best move in the module-level global ``pos``.  The board is
    mutated in place during the search but restored before returning.
    """
    # Score from the previous player's perspective; 10 means "game open".
    score = checkScore(board,(player + 1) % 2)
    if( score == 0 or score == 1 or score == -1):
        return score
    else:
        moves = []
        scoreList = []
        # Collect all empty cells as candidate moves.
        # NOTE(review): xrange is Python 2-only; use range on Python 3.
        for i in xrange(9):
            if (board[i] == 'n'):
                moves.append(i)
        for i in moves:
            if (player == 0):
                board[i] = 'o'
            else:
                board[i] = 'x'
            temp= minimax(board,(player + 1) % 2)
            scoreList.append(temp)
            # Undo the trial move before trying the next one.
            board[i] = 'n'
        if(player == 0):
            # Maximizer: pick the first move achieving the best score.
            global pos
            pos = moves[scoreList.index(max(scoreList))]
            return max(scoreList)
            #return max(scoreList), moves[len(scoreList) - scoreList[::-1].index(max(scoreList)) - 1]
        else:
            # Minimizer: pick the first move achieving the lowest score.
            global pos
            pos = moves[scoreList.index(min(scoreList))]
            return min(scoreList)
            #return min(scoreList), moves[len(scoreList) - scoreList[::-1].index(min(scoreList)) - 1]
def checkEmpty(board):
    """Return True when every cell of ``board`` is still the empty marker 'n'."""
    return all(cell == 'n' for cell in board)
def getPos(board,player):
    """Return the index (0-8) where ``player`` should move.

    An untouched board gets a random opening move; otherwise a full
    minimax search runs and the chosen move is read back from the
    module-level global ``pos`` that minimax() sets.
    """
    if(checkEmpty(board)):
        return randint(0,8)
    minimax(board,player)
    return pos
|
class Solution:
    def makesquare(self, nums):
        """
        :type nums: List[int]
        :rtype: bool

        Decide whether the matchsticks in ``nums`` can be arranged, each
        used exactly once, into a square.  Uses memoized recursion over a
        bitmask of the sticks still available.  (Fixes: the original class
        header line was corrupted by dataset concatenation, and the side
        count used float division.)
        """
        # If there are no matchsticks, then we can't form any square.
        if not nums:
            return False
        # Number of matchsticks
        L = len(nums)
        # Possible perimeter of our square
        perimeter = sum(nums)
        # Possible side of our square from the given matchsticks
        possible_side = perimeter // 4
        # If the perimeter isn't equally divisible among 4 sides, return False.
        if possible_side * 4 != perimeter:
            return False
        # Memoization cache keyed on the recursion state (mask, sides_done).
        memo = {}
        def recurse(mask, sides_done):
            # Total length of matchsticks used so far (0-bits of mask).
            total = 0
            for i in range(L - 1, -1, -1):
                if not (mask & (1 << i)):
                    total += nums[L - 1 - i]
            # A positive multiple of the side length means one more side
            # was just completed.
            if total > 0 and total % possible_side == 0:
                sides_done += 1
            # Three complete sides force the fourth (sticks sum to 4*side).
            if sides_done == 3:
                return True
            # Reuse a previously computed answer for this exact state.
            if (mask, sides_done) in memo:
                return memo[(mask, sides_done)]
            ans = False
            # rem is the space left in the side currently being built.
            # (integer division -- was int(total / possible_side))
            c = total // possible_side
            rem = possible_side * (c + 1) - total
            # Try every still-available stick that fits in the remaining space.
            for i in range(L - 1, -1, -1):
                if nums[L - 1 - i] <= rem and mask & (1 << i):
                    # mask ^ (1 << i) marks stick i as used for the recursion.
                    if recurse(mask ^ (1 << i), sides_done):
                        ans = True
                        break
            # Cache the result for the current recursion state.
            memo[(mask, sides_done)] = ans
            return ans
        # Start the search with every matchstick available.
        return recurse((1 << L) - 1, 0)
def main():
    """Demo: print makesquare() for two sample inputs (True, then False)."""
    mySol = Solution()
    print(mySol.makesquare([1, 1, 2, 2, 2]))
    print(mySol.makesquare([1, 1, 2, 2, 1]))
if __name__ == "__main__":
    main()
|
ec7836f977187b10a21c36c3c0f9461e8f39f7ff | dujiaojingyu/Personal-programming-exercises | /编程/1月/1.24/斐波那契.py | 136 | 3.609375 | 4 |
def fib(max):
    """Print the first ``max`` Fibonacci numbers (1, 1, 2, 3, ...) one per
    line, then return the original author's sentinel string."""
    previous, current = 0, 1
    for _ in range(max):
        print(current)
        previous, current = current, previous + current
    return 'lalalalal'
fib(6)
aae3fb42325e5ec655c6599832589a00b36139fe | hitesh789/datasharing | /Chp-4 Representing Data and Engineering Features.py | 6,660 | 3.8125 | 4 | %cd C:\Users\bama6012\Desktop\Python My study\data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# The file has no headers naming the columns, so we pass header=None
# and provide the column names explicitly in "names"
data=pd.read_csv('adult.csv',header=None, index_col=False,
names=['age', 'workclass', 'fnlwgt', 'education', 'education-num',
'marital-status', 'occupation', 'relationship', 'race', 'gender',
'capital-gain', 'capital-loss', 'hours-per-week', 'native-country',
'income'])
# For illustration purposes, we only select some of the columns
data = data[['age', 'workclass', 'education', 'gender', 'hours-per-week','occupation', 'income']]
# One-Hot-Encoding (Dummy Variables)-------------------------------------------
print("Original features:\n", list(data.columns), "\n")
data_dummies = pd.get_dummies(data)
print("Features after get_dummies:\n", list(data_dummies.columns))
data_dummies.head()
features = data_dummies.ix[:, 'age':'occupation_ Transport-moving']
# Extract NumPy arrays
X = features.values
y = data_dummies['income_ >50K'].values
print("X.shape: {} y.shape: {}".format(X.shape, y.shape))
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Test score: {:.2f}".format(logreg.score(X_test, y_test)))
# create a DataFrame with an integer feature and a categorical string feature
demo_df = pd.DataFrame({'Integer Feature': [0, 1, 2, 1],'Categorical Feature': ['socks', 'fox', 'socks', 'box']})
# Using get_dummies will only encode the string feature and will not change the integer feature
pd.get_dummies(demo_df)
"""
If you want dummy variables to be created for the “Integer Feature” column, you can
explicitly list the columns you want to encode using the columns parameter. Then,
both features will be treated as categorical
"""
demo_df['Integer Feature'] = demo_df['Integer Feature'].astype(str)
pd.get_dummies(demo_df, columns=['Integer Feature', 'Categorical Feature'])
#----------------------------------------------------------------------------------Automatic Feature Selection
# There are three basic strategies:
# 1) univariate statistics
# 2) Model-based selection
# 3) iterative selection
# Univariate Statistics--------------------------------------------------------
"""
In univariate statistics, we compute whether there is a statistically significant relationship
between each feature and the target. Then the features that are related with the
highest confidence are selected. In the case of classification, this is also known as
analysis of variance (ANOVA). A key property of these tests is that they are univariate,
meaning that they only consider each feature individually.
"""
from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import SelectPercentile
from sklearn.model_selection import train_test_split
cancer=load_breast_cancer()
# get deterministic random numbers
rng = np.random.RandomState(42)
noise = rng.normal(size=(len(cancer.data), 50))
# add noise features to the data
# the first 30 features are from the dataset, the next 50 are noise
X_w_noise = np.hstack([cancer.data, noise])
X_train, X_test, y_train, y_test = train_test_split(X_w_noise, cancer.target, random_state=0, test_size=.5)
# use f_classif (the default) and SelectPercentile to select 50% of features
select = SelectPercentile(percentile=50)
select.fit(X_train, y_train)
# transform training set
X_train_selected = select.transform(X_train)
print("X_train.shape: {}".format(X_train.shape))
print("X_train_selected.shape: {}".format(X_train_selected.shape))
"""
As you can see, the number of features was reduced from 80 to 40 (50 percent of the
original number of features). We can find out which features have been selected using
the get_support method, which returns a Boolean mask of the selected features
"""
mask = select.get_support()
print(mask)
# visualize the mask -- black is True, white is False
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
plt.xlabel("Sample index")
from sklearn.linear_model import LogisticRegression
# transform test data
X_test_selected = select.transform(X_test)
lr = LogisticRegression()
lr.fit(X_train, y_train)
print("Score with all features: {:.3f}".format(lr.score(X_test, y_test)))
lr.fit(X_train_selected, y_train)
print("Score with only selected features: {:.3f}".format(
    lr.score(X_test_selected, y_test)))
# Model-Based Feature Selection------------------------------------------------
# Keep the features a random forest ranks above the median importance.
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
select = SelectFromModel(RandomForestClassifier(n_estimators=100, random_state=42),threshold="median")
select.fit(X_train, y_train)
X_train_l1 = select.transform(X_train)
print("X_train.shape: {}".format(X_train.shape))
print("X_train_l1.shape: {}".format(X_train_l1.shape))
mask = select.get_support()
# visualize the mask -- black is True, white is False
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
plt.xlabel("Sample index")
X_test_l1 = select.transform(X_test)
score = LogisticRegression().fit(X_train_l1, y_train).score(X_test_l1, y_test)
print("Test score: {:.3f}".format(score))
# Iterative Feature Selection--------------------------------------------------
# Recursive feature elimination (RFE)
"""
which starts with all features,
builds a model, and discards the least important feature according to the
model. Then a new model is built using all but the discarded feature, and so on until
only a prespecified number of features are left. For this to work, the model used for
selection needs to provide some way to determine feature importance, as was the case
for the model-based selection. Here, we use the same random forest model that we
used earlier, and get the results
"""
from sklearn.feature_selection import RFE
select=RFE(RandomForestClassifier(n_estimators=100,random_state=42),n_features_to_select=40)
select.fit(X_train,y_train)
# visualize the selected features:
mask = select.get_support()
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
plt.xlabel("Sample index")
X_train_rfe= select.transform(X_train)
X_test_rfe= select.transform(X_test)
score = LogisticRegression().fit(X_train_rfe, y_train).score(X_test_rfe, y_test)
print("Test score: {:.3f}".format(score))
print("Test score: {:.3f}".format(select.score(X_test, y_test)))
376a454f428fb553b51d4685682c5565672c9837 | python-amazon-mws/python-amazon-mws | /mws/utils/collections.py | 4,288 | 3.515625 | 4 | """Data structure utilities."""
from collections.abc import Iterable, Mapping
def unique_list_order_preserved(seq):
    """Return the unique items of ``seq`` as a list, keeping first-seen order.

    Only the first occurrence of each item is kept; later duplicates are
    ignored.  Items must be hashable.
    """
    # dict keys are unique and preserve insertion order (Python 3.7+).
    return list(dict.fromkeys(seq))
class DotDict(dict):
    """Read-only dict-like object class that wraps a mapping object.

    Keys are additionally reachable as attributes (``d.key``), with a
    fallback to ``@key``/``#key`` variants as produced by xmltodict, and
    nested mappings are recursively converted to DotDict on assignment.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self)
        # Route initial content through update() so nested values get built.
        self.update(*args, **kwargs)
    def __repr__(self):
        return f"{self.__class__.__name__}({super().__repr__()})"
    def __getattr__(self, name):
        """Simply attempts to grab a key `name`.
        Has some fallback logic for keys starting with '@' and '#',
        which are output by xmltodict when a tag has attributes included.
        In that case, will attempt to find a key starting with '@' or '#',
        or will raise the original KeyError exception.
        """
        # NOTE(review): raising KeyError (not AttributeError) from
        # __getattr__ breaks hasattr()/getattr(..., default) -- confirm
        # callers rely on the KeyError before changing it.
        try:
            return self[name]
        except KeyError:
            # No key by that name? Let's try being helpful.
            if f"@{name}" in self:
                # Does this same name occur starting with ``@``?
                return self[f"@{name}"]
            if f"#{name}" in self:
                # Does this same name occur starting with ``#``?
                return self[f"#{name}"]
            # Otherwise, raise the original exception
            raise
    def __setattr__(self, name, val):
        """Allows assigning new values to a DotDict, which will automatically build
        nested mapping objects into DotDicts, as well.
        Passes responsibility to ``__setitem__`` for consistency.
        """
        self.__setitem__(name, val)
    def __delattr__(self, name):
        """Passes attr deletion to __delitem__."""
        self.__delitem__(name)
    def __setitem__(self, key, val):
        """Allows assigning new values to a DotDict, which will automatically build
        nested mapping objects into DotDicts, as well.
        """
        val = self.__class__.build(val)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        """Nodes are natively iterable, returning an iterator wrapping this instance.
        This is slightly different from standard behavior: iterating a ``dict`` will
        return its keys. Instead, we assume that the user is iterating an XML node
        which they expect sometimes returns a list of nodes, and other times returns
        a single instance of ``DotDict``. If the latter is true, we end up here.
        So, we wrap this instance in an iterator, so that iterating on it will return
        the ``DotDict`` itself, rather than its keys.
        """
        return iter([self])
    def update(self, *args, **kwargs):
        """Recursively builds values in any nested objects, such that any mapping
        object in the nested structure is converted to a ``DotDict``.
        - Each nested mapping object will be converted to ``DotDict``.
        - Each non-string, non-dict iterable will have elements built, as well.
        - All other objects in the data are left unchanged.
        """
        for key, val in dict(*args, **kwargs).items():
            self[key] = self.__class__.build(val)
    @classmethod
    def build(cls, obj):
        """Builds objects to work as recursive versions of this object.
        - Mappings are converted to a DotDict object.
        - For iterables, each element in the sequence is run through the build method recursively.
        - All other objects are returned unchanged.
        """
        if isinstance(obj, Mapping):
            # Return a new DotDict object wrapping `obj`.
            return cls(obj)
        if not isinstance(obj, str) and isinstance(obj, Iterable):
            # Build each item in the `obj` sequence,
            # then construct a new sequence matching `obj`'s type.
            # Must be careful not to pass strings here, even though they are iterable!
            return obj.__class__(cls.build(x) for x in obj)
        # In all other cases, return `obj` unchanged.
        return obj
|
326b36f411a82c8084722aef705757cdbd72aa70 | samskhan/User-Interface-Engineering | /TextEditor.py | 1,621 | 3.5625 | 4 | '''Author: Sams KHan
Class: User Interface Engineering
References: https://www.youtube.com/watch?v=xqDonHEYPgA
https://www.instructables.com/id/Create-a-Simple-Python-Text-Editor/
'''
import sys
from tkinter import *
import tkinter.filedialog as fd
root = Tk()
#Adding text widget
text = Text(root)
text.grid()
'''#Adding ability to save file
def save():
global text
t=text.get(0.0,END)
f=open(text,'w')
f.write(t)
f.close()'''
#New file
def new():
    """Clear the text widget and reset the global filename to 'Untitled'."""
    global filename
    # presumably intended for a Save command; only the commented-out
    # save() above references a filename -- TODO confirm
    filename = "Untitled"
    text.delete(0.0,END)
def openFile():
    """Prompt for a file and load its contents into the text widget.

    Fixes: the original crashed with AttributeError when the dialog was
    cancelled (askopenfile returns None) and never closed the file handle.
    """
    f = fd.askopenfile(mode='r')
    if f is None:   # dialog cancelled
        return
    try:
        t = f.read()
    finally:
        f.close()   # original leaked the handle
    text.delete(0.0, END)
    text.insert(0.0, t)
#Adding the ability to save as
def saveas():
    """Prompt for a destination path and write the widget's text to it.

    Fixes: the original tried to open an empty path when the dialog was
    cancelled (asksaveasfilename returns '') and never guaranteed the
    file handle was closed.
    """
    global text
    t = text.get("1.0", "end-1c")
    savelocation = fd.asksaveasfilename()
    if not savelocation:   # dialog cancelled
        return
    with open(savelocation, "w+") as file1:
        file1.write(t)
#Adding the bullet point list
def addList():
    """Insert a diamond-bulleted list item at the end of the text widget."""
    global text
    # Register the tag name; 'bullets' is used below but never configured
    # -- presumably intended styling, TODO confirm.
    text.tag_configure('bulleted_list')
    text.insert(END, u'\u25C6', 'bullets')
    text.insert(END, u"\tThis is the first item in the list.\n",
    'bulleted_list')
# Build the menu bar: File (New/Open/Save as/Quit) and Insert (Quick List),
# then start the Tk event loop.
root.title("Word Processor")
menubar = Menu(root)
filemenu = Menu(menubar)
insertmenu = Menu(menubar)
filemenu.add_command(label="New", command=new)
filemenu.add_command(label="Open", command=openFile)
filemenu.add_command(label="Save as", command=saveas)
filemenu.add_command(label="Quit", command=root.quit)
menubar.add_cascade(label="File",menu=filemenu)
insertmenu.add_command(label="Quick List", command=addList)
menubar.add_cascade(label="Insert",menu=insertmenu)
root.config(menu=menubar)
root.mainloop()
4d86b7416c2e7e3ed010de1535596835b8617974 | aCoffeeYin/pyreco | /repoData/samuel-squawk/allPythonContent.py | 21,221 | 3.546875 | 4 | __FILENAME__ = aggregates
from __future__ import division
"""
An aggregate class is expected to accept two values at
instantiation: 'column' and 'name', and the class
must have two methods 'update(self, row)' and 'value(self)'.
The 'update' method is called for each row, and the 'value'
must return the final result of the aggregation.
"""
class Aggregate(object):
    """Base class for SQL aggregate functions.

    Subclasses implement ``update(self, row)`` (called once per row) and
    ``value(self)`` (returns the final aggregated result).
    """
    def __init__(self, column, name=None):
        # `column` may be None (e.g. COUNT with no column); guard the lower().
        self.column = column and column.lower()
        self.name = (name or column).lower()
    def _to_number(self, val):
        """Coerce a cell value to int/float for numeric aggregation."""
        # NOTE(review): `long` and `basestring` are Python 2-only names;
        # this method fails with NameError on Python 3.
        if isinstance(val, (int, long, float)):
            return val
        if isinstance(val, basestring):
            if '.' in val:
                return float(val)
            return int(val)
        return float(val)
class AvgAggregate(Aggregate):
    """Track the running mean of a column's values."""
    def __init__(self, *args, **kwargs):
        super(AvgAggregate, self).__init__(*args, **kwargs)
        self.sum = 0
        self.count = 0
    def update(self, row):
        # Accumulate the numeric value, then the number of rows seen.
        self.sum += self._to_number(row[self.column])
        self.count += 1
    def value(self):
        # The average of zero rows is undefined.
        return None if self.count == 0 else self.sum / self.count
class CountAggregate(Aggregate):
    """Count how many rows were seen (the column's value is ignored)."""
    def __init__(self, *args, **kwargs):
        super(CountAggregate, self).__init__(*args, **kwargs)
        self.count = 0
    def update(self, row):
        self.count = self.count + 1
    def value(self):
        return self.count
class MaxAggregate(Aggregate):
    """Track the largest value seen for a column (None until a row arrives)."""
    def __init__(self, *args, **kwargs):
        super(MaxAggregate, self).__init__(*args, **kwargs)
        self.max = None
    def update(self, row):
        candidate = self._to_number(row[self.column])
        # The first value seeds the maximum; later ones compete with it.
        self.max = candidate if self.max is None else max(self.max, candidate)
    def value(self):
        return self.max
class MinAggregate(Aggregate):
    """Track the smallest value seen for a column (None until a row arrives)."""
    def __init__(self, *args, **kwargs):
        super(MinAggregate, self).__init__(*args, **kwargs)
        self.min = None
    def update(self, row):
        candidate = self._to_number(row[self.column])
        # The first value seeds the minimum; later ones compete with it.
        self.min = candidate if self.min is None else min(self.min, candidate)
    def value(self):
        return self.min
class SumAggregate(Aggregate):
    """Accumulate the total of a column's numeric values."""
    def __init__(self, *args, **kwargs):
        super(SumAggregate, self).__init__(*args, **kwargs)
        self.sum = 0
    def update(self, row):
        self.sum = self.sum + self._to_number(row[self.column])
    def value(self):
        return self.sum
# Registry of SQL aggregate names -> implementing classes.
aggregate_functions = dict(
    avg = AvgAggregate,
    count = CountAggregate,
    max = MaxAggregate,
    min = MinAggregate,
    sum = SumAggregate,
)
########NEW FILE########
__FILENAME__ = command
from __future__ import with_statement
import sys
from optparse import OptionParser
from squawk.query import Query
from squawk.output import output_formats
from squawk.parsers import parsers
from squawk.sql import sql_parser
def get_table_names(tokens):
    """Extract the table (file) name list from parsed SQL tokens.

    Recurses into nested subquery token groups until the table reference is
    a plain string.  NOTE(review): `basestring` is Python 2-only.
    """
    if not isinstance(tokens.tables[0][0], basestring):
        return get_table_names(tokens.tables[0][0])
    return [tokens.tables[0][0]]
class Combiner(object):
    """Iterate the rows of several input files as one continuous stream.

    Each file is opened lazily with ``parser_class`` ('-' means stdin);
    when one file is exhausted the next is opened transparently.
    NOTE(review): implements the Python 2 iterator protocol (``next``
    method and ``self.parser_iter.next()``); Python 3 needs ``__next__``.
    """
    def __init__(self, files, parser_class):
        self.files = files
        self.parser_class = parser_class
        self.index = 0
        self.next_file()
    def next_file(self):
        """Open the next input file, or raise StopIteration when done."""
        if self.index >= len(self.files):
            raise StopIteration()
        fname = self.files[self.index]
        self.parser = self.parser_class(sys.stdin if fname == '-' else open(fname, "r"))
        self.parser_iter = iter(self.parser)
        # Column names come from whichever file is currently open.
        self.columns = self.parser.columns
        self.index += 1
    def __iter__(self):
        return self
    def next(self):
        while True:
            try:
                row = self.parser_iter.next()
            except StopIteration:
                # Current file exhausted: advance (re-raises when no files left).
                self.next_file()
            else:
                return row
def build_opt_parser():
    """Build the command-line option parser: input parser name (-p) and
    output format (-f, default 'tabular')."""
    opt_parser = OptionParser()
    opt_parser.add_option("-p", "--parser", dest="parser",
                          help="name of parser for input")
    opt_parser.add_option("-f", "--format", dest="format", default="tabular",
                          help="write output in FORMAT format", metavar="FORMAT")
    return opt_parser
def main():
    """Command-line entry point: run an SQL query over log/CSV files.

    Joins the positional args into one SQL string, derives the input files
    from the FROM clause, picks a parser (explicit -p flag, or guessed
    from the first filename), and streams the query result to the chosen
    output format.  NOTE(review): uses Python 2 print statements.
    """
    parser = build_opt_parser()
    (options, args) = parser.parse_args()
    sql = ' '.join(args).strip()
    if not sql:
        print "An SQL expression is required"
        return
    files = get_table_names(sql_parser.parseString(sql))
    parser_name = options.parser
    if parser_name:
        parser = parsers[parser_name]
    else:
        # No explicit parser: guess from the first input filename.
        fn = files[0]
        if fn.rsplit('/', 1)[-1] == 'access.log':
            parser = parsers['access_log']
        elif fn.endswith('.csv'):
            parser = parsers['csv']
        else:
            sys.stderr.write("Can't figure out parser for input")
            sys.exit(1)
    source = Combiner(files, parser)
    query = Query(sql)
    output = output_formats[options.format]
    output(query(source))
if __name__ == "__main__":
main()
########NEW FILE########
__FILENAME__ = output
import csv
import sys
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json = None
def output_tabular(rows, fp=None):
    """Write ``rows`` to ``fp`` (default stdout) as a simple tab/pipe table.

    ``rows`` must expose a ``columns`` list and iterate dict-like records.
    NOTE(review): `basestring` is Python 2-only.
    """
    fp = fp or sys.stdout
    fp.write("\t| ".join(rows.columns))
    fp.write("\n")
    fp.write("-"*40+"\n")
    for row in rows:
        # Non-string cells are stringified; strings pass through unchanged.
        fp.write("\t| ".join(row[k] if isinstance(row[k], basestring) else str(row[k]) for k in rows.columns))
        fp.write("\n")
def output_json(rows, fp=None):
    """Stream ``rows`` to ``fp`` (default stdout) as a JSON array, writing
    one object at a time with ',\\n' between elements."""
    fp = fp or sys.stdout
    fp.write('[')
    for position, row in enumerate(rows):
        if position:
            fp.write(',\n')
        fp.write(json.dumps(row))
    fp.write(']')
def output_csv(rows, fp=None, **kwargs):
    """Write ``rows`` to ``fp`` (default stdout) as CSV: one header row of
    ``rows.columns`` followed by each record's values in column order.
    Extra keyword args are forwarded to ``csv.writer``."""
    fp = fp or sys.stdout
    writer = csv.writer(fp, **kwargs)
    writer.writerow(rows.columns)
    writer.writerows([record[name] for name in rows.columns] for record in rows)
# Registry mapping --format option values to writer callables.
output_formats = dict(
    tabular = output_tabular,
    json = output_json,
    csv = output_csv,
)
########NEW FILE########
__FILENAME__ = access_log
import re
# Combined/common access-log line format: remote addr, "-", remote user,
# [time], "request", status, bytes, "referrer", "user agent".
log_re = re.compile(
    r'^(?P<remote_addr>("[^"]+"|[^\s]+))'
    r" -"
    r" (?P<remote_user>[^\s]+)"
    r" \[(?P<time>[^\]]+)\]"
    r'\s+"(?P<request>[^"]*)"'
    r" (?P<status>[^\s]+)"
    r" (?P<bytes>[^\s]+)"
    r'\s+"(?P<referrer>[^"]*)"'
    r'\s+"(?P<user_agent>[^"]*)"'
    r".*$")
class AccessLogParser(object):
    """Parse web-server access-log lines into dicts via ``log_re``.

    ``columns`` advertises the regex group names with ``request`` replaced
    by its split-out ``method``/``path``/``httpver`` parts.

    Fix: the original computed method/path/httpver but never stored them
    in the yielded dict, so the promised columns were missing.
    NOTE(review): `basestring` is Python 2-only.
    """
    def __init__(self, file):
        if isinstance(file, basestring):
            self.fp = open(file, "rb")
        else:
            self.fp = file
        # Column order follows the regex group order.
        self.columns = [x[0] for x in sorted(log_re.groupindex.items(), key=lambda g:g[1])]
        self.columns.remove("request")
        self.columns += ["method", "path", "httpver"]
    def __iter__(self):
        for line in self.fp:
            m = log_re.match(line.strip())
            d = m.groupdict()
            d['remote_addr'] = d['remote_addr'].replace('"', '')
            try:
                request = d.pop('request')
                method, path, httpver = request.split(' ')
            except ValueError:
                # Malformed request line: leave the three parts empty.
                method, path, httpver = None, None, None
            # BUGFIX: actually expose the split request in the row.
            d['method'], d['path'], d['httpver'] = method, path, httpver
            try:
                d['bytes'] = int(d['bytes'])
            except ValueError:
                # '-' (no bytes sent) and similar become 0.
                d['bytes'] = 0
            d['status'] = int(d['status'])
            yield d
########NEW FILE########
__FILENAME__ = csvparser
import csv
class CSVParser(object):
    """Wrap ``csv.DictReader`` and lower-case all column names.

    ``file`` may be a path or an open file object.
    NOTE(review): `basestring` and opening CSV files in "rb" mode are
    Python 2 idioms; Python 3's csv module needs text mode.
    """
    def __init__(self, file):
        if isinstance(file, basestring):
            fp = open(file, "rb")
        else:
            fp = file
        self.reader = csv.DictReader(fp)
        self.columns = [x.lower() for x in self.reader.fieldnames]
    def __iter__(self):
        for row in self.reader:
            # Normalize every key to lower case to match self.columns.
            yield dict((k.lower(), v) for k, v in row.items())
########NEW FILE########
__FILENAME__ = pickleparser
try:
import cPickle as pickle
except ImportError:
import pickle
class PickleParser(object):
    """Load a pickled list of row dicts and iterate over it.

    ``file`` may be a path or an open binary file object; columns are
    taken from the keys of the first record.
    SECURITY NOTE: ``pickle.load`` executes arbitrary code from the input;
    never feed it untrusted data.
    """
    def __init__(self, file):
        if isinstance(file, basestring):
            self.fp = open(file, "rb")
        else:
            self.fp = file
        self.data = pickle.load(self.fp)
        if not isinstance(self.data, (list, tuple)):
            raise Exception("Unsupported format for pickled data. Should be a list of dictionaries e.g. [{'col': 'value'}]")
        self.columns = self.data[0].keys()
    def __iter__(self):
        for row in self.data:
            yield row
########NEW FILE########
__FILENAME__ = query
from __future__ import division
from functools import partial
import re
from squawk.aggregates import aggregate_functions
from squawk.sql import sql_parser
# SQL comparison operators -> their Python expression equivalents.
OPERATOR_MAPPING = {
    '<>': '!=',
    '!=': '!=',
    '=': '==',
    '<': '<',
    '>': '>',
    '<=': '<=',
    '>=': '>=',
}
def sql_like(like_clause):
    """Translate an SQL LIKE pattern into a regular-expression string.

    '%' matches any run of characters ('.*') and '_' any single character
    ('.').  Fix: literal characters are now regex-escaped, so LIKE patterns
    containing '.', '+', '(' etc. no longer behave as regex wildcards.
    """
    translated = []
    for ch in like_clause:
        if ch == "%":
            translated.append(".*")
        elif ch == "_":
            translated.append(".")
        else:
            translated.append(re.escape(ch))
    return "".join(translated)
class Column(object):
    """A plain (non-aggregate) column selection.

    Remembers the most recently seen row's value for the column so it can
    be read back via ``value()`` (None before any row was seen).
    """
    def __init__(self, column, name=None):
        self.name = (name or column).lower()
        self.column = column.lower()
        self._value = None
    def update(self, row):
        # Keep only the latest row's value for this column.
        self._value = row[self.column]
    def value(self):
        return self._value
class LimitOffset(object):
    """Apply SQL LIMIT/OFFSET to an iterable of rows: skip the first
    ``offset`` rows, then stop after ``limit`` rows (no limit if None)."""
    def __init__(self, source, limit, offset=0):
        self.source = source
        self.limit = limit
        self.offset = offset
    def __iter__(self):
        # Index (exclusive) past which no more rows are produced.
        upper = None if self.limit is None else self.limit + self.offset
        for index, item in enumerate(self.source):
            if index < self.offset:
                continue
            yield item
            if upper is not None and index + 1 >= upper:
                return
class OrderBy(object):
    """Materialises the source rows and yields them sorted on one column."""
    def __init__(self, source, order_by, descending=False):
        self.source = source
        self.order_by = order_by.lower()
        self.descending = descending
    def __iter__(self):
        key_column = self.order_by
        ordered = sorted(self.source,
                         key=lambda row: row[key_column],
                         reverse=self.descending)
        return iter(ordered)
class GroupBy(object):
    """Groups source rows on one or more key columns and aggregates each group.

    group_by: iterable of column names forming the grouping key.
    columns:  zero-argument factories producing column/aggregate objects
              exposing update(row), value() and a .name attribute
              (see Column and the aggregate classes).
    """
    def __init__(self, source, group_by, columns):
        self.source = source
        self.group_by = group_by
        self._columns = columns
    def __iter__(self):
        groups = {}
        for row in self.source:
            key = tuple(row[k] for k in self.group_by)
            if key not in groups:
                # Fresh aggregate instances for a group we haven't seen yet.
                groups[key] = [factory() for factory in self._columns]
            for column in groups[key]:
                column.update(row)
        # items() instead of the Python-2-only iteritems() so this class also
        # runs under Python 3; behaviour on Python 2 is unchanged.
        for key, columns in groups.items():
            yield dict((c.name, c.value()) for c in columns)
class Filter(object):
    """Yields only the source rows for which the predicate returns true."""
    def __init__(self, source, function):
        self.source = source
        self.function = function
    def __iter__(self):
        keep = self.function
        return (row for row in self.source if keep(row))
class Selector(object):
    """Projects each row down to a set of (possibly aliased) columns.

    columns is a list of (name, alias) pairs; a falsy `columns` means
    pass rows through untouched.
    """
    def __init__(self, source, columns):
        self.source = source
        if columns:
            self._columns = [(name.lower(), (alias or name).lower())
                             for name, alias in columns]
        else:
            self._columns = None
    def __iter__(self):
        if not self._columns:
            for row in self.source:
                yield row
        else:
            for row in self.source:
                yield dict((alias, row[name]) for name, alias in self._columns)
class Aggregator(object):
    """Feeds every row through a set of aggregate columns, then yields one summary row."""
    def __init__(self, source, columns):
        self.source = source
        self._columns = columns
    def __iter__(self):
        aggregates = [factory() for factory in self._columns]
        for row in self.source:
            for agg in aggregates:
                agg.update(row)
        yield dict((agg.name, agg.value()) for agg in aggregates)
class Query(object):
    """Compiles a parsed SQL statement into a pipeline of iterator stages.

    Calling the instance with a row source returns an iterable executor built
    by wrapping the source in Filter/GroupBy/Aggregator/Selector/OrderBy/
    LimitOffset stages in the order produced by _generate_parts().
    """
    def __init__(self, sql):
        # Accept either raw SQL text or an already-parsed token tree.
        self.tokens = sql_parser.parseString(sql) if isinstance(sql, basestring) else sql
        self.column_classes = None
        self._table_subquery = None
        self._parts = self._generate_parts()
    def _generate_parts(self):
        """Return a list of callables that can be composed to build a query generator"""
        tokens = self.tokens
        parts = []
        self.column_classes = [self._column_builder(c) for c in tokens.columns] if tokens.columns != '*' else None
        # A non-string "table" token is a parenthesised sub-select: compile it too.
        if not isinstance(tokens.tables[0][0], basestring):
            self._table_subquery = Query(tokens.tables[0][0])
        if tokens.where:
            # NOTE(review): the WHERE clause is turned into Python source and
            # eval()'d -- never feed this class SQL from an untrusted source.
            func = eval("lambda row:"+self._filter_builder(tokens.where))
            parts.append(partial(Filter, function=func))
        if tokens.groupby:
            # Group by query
            parts.append(partial(GroupBy,
                group_by = [c[0] for c in tokens.groupby],
                columns = self.column_classes))
        elif self.column_classes and any(len(c.name)>1 for c in tokens.columns):
            # Aggregate query
            parts.append(partial(Aggregator, columns=self.column_classes))
        else:
            # Basic select
            parts.append(partial(Selector, columns=[(c.name[0], c.alias) for c in tokens.columns] if tokens.columns != '*' else None))
        if tokens.orderby:
            order = tokens.orderby
            parts.append(partial(OrderBy, order_by=order[0][0], descending=order[1]=='DESC' if len(order) > 1 else False))
        if tokens.limit or tokens.offset:
            parts.append(partial(LimitOffset,
                limit = int(tokens.limit) if tokens.limit else None,
                offset = int(tokens.offset) if tokens.offset else 0))
        return parts
    def _filter_builder(self, where):
        """Return a Python expression from a tokenized 'where' filter"""
        l = []
        for expr in where:
            if expr[0] == '(':
                # Parenthesised sub-expression: recurse over the inner tokens.
                l.append("(")
                l.append(self._filter_builder(expr[1:-1]))
                l.append(")")
            else:
                if isinstance(expr, basestring):
                    # Boolean connective ("and"/"or") passes through verbatim.
                    l.append(expr)
                elif len(expr) == 3:
                    if expr[1] == "like":
                        l.append('re.match(%s, row["%s"])' % (sql_like(expr[2]), expr[0].lower()))
                    elif expr[1] in ("~", '~*', '!~', '!~*'):
                        # PostgreSQL-style regex operators: trailing '*' means
                        # case-insensitive, leading '!' negates the match.
                        neg = "not " if expr[1][0] == '!' else ""
                        flags = re.I if expr[1][-1] == '*' else 0
                        l.append('%sre.match(r%s, row["%s"], %d)' % (neg, expr[2], expr[0].lower(), flags))
                    else:
                        op = OPERATOR_MAPPING[expr[1]]
                        l.append('(row["%s"] %s %s)' % (expr[0].lower(), op, expr[2]))
                elif expr[1] == "in":
                    l.append('(row["%s"] in %r)' % (expr[0].lower(), expr[3:-1]))
                else:
                    raise Exception("Don't understand expression %s in where clause" % expr)
        return " ".join(l)
    def _column_builder(self, col):
        """Return a callable that builds a column or aggregate object"""
        if len(col.name) > 1:
            # Aggregate
            try:
                aclass = aggregate_functions[col.name[0]]
            except KeyError:
                raise KeyError("Unknown aggregate function %s" % col.name[0])
            return lambda:aclass(col.name[1], col.alias if col.alias else '%s(%s)' % (col.name[0], col.name[1]))
        else:
            # Column
            return lambda:Column(col.name[0], col.alias)
    def __call__(self, source):
        """Wrap `source` in the compiled pipeline and return the executor."""
        executor = self._table_subquery(source) if self._table_subquery else source
        for p in self._parts:
            executor = p(source=executor)
        # Expose the output column names for downstream writers.
        executor.columns = [c().name for c in self.column_classes] if self.column_classes else source.columns
        return executor
########NEW FILE########
__FILENAME__ = sql
# This file is camelCase to match pyparsing
__all__ = ["sql_parser"]
from pyparsing import Literal, CaselessLiteral, Word, Upcase, delimitedList, Optional, \
Combine, Group, alphas, nums, alphanums, ParseException, Forward, oneOf, quotedString, \
ZeroOrMore, restOfLine, Keyword, downcaseTokens, Suppress, stringEnd, Regex, NotAny
selectToken = Keyword("select", caseless=True)
fromToken = Keyword("from", caseless=True)
whereToken = Keyword("where", caseless=True)
groupByToken = Keyword("group", caseless=True) + Keyword("by", caseless=True)
orderByToken = Keyword("order", caseless=True) + Keyword("by", caseless=True)
limitToken = Keyword("limit", caseless=True)
offsetToken = Keyword("offset", caseless=True)
keywords = NotAny(selectToken | fromToken | whereToken | groupByToken | orderByToken | limitToken | offsetToken)
selectStmt = Forward()
ident = Word(alphas, alphanums + "_$").setName("identifier")
# ident = Regex(r'"?(?!^from$|^where$)[A-Za-z][A-Za-z0-9_$]*"?').setName("identifier")
columnName = delimitedList(ident, ".", combine=True).setParseAction(downcaseTokens)
aggregateFunction = (
(CaselessLiteral("count") | CaselessLiteral("sum") |
CaselessLiteral("min") | CaselessLiteral("max") | CaselessLiteral("avg"))
+ Suppress("(") + (columnName | oneOf("1 *")) + Suppress(")"))
columnDef = Group(aggregateFunction | columnName).setResultsName("name")
aliasDef = Optional(Optional(Suppress(CaselessLiteral("AS"))) +
keywords +
columnName.setResultsName("alias"))
filename = Word(alphanums+"/._-$").setName("filename")
tableName = delimitedList(filename, ".", combine=True)
subQuery = Group(Suppress("(") + selectStmt + Suppress(")"))
tableDef = subQuery | tableName
# tableNameList = Group(delimitedList(Group(tableDef + aliasDef))) # Standard SQL table list
tableNameList = Group(delimitedList(Group(tableDef), ' ')) # Not standard SQL table list. Allow spaces to separate tables. Easier to use on command line.
whereExpression = Forward()
and_ = Keyword("and", caseless=True)
or_ = Keyword("or", caseless=True)
in_ = Keyword("in", caseless=True)
like = Keyword("like", caseless=True)
E = CaselessLiteral("E")
binop = oneOf("= != <> < > >= <= eq ne lt le gt ge", caseless=True)
regexOp = oneOf("~ ~* !~ !~*")
arithSign = Word("+-", exact=1)
realNum = (Combine(
Optional(arithSign) + (
Word(nums) + "." + Optional(Word(nums)) | ("." + Word(nums))
) + Optional(E + Optional(arithSign) + Word(nums)))
.setName("real")
.setParseAction(lambda s,l,toks: float(toks[0])))
intNum = (Combine(Optional(arithSign) + Word(nums) +
Optional(E + Optional("+") + Word(nums)))
.setName("integer")
.setParseAction(lambda s,l,toks: int(toks[0])))
# WHERE
columnRval = realNum | intNum | quotedString | columnName # need to add support for alg expressions
columnLikeval = quotedString
whereCondition = Group(
(columnName + binop + columnRval) |
(columnName + like + columnLikeval) |
(columnName + regexOp + quotedString) |
(columnName + in_ + "(" + delimitedList(columnRval) + ")") |
(columnName + in_ + "(" + selectStmt + ")") |
("(" + whereExpression + ")")
)
whereExpression << whereCondition + ZeroOrMore((and_ | or_) + whereExpression)
# GROUP BY
groupByExpression = Group(delimitedList(columnDef))
# ORDER BY
orderByExpression = Group(delimitedList(columnDef + Optional(CaselessLiteral("DESC") | CaselessLiteral("ASC"))))
# LIMIT
limitExpression = intNum
# OFFSET
offsetExpression = intNum
# define the grammar
selectColumnList = Group(delimitedList(Group(columnDef + aliasDef)))
selectStmt << (
selectToken +
('*' | selectColumnList).setResultsName("columns") +
fromToken + tableNameList.setResultsName("tables") +
Optional(whereToken + whereExpression.setResultsName("where"), "") +
Optional(groupByToken + groupByExpression.setResultsName("groupby"), "") +
Optional(orderByToken + orderByExpression.setResultsName("orderby"), "") +
Optional(limitToken + limitExpression.setResultsName("limit"), "") +
Optional(offsetToken + offsetExpression.setResultsName("offset"), ""))
sql_parser = selectStmt # + stringEnd
sqlComment = "--" + restOfLine # ignore comments
sql_parser.ignore(sqlComment)
########NEW FILE########
__FILENAME__ = version
VERSION = "0.3"
########NEW FILE########
|
9a46d3fdd0076d9eee95fab47466cbe8b52919c8 gyandhanee/fsdse-python-assignment-8 even_numbers.py 78 3.53125 4 def get_even_numbers(num):
    # Even numbers from 1 through num, inclusive of num when it is even.
    return [i for i in range(1,num+1) if i%2 == 0]
|
bcf1e10a8a03c7a1e9aa218f911d08fc79acd01f | vik-tort/hillel | /Homework_/Task_26.py | 282 | 4.03125 | 4 | lst= [2,2,2,3,3]
def sum_diff(lst):
    """Return (sum of even elements) minus (sum of odd elements) of lst."""
    evens = sum(value for value in lst if value % 2 == 0)
    odds = sum(value for value in lst if value % 2 != 0)
    return evens - odds
print("Sum different =",sum_diff(lst)) |
29a3092eb99281be431ffea8f6cb5f1d69dc03d5 | rathijeetbhave/dynamicProgramming | /lis.py | 1,256 | 4.03125 | 4 | # The Longest Increasing Subsequence (LIS) problem is to find the length of the longest subsequence of a given sequence
# such that all elements of the subsequence are sorted in increasing order.
# For example, the length of LIS for {10, 22, 9, 33, 21, 50, 41, 60, 80} is 6 and LIS is {10, 22, 33, 50, 60, 80}.
# Here we will maintain a temp array that will store the values of lis till that index.
# Then we just check if curr elem is smaller then the elem for which we are finding lis, then we can add 1 to lis of curr element.
# In the end we will pick max value from the temp array to get the overall lis.
def lis(a):
    """Longest-increasing-subsequence DP table.

    Returns best, where best[i] is the length of the longest strictly
    increasing subsequence of `a` that ends at index i (a lone element
    counts as length 1).
    """
    best = [1] * len(a)
    for end in range(1, len(a)):
        for start in range(end):
            if a[start] < a[end]:
                best[end] = max(best[end], best[start] + 1)
    return best
def print_ans(temp, a) :
index = temp.index(max(temp))
lastIndex = index
ans = [a[index]]
while index >= 0 :
if temp[index] == temp[lastIndex] - 1 :
ans.append(a[index])
lastIndex = index
index -= 1
return ans
a = [10, 22, 9, 33, 21, 50, 41, 60, 80]
a = [3, 10, 2, 1, 20]
a = [3, 2]
# NOTE(review): `a` is rebound three times above; only this last assignment
# is actually used by the print below.
a = [50, 3, 10, 7, 40, 80]
print print_ans(lis(a), a)  # Python 2 print statement
|
78f94e40efdc38576a200d2289740e2d2170164d | Bobbyb6/PythonProject | /Log_In_Project.py | 994 | 4.03125 | 4 | # Password/username project
def get_attempted_username():
    """Prompt on stdin for the username being tried."""
    return input('Enter Username: ')
def get_attempted_password():
    """Prompt on stdin for the password being tried."""
    return input('Enter Password: ')
def get_stored_password():
    # Looks up the stored password for the module-global `attempted_username`
    # in the module-global `data` dict. On an unknown username it prints a
    # message and implicitly returns None, which then fails the comparison.
    try:
        return data[attempted_username]
    except KeyError:
        print('This Username is not recognised Enter a valid Username!')
def check_passwords_are_equal():
    # Compares the module-globals `password` (stored) and `attempted_password`.
    return password == attempted_password
# NOTE(review): credentials hard-coded in plain text -- acceptable only for a
# toy exercise, never for real authentication.
data = {'Bobby_96': 'Password1', 'Gamer101': 'Xboxxx2021', 'dave000': 'GUESS', 'ExAmPlE1': 'passworddude'}
print('_____Log in dashboard_____')
attempts = 0
# Allow at most three attempts before locking the user out.
while attempts < 3:
    attempted_username = get_attempted_username()
    attempted_password = get_attempted_password()
    password = get_stored_password()
    if check_passwords_are_equal():
        print('Logged in')
        break
    else:
        print('Incorrect Username or Password')
        attempts = attempts + 1
    # Lock-out message fires on the iteration that used up the last attempt.
    if attempts == 3:
        print("Maximum attempt's reached you are locked out see further assistance")
|
e3d71ee58d969700f507872a2accce2673cb4815 | kunalj101/flask-blog | /sql.py | 602 | 3.9375 | 4 | #sql.py databse connections
#import sqlite
import sqlite3
#create new database, if it does not exist
# NOTE(review): the connection context manager commits on success, but
# CREATE TABLE raises OperationalError if the script runs twice (no
# "IF NOT EXISTS" guard), and the connection itself is never closed.
with sqlite3.connect("blog.db") as connection:
    #Get cursor object to execute the SQL commands
    c = connection.cursor()
    #create the table
    c.execute("""CREATE TABLE posts
    (title TEXT, post TEXT)""")
    #insert dummy values in the table
    c.execute("""INSERT INTO posts VALUES ("Good","I'm good")""")
    c.execute("""INSERT INTO posts VALUES ("Okay","I'm okay")""")
    c.execute("""INSERT INTO posts VALUES ("Excellent","I'm excellent")""")
    c.execute("""INSERT INTO posts VALUES ("Well","I'm well")""")
|
8c0f7fa937fcbe3170d07f25c3fd2c0657279574 9bason/pp2 TSIS3/1_4.py 129 3.71875 4 arr = list(input().split())
# Print every non-zero token first, then every "0" token, keeping relative
# order within each class (tokens are compared as strings).
for i in arr:
    if i != '0': print(i, end = " ")
for i in arr:
    if i == '0': print(i, end = " ")
ada92058c1960bd4aa2962a851faa20e866e34e4 GledsonLScruz/Learning-Python exercicio2.py 284 4.03125 4 reta1 = float(input('Digite um valor: '))
reta2 = float(input('Digite outro valor: '))
reta3 = float(input('Digite outro valor: '))
# Triangle-inequality test: reta1 must lie strictly between |reta2 - reta3|
# and reta2 + reta3 for the three lengths to form a triangle.
if reta1 > abs(reta2 - reta3) and reta1 < reta2 + reta3:
    print('Pode formar um tringulo')
else:
    print('Não pode formar um triangulo')
|
656462b54463197990029794c16cb30caad8eb7c | cmumman/calculator | /calculator/GUI/counter.py | 412 | 3.8125 | 4 | import Tkinter as tk
counter = 0
def count_label(label):
    """Start updating `label` once per second with an ever-increasing count."""
    def count():
        global counter
        counter += 1
        label.config(text=str(counter))
        # Re-schedule ourselves; after() keeps the Tk event loop responsive.
        label.after(1000, count)
    count()
loop = tk.Tk()
loop.title("counting seconds")
label = tk.Label(loop, fg="red")
label.pack()
count_label(label)
# NOTE(review): pack() returns None, so `button` is always None here; the
# widget itself still works because pack() was called on the Button.
button = tk.Button(loop, text="stop", width=50, command=loop.destroy).pack()
loop.mainloop()
|
fe7e7a6244ed78042a73778d3e88a10ccdc16e1b | konradbondu/CodeWars-solutions | /vovel_code.py | 1,141 | 4.40625 | 4 | # Step 1: Create a function called encode() to replace all the lowercase vowels in a given string with numbers
# according to the following pattern:
#
# a -> 1
# e -> 2
# i -> 3
# o -> 4
# u -> 5
# For example, encode("hello") would return "h2ll4". There is no need to worry about uppercase vowels in this kata.
#
# Step 2: Now create a function called decode() to turn the numbers back into vowels according to the same pattern
# shown above.
#
# For example, decode("h3 th2r2") would return "hi there".
#
# For the sake of simplicity, you can assume that any numbers passed into the function will correspond to vowels.
def encode(st):
    """Replace lowercase vowels with digits: a->1, e->2, i->3, o->4, u->5.

    Same mapping as before, but done in a single pass with str.translate
    instead of converting to a list and patching indices.
    Uppercase vowels and every other character pass through unchanged.
    """
    return st.translate(str.maketrans("aeiou", "12345"))
def decode(st):
    """Inverse of encode(): map the digits 1-5 back to a, e, i, o, u.

    Implemented as a single str.translate pass; non-digit characters are
    left untouched, exactly as in the original index-patching loop.
    """
    return st.translate(str.maketrans("12345", "aeiou"))
|
e856b91c026eeda8d434810787c682eb8e65c259 | pabb2002/Computer-Science-Labs | /CS1/Python/MoreStringsNotes.py | 1,283 | 4.15625 | 4 | # Strings loops and find/rfind methods
# Why do we use loops with string?
# to go index by index through the string
# this way is long
s = "first"
print s[0]
print s[1]
print s[2]
print s[3]
print s[4]
# but with a loop:
s = "first"
for x in range(len(s)): #range => start to stop, does range run at the stop? No
print s[x],
print
for x in range(len(s)-1, -1, -1): #must start at len(s) - 1 because length is one greater than highest index
#stop value is -1 because check is if x>-1, so that the x reaches index of 0
print s[x],
print
for let in s: #can't manipulate step and direction; let is letter and you don't need brackets to access each letter
print let,
print
# find method starts at 0 and increases by 1 until it finds the character or phrase
happy = "happys days"
print len(happy)
print happy.find("a")
print happy.find("ys")
print happy.find("x") # -1 tells that it is not part of the string bc no index of -1
print happy.find(" ")
# rfind method starts at len()-1 and decreases by 1 until it finds the character or phrase
print happy.rfind("a")
print happy.rfind("ys")
print happy.rfind("x") # still returns -1
print happy.rfind(" ")
#count
print happy.count("a")
print happy.count("ys")
|
f7c605ce3acde6627adbeba7b33e95140952b86b | q737645224/python3 | /python基础/python笔记/day09/exercise/myadd.py | 328 | 3.84375 | 4 | # 练习:
# 写一个函数 myadd, 此函数可以计算两个数的和,也可
# 以计算三个数的和
# def myadd(......):
# ....
# print(myadd(10, 20)) # 30
# print(myadd(100, 200, 300)) # 600
def myadd(a, b, c=0):
return a + b + c
print(myadd(10, 20)) # 30
print(myadd(100, 200, 300)) # 600
|
3b42206a8ab151047c67fb6c0a7e04c55efc9f9a | bpa/advent-of-code | /lib/python/aoc/point.py | 7,799 | 4.1875 | 4 | class Point:
"""A point in 2D space"""
def __init__(self, grid, x, y):
self.grid = grid
self.x = x
self.y = y
def get(self):
return self.grid.data[self.y][self.x]
def set(self, value):
self.grid.data[self.y][self.x] = value
def neighbor(self, x, y):
nx = self.x + x
if nx < 0 or nx >= self.grid.width:
return None
ny = self.y + y
if ny < 0 or ny >= self.grid.height:
return None
return Point(self.grid, nx, ny)
def n(self): return self.neighbor(0, -1)
def ne(self): return self.neighbor(1, -1)
def e(self): return self.neighbor(1, 0)
def se(self): return self.neighbor(1, 1)
def s(self): return self.neighbor(0, 1)
def sw(self): return self.neighbor(-1, 1)
def w(self): return self.neighbor(-1, 0)
def nw(self): return self.neighbor(-1, -1)
def adjacent_4(self):
"""Get the n, e, s, w adjacent points"""
from .func import nop1
return filter(nop1, [self.n(), self.e(), self.s(), self.w()])
def adjacent_8(self):
"""Get the n, e, s, w adjacent points"""
from .func import nop1
return filter(nop1, [self.n(), self.ne(), self.e(), self.se(), self.s(), self.sw(), self.w(), self.nw()])
def manhattan_distance(self, to):
return abs(self.x - to.x) + abs(self.y - to.y)
def __iter__(self):
return iter([self.x, self.y])
def __repr__(self):
return f'({self.x}, {self.y})'
def __set__(self, instance, value):
print(self, instance, value)
def __lt__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() < o
def __le__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() <= o
def __eq__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() == o
def __ne__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() != o
def __gt__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() > o
def __ge__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() >= o
def __hash__(self):
return self.x * 181 + self.y
def __bool__(self):
return bool(self.grid.data[self.y][self.x])
def __add__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() + o
__radd__ = __add__
def __sub__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() - o
def __rsub__(self, o):
if isinstance(o, Point):
o = o.get()
return o - self.get()
def __mul__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() * o
__rmul__ = __mul__
def __truediv__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() / o
def __rtruediv__(self, o):
if isinstance(o, Point):
o = o.get()
return o / self.get()
def __floordiv__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() // o
def __rfloordiv__(self, o):
if isinstance(o, Point):
o = o.get()
return o//self.get()
def __mod__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() % o
def __rmod__(self, o):
if isinstance(o, Point):
o = o.get()
return o % self.get()
def __divmod__(self, o):
if isinstance(o, Point):
o = o.get()
return divmod(self.get(), o)
def __rdivmod__(self, o):
if isinstance(o, Point):
o = o.get()
return divmod(o, self.get())
def __pow__(self, o):
if isinstance(o, Point):
o = o.get()
return pow(self.get(), o)
def __rpow__(self, o):
if isinstance(o, Point):
o = o.get()
return pow(o, self.get())
def __lshift__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() << o
def __rlshift__(self, o):
if isinstance(o, Point):
o = o.get()
return o << self.get()
def __rshift__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() >> o
def __rrshift__(self, o):
if isinstance(o, Point):
o = o.get()
return o >> self.get()
def __and__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() & o
__rand__ = __and__
def __xor__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() ^ o
__rxor__ = __xor__
def __or__(self, o):
if isinstance(o, Point):
o = o.get()
return self.get() | o
__ror__ = __or__
def __iadd__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] += o
return self
def __isub__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] -= o
return self
def __imul__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] *= o
return self
def __itruediv__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] /= o
return self
def __ifloordiv__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] //= o
return self
def __imod__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] %= o
return self
def __ipow__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] += self.grid.data[self.y][self.x].pow(o)
return self
def __ilshift__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] <<= o
return self
def __irshift__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] >>= o
return self
def __iand__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] &= o
return self
def __ixor__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] += o
return self
def __ior__(self, o):
if isinstance(o, Point):
o = o.get()
self.grid.data[self.y][self.x] |= o
return self
def __neg__(self):
-self.grid.data[self.y][self.x]
def __pos__(self):
+self.grid.data[self.y][self.x]
def __abs__(self):
abs(self.grid.data[self.y][self.x])
def __invert__(self):
~self.grid.data[self.y][self.x]
def __complex__(self):
complex(self.grid.data[self.y][self.x])
def __int__(self):
int(self.grid.data[self.y][self.x])
def __float__(self):
float(self.grid.data[self.y][self.x])
def __round__(self, digits):
round(self.grid.data[self.y][self.x], digits)
def __trunc__(self, digits):
from math import trunc
trunc(self.grid.data[self.y][self.x], digits)
def __floor__(self, digits):
from math import floor
floor(self.grid.data[self.y][self.x], digits)
def __ceil__(self, digits):
from math import ceil
ceil(self.grid.data[self.y][self.x], digits)
|
114419bd8757bb8eb4bd1d47b1de9ffc87af099a | mkebrahimpour/DataStructures_Python | /GeeksForGeeks/Binary Trees/expression_tree.py | 1,054 | 3.984375 | 4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 18 18:01:39 2019
@author: sbk
"""
# Python program to insert element in binary tree
class Tree:
    """A binary-tree node holding one value and two child links."""
    def __init__(self, data):
        self.data = data
        self.left = self.right = None
# A utility function to do inorder traversal
def inorder(t):
    """Print the node values in left-root-right (sorted) order, one per line."""
    if t is None:
        return
    inorder(t.left)
    print(t.data)
    inorder(t.right)
def isOperator(value):
    """True when `value` is one of the five supported binary operators."""
    return value in ('+', '-', '*', '/', '%')
def postfix_convertor(expr):
    """Build an expression tree from a postfix string (one char per token).

    Operands become leaves; each operator pops its two subtrees off the
    stack (right operand first) and pushes the combined node back. The
    final stack entry is the root of the whole expression.
    """
    pending = []
    for token in expr:
        node = Tree(token)
        if isOperator(token):
            node.right = pending.pop()
            node.left = pending.pop()
        pending.append(node)
    return pending.pop()
postfix = "ab+ef*g*-"
r = postfix_convertor(postfix)
print("Infix expression is")
inorder(r) |
a03d14de6f654e8b17b7c9f91e823a6ab286b7b8 | acoltelli/Algorithms-DataStructures | /Fibonacci.py | 905 | 3.84375 | 4 | from timeit import *
#recursive
def nthFibonacci(n):
    """Naive exponential-time recursive Fibonacci (F(0)=0, F(1)=1)."""
    if n in (0, 1):
        return n
    return nthFibonacci(n - 1) + nthFibonacci(n - 2)
#memoization
def nthFibonacci_(n, memo={0: 0, 1: 1}):
    """Memoized Fibonacci.

    The shared mutable default dict is the cache -- intentional here, so
    results persist across calls.
    """
    try:
        return memo[n]
    except KeyError:
        memo[n] = nthFibonacci_(n - 1) + nthFibonacci_(n - 2)
        return memo[n]
###tests
testRecurse = Timer("nthFibonacci(20)",
"from __main__ import nthFibonacci")
testMemo = Timer("nthFibonacci_(20)",
"from __main__ import nthFibonacci_")
testRecurse1 = Timer("nthFibonacci(50)",
"from __main__ import nthFibonacci")
testMemo1 = Timer("nthFibonacci(50)",
"from __main__ import nthFibonacci")
print testRecurse.timeit(number=10) #statement executed ten times
print testMemo.timeit(number=10)
# print testRecurse1.timeit(number=10)
print testMemo1.timeit(number=10)
|
b86f386c4901285b7680e38975f5cd6c3611ca56 | bmckelv30/python-challenge | /PyPoll/main.py | 2,004 | 3.921875 | 4 | # Import modules
import os
import csv
# Read in csv file
csvpath = os.path.join('Resources', 'election_data.csv')
with open(csvpath, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    header=next(csvreader)  # skip the header row
    # print(f"Header: {header}")
    votes = []
    candidate = []
    total=0
    for row in csvreader:
        # The total number of votes cast
        total=total+1
        # Compile all the votes in list
        votes.append(row[2])
        # A complete list of candidates who received votes
        if row[2] not in candidate:
            candidate.append(row[2])
    # Print out total votes
    print (f"Election results")
    print (f"----------------------------")
    print (f"Total Votes: {total}")
    print (f"----------------------------")
    # print(candidate)
    # Sort in order to obtain count
    # NOTE(review): missing parentheses -- `votes.sort` only references the
    # method and sorts nothing; harmless here because .count() and max()
    # below do not need sorted data.
    votes.sort
    # The Print out candidate, percentage and number of votes each candidate won
    for i in range(len(candidate)):
        print(f"{str(candidate[i])}: {str(format(votes.count(candidate[i]) / total * 100,'.3f'))}% ({str(votes.count(candidate[i]))})")
    # Print out winner
    print (f"----------------------------")
    print (f"Winner: {max(set(votes),key=votes.count)}")
    print (f"----------------------------")
    # NOTE(review): this open() leaks a handle -- text_file is immediately
    # rebound by the with-statement below, and the close() after that with
    # block closes an already-closed file.
    text_file = open("election_result.txt","w")
    with open("election_result.txt", "w") as text_file:
        print (f"Election Results", file=text_file)
        print (f"----------------------------", file=text_file)
        print (f"Total Votes: {total}", file=text_file)
        print (f"----------------------------", file=text_file)
        for i in range(len(candidate)):
            print(f"{str(candidate[i])}: {str(format(votes.count(candidate[i]) / total * 100,'.3f'))}% ({str(votes.count(candidate[i]))})", file=text_file)
        print (f"----------------------------", file=text_file)
        print (f"Winner: {max(set(votes),key=votes.count)}", file=text_file)
        print (f"----------------------------", file=text_file)
text_file.close() |
c458b1633c4dd3ae039a8f25f58073686d15ecb2 | 3dmikec/python | /credit-card-validator.py | 944 | 4.15625 | 4 | '''
CHECKS IF A CREDIT CARD NUMBER IS VALID USING THE LUHN ALGORITHM
'''
def cc_checker(cc=None):
    """Validate a credit-card number with the Luhn algorithm.

    cc: the card number as a string of digits; when None (the default)
        the number is read from stdin, preserving the original
        interactive behaviour.
    Prints and returns "VALID" or "NOT VALID". The return value is new
    and backward compatible (callers previously ignored the None result).
    """
    if cc is None:
        cc = input("Enter your credit card number: ")
    total = 0
    # Double every second digit starting from the right-most digit; a
    # doubled value of 10..18 contributes its digit sum, which equals n - 9.
    for digit in cc[-2::-2]:
        doubled = int(digit) * 2
        total += doubled if doubled < 10 else doubled - 9
    # The remaining digits (every other one from the right) are added as-is.
    for digit in cc[-1::-2]:
        total += int(digit)
    # A Luhn-valid number has a total divisible by 10.
    verdict = "VALID" if total % 10 == 0 else "NOT VALID"
    print(verdict)
    return verdict
cc_checker()
input("Press ENTER to exit")  # keeps the console window open until Enter
bfb49daf7f7eee2ca1e964c61d5d3ce2edd4c022 | AjithCGeorge/PythonAssignment | /set3/qn1.py | 946 | 4.0625 | 4 | # Qn1
import datetime
day=datetime.date.today().day
month=datetime.date.today().month
year=datetime.date.today().year
# print(datetime.date.today())
# print(day.value)
# Months with 31 days; the leading 0 is a sentinel so "month 0" (January's
# predecessor, i.e. December of last year) also maps to 31 days.
months31=[0,1,3,5,7,8,10,12]
print('today is : ',datetime.date(year,month,day) )
try:
    tomorrow=datetime.date(year,month,day+1)
# NOTE(review): bare except; also on 31 December tmonth becomes 13 and the
# date() call below raises an uncaught ValueError -- the inner try can never
# fail because month+1 does not raise.
except:
    tday=1
    try:
        tmonth=month+1
    except:
        tmonth=1
    tomorrow = datetime.date(year, tmonth, tday)
print('tomorrow is : ',tomorrow)
ymonth=month
yday=day-1
# NOTE(review): on the 1st of a month this fixes yday but never moves ymonth
# back a month (for 1 January it even sets ymonth=1 instead of 12), and the
# final else gives a non-leap February 30 days -- the printed "yesterday" is
# wrong on every month boundary.
if day==1:
    if month-1 in months31:
        yday=31
    if month-1 ==0 :
        ymonth=1
        year=year-1
    else:
        if month-1 ==2:
            if year % 4 == 0 :
                if year % 100 == 0 and year % 400 !=0 :
                    yday=28
                else:
                    yday=29
            else:
                yday=30
print('Yesterday was :',datetime.date(year,ymonth,yday))
|
db9e2bddf7ccb7d0ebd6e17a22d86f4b704ae9c0 kimathi-chris/christopher Sum.py 177 4.03125 4 num1 =input ('Enter the first number:')
num2 =input ('Enter the second number:')
# NOTE(review): `sum` shadows the built-in sum() from here on.
sum= float(num1) + float(num2)
print('the sum of {0} and {1} is {2}'.format(num1, num2, sum))
|
4ff1e29e1e2e88a8615773dfc7e093157b904ce3 | casanas10/practice_python | /HashTable/ContactList.py | 1,092 | 4.40625 | 4 | '''
Design a hashable class that represents contacts
- Assume each contact is a string and must be in a list
- Possible that contacts are duplicates.
'''
class ContactList:
    """A hashable bag of contact names (order and duplicates are ignored)."""
    def __init__(self, names):
        """names: list of contact-name strings (duplicates allowed)."""
        self.names = names
    def __hash__(self):
        # frozenset is the immutable (hence hashable) stand-in for a set, so
        # equal contact sets hash equally regardless of order or duplicates.
        # Hashing is O(n) in the number of names.
        return hash(frozenset(self.names))
    def __eq__(self, other):
        """Two lists are equal when they hold the same distinct names."""
        mine, theirs = set(self.names), set(other.names)
        return mine == theirs
    def merge_contact_list(self, contacts):
        """Deduplicate a list of ContactList objects (uses __hash__/__eq__)."""
        distinct = set(contacts)
        return list(distinct)
'''
Time Complexity for computing the Hash is O(n) where n is the number of strings in the contact list
'''
|
e6f82dc39636bd069f823010da550959757313c4 | kongziqing/Python-2lever | /实战篇/第15章-并发编程/15.4多线程编程/15.4.1thread实现多线程.py | 2,088 | 3.96875 | 4 | """
threading实现多线程
threading 是Python提供的新的线程开发模块,除了支持基本的线程处理外,也提供了大量的工具类,本课程使用此模块讲解了多线程的实现以及线程
相关信息的获取
threading是一个最新的多线程实现模块,拥有更加方便的线程控制以及线程同步支持,在此模块中提供了一个Thread类实现线程的相关
处理操作,Thread类的常用方法如表
threading.Thread 类常用方法
def __init__(self,group=None,target=None,name=None,args=(),kwargs=None,*,daemon=None)
构建一个线程对象,参数作用如下
group:分组定义
target:线程处理对象(代替run()方法)
name:线程名称,若不设置,则自动分配一个名称
args:线程处理对象所需要的执行参数
kwargs:调用对象字典
daemon:是否设置为后台线程
def start(self) 线程启动
def run(self) 线程操作主题,若没设置target处理函数,则执行此方法
def join(self,timeout=None) 线程强制执行
def name(self) 获取线程名称
def ident(self) 获取线程标识
def is_alive(self) 判断线程存活状态
使用threading.Thread实现的多线程可以设置线程的执行函数,也可以定义单独的线程处理类,由于
多线程的运行状态不确定,所以可以利用threading.current_thread()函数动态获取当前正在执行方法体的
线程对象
"""
import threading, time # 导入线程实现模块
def thread_handle(delay): # 线程处理函数
for num in range(5): # 迭代操作
time.sleep(delay) # 操作延迟
print("【%s】num = %s" % (
threading.current_thread().getName(), num)) # 输出线程提示信息
def main(): # 主函数
for item in range(10): # 迭代操作
thread = threading.Thread(target=thread_handle, args=(1,), name="执行线程 - %s" % item)
thread.start() # 启动子线程
if __name__ == "__main__": # 判断程序执行名称
main()
|
570c8d8fd4d94da4e86523921a2766ec545495bf | Calvonator/30-Days-of-Python | /Exercises/day-25-exercises.py | 215 | 4.0625 | 4 | def greet():
name = input("Hello, what is your name?").strip()
name = "".join(name.split())
if name is None:
print("Greetings World!")
else:
print(f"Greetings {name}!")
greet()
|
6c911134627ee02735567e30bb0216a502f98761 | stys/y-test-ranknet | /lib/la.py | 3,326 | 4.4375 | 4 | """
Linear algebra functions
========================
Notation
--------
Integers: i, j, k, m, n, q
Scalars: a, b, c, d, e, g, h
Vectors: s, t, u, v, w, x, y, z
Matrices: A, B, C, D, G, H
Column-wise matrix representation
---------------------------------
Matrix are stored column-wise, i.e A[j] gets the j-th column of the matrix.
"""
from sqlite3 import collections
from _ctypes import Array
def unit(n, k):
"""
Create a unit vector
"""
return [ 0 if j!=k else 1 for j in xrange(n) ]
def vsum(x, y):
"""
Compute element-wise sum of two vectors
"""
assert len(x) == len(y), "Vectors must be of the same size"
result = [0] * len(x) # preallocate result
for j in xrange(0, len(x)):
result[j] = x[j] + y[j] # sum of corresponding elements
return result
def vmul(x, y):
"""
Compute element-wise multiplication of two vectors
"""
assert len(x) == len(y), "Vectors must be of the same size"
result = [0] * len(x) # preallocate result
for j in xrange(0, len(x)):
result[j] = x[j] * y[j] # product of corresponding elements
return result
def inner(x, y):
"""
Inner product of two vectors
Returns
-------
Scalar value
"""
assert len(x) == len(y), "Vectors must be of the same size"
result = 0
for j in xrange(0, len(x)):
result += x[j]*y[j]
return result
def sax(a, x):
    """
    Multiply vector by scalar. The name is derived from `saxpy`:
    "Scalar `a` multiplied by vector `x` plus vector `y`"
    """
    # A list comprehension always yields a list; under Python 3 the original
    # map(...) returned a lazy iterator, which broke callers that index or
    # re-iterate the result.
    return [element * a for element in x]
def outer(x, y):
    """
    Outer product of two vectors

    Returns
    -------
    Column-wise matrix
    """
    # Each element of y scales the whole of x, producing one column.
    # Built as nested list comprehensions so the result is a real list of
    # lists under both Python 2 and 3 (map() is lazy in Python 3).
    return [[element * e for element in x] for e in y]
def gax(A, x):
    """
    Multiply matrix A by vector x. The name is derived from `gaxpy`:
    "General matrix `A` by vector `x` plus vector `y`"
    """
    assert len(A) == len(x), "Matrix dimensions must agree"
    z = [0] * len(A[0])  # running linear combination of columns
    # `range` instead of Python-2-only `xrange`; identical results
    for j in range(len(A)):
        z = vsum(z, sax(x[j], A[j]))
    return z
def lgax(x, A):
    """
    Left side version of gax
    """
    assert len(x) == len(A[0])
    # One inner product per column of A; replaces the xrange-indexed loop
    return [inner(x, col) for col in A]
class idxview(collections.Sequence):
    """
    View of list indexed by another list
    """
    def __init__(self, arr, idx):
        self.arr = arr  # backing sequence
        self.idx = idx  # indices into arr, in view order

    def __getitem__(self, index):
        return self.arr[self.idx[index]]

    def __len__(self):
        return len(self.idx)

    def __iter__(self):
        # NOTE(review): the cursor lives on the instance, so two concurrent
        # iterations over the same view interfere with each other.
        self.current = -1
        return self

    def next(self):
        if self.current >= len(self.idx)-1:
            raise StopIteration
        else:
            self.current += 1
            return self.arr[self.idx[self.current]]

    # Python 3 calls __next__ (not next) on iterators; the alias keeps this
    # custom iteration protocol working on both interpreter major versions.
    __next__ = next
|
727b0c62d37ef4e844da8851adfb1e43e91cc261 | Max6411/Python_ | /Matplotlib-14-second axis.py | 425 | 3.890625 | 4 | import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0, 10, 0.1)
y1 = 0.05 * x**2
y2 = -1*y1  # mirror image of y1 about the x-axis
fig, ax1 = plt.subplots()  # returns the figure and the primary axes
ax2 = ax1.twinx()  # second axes sharing the x-axis, with its y-axis on the right
ax1.plot(x, y1, color='green')
ax1.set_xlabel('X data')
ax1.set_ylabel('Y1 data', color='green')
ax2.plot(x, y2, color='blue')
ax2.set_ylabel('Y2 data', color="blue")
plt.show() |
6a7bd9cd2d5a9a653d1c115b24ce24d333f6a5f8 | Indiana3/python_exercises | /wb_chapter7/exercise162.py | 1,542 | 4.3125 | 4 | ##
# Compute the proportion of each alphabetic letter in a list of words
#
# Create an empty dict with letter as keys and occurences as values
letter_occurences = {}
# All the 26 letters of tha alphabet
alphabet = "abcdefghijklmnopqrstuvwxyz"
# Open the file with the list of words
fl = open("words.txt", "r", encoding="utf-8")
# Start counting the number of words in the list
num_words = 0
# Read each line (there is one word per line)
for line in fl:
# If the line has all uppercase letters or is an empty line, skip it
if line.isupper() or line == "":
continue
# Remove EOL chars from the line
word = line.strip()
# For each letter in the alphabet string
for letter in alphabet:
# Check if the letter is in the word
if letter in word:
# Increment the number of occurences of 1
letter_occurences[letter] = letter_occurences.get(letter, 0) + 1
# Increment the word counter of 1
num_words += 1
# Close the file
fl.close()
# Find the letter with the smallest frequency
smallest_frequency = min(letter_occurences.values())
# Print the dictionary with alphabetic characters and their percentage values
for letter, number in letter_occurences.items():
print("Letter {} occurs in {:.2f} percent of words".format(letter, number/num_words * 100))
if number == smallest_frequency:
smallest_letter = letter
print("\n")
# Display the letter with the lowest frequency
print("The letter with the lowest frequency is {}".format(smallest_letter))
|
b98695a590266acfb5bade4c9160d667effc428a | idobarkan/my-code | /interviews/qsort.py | 1564 | 3.5 | 4 | def q_sort_all(array):
    """Quicksort `array` in place over its full index range."""
    q_sort(array, 0, len(array) - 1)
def q_sort(array, i, k):
    """Recursively quicksort array[i..k] (inclusive) in place."""
    if i >= k:  # zero- or one-element range: nothing to sort
        return
    pivot = partition(array, i, k)
    q_sort(array, i, pivot - 1)
    q_sort(array, pivot + 1, k)
def partition(array, left, right):
    """Lomuto-style partition of array[left..right] around the chosen pivot.

    Moves the pivot value into its final sorted position and returns that
    index; elements <= pivot end up on its left.
    """
    pivot_index = choose_pivot(left, right)
    pivot_value = array[pivot_index]
    # Park the pivot at the right end while scanning
    array[pivot_index], array[right] = array[right], array[pivot_index]
    store_index = left
    # `range` works on Python 2 and 3 (the original used Python-2-only xrange)
    for i in range(left, right):
        if array[i] <= pivot_value:
            array[i], array[store_index] = array[store_index], array[i]
            store_index += 1
    # Put the pivot into its final slot
    array[store_index], array[right] = array[right], array[store_index]
    return store_index
def choose_pivot(left, right):
    """Return the midpoint index of [left, right].

    BUG FIX: uses floor division `//` -- under Python 3 the original `/`
    produced a float, which then blew up when used as a list index.
    Results are unchanged on Python 2.
    """
    return (left + right) // 2
import unittest
class QuicksortTestcase(unittest.TestCase):
    """Unit tests for choose_pivot and q_sort_all.

    NOTE(review): written for Python 2 -- test_quick_sort_already_sorted
    compares against range(5), which is a list only on Python 2, and the
    choose_pivot expectations assume integer division.
    """
    def test_choose_pivot(self):
        self.assertEqual(choose_pivot(4, 2), 3)
        self.assertEqual(choose_pivot(4, 4), 4)
        self.assertEqual(choose_pivot(4, 1), 2)
    def test_quick_sort_already_sorted(self):
        a = range(5)
        q_sort_all(a)
        self.assertEqual(a, range(5))
    def test_quick_sort_duplicates(self):
        a = [1, 1, 3]
        q_sort_all(a)
        self.assertEqual(a, [1, 1, 3])
    def test_quick_sort_reversed(self):
        a = [6, 5, 4, 3, 2, 1]
        q_sort_all(a)
        self.assertEqual(a, sorted(a))
    def test_quick_sort_shuffled(self):
        a = [3, 10, 19, 13, 2, 8, 6, 15, 11, 0, 7, 18, 16, 1, 14, 9, 12, 5, 4, 17]
        q_sort_all(a)
        self.assertEqual(a, sorted(a)) |
3301d44d7f2f51daba458833916bceacf12a2690 | SixMJ/Page-Myanmar | /4_string/1_example/6_string_functions.py | 448 | 4.03125 | 4 | # String methods do not mutate the original str; each returns a new str.
str1 = "hello"
print('capitalize - ' + str1.capitalize())  # first character upper-cased
str1 = "Hello"
print('casefold - ' + str1.casefold())  # aggressive lower-casing for caseless comparison
print('center - ' + str1.center(20,'*'))  # pad to width 20 with '*'
print('count - ' + str(str1.count('l')))  # occurrences of 'l'
print('find - ' + str(str1.find('o')))  # index of 'o', or -1 when absent
print('endswith - ' + str(str1.endswith('o')))  # True/False
print('index - ' + str(str1.index('o')))  # like find, but raises ValueError on miss
print('len - ' + str(len(str1)))
print('lower - ' + str1.lower())
|
d034b94dbc20316237fbf91759a6976a17afa5fa | vedpprakash/vedpprakash | /luhn algo.py | 1,110 | 3.59375 | 4 | def cal(num):
sum=0
while(num>0):
rem=num%10
sum=sum+rem
num=num//10
return sum
def split(xl):
    """Return the characters of `xl` as a list (one element per character)."""
    return list(xl)
def validate_credit_card_number(card_number):
    """Check an integer card number against the Luhn checksum.

    Working from the rightmost digit, every second digit is doubled; when
    the doubled value has two digits their digit-sum is taken (identical to
    subtracting 9, since a doubled single digit is at most 18).  The number
    is valid when the grand total is a multiple of 10.

    Rewritten without the original's chain of temporary lists and
    string-join/split round-trips (which also shadowed the `re` module
    name); the result for every integer input is unchanged.
    """
    total = 0
    reversed_digits = [int(ch) for ch in str(card_number)[::-1]]
    for position, digit in enumerate(reversed_digits):
        if position % 2 == 1:  # every second digit from the right
            digit *= 2
            if digit > 9:
                digit -= 9  # same as summing the two digits of 10..18
        total += digit
    return total % 10 == 0
card_number = 1456734512345698
result = validate_credit_card_number(card_number)
# Report the validation outcome for the sample number
message = "credit card number is valid" if result else "credit card number is invalid"
print(message)
|
230c05ed1492a6bbde6330998447822bde08ed1b | Persimmonboy/Automate_the_boring_stuff | /regexp_3.py | 390 | 3.5 | 4 | import re
phoneRegex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')
resume = 'some random resume with lot of phone numbers'
phoneRegex.search(resume) # Returns the first match (demo only: result discarded; this text has no digits, so it is None)
phoneRegex.findall(resume) # Returns a list of matching strings (also discarded)
lyrics = '12 drummers drumming, 11 pipes piping, 10 lords a leaping, 9 ladies dancing'
xmasRegex = re.compile(r'\d+\s\w+')  # a number, whitespace, then one word
print(xmasRegex.findall(lyrics))  # ['12 drummers', '11 pipes', '10 lords', '9 ladies']
|
1fec13b0cf879fa0cbbeb610e0bb6d4a6dbf67e5 | thiagofb84jp/python-exercises | /pythonBook/chapter05/exercise5-20.py | 431 | 3.609375 | 4 | '''
5.20. Calcula poupança (2)
'''
deposito = float(input("Depósito inicial: "))
taxa = float(input("Taxa de juros (Ex.: 3 para 3%): "))
investimento = float(input("Depósito mensal: "))
mes = 1
saldo = deposito
while mes <= 24:
saldo = saldo + (saldo * taxa / 100) + investimento
print(f"Saldo do mês {mes} é de R${saldo:5.2f}.")
mes += 1
print("O ganho obtido com os juros foi de R${saldo - deposito:8.2f}.")
|
73c674b756b5b95dfe94bd346e33f3dc93780167 | cainingning/leetcode | /tree_101.py | 992 | 4 | 4 | # Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node."""
    def __init__(self, x):
        self.val = x  # payload value
        self.left = None  # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    def isSymmetric(self, root):
        """
        :type root: TreeNode
        :rtype: bool

        True when the tree is a mirror image of itself around its root.
        """
        # BUG FIX: an empty tree is symmetric -- the original returned False
        # here, contradicting the LeetCode 101 specification.
        if root is None:
            return True
        return self.isSymmetric_core(root.left, root.right)

    def isSymmetric_core(self, l_c, r_c):
        # Two subtrees are mirrors when both are empty, or when their roots
        # match and each one's left mirrors the other's right.
        if l_c is None and r_c is None:
            return True
        if l_c is None or r_c is None:
            return False
        if l_c.val != r_c.val:
            return False
        # `and` short-circuits; the original bitwise `&` always evaluated
        # both recursive calls (and the preceding redundant val== check).
        return (self.isSymmetric_core(l_c.left, r_c.right)
                and self.isSymmetric_core(l_c.right, r_c.left))
if __name__ == '__main__':
    # Sample tree: 1 with children 2 and 2, each having only a RIGHT child 3.
    # It is not symmetric (a mirror needs the 3s on opposite sides), so this
    # prints False.
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(2)
    root.left.right = TreeNode(3)
    root.right.right = TreeNode(3)
    solution = Solution()
    print(solution.isSymmetric(root)) |
d7ca974a1569c5a6d57380c549507e18701a4438 | versachin/chapter-4 | /Chapter 4 Problem 9.py | 510 | 4.21875 | 4 | #Write a void function to draw a star,
#where the length of each side is 100 units.
#(Hint: You should turn the turtle by 144 degrees at each point.)
import turtle
wn=turtle.Screen()  # drawing window
wn.bgcolor("lightgreen")
alex=turtle.Turtle()  # the pen used by draw_star below
alex.color("hotpink")
alex.pensize(3)
def draw_star():
    """Draw a 5-pointed star with 100-unit edges using the global `alex` turtle.

    NOTE(review): turning 216 degrees is equivalent to the hinted 144-degree
    turn taken in the opposite direction; the loop draws four edges and the
    final forward() adds the fifth -- confirm the figure closes as intended.
    """
    alex.right(216)
    for i in range(4):
        alex.forward(100)
        alex.left(216)
    alex.forward(100)
# Reposition the turtle without drawing: five pen-up hops of 350 units with a
# 144-degree turn before each (presumably to move away from the centre before
# drawing -- TODO confirm the intended final position).
for i in range(5):
    alex.penup()
    alex.left(144)
    alex.forward(350)
    alex.pendown()
draw_star()
|
946c7f852c7bfbf4abace94bd02c2c99b06c1687 | rlpmeredith/cn-python-programming | /labs/03_more_datatypes/2_lists/04_11_split.py | 448 | 4.4375 | 4 | '''
Write a script that takes in a string from the user. Using the split() method,
create a list of all the words in the string and print the word with the most
occurrences.
'''
# Tested 19-7-19 -> not entirely happy with how it actually works!
my_string = input("Please input a list of words separated by spaces: ")
my_list = my_string.split()
print(set(my_list))  # NOTE(review): sets are unordered, so this print order varies between runs
print("Word with most occurences is: ", (max(set(my_list), key=my_list.count)))  # ties resolved arbitrarily
|
bc9c3bef916403a1e2eee1696f74d9faad64c469 | er-aditi/Learning-Python | /For_Practice/Factorial.py | 249 | 3.515625 | 4 | current_user = ['aditi', 'robb', 'stark', 'nedd']
new_users = ['aditi', 'thaons', 'thor', 'robb']
for new_user in new_users:
if new_user in current_user:
print(new_user + " Invited ")
else:
print(new_user + " Not Invited")
|
1db4afefac043ea4ecf4f84c07d687def774028f | martakedzior/python-course | /02-instructions/if_zadanie5.py | 722 | 4.03125 | 4 | # Create a variable password. It should consist of letters and digits, contain at least 1 capital letter, and be at least 8 characters long.
# Tell the user when the password is invalid; show different messages depending on the kind of error.
password = (input('Please provide your password: '))
if len(password) >= 8:
    # NOTE(review): isdigit()==False only means "not ALL digits" -- it does
    # not guarantee the password contains any letters.
    if password.isdigit() != True:
        # NOTE(review): islower()==False means "not all lowercase", which is
        # also true for digit-only strings; this does not prove an uppercase
        # letter is present.
        if password.islower() != True:
            print('Your password is correct')
        else:
            print('Your password should contain one big letter')
    else:
        print('Your password should contain letters and digits and have one big letter')
else:
    print('Your password should have 8 characters or more')
|
91476d8c964d9264a022af840e1a19586641dd1c | danielggc/python | /ejersiciosDiarios/parentesis.py | 2027 | 3.609375 | 4 | class permutacionParentesis():
    """Prints bracket-permutation rows to stdout.

    NOTE(review): loop variables are stored on `self` (self.i, self.d, ...),
    so instances are not safe to use from more than one call chain; the class
    also relies on the module-level helpers parentesisEsternos /
    parentesisserrados.  All behaviour is print-based.
    """
    def __init__(self,_numeroParentesis:int,_diferencia):
        self.parentesisabiertos=_diferencia
        self.numeroParentesis:int=_numeroParentesis
    def primeraCapa(self,_cantidadParentesis:int)->int:
        # First row: "()" repeated, terminated by "fin"
        self.cantidadParentesis=_cantidadParentesis
        self.respaldontidadParentesis=self.cantidadParentesis
        for self.d in range(0,self.cantidadParentesis):
            print("()",end="")
        print("fin",end="")
    def permutacionBacica(self):
        # Emit one row group per "layer" until every pair has been merged
        self.parentesisSobrantes=self.numeroParentesis
        self.numeroCapasTerminadas:int=1
        while self.numeroCapasTerminadas!=self.numeroParentesis:
            self.parentesisSobrantes-=1
            self.permutaciones()
    def permutaciones(self):
        # Slide the nested group across each position of the remaining pairs
        for self.i in range(0,self.parentesisSobrantes):
            parentesisEsternos(self.parentesisabiertos)
            for self.d in range(0,self.parentesisSobrantes):
                if self.d==self.i:
                    print("(",end="")
                    for self.y in range(0,self.numeroCapasTerminadas):
                        print("()",end="")
                    print(")",end="")
                else:
                    print("()",end="")
            parentesisserrados(self.parentesisabiertos)
            print("",end="fin")
            print("|",end="")
        self.numeroCapasTerminadas+=1
def parentesisEsternos(_numerentesis):
    """Print `_numerentesis` opening parentheses, with no newline."""
    for _ in range(_numerentesis):
        print("(",end="")
def parentesisserrados(_numerentesis):
    """Print `_numerentesis` closing parentheses, with no newline."""
    for _ in range(_numerentesis):
        print(")",end="")
numeroParentesis=4
permutaciones=permutacionParentesis(numeroParentesis,0)
respaldoParensis=numeroParentesis
permutaciones.primeraCapa(numeroParentesis)  # first row: plain "()" pairs
# One block of rows per remaining pair count, with extra outer brackets
while numeroParentesis!=0:
    print(" ")
    diferencia:int=respaldoParensis-numeroParentesis
    permutaciones=permutacionParentesis(numeroParentesis,diferencia)
    permutaciones.permutacionBacica()
    numeroParentesis-=1
|
76bb2838cea5784b861f334f3f782be9c8661570 | alejaksoto/JavaScript | /Poo/python/excersice/nombres.py | 356 | 3.765625 | 4 |
man1 =input('Cual es tu nombre')  # first person's name
edad1 = int(input('cual es tu edad'))  # first person's age
man2 = input('cual es tu nombre')
edad2 = int(input('cual es tu edad'))
# Report who is older, and by how many years
if edad1 > edad2:
    print(f'{man1} es mayor que {man2} por {edad1 - edad2} años')
elif edad2 > edad1:
    print(f'{man2} es mayor que {man1} por {edad2 - edad1} años')
else:
    print('tienen edad iguales') |
05b959c4689ca63461804d3cf08998e5eda1cc76 | TheAragont97/Python | /Actividades/Relacion 1/Relacion de ejercicios - Bucles/ejercicio 2/ejercicio_2.py | 118 | 3.75 | 4 | edad = int(input("¿Cuántos años tienes? "))
# One line per year of age, from 1 through `edad` inclusive
for i in range(edad):
    print("Has cumplido " + str(i+1) + " años")
|
4cabee1eadcc5b881ec24539c188d72e63914072 | Fifi1996/learn_py | /nature.py | 1282 | 4.40625 | 4 | '''
Instance attributes and class attributes.
Because Python is a dynamic language, attributes can be bound to an instance
freely -- either through an instance variable or through `self`.
'''
class Student(object):
    def __init__(self,name):
        self.name=name
s=Student('Bob')
s.score=90  # an attribute can be attached to an instance after creation
# What if the Student class itself needs an attribute?
# Define it in the class body: such a class attribute belongs to the class itself
class Student(object):
    name='Student'  # class attribute, shared by every instance
# Once a class attribute is defined, every instance can read it
s=Student() # create an instance
print(s.name) # the instance has no `name` of its own, so lookup falls through to the class
print(Student.name) # read the class's own `name` attribute
s.name='Michael' # bind an instance attribute `name` on s
print(s.name) # instance attributes shadow class attributes of the same name
# When writing code, never give an instance attribute and a class attribute the same name:
# the instance attribute hides the class one; delete it and the class attribute shows again
'''
Exercise:
to count students, add a class attribute to Student that is incremented
automatically every time an instance is created
'''
class Student(object):
    count=0  # class-level counter of instances created so far
    def __init__(self,name):
        self.name=name
        Student.count+=1
        print(self.count)  # NOTE(review): printing inside __init__ is a noisy side effect
bart=Student('Bart') |
6eae44913a58c23f0ca86b624be7ab071ca80f43 | cuyi/pylearn | /introduction/strings_toyi.py | 728 | 4.25 | 4 | #! /usr/bin/env python2.7
# -*- coding: utf8 -*-
# NOTE(review): Python 2 syntax throughout (print statements); this file
# will not run under Python 3.
mstr = 'hello qianjie'
print mstr
mmstr = 'hello piaoliang\n\
what r u nong sa li?'
print mmstr
hello = '''This is a rather long
string containing several lines of text.'''
print hello
hello1 = """This is a rather long
string containing several lines of text."""
print hello1
# Strings can be concatenated with the + operator, and repeated with *
word = 'Help' + 'Poor Yi'
print word
print word*5
# Strings can be subscripted (indexed)
print word[0]
print word[0:2]
# Unlike a C string, Python strings cannot be changed.
# The following code is WRONG
# word[0] = 'x'
# built-in function len() returns the length of a string:
s = 'I love you: qian jie'
print len(s)
|
e146b2ba07b0a1556ee330944e2b867209add8cd | spnow/XECryption | /XECrypt.py | 1,411 | 4.03125 | 4 | """
Simple number crunching/ASCII converting script to solve hackthissite.org's
Realistic Mission #6. Adds triplets of numbers together to get a valid ASCII value,
then subtracts the total ASCII value of the pass phrase to get the desired message.
Numbers are delimited by periods. Newlines are removed before processing.
WARNING: SOME ASCII CHARACTERS, ESPECIALLY NON-ALPHANUMERIC ONES, ARE CONTROL
SEQUENCES IN VARIOUS TERMINAL PROGRAMS (for example, ESC c and ESC ^ for OSX terminal).
CAREFULLY CHECK YOUR OUTPUT TO ENSURE NOTHING UNUSUAL HAS HAPPENED TO THE TEXT.
For more information, see del.py.
"""
from collections import Counter
def decrypt(one, two, three, passtotal):
    """Decode one character: add the three numbers, subtract the passphrase total."""
    triplet_sum = int(one) + int(two) + int(three)
    return triplet_sum - passtotal
inp = open("XEcrypt_text.txt", 'r').read()  # whole ciphertext; handle never closed (use `with` in new code)
passtotal=int(input("Enter the total sum of the passphrase's ASCII values: "))
nums = inp.replace("\n", "").split(".")
result = ""
nums.pop(0)  # presumably the text starts with '.', leaving an empty first element -- verify input format
asciiarray = list()
chararray = list()
# Consume the numbers three at a time; assumes the count is a multiple of 3
# (otherwise pop(0) raises IndexError) -- TODO confirm
while len(nums) > 0:
    intvalue = decrypt(nums.pop(0), nums.pop(0), nums.pop(0), passtotal)
    result = result + chr(intvalue)
    asciiarray.append(intvalue)
    chararray.append(chr(intvalue))
print(asciiarray)
print(chararray)
#extra stuff: step through the decoded text, one character per Enter press
for i in range(len(result)):
    input()
    print(result[i])
print("Pass: ", passtotal, " Result: ",result)
nums = inp.replace("\n", "").split(".")
nums.pop(0)
print(nums)
print(Counter(nums)) |
9758932f9f5ff8b7fd148cd62983aa083b14e434 | mihaivalentistoica/Python-Fundamentals | /Curs4/Exercitii_singuri.py | 1,805 | 3.578125 | 4 | '''
1. Creati clasa Animal cu 2 atribute private(mangled): nume si varsta. La initiere se dau valori default.
Sa se creeze getteri si setteri pentru ambele campuri.
Creati cel putin 2 obiecte si testati proprietatile create.
'''
class Animal:
    """Animal with a `nume` property plus explicit get/set methods for the age
    (both accessor styles are what the exercise statement asks for)."""

    def __init__(self):
        # private (name-mangled) state, unset until assigned
        self.__nume = self.__varsta = None

    @property
    def nume(self):
        """The animal's name."""
        return self.__nume

    @nume.setter
    def nume(self, nume_in):
        self.__nume = nume_in

    def get_varsta(self):
        """Return the stored age."""
        return self.__varsta

    def set_varsta(self, varsta_in):
        """Store the age."""
        self.__varsta = varsta_in
pisica = Animal() # create the two Animal instances used below
caine = Animal()
pisica.nume = 'Pisica'  # assigned through the property setter
pisica.set_varsta(0.1)  # assigned through the manual setter
caine.nume = 'Caine'
caine.set_varsta(3)
print(f'Animalul {pisica.nume} are o varsta de {pisica.get_varsta()}')
print(f'Animalul {caine.nume} are o varsta de {caine.get_varsta()}')
'''
2. Creati o clasa caine cu 2 atribute statice publice si 2 atribute de instanta protejate.
Faceti modificarea variabilelor din exteriorul clasei pentru 2 instante.
'''
'''
3. Creati o clasa Animal cu 1 atribut public nume si 1 atribut privat varsta.
Creati getteri si setteri pentru variabila privata.
Sa se verifice in cadrul setter-ului daca valoarea transmisa este pozitiva.
Daca valoarea nu este corecta, se afiseaza un mesaj corespunzator si nu se efectueaza atribuirea.
Sa se creeze 1 obiect(instanta) de tip animal si inca unul ce este o copie a primului( prin referinta).
Sa se modifice campurile celui de-al doilea obiect si sa se verifice modificarile aparute in cadrul ambelor obiecte create.
Sa se repete procedeul cu o alta copie a obiectului, de aceeasta data utilizandu-se 'deepcopy'.
'''
class Animal:
    """Stub for exercise 3 -- redefines (and replaces) the earlier Animal
    class; the validating setter the exercise asks for is not implemented."""
    def __init__(self):
        self.__nume = None
        self.__varsta = None
|
c60fc98d3d1c5e9b855b68bc497e6acd35ed00b3 | Rakshita-Shetty/Scrapy-Workflow | /pipelines.py | 1,053 | 3.671875 | 4 | import sqlite3
class BooksPipeline(object):
    """Scrapy item pipeline that persists book items into a local SQLite file."""
    def __init__(self):
        self.create_connection()
        self.create_table()
    def create_connection(self):
        # One shared connection/cursor for the whole crawl
        self.conn = sqlite3.connect("books.db")
        self.curr = self.conn.cursor()
    def create_table(self):
        # Recreate the table on every run: results from earlier runs are discarded
        self.curr.execute("""DROP TABLE IF EXISTS books_tb""")
        self.curr.execute(""" create table books_tb(
                        title text,
                        price text
                        )""")
    def process_item(self, item, spider):
        # Scrapy entry point: store the item and pass it along unchanged
        self.store_db(item)
        return item
    def store_db(self, item):
        # Parameterised insert -- safe against SQL injection
        self.curr.execute("""insert into books_tb values(?,?)""",
                          (
                              str(item['title']),
                              str(item['price']),
                              #str(item['image_url']),
                              #str(item['book_url'])
                          ))
        self.conn.commit()
    #we wont close the connection as we need to add multiple quotes
|
d7482fb55a2549fe25d911edfbdf8195dce00b7f | tsubauaaa/SpiralBookPython | /Part2/3-5.py | 734 | 3.859375 | 4 | def bubble_sort(A, N):
    """Bubble-sort the N cards in A in place by each card's second character
    (its numeric face) and return A.  Adjacent swaps only, so equal keys keep
    their original order (stable)."""
    flag = True
    i = 0
    while flag:
        flag = False
        for j in reversed(range(N)):
            if j == i:
                break  # prefix A[:i+1] is already settled
            if A[j][1] < A[j - 1][1]:
                A[j], A[j - 1] = A[j - 1], A[j]
                flag = True
        i += 1
    return A
def selection_sort(A, N):
    """Selection-sort the N cards in A in place by each card's second
    character and return A.  Long-range swaps can reorder equal keys, so
    this sort is not stable."""
    for i in range(N - 1):
        smallest = i
        for j in range(i, N):
            if A[j][1] < A[smallest][1]:
                smallest = j
        A[i], A[smallest] = A[smallest], A[i]
    return A
N = int(input())
A = input().split()
A_bub = A[:]  # copy so both sorts start from the same input order
A_bub = bubble_sort(A_bub, N)
A_sel = selection_sort(A, N)
print(*A_bub)
print("Stable")  # bubble sort with adjacent swaps is always stable
print(*A_sel)
print("Stable" if A_bub == A_sel else "Not stable")
|
c5eb05c6d1a9239661ada80c87897121cb7fc3bc | CodingEZ/Scrabble-AI | /helper.py | 874 | 3.953125 | 4 | def areValidLetters(letters):
for letter in letters:
if len(letter) != 1 and (letter not in 'qwertyuiopasdfghjklzxcvbnm'):
return False
return True
def areValidLocations(spaces):
    """Return True when every entry parses as an integer."""
    for space in spaces:
        try:
            int(space)
        except ValueError:
            return False
    return True
def binarySearch(target, elements):
    """Return True if `target` occurs in the ascending-sorted list `elements`.

    Classic O(log n) bisection.  FIXES: removed the unreachable trailing
    `print(low, middle, high)` (it referenced names that do not exist) and
    the unused `middle = 1` initialiser.
    """
    left = 0
    right = len(elements)
    if right == 0:
        return False
    # Invariant: if present, the target lies within elements[left:right]
    while (left + 1 != right):
        middle = (left + right + 1) // 2
        if target < elements[middle]:
            right = middle
        elif target > elements[middle]:
            left = middle
        else:
            return True
    # One candidate position remains
    return target == elements[left]
|
e65f169e08e6f78fe150cdd10c5ffe23915f6768 | YasinLiu/Data-structure | /example/timing1.py | 1,051 | 4 | 4 | """
File: timing1.py
Prints the running times for problem sizes that double,
using a single loop
"""
import time
problemSize = 100  # initial problem size; doubled after each timing round
iterations = 0  # cumulative count of work units across ALL calls (never reset)
def work1(problemSize):
    """Linear busy loop: perform `problemSize` units of trivial work, bumping
    the module-level `iterations` counter once per unit."""
    global iterations  # declared once for the whole function body
    scratch = 1
    for _ in range(problemSize):
        iterations += 1
        scratch += 1
        scratch -= 1
def work2(problemSize):
    """Quadratic busy loop: problemSize**2 units of trivial work, each one
    counted in the module-level `iterations` counter."""
    global iterations
    scratch = 1
    for _ in range(problemSize):
        for _ in range(problemSize):
            iterations += 1
            scratch += 1
            scratch -= 1
if __name__ == '__main__':
    # print('%12s%16s' % ('Problem Size', 'Seconds'))
    print('{:_>12}{:>16}{:>12}'.format('Problem Size', 'Seconds', 'iterations'))
    # Five timing rounds, doubling the problem size each time.
    # NOTE(review): `iterations` is never reset between rounds, so the last
    # column is cumulative, not per-round.
    for count in range(5):
        start = time.time()
        # The start of the algorithm
        work2(problemSize)
        # The end of the algorithm
        elapsed = time.time() - start
        # print('%12d%16.3f' % (problemSize, elapsed))
        print('{:>12d}{:16.3f}{:>12d}'.format(problemSize, elapsed, iterations))
        problemSize *= 2
|
5fdc2af8a3995e17c8c5518e12a4ff4674fd30fa | BioGeek/euler | /problem030.py | 887 | 3.953125 | 4 | # Surprisingly there are only three numbers that can be written as the sum of
# fourth powers of their digits:
#
# 1634 = 1^4 + 6^4 + 3^4 + 4^4
# 8208 = 8^4 + 2^4 + 0^4 + 8^4
# 9474 = 9^4 + 4^4 + 7^4 + 4^4
#
# As 1 = 1^4 is not a sum it is not included.
#
# The sum of these numbers is 1634 + 8208 + 9474 = 19316.
#
# Find the sum of all the numbers that can be written as the sum of fifth powers
# of their digits.
def equals_sum_of_power(number, power):
    """True when `number` equals the sum of the `power`-th powers of its digits."""
    digit_power_sum = sum(int(digit) ** power for digit in str(number))
    return number == digit_power_sum
# check for example values
assert equals_sum_of_power(1634, 4)
assert equals_sum_of_power(8208, 4)
assert equals_sum_of_power(9474, 4)
assert sum(i for i in range(2,10000) if equals_sum_of_power(i, 4)) == 19316
# Python-2 print statement; the full 10**6 scan takes ~7 s (see timings below)
print sum(i for i in range(2,1000000) if equals_sum_of_power(i, 5))
# 443839
#
# real 0m7.172s
# user 0m7.100s
# sys 0m0.040s
|
fa11f79d15661ea4e93d272fe577fdf0d7ae32c3 | MadisonStevens98/Python-Pyramid-Practice-Project | /Pyramid Practice.py | 877 | 4.21875 | 4 | def print_pyramid(type): ##required method
    """Print a 7-row '#' pyramid of the requested orientation, or an error line."""
    if type == "left": #selection statement for left
        print("# \n## \n### \n#### \n##### \n###### \n#######") #left pyramid
    elif type == "right": #selection for right
        print(" #\n ##\n ###\n ####\n #####\n ######\n #######")#right pyramid
    elif type == "both":#selection for both
        print(" ## \n ####\n ######\n ########\n ##########\n ############\n##############")#both pyramid
    else:#improper input
        print("Invalid input, please type right, left, or both")
        #tells user they messed up
def main():#main loop through program
    """Prompt and draw forever; there is no quit path (Ctrl+C to stop)."""
    while True:
        prompt()
def prompt():
    """Read the pyramid type from the user and render it."""
    choice = input("Please type right, left, or both")
    print_pyramid(choice)
if __name__ == '__main__':  # run only when executed directly
    main()
|
55094820e2faf29bb12b938721b5d1bc8a0b819b | Jasmined26/JCalc | /JCalc.py | 8,764 | 3.859375 | 4 | # Import modules
import tkinter
import tkinter.font as tkFont
from tkinter import *
# Button that swaps to a highlight colour while the pointer hovers over it.
class HoverButton1(tkinter.Button):
    """tkinter.Button that changes colour on mouse enter/leave."""

    def __init__(self, **kw):
        super().__init__(**kw)
        self.configure(bd=1, background='#88b5fc')
        self.defaultBackground = self['background']  # resting colour, restored on <Leave>
        self.bind('<Enter>', self.on_enter)
        self.bind('<Leave>', self.on_leave)

    def on_enter(self, e):
        self['background'] = '#4287f5'

    def on_leave(self, e):
        self['background'] = self.defaultBackground
# Hover button with a purple colour scheme.
class HoverButton2(HoverButton1):
    """HoverButton1 variant in purple."""

    def __init__(self, **kw):
        super().__init__(**kw)
        self.configure(background='#e3abff')
        self.defaultBackground = self['background']

    def on_enter(self, e):
        self['background'] = '#d278ff'
# Hover button with a green colour scheme.
class HoverButton3(HoverButton1):
    """HoverButton1 variant in green."""

    def __init__(self, **kw):
        super().__init__(**kw)
        self.configure(background='#63ff9a')
        self.defaultBackground = self['background']

    def on_enter(self, e):
        self['background'] = '#00bf43'
# GUI class
class standardCalculator:
    """Four-function calculator window built with tkinter.

    The expression string is assembled exclusively from button presses; see
    _evaluate for the note on why eval() is tolerable here.
    """
    def __init__(self):
        # Create main window
        self.main_window = tkinter.Tk()
        # Window design/attributes
        self.main_window['background'] = '#0d0063'
        self.main_window.attributes('-alpha', 0.95)
        self.main_window.title('JCalc')
        self.main_window.minsize(250, 300)
        # Window size
        w = 350
        h = 600
        # Settings to place window in middle of screen when ran
        ws = self.main_window.winfo_screenwidth()
        hs = self.main_window.winfo_screenheight()
        x = (ws/2) - (w/2)
        y = (hs/2) - (h/2)
        self.main_window.geometry('%dx%d+%d+%d' % (w, h, x, y))
        # Font settings to be used with window widgets
        window_font1 = tkFont.Font(family = 'Bahnschrift', size = 20)
        window_font2 = tkFont.Font(family = 'Bahnschrift Light', size = 16)
        # StringVars backing the expression and result labels
        self.expressionVar = tkinter.StringVar()
        self.resultVar = tkinter.StringVar()
        # Labels for expression and result
        self.expression_label = tkinter.Label(bg = '#0d0063', fg = '#f4f2ff', textvariable = self.expressionVar, font = ('Bahnschrift', 16), anchor = 'e')
        self.result_label = tkinter.Label(bg = '#0d0063', fg = '#f4f2ff', textvariable = self.resultVar, font = ('Bahnschrift', 46), anchor = 'e')
        # Digit buttons
        self.zero_btn = HoverButton1(text = '0', command = lambda: self.update_input(self.zero_btn), font = window_font1)
        self.one_btn = HoverButton1(text = '1', command = lambda: self.update_input(self.one_btn), font = window_font1)
        self.two_btn = HoverButton1(text = '2', command = lambda: self.update_input(self.two_btn), font = window_font1)
        self.three_btn = HoverButton1(text = '3', command = lambda: self.update_input(self.three_btn), font = window_font1)
        self.four_btn = HoverButton1(text = '4', command = lambda: self.update_input(self.four_btn), font = window_font1)
        self.five_btn = HoverButton1(text = '5', command = lambda: self.update_input(self.five_btn), font = window_font1)
        self.six_btn = HoverButton1(text = '6', command = lambda: self.update_input(self.six_btn), font = window_font1)
        self.seven_btn = HoverButton1(text = '7', command = lambda: self.update_input(self.seven_btn), font = window_font1)
        self.eight_btn = HoverButton1(text = '8', command = lambda: self.update_input(self.eight_btn), font = window_font1)
        self.nine_btn = HoverButton1(text = '9', command = lambda: self.update_input(self.nine_btn), font = window_font1)
        # Operation buttons
        self.add_btn = HoverButton2(text = '+', command = lambda: self.update_input(self.add_btn), font = window_font2)
        self.sub_btn = HoverButton2(text = '-', command = lambda: self.update_input(self.sub_btn), font = window_font2)
        self.mult_btn = HoverButton2(text = '*', command = lambda: self.update_input(self.mult_btn), font = window_font2)
        self.div_btn = HoverButton2(text = '/', command = lambda: self.update_input(self.div_btn), font = window_font2)
        self.eq_btn = HoverButton3(text = '=', command = self.equals, font = window_font2)
        self.dec_btn = HoverButton1(text = '.', command = lambda: self.update_input(self.dec_btn), font = window_font2)
        # Delete/Clear buttons
        self.del_btn = HoverButton3(text = 'DEL', command = self.delete_entry, font = window_font2)
        self.clear_btn = HoverButton3(text = 'C', command = self.clear, font = window_font2)
        # Configure column weights
        self.main_window.columnconfigure(0, weight = 3)
        self.main_window.columnconfigure(1, weight = 3)
        self.main_window.columnconfigure(2, weight = 3)
        self.main_window.columnconfigure(3, weight = 3)
        # Configure row weights
        self.main_window.rowconfigure(0, weight = 1)
        self.main_window.rowconfigure(1, weight = 3)
        self.main_window.rowconfigure(2, weight = 3)
        self.main_window.rowconfigure(3, weight = 3)
        self.main_window.rowconfigure(4, weight = 3)
        self.main_window.rowconfigure(5, weight = 3)
        self.main_window.rowconfigure(6, weight = 3)
        # Grid widgets
        self.expression_label.grid(row = 0, column = 0, rowspan = 1, columnspan = 4, sticky = 'NSEW')
        self.result_label.grid(row = 1, column = 0, rowspan = 1, columnspan = 4, sticky = 'NSEW')
        self.zero_btn.grid(row = 6, column = 0, sticky = 'NSEW', padx = 1, pady = 1)
        self.one_btn.grid(row = 5, column = 0, sticky = 'NSEW', padx = 1, pady = 1)
        self.two_btn.grid(row = 5, column = 1, sticky = 'NSEW', padx = 1, pady = 1)
        self.three_btn.grid(row = 5, column = 2, sticky = 'NSEW', padx = 1, pady = 1)
        self.four_btn.grid(row = 4, column = 0, sticky = 'NSEW', padx = 1, pady = 1)
        self.five_btn.grid(row = 4, column = 1, sticky = 'NSEW', padx = 1, pady = 1)
        self.six_btn.grid(row = 4, column = 2, sticky = 'NSEW', padx = 1, pady = 1)
        self.seven_btn.grid(row = 3, column = 0, sticky = 'NSEW', padx = 1, pady = 1)
        self.eight_btn.grid(row = 3, column = 1, sticky = 'NSEW', padx = 1, pady = 1)
        self.nine_btn.grid(row = 3, column = 2, sticky = 'NSEW', padx = 1, pady = 1)
        self.add_btn.grid(row = 6, column = 3, sticky = 'NSEW', padx = 1, pady = 1)
        self.sub_btn.grid(row = 5, column = 3, sticky = 'NSEW', padx = 1, pady = 1)
        self.mult_btn.grid(row = 4, column = 3, sticky = 'NSEW', padx = 1, pady = 1)
        self.div_btn.grid(row = 3, column = 3, sticky = 'NSEW', padx = 1, pady = 1)
        self.eq_btn.grid(row = 6, column = 2, sticky = 'NSEW', padx = 1, pady = 1)
        self.dec_btn.grid(row = 6, column = 1, sticky = 'NSEW', padx = 1, pady = 1)
        self.del_btn.grid(row = 2, column = 3, sticky = 'NSEW', padx = 1, pady = 1)
        self.clear_btn.grid(row = 2, column = 2, sticky = 'NSEW', padx = 1, pady = 1)
        tkinter.mainloop()  # NOTE(review): blocks here until the window closes

    def _evaluate(self):
        """Evaluate the current expression and return it formatted.

        Results longer than 10 characters are rendered in scientific
        notation.  May raise on malformed expressions ('5+', division by
        zero, ...); callers decide how to report that.
        NOTE: eval() is tolerable only because the expression can contain
        nothing but the calculator's own button characters.
        """
        result = eval(self.expressionVar.get())
        if (len(str(result)) > 10):
            result = "{:.5e}".format(result)
        return result

    def update_input(self, btn):
        """Append the pressed button's text to the expression, refresh preview."""
        self.expressionVar.set(self.expressionVar.get() + btn['text'])
        self.get_result()

    def get_result(self):
        """Show a live preview of the expression's value (blank while incomplete)."""
        try:
            self.resultVar.set(self._evaluate())
        except Exception:  # was a bare `except:`; narrowed so SystemExit etc. pass through
            self.resultVar.set('')

    def clear(self):
        """'C' button: wipe both the expression and the preview."""
        self.resultVar.set('')
        self.expressionVar.set('')

    def delete_entry(self):
        """'DEL' button: drop the last character and refresh the preview."""
        self.expressionVar.set(self.expressionVar.get()[:-1])
        self.get_result()

    def equals(self):
        """'=' button: replace the expression with its value, or report bad input."""
        try:
            self.expressionVar.set(self._evaluate())
            self.resultVar.set('')
        except Exception:  # was a bare `except:`
            self.resultVar.set('Invalid input')
calc1 = standardCalculator() |
de11d111ad16b1cca3b43af4be2ccc4145faf4eb | rakiasomai/holbertonschool-higher_level_programming | /0x05-python-exceptions/2-safe_print_list_integers.py | 333 | 3.609375 | 4 | #!/usr/bin/python3
def safe_print_list_integers(my_list=[], x=0):
    """Print the integers among the first `x` elements of `my_list` on one
    line, then a newline; return how many integers were printed.

    A TypeError (e.g. non-indexable my_list) is printed rather than raised;
    an IndexError from x > len(my_list) is deliberately allowed to propagate
    (Holberton task requirement).  The mutable default is never mutated.
    """
    printed = 0
    try:
        for position in range(x):
            element = my_list[position]
            if isinstance(element, int):
                printed += 1
                print("{:d}".format(element), end="")
    except TypeError as err:
        print(err)
    else:
        print("")
    return printed
|
d154192d969a63c1803ec25e947c3822b2e06c5a | yhasansenyurt/PythonAssignments-PythonOdevleri | /problem sets/problem1/matrix.py | 4426 | 4.125 | 4 | #########################################################################################
# QUESTION IV
# Description: This program operates matrices in different ways and analyzes maximum
#              value of matrix.
#########################################################################################
print("SOLUTION OF QUESTION IV:")  # banner
print("\n")  # spacing (prints two newlines)
import random
import sys
def readMatrix(numberOfRows, numberOfColumns, file):
    """Read a numberOfRows x numberOfColumns integer matrix from `file`,
    one space-separated row per line; raises on short rows."""
    matrix = []
    for _ in range(numberOfRows):
        row_values = [int(token) for token in file.readline().split(' ')]
        matrix.append([row_values[col] for col in range(numberOfColumns)])
    return matrix
def printMatrix(matrix):
    """Print the matrix, each value right-aligned in a 5-character field."""
    for row in matrix:
        for value in row:
            print(format(value, "5d"), end = " ")
        print()  # newline at the end of each row
def fillMatrixRandomly(numberOfRows, numberOfColumns):
    """Return a numberOfRows x numberOfColumns matrix of random ints 0..99."""
    return [
        [random.randint(0, 99) for _ in range(numberOfColumns)]
        for _ in range(numberOfRows)
    ]
def generateZeroMatrix(numberOfRows, numberOfColumns):
    """Return a numberOfRows x numberOfColumns matrix filled with zeros.

    Bug fix: the original swapped the two dimensions, producing a
    numberOfColumns x numberOfRows matrix (unnoticed for square input).
    """
    return [[0 for _ in range(numberOfColumns)] for _ in range(numberOfRows)]
def addMatrix(A, B):
    """Return the element-wise sum A + B (A and B must have equal shape).

    Built directly as a comprehension; the original allocated the result
    via generateZeroMatrix, whose swapped dimensions made non-square
    addition mis-index.
    """
    return [[A[r][c] + B[r][c] for c in range(len(A[r]))] for r in range(len(A))]
def multiplyMatrix(A, B):
    """Return the matrix product A * B.

    Bug fix: the product is len(A) x len(B[0]); the original sized and
    iterated the output as len(A) x len(A[0]), correct only for square
    matrices.
    """
    rows, inner, cols = len(A), len(B), len(B[0])
    product = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        for j in range(cols):
            for k in range(inner):
                product[i][j] += A[i][k] * B[k][j]
    return product
def transpose(A):
    """Return the transpose of A, with shape len(A[0]) x len(A).

    Built directly as a comprehension; the original only produced the
    right shape because generateZeroMatrix's swapped dimensions happened
    to cancel out.
    """
    return [[A[i][j] for i in range(len(A))] for j in range(len(A[0]))]
def maxOfElements(A):
    """Return the largest value in the (non-empty) matrix A.

    Uses the builtin max over per-row maxima instead of the original
    hand-rolled sentinel loop; a fully empty matrix now raises ValueError
    rather than returning -sys.maxsize - 1.
    """
    return max(max(row) for row in A)
def subtractMatrix(A, B):
    """Return the element-wise difference A - B (equal shapes required).

    Built directly as a comprehension, avoiding the swapped-dimension
    allocation of the original for non-square input.
    """
    return [[A[r][c] - B[r][c] for c in range(len(A[r]))] for r in range(len(A))]
# Redirect standard output device (console) to output.txt file
# print statements will write into output.txt file
# NOTE(review): neither this handle nor inputs.txt below is ever closed;
# fine for a short script, but `with open(...)` would be safer.
sys.stdout = open('output.txt', 'w')
print("\nReading data from inputs.txt file in current directory\n")
f = open("inputs.txt","r")
# Read Matrix A: first line holds "rows cols", then the matrix body.
line = f.readline()
numberOfRows , numberOfColumns = [int (x) for x in line.split(' ')]
A = readMatrix(numberOfRows , numberOfColumns, f)
print(" **** Matrix A **** ")
printMatrix(A)
# Read Matrix B
line = f.readline()
numberOfRows, numberOfColumns = [int(x) for x in line.split(' ')]
B = readMatrix(numberOfRows, numberOfColumns, f)
print(" **** Matrix B **** ")
printMatrix(B)
# Read Matrix C
line = f.readline()
numberOfRows, numberOfColumns = [int(x) for x in line.split(' ')]
C = readMatrix(numberOfRows, numberOfColumns, f)
print(" **** Matrix C **** ")
printMatrix(C)
# Generate 4x4 matrix from random numbers.
# (The shape actually follows the last rows/cols read, i.e. matrix C's.)
D = fillMatrixRandomly(numberOfRows, numberOfColumns)
print(" **** Matrix D **** ")
printMatrix(D)
# Compute S = (A+B) * Transpose(C) + D - A
print("\n *** Computing S = (A+B) * Transpose(C) + D) - A *** \n")
# T1 = A + B
def main():
    """Print every intermediate matrix and the final S, plus max(S).

    Relies on the module-level matrices A, B, C, D and writes through the
    redirected sys.stdout into output.txt.
    """
    T1 = addMatrix(A, B)
    T2 = transpose(C)
    T3 = multiplyMatrix(T1, T2)
    T4 = addMatrix(T3, D)
    S = subtractMatrix(T4, A)
    print(" **** MatriX T1 = (A+B) ****")
    print()
    printMatrix(T1)
    print()
    print(" **** MatriX T2 = Transpose (C) ****")
    print()
    printMatrix(T2)
    print()
    print(" **** MatriX T3 = (A+B) * Transpose (C) ****")
    print()
    printMatrix(T3)
    print()
    print(" **** MatriX T4 = (A+B) * Transpose (C) + D ****")
    print()
    printMatrix(T4)
    print()
    print(" **** MatriX S = ((A+B) * Transpose (C) + D) - A ****")
    print()
    printMatrix(S)
    print()
    print("Maximum Element in S =", maxOfElements(S))


main()
|
02fa8f5552fe61ded4562647aa2fe64605d1e7f8 | NipunGarg01/Python | /Python Debugger.py | 231 | 3.59375 | 4 | import pdb;pdb.set_trace()
x=[1,2,3]
y=3
z=9
# NOTE(review): `sum` shadows the builtin sum() — rename in real code.
sum=y+z
pdb.set_trace() # Set the trace to where you see issue is....then we will reach at python debugger.
# The next line raises TypeError (list + int) — the bug this demo inspects.
sum1=x+y
print(sum)
print(sum1)
|
9182e8aae119937443c5ea4b4c96809aceaf5613 | eminkartci/HistorySimulation | /src/Community.py | 2,848 | 4.0625 | 4 | # import os library
import os
class Community:
    """A community with an id, title, location, population and religion.

    Construction prints a summary and saves it under db/Community/ (the
    directory must already exist) as a side effect.
    """

    # construction
    def __init__(self, id, title, location, population, religion):
        self.id = id
        self.title = title
        self.location = location
        self.population = population  # Population Amount
        self.people = []              # People Objects (filled elsewhere)
        self.religion = religion      # Religion String -> Will be an object in future
        self.print_community_console()  # echo the new community
        self.save_community_txt()       # persist it immediately

    # print object to the screen
    def print_community_console(self, willPrint=True):
        """Build self.communityContent; print it unless willPrint is False."""
        self.communityContent = f"""
------ community {self.id} ------
| Title : {self.title}
| Location : {self.location}
| Population : {self.population}
| Religion : {self.religion}
"""
        # default value -> True; pass False to only (re)build the text
        if willPrint:
            print(self.communityContent)

    # save object as txt file
    def save_community_txt(self):
        """Write the community summary to db/Community/<id>_<title>.txt."""
        fileName = str(self.id) + "_" + self.title + ".txt"
        filePath = os.getcwd() + "/db/Community/"
        f = open(filePath + fileName, "w")
        f.write(self.communityContent)
        f.write("\n\n@2021 All Rights Reserved Emin K & Asya B")
        f.close()

    # read a community object from a txt file
    @staticmethod
    def read_community_txt(id, title):
        """Load a community saved by save_community_txt and return it.

        Bug fixes vs the original: declared @staticmethod (there is no
        self parameter, so an instance call used to misbind the first
        argument), the file handle is closed, and the parsed Community is
        returned instead of being discarded.
        """
        fileName = str(id) + "_" + title + ".txt"
        filePath = os.getcwd() + "/db/Community/"
        f = open(filePath + fileName, "r")
        currentcommunityContent = f.read()
        f.close()
        # Split on ':' — the first fragment is the header banner, the
        # rest start with one field value each (first line of fragment).
        contentArr = currentcommunityContent.split(":")
        contentArr = contentArr[1:]
        communityInfo = []
        for line in contentArr:
            lineArr = line.split("\n")
            # value is the remainder of the line, minus leading spaces
            communityInfo.append(lineArr[0].lstrip())
        # title, location, population, religion (in file order)
        currentcommunity = Community(id, communityInfo[0], communityInfo[1],
                                     communityInfo[2], communityInfo[3])
        return currentcommunity
|
5e877a0c47970af4cb1b8cc71a76b9e7d1234c67 | namekun/pythonStudy | /ch03_For_Set/집합.py | 719 | 3.609375 | 4 | # 집합(set)은 리스트와 같이 정보를 여러개 넣어서 보관할 수 있는 파이썬의 기능이다.
# 다만 집합 하나에는 같은 자료가 중복되어 들어가지 않고, 자료의 순서도 의미가 없다는 점이 리스트와 다르다.
s = set()
s.add(1)
s.add(2)
s.add(2) # 이미 있는 값이기에 중복되서 들어가지 않는다.
print(s)
len(s)
# len(x) : 집합의 길이를 구하는 방법
# add(x) : 집합에 자료 x를 추가합니다.
# discard(x) : 집합에 자료 x가 들어 있다면 삭제합니다. 없다면 변화없음
# clear() : 집합의 모든 자료를 지운다.
# x in s : 어떤 자료 x가 집합 s에 들어가 있는지 확인, 반대는 not in
|
89ea1af39768fd3c0f7ea63b36fa507b67d66233 | tapans/Algorithms-Puzzles-Challenges | /CTCI_6e/8.1_triple_step.py | 1,001 | 4.125 | 4 | #!/usr/bin/python
import unittest
def triple_step(n, memo=None):
    """Count the distinct ways a child can run up n stairs hopping 1, 2
    or 3 steps at a time.

    Negative n yields 0 and n == 0 counts as one (empty) way.  `memo`
    caches subresults; omit it for a fresh cache.

    Bug fix: the original used a mutable default argument (memo={}), so
    one cache was silently shared across every top-level call.

    Time complexity: O(n).  Space complexity: O(n).
    """
    if memo is None:
        memo = {}
    if n < 0:
        return 0
    if n == 0:
        return 1
    if n not in memo:
        memo[n] = (triple_step(n - 1, memo)
                   + triple_step(n - 2, memo)
                   + triple_step(n - 3, memo))
    return memo[n]
class Test_Tripe_Step(unittest.TestCase):
    """Unit tests for triple_step (class-name typo kept for compatibility)."""

    def test_regular_case(self):
        # assertEqual: the assertEquals alias was deprecated and removed
        # in Python 3.12.
        self.assertEqual(1, triple_step(1))
        self.assertEqual(2, triple_step(2))
        self.assertEqual(4, triple_step(3))
        self.assertEqual(7, triple_step(4))
        self.assertEqual(13, triple_step(5))


if __name__ == '__main__':
    unittest.main()
74d6140d60bc8b68a714b541070a31853e5e82bf | lludu/100DaysOfCode-Python | /Day 02 - Tip Calculator/Exercise 3.py | 425 | 3.6875 | 4 | #This is day 2, understanding data types and string Manipulation
# Exercise 3 - Your Life in weeks:
# Bug fix: the original evaluated `90 - int(age)` before `age` was ever
# assigned (NameError); the course exercise reads it from the user.
age = input("What is your current age? ")
years_left = 90 - int(age)
months = years_left * 12
weeks = years_left * 52
days = years_left * 365
# F STRINGS ARE AWESOME!
print(f'You have {days} days, {weeks} weeks, and {months} months left.')
# link to exercise
# https://replit.com/@Lludu/day-2-3-exercise#main.py
# link to exercise checker
# https://replit.com/@Lludu/day-2-3-test-your-code#main.py
|
3827f6116cb68c34aae886051b5dc99f02466206 | Gabriel-Mbugua/Coding-Questions | /Python/SumOfOddNumbers/solution.py | 271 | 3.765625 | 4 | #my solution
# my solution: gather the first n odd numbers belonging to row n
def row_sum_odd_numbers(n):
    """Sum of the numbers in row n of the odd-number triangle."""
    candidates = range(n * (n - 1), n * (n + 2))
    odds = [value for value in candidates if value % 2 != 0]
    return sum(odds[:n])


row_sum_odd_numbers(3)
# actual solution: row n of the odd-number triangle always sums to n**3
def row_sum_odd_numbers(n):
    """Sum of row n of the odd-number triangle, via the closed form n^3."""
    return n * n * n
8cc1ec9fd86341dc0e29d2594738e83b7f228e7e | Iqrar99/Project-Euler | /problem39.py | 689 | 3.5625 | 4 | """
To make the computation faster, we need to do arithmetic approach first.
- a^2 + b^2 = c^2 (1)
- a + b + c = p (2)
we can rewrite them as
c = p - a - b
a^2 + b^2 = (p - a - b)^2 = p^2 +a^2 + b^2 - 2pa -2pb -2ab
b = p(p - 2a) / 2(p - a)
"""
def main():
    """Project Euler 39: find p <= 1000 with the most right triangles.

    For a + b + c = p with a^2 + b^2 = c^2, b = p(p - 2a) / (2(p - a)),
    so integer solutions are counted by a divisibility test per (p, a).
    """
    best_count = 0
    answer = 0
    for p in range(2, 1001, 2):
        solutions = sum(
            1
            for a in range(2, p // 3)
            if (p * (p - 2 * a)) % (2 * (p - a)) == 0
        )
        if solutions > best_count:
            best_count = solutions
            answer = p
    print(f"answer : {answer}")


if __name__ == "__main__":
    main()
|
eee0e6f11b89398c591d536cb9d011df480884b7 | yifange/leetcode | /remove_duplicates_from_sorted_list.py | 1,054 | 3.625 | 4 | # Definition for singly-linked list.
class ListNode:
    """Singly-linked list node: a payload value and a next pointer."""
    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # following node, or None at the tail
class Solution:
    """Remove every value that occurs more than once in a sorted list."""

    # @param head, a ListNode
    # @return a ListNode
    def deleteDuplicates(self, head):
        if not head:
            return head
        # Sentinel predecessor for the real head; its value is chosen so
        # it can never compare equal to head.val.
        sentinel = ListNode(head.val - 1)
        sentinel.next = head
        prev = sentinel
        node = head
        while node:
            probe = node.next
            duplicated = False
            while probe and probe.val == node.val:
                probe = probe.next
                duplicated = True
            if duplicated:
                prev.next = probe  # splice out the whole run of equal values
            else:
                prev = node        # keep this unique node
            node = probe
        result = sentinel.next
        del sentinel
        return result
# Smoke test: 1->1->1->3->4->4->5 keeps only the unique values 3 and 5.
sol = Solution()
n1 = ListNode(1)
n2 = ListNode(1)
n3 = ListNode(1)
n4 = ListNode(3)
n5 = ListNode(4)
n6 = ListNode(4)
n7 = ListNode(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
n5.next = n6
n6.next = n7
p = sol.deleteDuplicates(n1)
while p:
    # Bug fix: the Python-2 statement `print p.val` is a syntax error on
    # Python 3; the call form below works on both.
    print(p.val)
    p = p.next
|
2518b6486c596d60f6e9e631a03a9d140471bdcb | heyyitzaashi02/JISAssasins | /Untitled11.py | 211 | 3.859375 | 4 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# program to reverse the word order of a sentence
# (the sentence is hard-coded here rather than accepted from the user)
msg = "you are a good girl"
reversed_words = list(reversed(msg.split(" ")))
print(' '.join(reversed_words))
# In[ ]:
|
a261a039383a8bfa9d09b47841a4d071e1449d5c | Natumeme/Leetcode | /easy/20_isvalid.py | 805 | 3.9375 | 4 | #!usr/bin/env python
#-*- coding:utf-8 -*-
'''
给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效。
有效字符串需满足:
左括号必须用相同类型的右括号闭合。
左括号必须以正确的顺序闭合。
'''
class Solution:
    def isValid(self, s):
        """Return True when every bracket in s is correctly matched.

        A closing bracket must pair with the most recent unmatched opener,
        tracked on a stack (seeded with None so peeking is always safe).
        """
        pairs = {")": "(", "}": "{", "]": "["}
        stack = [None]
        for ch in s:
            if ch in pairs and stack[-1] == pairs[ch]:
                stack.pop()
            else:
                stack.append(ch)
        # Only the None sentinel remains iff everything matched.
        return len(stack) == 1
if __name__=='__main__':
    # Quick manual check: a single balanced pair should print True.
    x=Solution()
    s="()"
    print(x.isValid(s))
'''
知识补充:
栈的作用
1.内栈管理中使用的堆栈
2.基于栈实现的二叉树的遍历
3.在处理需求中的平衡问题:
a.判断符号是成堆出现的,比如()
b.判断这个字符串是否是回文字符串
''' |
d513ffd2a10041f42fc9f983ffd537860e63a45f | LiseIL/SDDproject-G1 | /Queue.py | 2,147 | 4.0625 | 4 | ################
# Python 3.5 File
# Group 1
# created oct. 24th 2018
# last modif oct. 24th 2018
# Queue.py
################
# A queue is a list where insertions happen at one end and deletions
# happen at the other end.
# [Lise]
# 1- Are we sure the values stored in our queue must be of type int?
#    Why not floats? Or strings?
# 2- I have a small concern about the method type signatures.
#    For getList(), for example, I would have written:
#    """Queue -> List"""
#    Do you agree?
# 3- For dequeue, do we allow applying the method to an empty queue?
class Queue:
    """FIFO queue backed by a plain Python list.

    Values enter at the right-hand end (enqueue) and leave from the
    left-hand end (dequeue).
    """

    def __init__(self, list):
        """Build a queue from a list; [] gives the empty queue."""
        self.list = list

    def getList(self):
        """Return the underlying list of queued values."""
        return self.list

    def setList(self, value):
        """Replace the underlying list (the argument must be a list)."""
        assert isinstance(value, list)
        self.list = value

    def isEmpty(self):
        """Return True when the queue holds no values."""
        return self.getList() == []

    def enqueue(self, value):
        """Append an int at the tail (right-hand side) of the queue."""
        assert isinstance(value, int)
        self.setList(self.getList() + [value])

    def dequeue(self):
        """Drop the head (left-hand side) value; do nothing when empty."""
        if not self.isEmpty():
            self.setList(self.getList()[1:])

    def peek(self):
        """Return, without removing, the head value (queue must be non-empty)."""
        assert not self.isEmpty()
        return self.getList()[0]
|
df77d9c94a8993ed14edb483ef33785491224d26 | mosot624/ProgrammingChallenges | /RecursiveChallegen.py | 668 | 3.796875 | 4 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 10:15:33 2019
@author: Michael
"""
#http://www1.udel.edu/CIS/181/pconrad/05S/examples/recursion/recursion.problems
def changetoString(var, n, counter):
    """Accumulate the digits held in `var` into var[0] and return the total.

    `var` is mutated in place; when n == 1 the single digit is returned
    directly (coerced to int).  Despite the name, no string is produced.
    """
    if n == 1:
        return int(var[0])
    if counter == len(var):
        return var[0]
    var[0] += var[counter]
    return changetoString(var, n, counter + 1)
if __name__ == "__main__":
    # Demo: split 888 into digits and print their sum (24).
    vad = 888
    val = []
    # NOTE(review): val is initialised twice; the first assignment is dead.
    vas = list(str(vad))
    val = []
    for i in vas:
        val.append(int(i))
    print(changetoString(val,len(val),1))
|
22d7a123b034a3b8c8e117be179bcfde85d479fb | DudekKonrad/Python2020 | /Lab2/2.15.py | 165 | 3.703125 | 4 | numbers_list = [5, 23, 654, 26, 45875, 12, 3, 56, 3]
# Concatenate the decimal representations of every number in the list.
result = "".join(str(number) for number in numbers_list)
print("Result:", result)
d1d7d831531bae6f182d8d4d56369cbdf68dab92 | Wenzurk-Ma/Python-Crash-Course | /Chapter 04/foods.py | 436 | 3.609375 | 4 | # Title : TODO
# Objective : TODO
# Created by: Wenzurk
# Created on: 2018/2/5
my_foods = ['pizza', 'falafel', 'carrot cake']
# This would NOT work — both names would refer to the same list:
# friend_foods = my_foods
# A slice copy gives each person an independent list to append to.
friend_foods =my_foods[:]
my_foods.append('cannoli')
friend_foods.append('ice cream')
print("My favorite foods are:")
for my_food in my_foods:
    print(my_food)
print("My friend's favorite foods are:")
for friend_food in friend_foods:
    print(friend_food)
90885d5dd5dba2ef4ef093b4b94cbe3826e9e211 | eudaemonic-one/Lifelong-Learning | /LeetCode/Python3/unique-binary-search-trees-ii.py | 770 | 3.78125 | 4 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def generateTrees(self, n: int) -> List[TreeNode]:
        """Return every structurally unique BST storing the values 1..n.

        NOTE(review): for n == 0 this returns None, although the
        annotated return type suggests an empty list — confirm against
        the caller's expectations.
        """
        def dfs(start, end):
            # Build all BSTs over the half-open value range [start, end).
            if start >= end:
                return [None]
            ans = []
            for i in range(start, end):  # i becomes the root value
                for l in dfs(start, i):      # every possible left subtree
                    for r in dfs(i+1, end):  # every possible right subtree
                        root = TreeNode(i)
                        root.left = l
                        root.right = r
                        ans.append(root)
            return ans
        if n == 0:
            return None
        return dfs(1, n+1)
|
2a950a911ecf66bcdeb9438a01c3336564d11018 | mglerner/MathematicalPhysics | /WavesOnStrings/both_waves_with_sum.py | 1,365 | 3.875 | 4 | #!/usr/bin/env python
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
# First set up the figure, the axis, and the plot element we want to animate
outerlim, innerlim = 4, 2          # x-extent of the full plot / of the sum trace
v1, v2 = 0.01, -0.013453456        # wave speeds (per-frame phase shifts)
fig = plt.figure()
ax = plt.axes(xlim=(-outerlim, outerlim), ylim=(-2, 2))
line1, = ax.plot([], [], lw=2, color='blue', alpha=0.5)   # wave 1
line2, = ax.plot([], [], lw=2, color='red', alpha=0.5)    # wave 2
line3, = ax.plot([], [], lw=2, color='green', )           # superposition
# initialization function: plot the background of each frame
def init():
    line1.set_data([], [])
    line2.set_data([], [])
    line3.set_data([], [])
    return line1, line2, line3
# animation function. This is called sequentially
x = np.linspace(-outerlim, outerlim, 2000)
x_both = np.linspace(-innerlim, innerlim, 1000)
def animate(t):
    # Two sine waves with slightly different wavenumbers; the green trace
    # is a sum over the inner interval only.
    # NOTE(review): y3 uses v2 (with opposite signs) for BOTH terms, not
    # v1 and v2 — possibly a deliberate standing-wave demo; confirm.
    y1 = np.sin(2 * np.pi * (1.1*x - v1 * t))
    y2 = np.sin(2 * np.pi * (x - v2 * t))
    y3 = np.sin(2 * np.pi * (1.1*x_both - v2 * t)) + np.sin(2 * np.pi * (x_both + v2 * t))
    line1.set_data(x, y1)
    line2.set_data(x, y2)
    line3.set_data(x_both, y3)
    return line1, line2, line3
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=100, interval=20, blit=True)
plt.show()
|
d415ed990d000664bd5bfd4ed652088f4839365e | madelgi/learning | /books/think_stats/ch1.py | 1,933 | 4.3125 | 4 | import survey
"""
Exercise 1.3
In this exercise, we will explore the data in the Pregnancies
table
"""
def ex13():
    """Exercise 1.3 part 1: report how many pregnancy records exist.

    Python 2 code (print statement) — keep the interpreter version in mind.
    """
    table = survey.Pregnancies()
    table.ReadRecords()
    print 'Number of pregnancies', len(table.records)
# Part 2 of ex13. This function iterates through Pregnancies and counts
# the number of live births.
def loop_table():
    """Count pregnancy records whose outcome code is 1 (live birth)."""
    table = survey.Pregnancies()
    table.ReadRecords()
    live_births = 0
    for record in table.records:
        if getattr(record, 'outcome') == 1:
            live_births += 1
    print(live_births)
# Part 3 of ex1.3. Like part 2, but the live births are divided into two
# groups: first-born children and all others.
def loop_table_groups():
    """Return (first_born_count, other_count) over all live births."""
    table = survey.Pregnancies()
    table.ReadRecords()
    first = 0
    other = 0
    for record in table.records:
        if getattr(record, 'outcome') != 1:
            continue  # only live births are counted
        if getattr(record, 'birthord') == 1:
            first += 1
        else:
            other += 1
    return first, other
# Part 4 of ex1.3. Average pregnancy length for the babies in each of the
# two live-birth groups.
def pregnancy_lengths():
    """Return (avg_first_born_length, avg_other_length) over live births."""
    table = survey.Pregnancies()
    table.ReadRecords()
    first_total, first_count = 0, 0
    other_total, other_count = 0, 0
    for record in table.records:
        if getattr(record, 'outcome') != 1:
            continue  # only live births contribute
        if getattr(record, 'birthord') == 1:
            first_total += getattr(record, 'prglength')
            first_count += 1
        else:
            other_total += getattr(record, 'prglength')
            other_count += 1
    # float() keeps true division under Python 2 as well.
    return float(first_total) / first_count, float(other_total) / other_count
if __name__ == '__main__':
    # Python 2 print statement: shows the (first, other) averages tuple.
    print pregnancy_lengths()
|
613c4ff4124869c98dee20deca33ac160d8edbe6 | dials/dials | /src/dials/algorithms/scaling/reflection_selection.py | 16,547 | 3.953125 | 4 | """
Algorithm to select a well connected subset of reflections for scaling.
Description of reflection selection algorithm. To get the best 'connectedness',
we want to select groups of reflections which belong to more than one class.
A class in this case is a volume of reciprocal space (e.g 12 areas on the
surface of a sphere) or a class can be a dataset (i.e. n classes for n datasets).
First we construct the following matrix of classes vs symmetry groups. For
example, this matrix describes 3 datasets with 7 symmetry unique groups.
symmetry groups
0 1 2 3 4 5 6
0 3 3 2 0 1 1 1
classes 1 0 2 0 0 3 2 1
2 2 1 1 5 0 4 0
Here the entries of the matrix are the number of reflections belonging to the
group and class. Then, the matrix is sorted by the number of classes that each
group covers, i.e. the number of nonzero entries in the column:
number of nonzero entries: [2, 3, 2, 1, 2, 3, 2]
sorted matrix:
symmetry groups
1 5 0 2 4 6 3
0 3 1 3 2 1 1 0
classes 1 2 2 0 0 3 1 0
2 1 4 2 1 0 0 5
Now, we choose a target number of reflections per class e.g. 5. To choose the
reflection groups, we start with the first column.
number of chosen reflections per class: [3, 2, 1]
symmetry groups used: [1]
To determine the next group to add, we search for the first group (matrix column)
that has a reflection in the least populated class so far i.e. class 2.
In this case, the first unused group is group 5:
number of chosen reflections per class: [4, 4, 5]
symmetry groups used: [1, 5]
In this way, we build up the dataset by choosing the highest-connected groups
that have a reflection in the most-deficient class.
Next we need to add a group with a reflection in class 0 (first we find is group 0):
number of chosen reflections per class: [7, 4, 7]
symmetry groups used: [1, 5, 0]
Next we need to add a group with a reflection in class 1 (first we find is group 4):
number of chosen reflections per class: [8, 7, 7]
symmetry groups used: [1, 5, 0, 4]
We have now reached our target for all classes and can therefore stop.
The four symmetry groups are the highest connected groups that give use good
coverage across all classes, and are therefore the best reflections to use for
minimisation. If there were fewer reflections in one class than the target,
then this algorithm will add all groups with reflections in that class and then
continue with the remaining classes.
For single dataset minimisation, this algorithm is used to select reflection
groups with good reciprocal space coverage, repeated across resolution bins.
For multi dataset minimisation, this algorithm is also used to select highly
connected reflections between datasets. The reflections used for minimisation
are those which are selected by either method - inter-dataset connectedness or
intra-dataset connectedness.
"""
from __future__ import annotations
import logging
from math import floor
import numpy as np
from dxtbx import flumpy
from scitbx import sparse
from dials.algorithms.scaling.scaling_utilities import (
BadDatasetForScalingException,
Reasons,
)
from dials.array_family import flex
from dials.util import tabulate
logger = logging.getLogger("dials")
def _build_class_matrix(class_index, class_matrix, offset=0):
for (i, val) in enumerate(class_index, start=offset):
class_matrix[val, i] = 1.0
return class_matrix
def _select_groups_on_Isigma_cutoff(Ih_table, cutoff=2.0):
"""Select groups with multiplicity>1, Isigma>cutoff"""
sumIsigm = Ih_table.sum_in_groups(
Ih_table.intensities / np.sqrt(Ih_table.variances)
)
n = Ih_table.group_multiplicities()
avg_Isigma = sumIsigm / n
sel = avg_Isigma > cutoff
sel2 = n > 1
if not sel2.any():
raise SystemExit(
"""
Could not find any cross-dataset connected reflections with multiplicity > 1,
scaling not possible."""
)
sel &= sel2
if not sel.any():
logger.warning(
"""
Warning: Could not select any reflections for <I/sI> > %s.
Reducing Isigma_cutoff to zero to attempt continuation.""",
cutoff,
)
sel = avg_Isigma > 0.0 & sel2
if not sel.any():
raise SystemExit(
"""
Could not find any cross-dataset connected groups with <I/sI> > 0,
scaling not possible."""
)
sel_Ih_table = Ih_table.select_on_groups(sel)
return sel_Ih_table
def _perform_quasi_random_selection(
    Ih_table, n_datasets, min_per_class, min_total, max_total
):
    """Pick the most-connected symmetry groups until every dataset (class)
    reaches min_per_class reflections, within the min/max total bounds.

    Returns (reflection indices, dataset ids, number of groups used,
    per-dataset reflection totals).
    """
    # Membership matrix: one row per dataset, one column per reflection.
    class_matrix = sparse.matrix(n_datasets, Ih_table.size)
    class_matrix = _build_class_matrix(
        flumpy.from_numpy(Ih_table.Ih_table["dataset_id"].to_numpy()), class_matrix
    )
    # Datasets-per-symmetry-group matrix (classes x groups).
    segments_in_groups = class_matrix * Ih_table.h_index_matrix
    total = flex.double(segments_in_groups.n_cols, 0)
    for i, col in enumerate(segments_in_groups.cols()):
        total[i] = col.non_zeroes  # number of datasets touching this group
    # Sort so the most widely-shared groups come first.
    perm = flex.sort_permutation(total, reverse=True)
    sorted_class_matrix = segments_in_groups.select_columns(perm)
    # matrix of segment index vs asu groups
    # now want to fill up until good coverage across board
    total_in_classes, cols_not_used = _loop_over_class_matrix(
        sorted_class_matrix, min_per_class, min_total, max_total
    )
    cols_used = flex.bool(sorted_class_matrix.n_cols, True)
    cols_used.set_selected(cols_not_used, False)
    # Map the chosen (sorted) columns back to original group indices.
    actual_cols_used = perm.select(cols_used)
    # now need to get reflection selection
    reduced_Ih = Ih_table.select_on_groups(actual_cols_used)
    indices_this_res = reduced_Ih.Ih_table["loc_indices"]
    dataset_ids_this_res = reduced_Ih.Ih_table["dataset_id"]
    n_groups_used = len(actual_cols_used)
    return (
        flumpy.from_numpy(indices_this_res.to_numpy()),
        flumpy.from_numpy(dataset_ids_this_res.to_numpy()),
        n_groups_used,
        total_in_classes,
    )
def select_connected_reflections_across_datasets(
    Ih_table, experiment, Isigma_cutoff=2.0, min_total=40000, n_resolution_bins=20
):
    """Select highly connected reflections across datasets.

    Filters groups on multiplicity and <I/sI>, then runs the quasi-random
    group selection independently in each resolution bin.  Returns
    (reflection indices, dataset ids).
    """
    assert Ih_table.n_work_blocks == 1
    Ih_table = Ih_table.Ih_table_blocks[0]
    sel_Ih_table = _select_groups_on_Isigma_cutoff(Ih_table, Isigma_cutoff)
    # now split into resolution bins
    sel_Ih_table.setup_binner(
        experiment.crystal.get_unit_cell(),
        experiment.crystal.get_space_group(),
        n_resolution_bins,
    )
    binner = sel_Ih_table.binner
    # prepare parameters for selection algorithm.
    n_datasets = len(set(sel_Ih_table.Ih_table["dataset_id"].to_numpy()))
    min_per_class = min_total / (n_datasets * 4.0)
    max_total = min_total * 1.2
    logger.info(
        """
Using quasi-random reflection selection. Selecting from %s symmetry groups
with <I/sI> > %s (%s reflections)). Selection target of %.2f reflections
from each dataset, with a total number between %.2f and %.2f.
""",
        sel_Ih_table.n_groups,
        Isigma_cutoff,
        sel_Ih_table.size,
        min_per_class,
        min_total,
        max_total,
    )
    # split across resolution bins: per-bin targets are 1/n of the totals
    mpc = int(min_per_class / n_resolution_bins)
    mint = int(min_total / n_resolution_bins)
    maxt = int(max_total / n_resolution_bins)
    header = ["d-range", "n_groups", "n_refl"] + [str(i) for i in range(n_datasets)]
    rows = []
    if n_datasets >= 15:
        # With many datasets the per-dataset columns get unwieldy; log a
        # condensed summary instead (the full table goes to debug).
        summary_rows = []
        summary_header = ["d-range", "n_groups", "n_refl"]
    indices = flex.size_t()
    dataset_ids = flex.size_t()
    total_groups_used = 0
    n_cols_used = 0
    for ibin in binner.range_all():
        sel = binner.selection(ibin)
        res_Ih_table = sel_Ih_table.select(flumpy.to_numpy(sel))
        if not res_Ih_table.Ih_table.size:
            continue  # empty resolution bin
        (
            indices_this_res,
            dataset_ids_this_res,
            n_groups_used,
            total_per_dataset,
        ) = _perform_quasi_random_selection(res_Ih_table, n_datasets, mpc, mint, maxt)
        indices.extend(indices_this_res)
        dataset_ids.extend(dataset_ids_this_res)
        total_groups_used += n_groups_used
        d0, d1 = binner.bin_d_range(ibin)
        drange = str(round(d0, 3)) + " - " + str(round(d1, 3))
        n_refl = str(int(indices_this_res.size()))
        rows.append(
            [drange, str(n_groups_used), n_refl]
            + [str(int(i)) for i in total_per_dataset]
        )
        if n_datasets >= 15:
            summary_rows.append([drange, str(n_groups_used), n_refl])
        n_cols_used += n_groups_used
    logger.info(
        "Summary of cross-dataset reflection groups chosen (%s groups, %s reflections):",
        n_cols_used,
        indices.size(),
    )
    if n_datasets < 15:
        logger.info(tabulate(rows, header))
    else:
        logger.info(tabulate(summary_rows, summary_header))
        logger.debug(tabulate(rows, header))
    return indices, dataset_ids
def _loop_over_class_matrix(
    sorted_class_matrix, min_per_area, min_per_bin, max_per_bin
):
    """Build up the reflection set by looping over the class matrix.

    Greedily adds the most-connected groups (columns) that cover the
    currently least-populated class (row); returns the per-class totals
    and the columns left unused.
    """

    def _get_next_row_needed(total_in_classes):
        # Index of the currently least-populated class.
        current_min = flex.min(total_in_classes)
        for i, val in enumerate(total_in_classes):
            if val == current_min:
                row_needed = i
                break
        return row_needed

    def _add_next_column(
        cols_not_used, row_needed, sorted_class_matrix, total_in_classes
    ):
        # Take the first unused (i.e. most-connected) group that has a
        # reflection in the needed class; update the running totals.
        for i, col in enumerate(cols_not_used):
            if sorted_class_matrix.col(col)[row_needed] != 0.0:
                total_in_classes += sorted_class_matrix.col(col).as_dense_vector()
                del cols_not_used[i]
                return cols_not_used, total_in_classes, True
        # else couldn't find enough of this one!
        return cols_not_used, total_in_classes, False

    # Start with the most-connected group (column 0) already included.
    total_in_classes = sorted_class_matrix.col(0).as_dense_vector()
    # "defecit" (sic) tracks, per class, how far short of min_per_area we
    # are once a class has run out of groups.
    defecit = flex.double(sorted_class_matrix.n_rows, 0)
    cols_not_used = flex.size_t(range(1, sorted_class_matrix.n_cols))
    total_deficit = 0
    while (
        flex.min(total_in_classes) < min_per_area
        and (flex.sum(total_in_classes) - total_deficit) < max_per_bin
    ):
        # first find which class need most of
        row_needed = _get_next_row_needed(total_in_classes)
        # now try to add the most-connected column that includes that class
        cols_not_used, total_in_classes, success = _add_next_column(
            cols_not_used, row_needed, sorted_class_matrix, total_in_classes
        )
        # return whether successful, updated totals and which cols are left.
        if not success:
            # want to stop looking for that class as no more left;
            # pretend it is full and record the shortfall.
            current_in_row = total_in_classes[row_needed]
            defecit[row_needed] = min_per_area - current_in_row
            total_deficit += min_per_area - current_in_row
            total_in_classes[row_needed] = min_per_area
    if flex.sum(total_in_classes) > max_per_bin:
        # if we have reached the maximum, then finish there
        return total_in_classes - defecit, cols_not_used
    total_in_classes -= defecit
    n = flex.sum(total_in_classes)
    # if we haven't reached the minimum total, then need to add more until we
    # reach it or run out of reflections
    if n < min_per_bin and cols_not_used:
        # how many have deficit? (i.e. no more left?)
        c = sum(1 for d in defecit if d != 0.0)
        n_classes = sorted_class_matrix.n_rows
        # Scale the per-class target up so the overall minimum is met by
        # the classes that still have groups available.
        multiplier = int(floor(min_per_bin * (n_classes - c) / (n * n_classes)) + 1)
        new_limit = min_per_area * multiplier  # new limit per area
        for i, d in enumerate(defecit):
            if d != 0.0:
                # don't want to be searching for those classes that we know dont have any left
                total_in_classes[i] = new_limit
                defecit[i] = d + new_limit - min_per_area
        while cols_not_used and flex.min(total_in_classes) < new_limit:
            row_needed = _get_next_row_needed(total_in_classes)
            cols_not_used, total_in_classes, success = _add_next_column(
                cols_not_used, row_needed, sorted_class_matrix, total_in_classes
            )
            if not success:
                current_in_row = total_in_classes[row_needed]
                defecit[row_needed] = new_limit - current_in_row
                total_in_classes[row_needed] = new_limit
        return total_in_classes - defecit, cols_not_used
    return total_in_classes, cols_not_used
def _determine_Isigma_selection(reflection_table, params):
    """Select reflections whose I/sigma lies in the configured range.

    An upper bound of 0.0 means "no upper bound".  Returns the selection
    and a human-readable reason string for reporting.
    """
    i_over_sigma = reflection_table["intensity"] / flex.sqrt(
        reflection_table["variance"]
    )
    low, high = params.reflection_selection.Isigma_range
    selection = i_over_sigma > low
    if high == 0.0:
        reason = f"in I/sigma range (I/sig > {low})"
    else:
        selection &= i_over_sigma < high
        reason = f"in I/sigma range ({high} > I/sig > {low})"
    return selection, reason
def _determine_partiality_selection(reflection_table, params):
min_partiality = params.reflection_selection.min_partiality
selection = reflection_table["partiality"] > min_partiality
reason = f"above min partiality ( > {min_partiality})"
return selection, reason
def _determine_d_range_selection(reflection_table, params):
d_min, d_max = params.reflection_selection.d_range
d_sel = reflection_table["d"] > d_min
d_sel &= reflection_table["d"] < d_max
reason = f"in d range ({d_max} > d > {d_min})"
return d_sel, reason
def _determine_E2_range_selection(reflection_table, params):
Elow, Ehigh = params.reflection_selection.E2_range
sel1 = reflection_table["Esq"] > Elow
sel2 = reflection_table["Esq"] < Ehigh
Esq_sel = sel1 & sel2
reason = f"in E^2 range ({Ehigh} > E^2 > {Elow})"
return Esq_sel, reason
def calculate_scaling_subset_ranges(reflection_table, params, print_summary=False):
    """Select reflections passing the user-controllable range criteria.

    Applies the I/sigma, partiality and d-range filters; raises
    BadDatasetForScalingException when nothing survives.
    """
    selection, reasons = _common_range_selections(Reasons(), reflection_table, params)
    if print_summary:
        logger.info(
            "%s reflections were preselected for scale factor determination \n"
            + "out of %s suitable reflections: \n%s",
            selection.count(True),
            reflection_table.size(),
            reasons,
        )
    if selection.count(True) == 0:
        raise BadDatasetForScalingException(
            """No reflections pass all user-controllable selection criteria"""
        )
    return selection
def _common_range_selections(reasons, reflection_table, params):
    """Combine the I/sigma, partiality and d-range filters.

    Each filter's pass-count is recorded in `reasons`; returns the
    combined selection and the updated reasons object.
    """
    selection, reason = _determine_Isigma_selection(reflection_table, params)
    reasons.add_reason(reason, selection.count(True))
    if "partiality" in reflection_table:
        # Partiality is only present for some experiment types.
        sel, reason = _determine_partiality_selection(reflection_table, params)
        reasons.add_reason(reason, sel.count(True))
        selection &= sel
    if params.reflection_selection.d_range:
        sel, reason = _determine_d_range_selection(reflection_table, params)
        reasons.add_reason(reason, sel.count(True))
        selection &= sel
    return selection, reasons
def calculate_scaling_subset_ranges_with_E2(reflection_table, params):
    """Select reflections with non-zero weight and update scale weights."""
    reasons = Reasons()
    # Start from everything not already excluded by the user or by the
    # earlier suitability checks.
    selection = ~reflection_table.get_flags(
        reflection_table.flags.user_excluded_in_scaling
    )
    selection &= ~reflection_table.get_flags(
        reflection_table.flags.excluded_for_scaling
    )
    reasons.add_reason("suitable/selected for scaling", selection.count(True))
    # An "Esq" column that is all 1.0 means normalised intensities were
    # never computed, so the E^2 filter is skipped in that case.
    if reflection_table["Esq"].count(1.0) != reflection_table.size():
        sel, reason = _determine_E2_range_selection(reflection_table, params)
        reasons.add_reason(reason, sel.count(True))
        selection &= sel
    sel, reasons = _common_range_selections(reasons, reflection_table, params)
    selection &= sel
    logger.info(
        "%s reflections were selected for scale factor determination \n"
        + "out of %s suitable reflections: \n%s",
        selection.count(True),
        reflection_table.size(),
        reasons,
    )
    if selection.count(True) == 0:
        raise BadDatasetForScalingException(
            """No reflections pass all user-controllable selection criteria"""
        )
    return selection
|
8443c8be3aa1b0be20d3755901e55f098d2c8370 | green-fox-academy/judashgriff | /Week 3/Day 3/line_play_quarters.py | 1,336 | 3.59375 | 4 | from tkinter import *
root = Tk()
canvas = Canvas(root, width='300', height='300')
canvas.pack()
# divide the canvas into 4 equal parts
# and repeat this pattern in each quarter:
# [https://github.com/greenfox-academy/teaching-materials/blob/master/workshop/drawing/line-play/r1.png]
def make_colorful_lines():
    """Draw the line-fan motif in each 150x150 quarter of the canvas.

    Every quarter gets two fans of 15 lines (one green, one purple) with
    endpoints spaced 10 pixels apart along the quarter's edges.
    """
    # d sweeps 0, 10, ..., 140: the same endpoint offsets the original
    # layout produced via 10 + (i - 1) * 10 for i in range(15).
    for d in range(0, 150, 10):
        canvas.create_line(0, d, d, 150, fill='green')
    for d in range(0, 150, 10):
        canvas.create_line(d + 140, 0, 300, d, fill='purple')
    for d in range(0, 150, 10):
        canvas.create_line(0, d + 150, d, 300, fill='green')
    for d in range(0, 150, 10):
        canvas.create_line(d + 140, 150, 300, d + 150, fill='purple')
    for d in range(0, 150, 10):
        canvas.create_line(d, 0, 150, d, fill='purple')
    for d in range(0, 150, 10):
        canvas.create_line(150, d, d + 150, 150, fill='green')
    for d in range(0, 150, 10):
        canvas.create_line(d, 150, 150, d + 150, fill='purple')
    for d in range(0, 150, 10):
        canvas.create_line(150, d + 150, d + 150, 300, fill='green')
# Draw the pattern once before entering the Tk event loop.
make_colorful_lines()
root.mainloop() |
49926b528cc201dab20b03b51d5475075b80d936 | noahbjohnson/FileTypeCounter | /filecount.py | 4,307 | 3.734375 | 4 | import os
def changedirectory():
    """Show the current working directory and optionally change it.

    Prompts until the user answers Y or N.  Returns the working directory
    in effect when the function finishes: the old one if the user declines,
    the newly entered one otherwise.
    """
    cwd = os.getcwd()
    print("The current directory is:", cwd)
    while True:
        changeboolian = input("Would you like to change the working directory? (Y/N)")
        if changeboolian in ("Y", "N"):
            break
        # Typo fix: the original prompt had a stray closing parenthesis.
        print("Please type either 'N' or 'Y' to proceed")
    if changeboolian == "N":
        return cwd
    wd = input("What would you like to change the directory to? (Must be an absolute path)")
    os.chdir(wd)
    print("Changed directory!")
    # Bug fix: this branch previously returned None while the other branch
    # returned a path; now both branches return the active directory.
    return os.getcwd()
def getdirectories():
    """Return the names of all entries in the current working directory.

    Note: despite the name, this returns files as well as directories,
    exactly as os.listdir does; callers filter with os.path.isdir later.
    """
    # The original copied os.listdir element by element into a new list;
    # os.listdir already returns a fresh list.
    return os.listdir(os.getcwd())
def getfiles(directories):
    """Collect file names from the given subdirectories and from the cwd.

    *directories* holds entry names relative to the current working
    directory; entries that are not directories are skipped.  Returns a
    flat list of file names (basenames only, so duplicates are possible).
    """
    filelist = []
    originaldirectory = os.getcwd()
    for directory in directories:
        # Deliberately keep "+"-style path building: when an absolute path
        # is passed (the "no subdirectories" case), the concatenation does
        # not name a real directory and the branch is skipped, which avoids
        # double-counting the top-level files appended below.
        newWD = originaldirectory + "/" + directory
        if os.path.isdir(newWD):
            for entry in os.listdir(newWD):
                # Bug fix: only append actual files; previously nested
                # directories were counted as extensionless files.
                if os.path.isfile(newWD + "/" + entry):
                    filelist.append(entry)
    for entry in os.listdir(originaldirectory):
        if os.path.isfile(originaldirectory + "/" + entry):
            filelist.append(entry)
    return filelist
def hasextension(file):
    """Return True if the file name contains at least one period."""
    # Idiom fix: membership test instead of a manual character scan.
    return "." in file
def getextension(file):
    """Return the text after the last period in *file*, without the dot.

    Callers only invoke this after hasextension() is True, so *file* is
    assumed to contain at least one period.
    """
    # Bug fix: the original slice used a negative-zero index for names that
    # end in a period ("name." returned the whole name); rsplit correctly
    # yields "" in that case.
    return file.rsplit(".", 1)[1]
def getExtensionList(filelist):
    """Map each file name to its extension, or "NONE" when it has none."""
    return [
        getextension(name) if hasextension(name) else "NONE"
        for name in filelist
    ]
def countExtensions(extensionlist):
    """Return a dict mapping each extension to its occurrence count.

    Keys appear in first-occurrence order, matching the original
    hand-rolled set/dict implementation.
    """
    # Idiom fix: Counter replaces the manual seen-set plus increment loop.
    from collections import Counter

    return dict(Counter(extensionlist))
def writetofile(extensioncount, path):
    """Write the extension counts to <path>/output.csv.

    The file starts with a "File Type,Count" header followed by one
    "<extension>,<count>" line per entry.
    """
    # Bug fix: the file handle was never closed; use a context manager so
    # the data is flushed even if a write fails.
    with open(path + "/" + "output.csv", 'w') as output:
        output.write("File Type,Count\n")
        for key in extensioncount:
            output.write(str(key) + "," + str(extensioncount[key]) + "\n")
def _ask_yes_no(prompt):
    """Prompt until the user answers exactly 'Y' or 'N'; return the answer."""
    while True:
        answer = input(prompt)
        if answer in ("Y", "N"):
            return answer
        # Typo fix: the original message had a stray closing parenthesis.
        print("Please type either 'N' or 'Y' to proceed")


def filecount():
    """Run one extension-counting pass and write the results to a CSV.

    Asks whether to include first-level subdirectories, counts the file
    extensions found, then asks where to save the output.csv.
    """
    # Decomposition: the duplicated Y/N validation loops are now one helper.
    if _ask_yes_no("Would you like to include subdirectories? (Y/N)") == "Y":
        directories = getdirectories()
    else:
        # A single absolute path makes getfiles() fall through to the
        # top-level file scan only.
        directories = [os.getcwd()]
    filelist = getfiles(directories)
    extensioncount = countExtensions(getExtensionList(filelist))
    # Typo fix: "to to save" -> "to save".
    if _ask_yes_no("Would you like to save the output csv to a different folder? (Y/N)") == "Y":
        path = input("What is the path to the directory? (must be an absolute path)")
    else:
        path = os.getcwd()
    writetofile(extensioncount, path)
    return
def main():
    """Top-level menu loop for the Filetype Lister tool.

    Loops forever, dispatching on the user's choice; only the 'exit'
    option leaves the program (via exit()).
    """
    while True:
        print("Welcome to the Filetype Lister 1.0")
        choice = input("Type 'R' to run the tool, 'D' to change directory, or 'exit' to exit the tool: ")
        if choice == "R":
            confirm = input("Press enter to continue or type anything else to return to the menu")
            if confirm == "":
                filecount()
        elif choice == "D":
            changedirectory()
        elif choice == "exit":
            print("Now exiting the tool, Goodbye.")
            exit()
        else:
            print("Error: Input not recognized, returning to the menu.")


main()
|
def main():
    """Compute the future value of an investment with annual compounding.

    Reads the principal, annual interest rate and whole number of years
    from stdin, then prints the compounded amount.
    """
    print("This program calculates the future value of an investment.")
    print()
    # Security/bug fix: eval(input()) executes arbitrary user-typed code;
    # float()/int() parse the expected numeric input safely.  years must
    # be an int so range() accepts it.
    principal = float(input("Enter the initial principal: "))
    apr = float(input("Enter the annual interest rate(APR): "))
    years = int(input("How many years are you investing? "))
    for _ in range(years):
        principal = principal * (1 + apr)
    print("The amount in ", years, "year investment is: ", principal)
    print("Press <Enter> to quit.")


main()
|
# Running totals: people over 18, male registrations, women under 20.
totalmaioridade = totalhomem = totalmulhernova = 0
while True:
    idade = int(input("Qual a idade da pessoa cadastrada? "))
    sexo = ""
    while sexo not in ("M", "F"):
        resposta = input("Qual o sexo da pessoa cadastrada [M/F]? ").strip().upper()
        # Bug fix: guard against empty input before taking the first
        # character (the original crashed with IndexError on blank input).
        if resposta:
            sexo = resposta[0]
    if idade > 18:
        totalmaioridade += 1
    if sexo == "M":
        totalhomem += 1
    if sexo == "F" and idade < 20:
        totalmulhernova += 1
    desejo = ""
    while desejo not in ("S", "N"):
        # Typo fix: the prompt's mismatched "[S/N}" bracket is now "[S/N]".
        resposta = input("Deseja prosseguir [S/N]? ").strip().upper()
        if resposta:
            desejo = resposta[0]
    if desejo == "N":
        break
print(f"""Foram cadastrados:
-{totalmaioridade} pessoas com mais de 18 anos.
-{totalhomem} homens foram cadastrados.
-{totalmulhernova} mulheres com menos de 20 anos foram cadastradas.""")
1ca4c758cd0de961041b90833d7a8337749f5e5a | Wytzepakito/Rosalind | /Expected_offsprings.py | 533 | 3.640625 | 4 | #! /usr/bin/python37
"""
Author: Wytze Gelderloos
Date: 29-7-2019
Calculating expected offspring.
This script calculates the expected number of offspring having the dominant
phenotype. Looking to Mendel's diagrams will give the proper answer for this
question.
"""
string= "18852 19128 19528 18742 17855 19195"
numbers = list(map(int,string.split(" ")))
new_pop=0
new_pop += numbers[0]*2
new_pop += numbers[1]*2
new_pop += numbers[2]*2
new_pop += numbers[3]*2*0.75
new_pop += numbers[4]*2*0.5
new_pop += numbers[5]*2*0
print(new_pop)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.