text stringlengths 37 1.41M |
|---|
# Definition for singly-linked list.
class ListNode(object):
    """One node of a singly-linked list."""

    def __init__(self, x):
        # Payload value; a freshly built node is detached (no successor).
        self.val = x
        self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode

        Find the node where two singly-linked lists intersect (by
        identity/equality of nodes), or None when they do not.
        Strategy: measure both lengths, advance the longer list by the
        difference, then walk both in lockstep until the nodes match.
        """
        def _length(node):
            # Count the nodes reachable from `node`.
            total = 0
            while node:
                total += 1
                node = node.next
            return total

        lenA = _length(headA)
        lenB = _length(headB)
        shorter, longer = (headA, headB) if lenA < lenB else (headB, headA)
        # Skip the surplus prefix of the longer list.
        for _ in range(abs(lenA - lenB)):
            longer = longer.next
        # Lockstep walk: the first equal pair is the intersection.
        while shorter and longer:
            if shorter == longer:
                return shorter
            shorter, longer = shorter.next, longer.next
# Definition for a binary tree node.
class TreeNode:
    """One node of a binary tree."""

    def __init__(self, x):
        # Payload value; children start empty.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def minDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Minimum depth = number of nodes on the shortest root-to-leaf
        path; 0 for an empty tree.  Tracks the best leaf depth seen so
        far on the instance (same contract as the original).
        """
        self.depth = 0  # 0 means "no leaf reached yet"

        def visit(node, level):
            # Depth-first walk; record `level` whenever a leaf is hit.
            if not node:
                return
            if node.left is None and node.right is None:
                if self.depth == 0 or level < self.depth:
                    self.depth = level
                return
            visit(node.left, level + 1)
            visit(node.right, level + 1)

        visit(root, 1)
        return self.depth
class RomanToInt(object):
    def romanToInt(self, s):
        """Convert a Roman numeral string to an integer.

        Greedily consumes two-character subtractive pairs ("CM", "IX",
        ...) before single symbols.  Returns 0 for an empty string or
        any string containing an unknown symbol (original behavior).

        :type s: str
        :rtype: int
        """
        # Fix: renamed from `dict`, which shadowed the builtin.
        values = {"M": 1000, "CM": 900, "D": 500,
                  "CD": 400, "C": 100, "XC": 90,
                  "L": 50, "XL": 40, "X": 10, "IX": 9,
                  "V": 5, "IV": 4, "I": 1}
        i, res = 0, 0
        while i + 1 < len(s):
            if s[i:i + 2] in values:
                res += values[s[i:i + 2]]
                i += 2
            elif s[i] in values:
                res += values[s[i]]
                i += 1
            else:
                return 0
        # The loop stops one symbol short of the end; add a trailing
        # single symbol if one remains.
        if i == len(s) - 1 and s[i] in values:
            res += values[s[i]]
        return res
class Solution:
    def addDigits(self, num):
        """
        :type num: int
        :rtype: int

        Digital root: repeatedly summing decimal digits is equivalent
        to num % 9, with 9 substituted when the remainder is 0 (and 0
        kept for num == 0).
        """
        if num == 0:
            return 0
        remainder = num % 9
        return remainder if remainder else 9
def else_example(item_list):
    # Demo of the for/else clause: the else block runs only when the loop
    # finishes without returning early.
    # NOTE(review): indentation was lost in this copy; reconstructed as a
    # for/else demo (consistent with the try/else demo below and with the
    # function's name) — confirm against the original source.
    for item in item_list:
        if item == 'banana':
            return 'Banana found'
    else:
        # Prints (does not raise) a ValueError instance.
        print(ValueError('No banana flavor found!'))

my_list_1 = ['apple', 'banana', 'peach']
my_list_2 = ['apple', 'peach', 'blueberry']
print('my_list_1:')
print(else_example(my_list_1))
print('my_list_2:')
print(else_example(my_list_2), '\n')
# try/else demo: the except branch never runs because else_example only
# prints the ValueError, it never raises it — so the else branch runs.
try:
    else_example(my_list_2)
except ValueError:
    print('no banana found')
else:
    print('banana tear-down after ValueError raised')
|
from collections import namedtuple

# Summary of one group: number of terms received and their mean.
Result = namedtuple('Result', 'count average')

# the subgenerator:
def averager():
    """Coroutine accumulating a running average.

    Values arrive via send(); sending None terminates the coroutine,
    whose return value (carried by StopIteration.value) is a Result.
    """
    running_total = 0.0
    received = 0
    mean = None
    while True:
        value = yield
        if value is None:
            # Sentinel: stop accumulating and report.
            break
        running_total += value
        received += 1
        mean = running_total / received
    return Result(received, mean)
# the delegating generator
def grouper(results, key):
    # Each loop iteration delegates to a fresh averager(); `yield from`
    # forwards values sent to this generator into the subgenerator and
    # captures the subgenerator's return value, stored under `key` once
    # the averager is terminated (by sending None).
    while True:
        results[key] = yield from averager()
# the client code, a.k.a. the caller
def main(data):
    # Drives one grouper coroutine per key.  Sending None terminates the
    # inner averager so its Result is recorded in `results` before the
    # next key is processed.
    results = {}
    for key, values in data.items():
        group = grouper(results, key)
        next(group)  # prime the coroutine up to its first yield
        for value in values:
            group.send(value)
        group.send(None)  # important! finishes the current averager
        # print (results) # uncomment to debug
    report(results)
# output report
def report(results):
    """Print one summary line per key (sorted); keys look like 'group;unit'."""
    for key in sorted(results):
        summary = results[key]
        group, unit = key.split(';')
        print(f'{summary.count:2} {group:5} averaging {summary.average:.2f} {unit}')
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 1 15:55:08 2018
@author: RickyLi
"""
import numpy as np

def factors(n):
    """Return the proper divisors of int(n), excluding 1 and n itself."""
    n = int(n)
    return [candidate for candidate in range(2, n) if n % candidate == 0]
def fraction(n):
    """Convert a decimal number to a ratio in lowest terms.

    Returns (numerator, denominator) as floats, e.g. fraction(0.75)
    -> (3.0, 4.0).

    Review fixes:
    - integer inputs (str(n) has no '.') previously treated the whole
      number as a "decimal part" and mis-scaled the result;
    - reduction now uses math.gcd; the old common-factor search crashed
      with np.max([]) whenever numerator and denominator were coprime,
      and its accumulator `max` shadowed the builtin.
    """
    from math import gcd

    n_str = str(n)
    dot = n_str.find('.')
    decimal_part = n_str[dot + 1:] if dot != -1 else ''
    scale = 10 ** len(decimal_part)
    # 1. Shift the decimal point away; round() guards against float
    #    error, e.g. 0.1 * 10 == 1.0000000000000002.
    numer = round(n * scale)
    denom = scale
    # 2. Reduce by the greatest common divisor.
    common = gcd(int(numer), int(denom))
    return (numer / common, denom / common)

print(fraction(88))
import numpy as np
import time
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
# You may use this function as you like.
def error(y, yhat):
    """0-1 loss: fraction of positions where y and yhat disagree.

    Fix: was a lambda assigned to a name (PEP 8 E731); a def keeps the
    same callable interface with a proper name and docstring.
    """
    return np.mean(y != yhat)
class Question1(object):
    # The sequence in this problem is different from the one you saw in the
    # jupyter notebook. This makes it easier to grade. Apologies for any
    # inconvenience.
    #
    # Review: the five classifier methods were byte-identical except for the
    # classifier constructed; the shared logic now lives in _evaluate().

    def _evaluate(self, classifier, traindata, trainlabels, valdata, vallabels):
        """Fit `classifier` and measure errors and timings.

        Returns the 5-tuple shared by every *_classifier method below:
        (classifier, trainingError, validationError, fittingTime,
        valPredictingTime).
        """
        fit_start = time.time()
        classifier.fit(traindata, trainlabels)
        fittingTime = time.time() - fit_start
        trainingError = error(classifier.predict(traindata), trainlabels)
        val_start = time.time()
        estvallabels = classifier.predict(valdata)
        valPredictingTime = time.time() - val_start
        validationError = error(estvallabels, vallabels)
        # Do not change this sequence!
        return (classifier, trainingError, validationError, fittingTime,
                valPredictingTime)

    def BernoulliNB_classifier(self, traindata, trainlabels, valdata, vallabels):
        """Train and evaluate a BernoulliNB classifier.

        Parameters: traindata (Nt, d), trainlabels (Nt,), valdata (Nv, d),
        vallabels (Nv,) — all numpy ndarrays.
        Returns (classifier, trainingError, validationError, fittingTime,
        valPredictingTime); errors are floats < 1.
        """
        return self._evaluate(BernoulliNB(), traindata, trainlabels,
                              valdata, vallabels)

    def MultinomialNB_classifier(self, traindata, trainlabels, valdata, vallabels):
        """Train and evaluate a MultinomialNB classifier.

        Same parameters and return tuple as BernoulliNB_classifier.
        """
        return self._evaluate(MultinomialNB(), traindata, trainlabels,
                              valdata, vallabels)

    def LinearSVC_classifier(self, traindata, trainlabels, valdata, vallabels):
        """Train and evaluate a LinearSVC classifier.

        Same parameters and return tuple as BernoulliNB_classifier.
        """
        return self._evaluate(LinearSVC(), traindata, trainlabels,
                              valdata, vallabels)

    def LogisticRegression_classifier(self, traindata, trainlabels, valdata, vallabels):
        """Train and evaluate a LogisticRegression classifier.

        Same parameters and return tuple as BernoulliNB_classifier.
        """
        return self._evaluate(LogisticRegression(), traindata, trainlabels,
                              valdata, vallabels)

    def NN_classifier(self, traindata, trainlabels, valdata, vallabels):
        """Train and evaluate a 1-Nearest-Neighbor classifier
        (n_neighbors=1 overrides the sklearn default of 5).

        Same parameters and return tuple as BernoulliNB_classifier.
        """
        return self._evaluate(KNeighborsClassifier(n_neighbors=1),
                              traindata, trainlabels, valdata, vallabels)

    def confMatrix(self, truelabels, estimatedlabels):
        """Confusion matrix [[TP, FP], [FN, TN]] for labels in {+1, -1}.

        Parameters: truelabels (Nv,), estimatedlabels (Nv,) numpy ndarrays.
        Returns cm, a (2, 2) numpy ndarray.
        """
        truelabels = np.asarray(truelabels)
        estimatedlabels = np.asarray(estimatedlabels)
        # Vectorized counting replaces the element-wise Python loop.
        tp = np.sum((truelabels == 1) & (estimatedlabels == 1))
        tn = np.sum((truelabels == -1) & (estimatedlabels == -1))
        fn = np.sum((truelabels == 1) & (estimatedlabels != 1))
        fp = np.sum((truelabels == -1) & (estimatedlabels != -1))
        cm = np.array([[tp, fp], [fn, tn]])
        return cm

    def classify(self, traindata, trainlabels, testdata, testlabels):
        """Run the selected classifier (LogisticRegression) on the test data.

        Returns (classifier, testError, confusionMatrix).
        """
        (classifier, trainingError, testError, fittingTime,
         testPredictingTime) = self.LogisticRegression_classifier(
            traindata, trainlabels, testdata, testlabels)
        est_labels = classifier.predict(testdata)
        confusionMatrix = self.confMatrix(testlabels, est_labels)
        # Do not change this sequence!
        return (classifier, testError, confusionMatrix)
class Question2(object):
    def crossValidationkNN(self, traindata, trainlabels, k):
        """5-fold cross-validated 0,1-loss estimates for kNN, for 1..k
        neighbors.  Folds are the contiguous slices 0:N/5, ..., 4N/5:N.

        Returns err of shape (k+1,); err[0] is meaningless (left at 1.0).
        """
        err = np.ones(k + 1)
        group_size = len(traindata) // 5
        for j in range(1, k + 1):
            err_sum = 0.0
            classifier = KNeighborsClassifier(n_neighbors=j)
            for i in range(5):
                # Training folds = everything outside the i-th slice.
                tdata = np.concatenate((traindata[:i * group_size, :],
                                        traindata[(i + 1) * group_size:, :]),
                                       axis=0)
                tlabels = np.append(trainlabels[:i * group_size],
                                    trainlabels[(i + 1) * group_size:])
                vdata = traindata[i * group_size:(i + 1) * group_size, :]
                vlabels = trainlabels[i * group_size:(i + 1) * group_size]
                classifier.fit(tdata, tlabels)
                err_sum += error(classifier.predict(vdata), vlabels)
            err[j] = err_sum / 5
        return err

    def minimizer_K(self, traindata, trainlabels, k):
        """Return (err, k_min, err_min): the CV error vector, the number
        of neighbors in 1..k minimizing it, and the minimum error.

        Fix: the argmin is now taken over err[1:] only.  The old code
        searched the whole vector, so the meaningless err[0] (== 1.0)
        could be selected when every real error was 1.0, producing the
        invalid k_min = 0.
        """
        err = self.crossValidationkNN(traindata, trainlabels, k)
        k_min = int(np.argmin(err[1:]) + 1)
        err_min = err[k_min]
        # Do not change this sequence!
        return (err, k_min, err_min)

    def classify(self, traindata, trainlabels, testdata, testlabels):
        """Train kNN on the full training set with the best k (searched
        over 1..30) and evaluate on the test set.

        Returns (classifier, testError).
        """
        err, k_min, err_min = self.minimizer_K(traindata, trainlabels, 30)
        classifier = KNeighborsClassifier(n_neighbors=k_min)
        classifier.fit(traindata, trainlabels)
        testError = error(classifier.predict(testdata), testlabels)
        # Do not change this sequence!
        return (classifier, testError)
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
class Question3(object):
    def LinearSVC_crossValidation(self, traindata, trainlabels):
        """Hand-rolled search for the linear-SVM C in 2^-5..2^15 using
        10-fold cross-validation.

        Returns (C_min, min_err).
        """
        cv_errors = []  # mean CV error for each candidate C
        for exponent in range(-5, 16):
            model = LinearSVC(C=2.0 ** exponent)
            # cross_val_score yields per-fold accuracies; convert to errors.
            fold_errors = 1 - cross_val_score(model, traindata, trainlabels, cv=10)
            cv_errors.append(np.sum(fold_errors) / 10)
        # print(cv_errors)
        best = np.argmin(cv_errors)
        C_min = 2.0 ** (best - 5.0)
        min_err = cv_errors[best]
        # Do not change this sequence!
        return (C_min, min_err)

    def SVC_crossValidation(self, traindata, trainlabels):
        """Grid search (10-fold CV) for an RBF SVM over C in 2^-5..2^15
        and gamma in 2^-15..2^3.

        Returns (C_min, gamma_min, min_err).
        """
        param_grid = {
            'C': [2 ** c for c in range(-5, 16)],
            'gamma': [2 ** g for g in range(-15, 4)],
        }
        search = GridSearchCV(SVC(), param_grid, cv=10, scoring='accuracy')
        search.fit(traindata, trainlabels)
        C_min = search.best_params_['C']
        gamma_min = search.best_params_['gamma']
        min_err = 1 - search.best_score_
        # Do not change this sequence!
        return (C_min, gamma_min, min_err)

    def LogisticRegression_crossValidation(self, traindata, trainlabels):
        """Grid search (10-fold CV) for logistic-regression C in
        2^-14..2^14.

        Returns (C_min, min_err).
        """
        param_grid = {'C': [2 ** c for c in range(-14, 15)]}
        search = GridSearchCV(LogisticRegression(), param_grid, cv=10,
                              scoring='accuracy')
        search.fit(traindata, trainlabels)
        C_min = search.best_params_['C']
        min_err = 1 - search.best_score_
        # Do not change this sequence!
        return (C_min, min_err)

    def classify(self, traindata, trainlabels, testdata, testlabels):
        """Train the best classifier selected above on the whole training
        set and report its test error.

        Returns (classifier, testError).
        """
        # running the above 3 classifiers turns out that the SVC is the
        # best classifier
        classifier = SVC(C=8, gamma=0.125)
        classifier.fit(traindata, trainlabels)
        testError = 1 - classifier.score(testdata, testlabels)
        # Do not change this sequence!
        return (classifier, testError)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 20:09:59 2018
@author: RickyLi
"""
import numpy as np

def mc_pi(n):
    # Draw n points uniformly from the square [-1, 1] x [-1, 1].
    # NOTE(review): despite the name, this does NOT estimate pi.  It
    # returns, per point, the distance beyond the unit circle (clamped to
    # 0 for points inside).  Looks like an unfinished Monte-Carlo-pi
    # sketch — confirm the intended final computation.
    xy = np.random.random((n,2))*2-1
    # Euclidean distance from the origin, minus the unit-circle radius.
    d = (xy[:,0]**2 + xy[:,1]**2)**0.5 - 1
    d = np.maximum(d,0)
    return d

print(mc_pi(10))
import json

import requests  # fix: `requests` was used below without being imported

from Classes.Conta import Conta

# Smoke-test the placeholder API; prints the HTTP status code.
response = requests.get("http://jsonplaceholder.typicode.com/comments")
print(response.status_code)
class Pessoa():
    """A person with a bank account (Conta) plus name, height and age."""

    def __init__(self, nome, altura, idade):
        # Every person gets a fresh account.
        self.conta = Conta()
        # Name-mangled "private" state.
        self.__nome = nome
        self.__altura = altura
        self.__idade = idade
        self.__stringMsg = "Nome: {0} Idade: {1} Altura: {2}"

    def getStringPessoa(self):
        """Return the formatted one-line description of this person."""
        template = self.__stringMsg
        return template.format(self.__nome, self.__idade, self.__altura)
listPessoa = []
# Interactive loop: read one person's data, make a deposit on their
# account, show the balance; 'n' stops the loop.
while True:
    nome = input("Digite um nome: ")
    idade = int(input("Digite a idade: "))
    altura = float(input("Digite a altura"))
    p = Pessoa(nome, altura, idade)
    listPessoa.append(p)
    # Deposito/GetSaldo are provided by the Conta class.
    p.conta.Deposito(float(input("Informe o valor do deposito")))
    print(p.conta.GetSaldo())
    if(input("Continuar? [s/n]") == "n"):
        break
# Print a summary line for every person entered.
for __p in listPessoa:
    print(__p.getStringPessoa())
|
#!/usr/bin/env python3
# -*- coding: utf -8-*-
import sys

def reverse_iterative(sequence, left, right):
    """Return a new list equal to `sequence` with the slice between
    indices left and right (inclusive) reversed.

    >>> reverse_iterative([1,2,3,4,5,6], 1, 4)
    [1, 5, 4, 3, 2, 6]
    >>> reverse_iterative([10,9,8,7,6,5,4,3,2,1], 0, 6)
    [4, 5, 6, 7, 8, 9, 10, 3, 2, 1]
    """
    head = sequence[:left]
    middle = sequence[left:right + 1]
    tail = sequence[right + 1:]
    return head + middle[::-1] + tail
def reverse_recursive(sequence, left, right):
    """Recursively reverse `sequence` in place between indices left and
    right (inclusive); returns the same list.

    >>> reverse_recursive([1,2,3,4,5,6], 1, 4)
    [1, 5, 4, 3, 2, 6]
    >>> reverse_recursive([10, 9, 8, 7, 6, 5, 4, 3, 2, 1], 0, 6)
    [4, 5, 6, 7, 8, 9, 10, 3, 2, 1]
    """
    if left >= right:
        return sequence
    # Swap the outermost pair, then recurse on the inner slice.
    sequence[left], sequence[right] = sequence[right], sequence[left]
    return reverse_recursive(sequence, left + 1, right - 1)
if __name__ == '__main__':
    # Run the doctests embedded in the functions above.
    import doctest
    doctest.testmod()
|
#!usr/bin/env python3
# -*- coding: utf -8-*-
import sys

N = 100000

class Node:
    """A node of a doubly-linked list (docstring translated from Polish)."""

    def __init__(self, data=None, next=None, prev=None):
        # `next` shadows the builtin but is kept: the parameter names are
        # part of the public interface.
        self.data = data
        self.next = next
        self.prev = prev

    def __str__(self):
        # Display just the payload.
        return str(self.data)
class DoubleList:
    """A doubly-linked list (comments translated from Polish).

    Review fixes:
    - remove(): `node.nect` typo left the removed node's .next link
      intact, and removing the head or tail node crashed with
      AttributeError because node.prev / node.next can be None;
    - clear(): now resets head/tail/length so is_empty() and count()
      agree with the cleared state;
    - find_max(): normalized the odd `while(node).next` parenthesization.
    """

    def __init__(self):
        self.length = 0
        self.head = None
        self.tail = None

    def is_empty(self):
        # return self.length == 0
        return self.head is None

    def count(self):
        """Number of nodes currently in the list."""
        return self.length

    def insert_head(self, node):
        """Link `node` in as the new head."""
        if self.head:
            node.next = self.head
            self.head.prev = node  # old head
            self.head = node       # new head
        else:  # empty list
            self.head = node
            self.tail = node
        self.length += 1

    def insert_tail(self, node):
        """Link `node` in as the new tail."""
        if self.tail:
            node.prev = self.tail
            self.tail.next = node  # old tail
            self.tail = node       # new tail
        else:  # empty list
            self.head = node
            self.tail = node
        self.length += 1

    def remove_head(self):
        """Unlink and return the head node; ValueError when empty."""
        if self.head is None:
            raise ValueError("pusta lista")
        elif self.head is self.tail:  # single-node list
            node = self.head
            self.head = None
            self.tail = None
            self.length = 0
            return node
        else:
            node = self.head
            self.head = self.head.next
            self.head.prev = None  # detach
            self.length -= 1
            return node

    def remove_tail(self):
        """Unlink and return the tail node; ValueError when empty."""
        if self.head is None:
            raise ValueError("pusta lista")
        elif self.head is self.tail:  # single-node list
            node = self.tail
            self.head = None
            self.tail = None
            self.length = 0
            return node
        else:
            node = self.tail
            self.tail = self.tail.prev
            self.tail.next = None  # detach
            self.length -= 1
            return node

    def find_max(self):
        """Return the node with the largest key; ValueError when empty."""
        if self.length == 0:
            raise ValueError("Pusta lista")
        best = self.head
        node = self.head
        while node.next:
            if node.next.data > best.data:
                best = node.next
            node = node.next
        return best

    def find_min(self):
        """Return the node with the smallest key; ValueError when empty."""
        if self.length == 0:
            raise ValueError("Pusta lista")
        best = self.head
        node = self.head
        while node.next:
            if node.next.data < best.data:
                best = node.next
            node = node.next
        return best

    def remove(self, node):
        """Unlink `node` (which must belong to this list) and wipe its
        fields, matching the original's intent; ValueError when empty."""
        if self.head is None:
            raise ValueError("pusta lista")
        if node.prev is None:       # node is the head
            self.head = node.next
        else:
            node.prev.next = node.next
        if node.next is None:       # node is the tail
            self.tail = node.prev
        else:
            node.next.prev = node.prev
        # Wipe the removed node (fixes the `node.nect` typo).
        node.data = None
        node.prev = None
        node.next = None
        self.length -= 1

    def clear(self):
        """Drop every node and reset the list to empty."""
        node = self.head
        while node:
            node.prev = None
            node.data = None
            node = node.next
        # Fix: reset bookkeeping so the list reports empty afterwards.
        self.head = None
        self.tail = None
        self.length = 0

    def output(self):
        """Print each node's data, head to tail."""
        node = self.head
        while node is not None:
            print(node.data)
            node = node.next
if __name__ == '__main__':
    # Smoke test: build a six-node list, check min/max, remove a middle
    # node and print the remaining contents.
    lista = DoubleList()
    pierwszy = Node(2)
    drugi = Node(8)
    trzeci = Node (7)
    czwarty = Node(13)
    piaty = Node(1)
    szosty = Node(4)
    lista.insert_head(pierwszy)
    lista.insert_tail(drugi)
    lista.insert_tail(trzeci)
    lista.insert_tail(czwarty)
    lista.insert_tail(piaty)
    lista.insert_tail(szosty)
    assert(lista.find_min() == piaty)    # 1 is the smallest key
    assert(lista.find_max() == czwarty)  # 13 is the largest key
    lista.remove(drugi)
    assert(lista.count() == 5)
    lista.output()
|
#!/usr/bin/env python3
# -*- coding: utf -8-*-
import sys

def flatten(sequence):
    """Return a flat list of every element from arbitrarily nested
    lists/tuples (docstring translated from Polish).

    >>> flatten([1, [2, [3]]])
    [1, 2, 3]
    >>> flatten([1,(2,3),[],[4,(5,6,7)],8,[9]])
    [1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    flat = []
    for element in sequence:
        if isinstance(element, (list, tuple)):
            # Recurse into nested containers.
            flat.extend(flatten(element))
        else:
            flat.append(element)
    return flat
if __name__ == '__main__':
    # Run the doctests embedded in flatten().
    import doctest
    doctest.testmod()
|
# asking for user input
string = input("Please Enter your own single word: ")
# Palindrome check: string[::-1] is the string reversed — slicing with a
# negative step walks from the last character back to the first.
if(string == string[:: -1]):
    print("True")
else:
    print("False")
import random

# Personal quiz game.
# Review fixes: the score-message chain used `elif score > 5`, so a score
# of exactly 5 printed nothing despite the advertised "5 = Meet me for
# pizza" legend — now `score >= 5`.  Two stray no-op string literals
# ("\n" expression statements) were removed.

# the question/answer dictionary.
my_dict = {
    "What is my first name:": "Gift",
    "Where do I live(county):": "Mombasa",
    "Which HighSchool do I go to": "Makueni Boys",
    "How old am I:": "18",
    "Which form am I": "4",
    "What do I love the most in this world": "Computers",
    "Do you think I love you": "Yes",
    "Which primary school did I go to": "Brights Academy",
    "Am I tall": "Yes",
    "Am I dark or light skinned": "Dark",
    "Do you love my game": "Yes",
    "What is my favorite colour": "Black"
}

# the questions
name = input("What is your name?\n")
print("Do you think you know me well? " + name + "!\n")
print("Lets see! Answer all these questions about me correctly. \n"
      "=============================================================")
print("0 = You're a stranger.\n"
      "1 = Really!!!\n"
      "2 = And you claim to know me..\n"
      "3 = We need to hang out more\n"
      "4 = You know me alright.\n"
      "5 = Meet me for pizza.. " + name + "\n")
print("Note: There are 12 questions in total \n "
      "HAVE FUN!\n "
      "***********\n")

playing = True
# the quiz will end when this variable becomes 'False'
while playing:
    # set score to 0 initially
    score = 0
    # gets the number of questions the player wants to answer
    num = int(input("\nHow many questions would you like: \n"))
    # loop the correct number of times
    for i in range(num):
        # the question is one of the dictionary keys, picked at random
        question = (random.choice(list(my_dict.keys())))
        # the answer is the string mapped to the question key
        answer = my_dict[question]
        # print the question, along with the question number
        print("\nQuestion " + str(i + 1))
        print(question + "? ")
        # get the user's answer attempt
        guess = input("> ")
        # if their guess is the same as the answer
        if guess.lower() == answer.lower():
            # add 1 to the score and print a message
            print("Correct! ")
            score += 1
        else:
            print(" Nope! " + "Answer is " + answer)
    # after the quiz, print their final score
    print("\nYour final score was " + str(score))
    if score == 0:
        print("You're a stranger")
    elif score == 1:
        print("Really!")
    elif score == 2:
        print("And you claim to know me")
    elif score == 3:
        print("We should hang out more " + name)
    elif score == 4:
        print("You know me alright")
    elif score >= 5:  # fix: was `> 5`, which skipped a score of exactly 5
        print("Meet me for pizza.. " + name)
    # store the user's input...
    again = input("Enter any key to play again, or 'q' to quit.")
    # ...and quit if they types 'q'
    if again.lower() == 'q':
        playing = False
|
import ctypes
class Array:
    """
    Fixed-size low-level array backed by a ctypes py_object buffer.
    """

    def __init__(self, size):
        """
        Create an array of `size` slots, all initialized to None.
        :param size: int (must be positive; ValueError otherwise)
        """
        if size <= 0:
            raise ValueError
        self._size = size
        slot_type = ctypes.py_object * size
        self._elements = slot_type()
        self.clear(None)

    def __len__(self):
        """
        Number of slots.
        :return: int
        """
        return self._size

    def __getitem__(self, index):
        """
        Return the item stored at `index`; IndexError when out of range.
        :param index: int
        :return: object
        """
        if not 0 <= index < self._size:
            raise IndexError
        return self._elements[index]

    def __setitem__(self, index, value):
        """
        Store `value` at `index`; IndexError when out of range.
        :param index: int
        :param value: object
        """
        if not 0 <= index < self._size:
            raise IndexError
        self._elements[index] = value

    def clear(self, value):
        """
        Set every slot to `value`.
        :param value: object
        """
        for slot in range(self._size):
            self._elements[slot] = value

    def __iter__(self):
        """
        Return an iterator over the elements, front to back.
        :return: iterator
        """
        return _ArrayIterator(self._elements)
class _ArrayIterator:
"""
Iterator of 2D array
"""
def __init__(self, the_array):
"""
Initializes an iterator with parent array
:param the_array: Array
"""
self._array_ref = the_array
self._cur_index = 0
def __iter__(self):
return self
def __next__(self):
"""
Returns nex element or raises StopIteration exception
:return:
"""
if self._cur_index < len(self._array_ref):
entry = self._array_ref[self._cur_index]
self._cur_index += 1
return entry
else:
raise StopIteration
class Array2D:
    """
    2D array implemented as an Array of row Arrays.
    """

    def __init__(self, num_rows, num_cols):
        """
        Build num_rows rows of num_cols columns each.
        :param num_rows: int
        :param num_cols: int
        """
        self.rows = Array(num_rows)
        for row_index in range(num_rows):
            self.rows[row_index] = Array(num_cols)

    def num_rows(self):
        """
        Number of rows.
        :return: int
        """
        return len(self.rows)

    def num_cols(self):
        """
        Number of columns.
        :return: int
        """
        return len(self.rows[0])

    def clear(self, value):
        """
        Fill every cell with `value`.
        :param value: object
        """
        for row in self.rows:
            row.clear(value)

    def _validated(self, index_tuple):
        """
        Check a (row, col) index: ValueError for a wrong-length tuple,
        IndexError when out of range.  Returns the (row, col) pair.
        """
        if len(index_tuple) != 2:
            raise ValueError
        row, col = index_tuple
        if not (0 <= row < self.num_rows() and 0 <= col < self.num_cols()):
            raise IndexError
        return row, col

    def __getitem__(self, index_tuple):
        """
        Return the item at position (row, col).
        :param index_tuple: (int, int)
        """
        row, col = self._validated(index_tuple)
        return self.rows[row][col]

    def __setitem__(self, index_tuple, value):
        """
        Store `value` at position (row, col).
        :param index_tuple: (int, int)
        :param value: object
        """
        row, col = self._validated(index_tuple)
        self.rows[row][col] = value

    def __iter__(self):
        """
        Iterate over the row Arrays.
        :return: iterator
        """
        return _ArrayIterator(self.rows)
|
STACK = False

def main(args):
    """
    Build a source-code snippet that string-joins the given argument
    expressions.

    Arguments already wrapped in double quotes are kept verbatim; every
    other argument is wrapped in ``str(...)``.
    :param args: iterable of argument source strings
    :return: a ``'"".join([...])'`` expression string
    """
    pieces = []
    for arg in args:
        if arg.startswith('"') and arg.endswith('"'):
            pieces.append(arg)
        else:
            pieces.append('str({0})'.format(arg))
    return '"".join([{0}])'.format(','.join(pieces))
|
# Collect paired integer samples (0-100) from the user, then report values.
# Fixes vs. original: (1) `del add_data_A[count]` raised TypeError because
# add_data_A is an int, not a list; (2) the final loop appended to data_A
# while iterating it, which never terminates; (3) it printed the undefined
# name `seen`.
data_A = []
data_B = []
while True:
    add_data_A = int(input('Enter numbers between 0 - 100 : '))
    add_data_B = int(input('Enter numbers between 0 - 100 : '))
    # An out-of-range value in either slot discards the whole pair.
    if not (0 <= add_data_A <= 100) or not (0 <= add_data_B <= 100):
        print('Invalid input.')
    else:
        data_A.append(add_data_A)
        data_B.append(add_data_B)
    ask = str(input('Add new data? y/n'))
    if ask.lower() == 'n':
        break

# NOTE(review): the original comparison loop was broken; presumably the
# intent was to collect the values of data_A that do not occur in data_B
# -- confirm against the surrounding assignment/spec.
seen = [x for x in data_A if x not in data_B]
print(seen)
|
from Car import Cars
class Trucks(Cars):
    """Truck record: a Cars subclass with a load-capacity field."""

    def __init__(self):
        super().__init__()
        self.type = 'Truck'
        self.weight = 0  # load capacity (грузоподъёмность)

    def in_information(self, line):
        """
        Populate the truck's fields from one parsed input row.
        :param line: sequence where line[1] is capacity, line[2] engine
            power, line[3] fuel consumption
        """
        # NOTE(review): both weight and global_weight come from line[1];
        # confirm that duplication is intentional.
        self.weight = line[1]
        self.global_weight = line[1]
        self.engine_power = line[2]
        self.fuel_consumption = line[3]
        self.ratio_calc()

    def display(self, file, i):
        """Write the short record (index and capacity) to *file*."""
        file.write(f"{i}.Грузовик\n")
        file.write(f"грузоподъёмность: {self.weight}\n")

    def display_filtr(self, file, i):
        """Write the full record, including the weight/power ratio."""
        file.write(f"{i}.Грузовик\n")
        file.write(f"грузоподъёмность: {self.weight}\n")
        file.write(f"мощность двигателя: {self.engine_power}\n")
        file.write(f"расход топлива: {self.fuel_consumption}\n")
        file.write(f"отношение веса груза к мощности двигателя: {self.ratio}\n")
|
# In DNA strings, symbols "A" and "T" are complements of each other, as "C" and "G". You have function with one side of the DNA (string, except for Haskell); you need to get the other complementary side. DNA strand is never empty or there is no DNA at all (again, except for Haskell).
def DNA_strand(dna):
    """
    Return the complementary DNA strand.

    'A' <-> 'T' and 'C' <-> 'G'; any character other than A/T/C maps to
    'C' (which handles 'G', matching the original catch-all branch).
    :param dna: str, one side of the DNA
    :return: str, the complementary side
    """
    pairs = {'A': 'T', 'T': 'A', 'C': 'G'}
    return ''.join(pairs.get(base, 'C') for base in dna)
|
Pandas series data structure
======================================================================================================================================
- one dimensional labeled array object (No column headers)
>>> c = pd.Series([1,2,3])
>>>
>>> c
0 1
1 2
2 3
dtype: int64
>>>
>>> c
0 1
1 2
2 3
dtype: int64
>>>
>>> c = pd.Series((1,2,3))
>>> c
0 1
1 2
2 3
dtype: int64
>>> numpy.array
<built-in function array>
>>> c = pd.Series({1:2})
>>> c
1 2
dtype: int64
We can define the row indexes as well
==============================================
>>> a = pd.Series({1:5,2:10,5:47,6:40,9:13})
>>> a
1 5
2 10
5 47
6 40
9 13
dtype: int64
We can select rows based on some indexes
=============================================
>>> a[[1,5]]
1 5
5 47
dtype: int64
>>> b = [1,5,9]
>>> a[b]
1 5
5 47
9 13
dtype: int64
Modifying/updating
Pandas series object can be processed as a dictionary
=====================================================
>>> obj = a = pd.Series([12,34], index = ['a','b'])
>>> 'a' in obj
True
>>> 'e' in obj
False
-- we can convert a python dictionary easily to a pandas series object
>>> dic = {'Ram':'Shyam', 'Jodu':'Modhu'}
>>> a = pd.Series(dic)
>>> a
Ram Shyam
Jodu Modhu
dtype: object
-- and the vice versa is also true
>>> back = dict(a)
>>> back
{'Ram': 'Shyam', 'Jodu': 'Modhu'}
-- checking whether the series object has any NaN/null values
using pandas "isnull" and ""notnull" method
>>> a = [1,2, float('NaN')]
>>>
>>> a
[1, 2, nan]
>>>
>>> pd.isnull(pd.Series(a))
0 False
1 False
2 True
dtype: bool
>>> pd.notnull(b)
0 True
1 True
2 False
dtype: bool
-- using series's notnull method
>>> b = pd.Series(a)
>>> b
0 1.0
1 2.0
2 NaN
dtype: float64
>>>
>>> b.notnull()
0 True
1 True
2 False
dtype: bool
-- adding up two series - adds values based on the indexes of the objects (this is more like joining the series objects,
where the joining column is the index; indexes present in only one series produce NaN)
>>> obj1 = pd.Series([10,20,30], index = ['Utah', 'Ohio', 'Kentucky'])
>>> obj1
Utah 10
Ohio 20
Kentucky 30
dtype: int64
>>>
>>> obj2 = pd.Series([13,43,57], index = ['Utah', 'NYC', 'Ohio'])
>>> obj2
Utah 13
NYC 43
Ohio 57
dtype: int64
>>>
>>> obj1 + obj2
Kentucky NaN
NYC NaN
Ohio 77.0
Utah 23.0
dtype: float64
-- we can name the series index and the values
>>> obj3
Kentucky NaN
NYC NaN
Ohio 77.0
Utah 23.0
dtype: float64
>>> obj3.index.name = "States"
>>> obj3.name = 'Vals'
>>>
>>> obj3
States
Kentucky NaN
NYC NaN
Ohio 77.0
Utah 23.0
Name: Vals, dtype: float64
-- renaming the indexes
>>> obj3.index = ['K','N','O','U']
>>>
>>> obj3
K NaN
N NaN
O 77.0
U 23.0
Name: Vals, dtype: float64
Pandas DataFrame data structure
================================================================================================================================
>>> a = pd.DataFrame({'Name':['Puja', 'Sankar'], 'ID':[1,2], 'Salary':[10,20]})
>>> a
Name ID Salary
0 Puja 1 10
1 Sankar 2 20
# head returns the top 5 rows in a dataframe
>>> a.head
<bound method NDFrame.head of Name ID Salary Newname
0 Puja 1 10 Puja
1 Sankar 2 20 Sankar
>>> a.columns
Index(['Name', 'ID', 'Salary'], dtype='object')
>>> a.index
RangeIndex(start=0, stop=2, step=1)
# if you use one square bracket to select a column, you would get a series object while double will return a dataframe
>>> b = a['Name']
>>> b
0 Puja
1 Sankar
Name: Name, dtype: object
>>> type(b)
<class 'pandas.core.series.Series'>
>>>
>>> b = a[['Name']]
>>> b
Name
0 Puja
1 Sankar
>>> type(b)
<class 'pandas.core.frame.DataFrame'>
>>> b = a.Name
>>> b
0 Puja
1 Sankar
Name: Name, dtype: object
>>> type(b)
<class 'pandas.core.series.Series'>
>>>
>>> a['Newname'] = a.Name
>>> a
Name ID Salary Newname
0 Puja 1 10 Puja
1 Sankar 2 20 Sankar
>>>
>>> a.Name.tolist()
['Puja', 'Sankar']
-- deleting a column from a dataframe
>>> del frame3['pop']
>>> frame3
state year
0 Ohio 2000
1 Ohio 2001
2 Ohio 2002
3 Nevada 2001
4 Nevada 2002
5 Nevada 2003
>>> frame2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],index=['one', 'two', 'three', 'four','five', 'six'])
>>> frame2
year state pop debt
one 2000 Ohio 1.5 NaN
two 2001 Ohio 1.7 NaN
three 2002 Ohio 3.6 NaN
four 2001 Nevada 2.4 NaN
five 2002 Nevada 2.9 NaN
six 2003 Nevada 3.2 NaN
>>>
-- .loc returns a series object containing rows of data (not columns)
>>> frame2.loc['three']
year 2002
state Ohio
pop 3.6
debt NaN
Name: three, dtype: object
>>> type(frame2.loc['three'])
<class 'pandas.core.series.Series'>
>>> frame2.loc['three'].tolist()
[2002, 'Ohio', 3.6, nan]
-- if we convert a python dataframe to a python dictionary using dict()
we will get a dictionary, the values of which will be series objects containing each column values and the keys will be the column
names.
>>> dic = dict(frame)
>>> dic
{'state': 0 Ohio
1 Ohio
2 Ohio
3 Nevada
4 Nevada
5 Nevada
Name: state, dtype: object, 'year': 0 2000
1 2001
2 2002
3 2001
4 2002
5 2003
Name: year, dtype: int64}
>>> dic.keys()
dict_keys(['state', 'year'])
*** loc and iloc => they enable you to select a subset of the rows and columns from a dataframe
|
Python Regex
=======================================================================================================================================
import re
1) findall => returns a list of all the matches
================================================
>>> re.findall('Tame', 'Tame is in tame')
['Tame']
>>> re.compile("Tame").findall("Tame is in tame")
['Tame']
>>> x = re.findall("Spain", "Spain has to be in front of Spain, no spain?", re.I)
>>> x
['Spain', 'Spain', 'spain']
multiple keyword search on the string =>
>>> l = ['Spain', 'bow']
>>> r = re.compile('|'.join([i for i in l]), re.I)
>>> r
re.compile('Spain|bow', re.IGNORECASE)
>>> word = "Spain has to bow down"
>>> r.findall(word)
['Spain', 'bow']
2) search => returns a match object if there is a match anywhere in the string
=====================================================================================
Scan through string looking for the first location where the regular expression pattern produces a match
>>> txt = "The rain in Spain"
>>> x = re.search(r"\bS\w+", txt)
>>> print(x.span())
(12, 17)
>>> x.string
'The rain in Spain'
>>> x.gro
x.group( x.groupdict( x.groups(
>>> x.groups()
()
>>> x
<re.Match object; span=(12, 17), match='Spain'>
>>> x.groups(0)
()
3) match / fullmatch => returns a match object
==================================================
If zero or more characters at the beginning of string match the regular expression pattern, return a corresponding match object.
>>> re.match("spa", "Spain is spa", flags = re.I)
<re.Match object; span=(0, 3), match='Spa'>
>>>
>>> re.match("spa", "Spain is spa", flags = re.I).group()
'Spa'
>>> re.match("spa", "Spain is spa", flags = re.I).span()
>>> re.fullmatch("spa", "Spain is spa ", flags = re.I)
>>>
>>> re.fullmatch("spa", "spa", flags = re.I)
<re.Match object; span=(0, 3), match='spa'>
>>> m = re.match(r"(\w+) (\w+)(,)(\w+)", "Isaac Newton, physicist")
>>> m
>>> m = re.match(r"(\w+) (\w+)(,) (\w+)", "Isaac Newton, physicist")
>>> m
<re.Match object; span=(0, 23), match='Isaac Newton, physicist'>
4) sub => Replaces one or many matches with a string
=====================================================
>>> re.sub("\W","40", "String 123")
'String40123'
>>> re.sub("[0-9]","", "String 123")
'String '
|
# Shallow Copy and Deep Copy
# When you use "=" to create a copy of an object, It only creates a new variable that shares the reference of the original object.
a = [1,2,3,4]
b = a
a.append(5)
a[2] = 100
print(a,b)
=> [1, 2, 100, 4, 5] [1, 2, 100, 4, 5]
-- Shallow copy creates a new top-level object, but nested objects inside it are still shared with the original.
import copy
a = [1,2,3,4]
b = copy.copy(a)
b.append(5)
print(a,b)
-- [1, 2, 3, 4] [1, 2, 3, 4, 5]
import copy
a = [1,2,3,4]
b = copy.copy(a)
b[0] = 100
print(a,b)
-- [1, 2, 3, 4] [100, 2, 3, 4]
import copy
a = [[1],[2],[3],[4]]
b = copy.copy(a)
a.append(5)
a[0][0] = 100
print(a,b)
-- [[100], [2], [3], [4], 5] [[100], [2], [3], [4]]
-- Deep Copy
it creates a completely new object with the elements of the existing object and they have no relation at all.
import copy
old_list = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
new_list = copy.deepcopy(old_list)
old_list[1][0] = 'BB'
print("Old list:", old_list)
print("New list:", new_list)
Old list: [[1, 1, 1], ['BB', 2, 2], [3, 3, 3]]
New list: [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
================================================================================
# Python args and kargs
The special syntax **kwargs in function definitions in python is used to pass a keyworded, variable-length argument list.
def myFun(**kwargs):
for key, value in kwargs.items():
print ("%s == %s" %(key, value))
# Driver code
myFun(first ='Geeks', mid ='for', last='Geeks')
-- The special syntax *args in function definitions in python is used to pass a variable number of arguments to a function
You can't pass a keyword argument through *args.
def myFun(*argv):
for arg in argv:
print (arg)
myFun('Hello', 'Welcome', 'to', 'GeeksforGeeks')
# Python Decorators
-- A design pattern to in python - takes in a function, adds some functionality and returns it.
-- This is also called metaprogramming because one part of the program modifies another part of the program at function-definition time (when the module is executed)
def make_pretty(func):
def inner():
print("I got decorated")
func()
return inner
def ordinary():
print("I am ordinary")
>>> ordinary()
I am ordinary
>>> # let's decorate this ordinary function
>>> pretty = make_pretty(ordinary)
>>> pretty()
I got decorated
I am ordinary
#Python Serialization
===============================================
Pickling is the process whereby a Python object hierarchy is converted into a byte stream (usually not human readable) to be written to a file,
this is also known as Serialization. Unpickling is the reverse operation, whereby a byte stream is converted back into a working Python object hierarchy.
import pickle
# serializes
pickle.dump()
#deserializes
pickle.load()
'''to run python in command prompt, use "python", (windows :considering you have set up environment variable)
The interactive prompt runs code and echoes results as you go, but it doesn’t save your code in a file
'''
# enumerate() in python ==> it will give you the index numbers while iterating
>>> for n,i in enumerate(arr):
... print(n,i)
...
0 6
1 4
2 2
3 1
4 3
5 5
6 7
>>> arr
[6, 4, 2, 1, 3, 5, 7]
#to get current working directory
>>> import os
>>> os.getcwd()
'/Users/sankar.biswas'
#changing current directory
>>> os.chdir('/Users/sankar.biswas/Desktop/Python/coding')
>>> os.getcwd()
'/Users/sankar.biswas/Desktop/Python/coding'
# to run a python script from command prompt
python file1.py
#saving the output in a file
python script1.py > saveit.txt
# "dir" - you can use it to fetch a list of all the names available inside a module
>>> import sys
>>> dir(sys)
['__breakpointhook__', '__displayhook__', '__doc__', '__excepthook__', '__interactivehook__', '__loader__', '__name__', '__package__', '__spec__', '__stderr__',
'__stdin__', '__stdout__', '_clear_type_cache', '_current_frames', '_debugmallocstats', '_framework', '_getframe', '_git',
'_home', '_xoptions', 'abiflags', 'api_version', 'argv', 'base_exec_prefix', 'base_prefix', 'breakpointhook', 'builtin_module_names',
'byteorder', 'call_tracing', 'callstats', 'copyright', 'displayhook', 'dont_write_bytecode', 'exc_info', 'excepthook', 'exec_prefix', 'executable', 'exit',
'flags', 'float_info', 'float_repr_style', 'get_asyncgen_hooks', 'get_coroutine_origin_tracking_depth', 'get_coroutine_wrapper',
'getallocatedblocks', 'getcheckinterval', 'getdefaultencoding', 'getdlopenflags', 'getfilesystemencodeerrors', 'getfilesystemencoding',
'getprofile', 'getrecursionlimit', 'getrefcount', 'getsizeof', 'getswitchinterval', 'gettrace', 'hash_info', 'hexversion', 'implementation', 'int_info', 'intern', 'is_finalizing', 'maxsize', 'maxunicode',
'meta_path', 'modules', 'path', 'path_hooks', 'path_importer_cache', 'platform', 'prefix', 'ps1', 'ps2', 'set_asyncgen_hooks',
'set_coroutine_origin_tracking_depth', 'set_coroutine_wrapper', 'setcheckinterval', 'setdlopenflags', 'setprofile', 'setrecursionlimit',
'setswitchinterval', 'settrace', 'stderr', 'stdin', 'stdout', 'thread_info', 'version', 'version_info', 'warnoptions']
# the " exec(open('module.py').read())" built-in function call is another way to launch files from the interactive prompt without having to import and later reload
#you can also find out the functions you can apply on a variable using "dir"
>>> a = 234
>>> dir(a)
['__abs__', '__add__', '__and__', '__bool__', '__ceil__', '__class__', '__delattr__', '__dir__', '__divmod__', '__doc__',
'__eq__', '__float__', '__floor__', '__floordiv__', '__format__', '__ge__', '__getattribute__', '__getnewargs__', '__gt__',
'__hash__', '__index__', '__init__', '__init_subclass__', '__int__', '__invert__', '__le__', '__lshift__', '__lt__', '__mod__'
, '__mul__', '__ne__', '__neg__', '__new__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__', '__rdivmod__', '__reduce__',
'__reduce_ex__', '__repr__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', '__rrshift__',
'__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__setattr__', '__sizeof__', '__str__', '__sub__', '__subclasshook__', '__truediv__',
'__trunc__', '__xor__', 'bit_length', 'conjugate', 'denominator', 'from_bytes', 'imag', 'numerator', 'real', 'to_bytes']
# this will help you get a knowledge on the functionality of the function, dial 'q' to escape
>>> help(a.__abs__)
# Pattern Matching
>>> match = re.match('Hello[ \t]*(.*)world', 'Hello Python world')
>>> match
<re.Match object; span=(0, 18), match='Hello Python world'>
>>> match.group(1)
'Python '
>>> match = re.match('[/:](.*)[/:](.*)[/:](.*)', '/usr/home:lumberjack')
>>> match.groups()
('usr', 'home', 'lumberjack')
>>> re.split('[/:]', '/usr/home/lumberjack')
['', 'usr', 'home', 'lumberjack']
#List Operations
>>> L = [123, 'spam', 1.23]
>>> len(L)
3
>>> L*2
[123, 'spam', 1.23, 123, 'spam', 1.23]
>>> L[:]
[123, 'spam', 1.23]
>>> L[2:]
[1.23]
>>> L[:-1]
[123, 'spam']
>>> L.append(23)
[123, 'spam', 1.23, 23]
>>> L.pop(2)
1.23
>>> L
[123, 'spam', 23]
>>> list = [1,23,4,56,33,656,564]
>>> list.sort()
>>> list
[1, 4, 23, 33, 56, 564, 656]
#adding multiple elements to an existing list
>>> L
[123, 'abc', 1.23, {}]
>>> L.extend([5,6,7])
>>> L
[123, 'abc', 1.23, {}, 5, 6, 7]
#deleting all the elements
>>> L.clear()
>>> L
[]
#deleting a single element by index
>>> L = [123, 'abc', 1.23, {}]
>>> del L[0]
>>> L
['abc', 1.23, {}]
#selecting a partcular column from a 2D list
>>> list2D = [[1,2,3],[4,5,6],[7,8,9]]
>>> list2D[1][2]
6
>>> col2 = [row[1] for row in list2D] #Give me row[1] (2nd element) for each row in matrix M, in a new list.
>>> col2
[2, 5, 8]
>>> M
['bb', 'aa', 'cc']
>>> M.sort()
>>> M
['aa', 'bb', 'cc']
>>> [row[1] for row in M if row[1] % 2 == 0] #Filter out odd items
[2, 8]
#diagonal matrix
>>> diag = [M[i][i] for i in [0, 1, 2]]
>>> diag
[1, 5, 9]
# Repeat characters in a string
>>> doubles = [c * 2 for c in 'spam']
>>> doubles
['ss', 'pp', 'aa', 'mm']
>>> list(range(4))
[0, 1, 2, 3]
>>> a = list(range(-6,7,2))
>>> a
[-6, -4, -2, 0, 2, 4, 6]
>>> [[x ** 2, x **3] for x in range(4)]
[[0, 0], [1, 1], [4, 8], [9, 27]]
>>> [[x, x / 2, x * 2] for x in range(-6, 7, 2) if x > 0]
[[2, 1.0, 4], [4, 2.0, 8], [6, 3.0, 12]]
>>> [[x, int(x / 2), x * 2] for x in range(-6, 7, 2) if x > 0]
[[2, 1, 4], [4, 2, 8], [6, 3, 12]]
>>> G = (sum(row) for row in M)
>>> G
<generator object <genexpr> at 0x105b29408>
>>> next(G)
6
>>> next(G)
15
>>> next(G)
24
'''Dictionaries :: Dictionaries, the only mapping type (not a sequence) in Python’s core objects set, are also mutable '''
>>> D = {}
>>> type(D)
<class 'dict'>
>>> D = {'food': 'Spam', 'quantity': 4, 'color': 'pink'}
>>> D
{'food': 'Spam', 'quantity': 4, 'color': 'pink'}
#using dict to define a dictionary
>>> bob1 = dict(name='Bob', job='dev', age=40)
>>> bob1
{'age': 40, 'name': 'Bob', 'job': 'dev'}
#zipping way to define dictionary
>>> bob2 = dict(zip(['name', 'job', 'age'], ['Bob', 'dev', 40]))
>>> bob2
{'name': 'Bob', 'job': 'dev', 'age': 40}
#Complex nesting of different types in python - one of the advantage of using python, complex nesting is easy to implement
>>> rec = {'name': {'first': 'Bob', 'last': 'Smith'}, 'jobs': ['dev', 'mgr'], 'age': 40.5}
>>> rec['jobs'][1]
'mgr'
>>> rec['name']['last']
'Smith'
>>> rec['jobs'].append('support')
>>> rec
{'name': {'first': 'Bob', 'last': 'Smith'}, 'jobs': ['dev', 'mgr', 'support'], 'age': 40.5}
#In Python, when we lose the last reference to the object—by assigning its variable to something else
>>> rec = 0
#Python has a feature known as garbage collection that cleans up unused memory as your program runs and frees you from having to manage such details in your code.
>>> D = {'a': 1, 'b': 2, 'c': 3}
#".get" looks up the key 'x' in dictionary D; if the key is missing it returns the supplied default, 0
>>> value = D.get('x', 0)
>>> value
0
#Sorting Keys: for Loops
>>> sorted(D)
['a', 'b', 'c']
>>> Ks = list(D.keys())
>>> Ks
['a', 'c', 'b']
>>> Ks.sort()
>>> Ks
['a', 'b', 'c']
#Tuples :: tuples are sequences, like lists, but they are immutable. Functionally, they’re used to represent fixed collections of items.
>>> T = (1, 2, 3, 4, 5)
>>> len(T)
5
>>> T + (5,6)
(1, 2, 3, 4, 5, 5, 6)
>>> T
(1, 2, 3, 4, 5)
>>> T[0]
1
>>> T.index(4)
3
>>> T.count(4)
1
#tuples provide a sort of integrity constraint
#String slicing, so the last number is the gap of skipping, that is 1,3,5,... will be skipped
>>> S = "I a m s a d"
>>> S[::2]
'Iamsad'
#the third index if given negative will reverse the selection
>>> S[::-2]
'dasmaI'
>>> S
'I evol being alone'
>>> S[5:1:-1]
'love'
>>>
>>> S[::-1]
'enola gnieb love I'
#converting whatever we have into string
>>> repr(42)
'42'
#converting into ASCII
>>> ord('A')
65
#converting integer to binary
>>> bin(13)
'0b1101'
#converting binary to integer
>>> int('1101', 2)
13
|
#!/usr/bin/python
import subprocess
import re
# Function that prompts users for yes or no response
def yes_no(answer):
    """
    Prompt with *answer* until the user gives a yes/no response.

    Accepted: 'yes'/'y' (True), 'no'/'n' (False); an empty reply counts
    as yes. Case-insensitive.
    """
    affirmative = {'yes', 'y'}
    negative = {'no', 'n'}
    while True:
        choice = raw_input(answer).lower()
        if choice in affirmative or choice == "":
            return True
        if choice in negative:
            return False
        print("Please respond with 'yes' or 'no'")
# Function that will write user input to file
def write_line(file, string):
    """Append *string* followed by a newline to the file at path *file*."""
    with open(file, 'a') as the_file:
        the_file.write(string + "\n")
def ip_network(answer, default):
    """
    Prompt for an IP network prefix such as '192.168.0.'.

    A blank reply selects *default*; anything malformed re-prompts.
    """
    while True:
        reply = raw_input(answer)
        if reply == "":
            return default
        if re.match("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}$", reply):
            return reply
        print("Please give the IP in this format: 192.168.0.")
def ip_client(answer, default):
    """
    Prompt for an IP host address (the final octet, range 1-254).

    A blank reply selects *default*; anything malformed re-prompts.
    """
    while True:
        reply = raw_input(answer)
        if re.match("^(([1-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-4]))$", reply):
            return reply
        if reply == "":
            return default
        # Bug fix: the old message suggested "255" as an example, but the
        # regex (correctly, for host addresses) only accepts 1-254.
        print("Please give a host address between 1 and 254")
def ip_full(answer, default):
    """
    Prompt for a full dotted-quad IP address such as '192.168.0.1'.

    A blank reply selects *default*; anything malformed re-prompts.
    """
    while True:
        reply = raw_input(answer)
        if reply == "":
            return default
        if re.match("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$", reply):
            return reply
        print("Please give the full IP address in this format: 192.168.0.1")
def netmask(answer, default):
    """
    Prompt for a netmask such as '255.255.255.0'.

    A blank reply selects *default*; anything malformed re-prompts.
    (Same dotted-quad pattern as ip_full.)
    """
    while True:
        reply = raw_input(answer)
        if reply == "":
            return default
        if re.match("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$", reply):
            return reply
        print("Please give the netmask in this format: 255.255.255.0")
def is_alphanumeric(answer, default):
    """
    Prompt until the reply is alphanumeric; a blank reply selects *default*.
    """
    while True:
        reply = raw_input(answer)
        if reply == "":
            return default
        if reply.isalnum():
            return reply
        print("Please only use alphanumeric characters[abc123]")
def generic(answer, default):
    """
    Ask once and return the reply, substituting *default* for a blank one.
    """
    reply = raw_input(answer)
    return default if reply == "" else reply
def required(answer):
    """
    Re-prompt until the user enters a non-empty response, then return it.
    """
    while True:
        reply = raw_input(answer)
        if reply:
            return reply
        print("Please enter a response")
def is_num(answer, default):
    """
    Prompt until the reply is all digits; a blank reply selects *default*.
    Returns the number as a string, not an int.
    """
    while True:
        reply = raw_input(answer)
        if reply == "":
            return default
        if reply.isdigit():
            return reply
        print("Please enter a number")
def efi_legacy(answer):
    """
    Prompt until the user answers 'efi' or 'legacy' (case-insensitive).
    """
    while True:
        reply = raw_input(answer).lower()
        if reply in ("efi", "legacy"):
            return reply
        print("Please enter efi or legacy")
def compiler(answer):
    """
    Prompt until the user answers 'gnu' or 'llvm' (case-insensitive).
    """
    while True:
        reply = raw_input(answer).lower()
        if reply in ("gnu", "llvm"):
            return reply
        print("Please enter gnu or llvm")
def mpi(answer):
    """
    Prompt until the user picks an MPI interconnect/launcher option.

    :return: one of 'ethernet', 'infiniband', 'opa', or 'pmix'
    """
    while True:
        reply = raw_input(answer).lower()
        if reply in ("ethernet", "infiniband", "opa", "pmix"):
            return reply
        # Bug fix: the old message listed MPI implementations
        # (openmpi/mpich/mvapich), which this prompt does not accept.
        print("Please enter ethernet, infiniband, opa, or pmix")
def default_dev(answer):
    """Ask until the user picks a default toolchain: openmpi, mpich or mvapich."""
    options = ("openmpi", "mpich", "mvapich")
    while True:
        choice = raw_input(answer).lower()
        if choice in options:
            return choice
        print("Please enter openmpi, mpich, or mvapich")
# ---------------------------------------------------------------------------
# Interactive OpenHPC deployment wizard.
#
# Asks a series of questions and generates three artefacts:
#   * 'hosts'            -- the Ansible inventory for the headnode
#   * './group_vars/all' -- variables consumed by the Ansible roles
#   * 'site.yml'         -- the playbook listing the roles to run
#
# The original script duplicated every question/write sequence between the
# "minimal" and "full" branches; the shared sequences are factored into
# helpers below.  Fixes folded in: the "forward logs" answer used to be
# ignored (role always added); the mpich/mvapich dev-env variables were
# appended to site.yml instead of group_vars/all; the BMC and Beegfs
# defaults did not match their prompts; and the locals `compiler`, `mpi`
# and `default_dev` shadowed the prompt helpers of the same name.
# ---------------------------------------------------------------------------

def _write_var(key, value, mode='a'):
    # Write one "key: value" line to the group variables file.  Pass
    # mode='w' for the very first variable of a run to truncate the file.
    with open('./group_vars/all', mode) as the_file:
        the_file.write(key + ": " + value + "\n")

def _add_role(role):
    # Append one role entry to the generated playbook.
    with open('site.yml', 'a') as the_file:
        the_file.write("    - " + role + "\n")

def _ask_common_head():
    # Questions shared by the minimal and the full configuration.
    # Returns the number of compute nodes (needed later for MAC entry).
    # External IP -> Ansible inventory.
    ipaddr_network_ex = ip_network("Please enter external IP network[192.168.0.]: ", "192.168.0.")
    ipaddr_client_ex = ip_client("Please enter external IP client address[1]: ", "1")
    with open('hosts', 'w') as the_file:
        the_file.write('{0}\n{1}\n{2}\n{3}\n{4}{5}{6}\n'.format(
            "all:", "  hosts:", "    headnode:", "      ansible_port: 22",
            "      ansible_host: ", ipaddr_network_ex, ipaddr_client_ex))
    # BOS (base operating system) settings; 'w' starts a fresh vars file.
    ipaddr_network = ip_network("Please enter headnode IP network[192.168.0.]: ", "192.168.0.")
    ipaddr_client = ip_client("Please enter headnode IP client address[1]: ", "1")
    _write_var("sms_ip", ipaddr_network + ipaddr_client, mode='w')
    _write_var("sms_name", generic("Please enter the headnode hostname[headnode]: ", "headnode"))
    with open('site.yml', 'w') as the_file:
        the_file.write('{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n{8}\n{9}\n'.format(
            "# This playbook deploys the whole OpenHPC software stack", "",
            "- name: Deploy OpenHPC", "  hosts: all",
            "  remote_user: afpaget", "  become: yes",
            "  become_method: sudo", "", "  roles:", "    - install_bos"))
    # Enable the OpenHPC repository.
    _write_var("ohpc_repo", generic(
        "Please enter the OpenHPC repo to use[http://build.openhpc.community/OpenHPC:/1.3/CentOS_7/x86_64/ohpc-release-1.3-1.el7.x86_64.rpm]:",
        "http://build.openhpc.community/OpenHPC:/1.3/CentOS_7/x86_64/ohpc-release-1.3-1.el7.x86_64.rpm"))
    _add_role("enable_ohpc_repo")
    # Provisioning (NTP).
    _write_var("ntp_server", generic("Please enter the NTP to use[0.centos.pool.ntp.org]: ", "0.centos.pool.ntp.org"))
    _add_role("add_provision")
    # Resource management / cluster sizing.
    _write_var("c_name", is_alphanumeric("Please enter the name for the compute nodes[compute]: ", "compute"))
    compute_no = is_num("Please enter the number of compute nodes[4]: ", "4")
    _write_var("num_computes", compute_no)
    _write_var("num_sockets", is_num("Please enter the number of sockets[2]: ", "2"))
    _write_var("num_cores", is_num("Please enter the number of cores per socket[8]: ", "8"))
    _write_var("num_threads", is_num("Please enter the number of threads per core[2]: ", "2"))
    _add_role("resource_management")
    return compute_no

def _ask_warewulf():
    # Warewulf provisioning basics on the headnode.
    _write_var("sms_eth_internal", required("Please enter the provisioning interface for the headnode: "))
    _write_var("sms_netmask", netmask("Please enter the netmask for the headnode[255.255.255.0]: ", "255.255.255.0"))
    _add_role("basic_warewulf")

def _ask_bos_image():
    # Compute node image (chroot) plus the OpenHPC component roles.
    _write_var("chroot", generic("Please enter the chroot location[/opt/ohpc/admin/images/centos7.4]: ", "/opt/ohpc/admin/images/centos7.4"))
    _add_role("initial_bos_image")
    _add_role("ohpc_components")

def _ask_mounts():
    # NFS mount points exported to the compute nodes.
    _write_var("home_mount", generic("Please enter the home mount point[/home]: ", "/home"))
    _write_var("opt_mount", generic("Please enter the opt mount point[/opt/ohpc/pub]: ", "/opt/ohpc/pub"))
    _add_role("customise_sys_config")

def _ask_register_nodes(compute_no):
    # Register the compute nodes with Warewulf (one MAC per node).
    _write_var("eth_provision", required("Please enter the compute node provisioning interface: "))
    _write_var("c_ip", ip_network("Please enter the compute IP network[192.168.0.]: ", "192.168.0."))
    _write_var("c_ip_last", ip_client("Please enter the compute IP client start address[2]: ", "2"))
    c_mac = [required("Please enter MAC: ") for _ in range(int(compute_no))]
    with open('./group_vars/all', 'a') as the_file:
        the_file.write("c_mac: \n")
        for mac in c_mac:
            the_file.write("    - " + mac + "\n")
    _add_role("register_nodes_core")

def _ask_bmc():
    # BMC/IPMI details used to power the nodes on.
    # The prompt advertised [192.168.1.] but defaulted to "192.168.0.";
    # the default now matches the prompt.
    _write_var("bmc_host", ip_network("Please enter the BMC network address[192.168.1.]: ", "192.168.1."))
    _write_var("bmc_ip", ip_client("Please enter the BMC client start address[2]: ", "2"))
    _write_var("bmc_username", required("Please enter the BMC username: "))
    _write_var("bmc_password", required("Please enter the BMC password: "))
    _add_role("boot_nodes")

# NOTE(review): yes_no() is assumed to return a bool -- confirm; the old
# code compared its result with `== True`.
minimal = yes_no('Deploy Minimal Configuration? ')
if minimal:
    compute_no = _ask_common_head()
    _ask_warewulf()
    _ask_bos_image()
    _ask_mounts()
    _add_role("import_files_core")
    _add_role("bootstrap_core")
    _add_role("vnfs")
    _ask_register_nodes(compute_no)
    _ask_bmc()
else:
    compute_no = _ask_common_head()
    # Optional Infiniband support on the headnode.
    infiniband_support = yes_no("Add Infiniband Support? ")
    if infiniband_support:
        _add_role("infiniband_support")
        ib_network = ip_network("Please enter the IB network for the headnode[192.168.5.]: ", "192.168.5.")
        ib_client = ip_client("Please enter the IB client address for the headnode[1]: ", "1")
        _write_var("master_ipoib", ib_network + ib_client)
        _write_var("ipoib_netmask", netmask("Please enter the IB netmask[255.255.255.0]: ", "255.255.255.0"))
    # Optional Omni-Path base support.
    if yes_no("Add omnipath base? "):
        _add_role("opa_support_base")
    _ask_warewulf()
    _ask_bos_image()
    _ask_mounts()
    if yes_no("Add Infiniband Support to compute nodes? "):
        _add_role("infiniband_drivers_comp")
    if yes_no("Increase locked memory limit? "):
        _add_role("mem_limit")
    if yes_no("Enable SSH control via resource manager?"):
        _add_role("ssh_control")
    # Optional parallel file systems.
    if yes_no("Add Beegfs to cluster? "):
        _write_var("beegfs_repo", generic(
            "Please enter Beegfs repo[https://www.beegfs.io/release/latest-stable/dists/beegfs-rhel7.repo]: ",
            "https://www.beegfs.io/release/latest-stable/dists/beegfs-rhel7.repo"))
        # The prompt advertises [192.168.0.254]; the default now agrees
        # (it used to be "192.168.0.1").
        _write_var("systemtd_host", ip_full("Please enter the Beegfs IP address[192.168.0.254]: ", "192.168.0.254"))
        _add_role("beegfs")
    if yes_no("Add Lustre to cluster? "):
        lustre_mount = generic("Please enter the Lustre mount point[/mnt/lustre]: ", "/mnt/lustre")
        lustre_ip = ip_full("Please enter the Lustre IP[192.168.0.254]: ", "192.168.0.254")
        _write_var("mgs_fs_name", lustre_ip + lustre_mount)
        _add_role("lustre")
    # Bug fix: the answer used to be ignored and the role always added.
    if yes_no("Forward logs from compute nodes to headnode? "):
        _add_role("forward_logs")
    if yes_no("Add Nagios Monitoring to cluster? "):
        _write_var("nagios_web_user", is_alphanumeric("Please enter the Nagios Admin username[admin]: ", "admin"))
        _write_var("nagios_web_password", is_alphanumeric("Please enter the Nagios Admin password[password]: ", "password"))
        _add_role("nagios")
    # Simple opt-in roles with no extra variables.
    for question, role in (
            ("Add Ganglia to cluster? ", "ganglia"),
            ("Add Clustershell to cluster? ", "clustershell"),
            ("Add Mrsh to cluster? ", "mrsh"),
            ("Add genders to cluster? ", "genders"),
            ("Add Conman to cluster? ", "conman")):
        if yes_no(question):
            _add_role(role)
    _add_role("import_files_core")
    if infiniband_support:
        _add_role("import_files_ib")
    if yes_no("Add additional kernel arguments to bootstrap? "):
        _add_role("bootstrap_updates")
    if yes_no("Add Singularity arguments to kernel? "):
        _add_role("bootstrap_singularity")
    _add_role("bootstrap_core")
    _add_role("vnfs")
    _ask_register_nodes(compute_no)
    if infiniband_support:
        _write_var("c_ipoib", ip_network("Please enter the compute IPoIB network[192.168.5.]: ", "192.168.5."))
        _write_var("c_ipoib_last", ip_client("Please enter the compute IPoIB client start address[2]: ", "2"))
    if yes_no("Set compute interface names? "):
        _add_role("register_nodes_predictable")
    if yes_no("Deploy nodes in stateful configuration? "):
        # efi_legacy() only ever returns "efi" or "legacy".
        if efi_legacy("Deploy nodes as efi or legacy?") == "efi":
            _add_role("stateful_efi")
        else:
            _add_role("stateful_legacy")
    _ask_bmc()
    # Development tools (compilers, MPI stacks, performance tools).
    if yes_no("Install Development Tools? "):
        _add_role("development_tools")
        compiler_choice = compiler("Which compiler should be installed: gnu or llvm? ")
        if compiler_choice == "gnu":
            _write_var("gnu_compiler", "true")
        else:
            _write_var("llvm_compiler", "true")
        _add_role("compilers")
        mpi_choice = mpi("Which MPI stack should be installed: ethernet, infiniband, opa, or pmix? ")
        _write_var({"ethernet": "ethernet_mpi", "infiniband": "infini_mpi",
                    "opa": "psm2_mpi", "pmix": "pmix_mpi"}[mpi_choice], "true")
        _add_role("mpi")
        dev_env = default_dev("Which toolchain should be installed: openmpi, mpich, or mvapich?")
        # Bug fix: mpich/mvapich used to be appended to site.yml.
        _write_var({"openmpi": "openmpi3_dev_env", "mpich": "mpich_dev_env",
                    "mvapich": "mvapich_dev_env"}[dev_env], "true")
        _add_role("default_dev_env")
        if yes_no("Install performance tools? "):
            _add_role("performance_tools")
    _add_role("resource_startup")
print("Please run \"ansible-playbook -i hosts site.yml -vv\" to deploy the cluster")
|
#!/usr/bin/env python
#
# Show credentials if any
file=open('.credentials')
user = ""
passwd = ""
for line in file:
if line.startswith('username'):
user = line.split()[-1]
if line.startswith('password'):
passwd = line.split()[-1]
if user and passwd:
print "User:", user, "has password:", passwd
|
"""
A small `pdoc` example using Mermaid diagrams.
The relationship for Pet and Dog follows the UML diagram:
```mermaid
classDiagram
class Dog~Pet~{
-__init__(str name) None
+bark(bool loud) None
}
Pet <|-- Dog
Pet : +str name
Pet : +List[Pet] friends
```
"""
class Pet:
    name: str
    """The name of our pet."""

    friends: list["Pet"]
    """The friends of our pet."""

    def __init__(self, name: str):
        """Create a pet with the given name and an empty friend list."""
        self.name = name
        self.friends = list()
class Dog(Pet):
    """🐕"""

    def bark(self, loud: bool = True):
        """*woof*"""
        # Intentionally a stub: this class only illustrates pdoc rendering
        # (see the module docstring's Mermaid diagram); `loud` is unused.
|
import random
import math
# Number-guessing game: the player gets a fixed number of attempts to find
# a random integer in the user-supplied [Lower, Upper] range.
Upper = int(input("introduzca rango superior: "))
Lower = int(input("introduzca rango inferior: "))
Num_a_adivinar = random.randint(Lower, Upper)
intentos = int(input("cuantas oportunidades de adivinar necesitas?: "))
for _ in range(intentos):
    adivinar = int(input("Adivina el numero: "))
    if Num_a_adivinar == adivinar:
        print("****Felicidades, Ganaste****")
        break
    elif Num_a_adivinar > adivinar:
        print("Intenta con un numero mas alto")
    else:
        print("Intenta con un numero mas bajo")
else:
    # Bug fix: the old version checked `count >= intentos` after the loop,
    # so winning on the very last attempt still printed the losing message.
    # The for/else only runs when the loop finishes without a break.
    print("Perdiste")
    print("el numero era ", Num_a_adivinar)
|
"""Search for an email address given a fragment of a job description."""
# TODO: Add the required import for the csv module to support the
# parsing of the contact database stored in the provided file
def search_for_email_given_job(job_description: str, contacts: str):
    """Search for and return contact row(s) whose job matches a description.

    Args:
        job_description: Fragment of a job title to look for.
        contacts: Raw contents of the comma-separated contact database,
            one contact per line (see inputs/contacts.txt for the format).

    Returns:
        A list of the matching contact rows, each a list of string fields.
    """
    # Fix for the stated TODO: the csv module was used below but never
    # imported.  A local import keeps this function self-contained.
    import csv
    contacts_list = []
    # Iterate through each line of the database and extract the current job.
    for contact_line in csv.reader(
        contacts.splitlines(),
        quotechar='"',
        delimiter=",",
        quoting=csv.QUOTE_ALL,
        skipinitialspace=True,
    ):
        # NOTE(review): the column layout of inputs/contacts.txt is not
        # visible here; this assumes the job description is the LAST
        # field on each line -- TODO confirm against the actual file.
        if contact_line and job_description in contact_line[-1]:
            contacts_list.append(contact_line)
    # Return the contacts whose job description matches.
    return contacts_list
|
"""
Implementation of address-related features.
"""
from ipaddress import ip_address
import zmq
from .common import unique_identifier
def address_to_host_port(addr):
    """
    Try to convert an address to a (host, port) tuple.

    Parameters
    ----------
    addr : str, SocketAddress

    Returns
    -------
    tuple
        A (host, port) tuple formed with the corresponding data.
    """
    if addr is None:
        return (None, None)
    # Well-defined types are handled by the helper, which raises TypeError
    # for anything it does not recognize.
    try:
        return _common_address_to_host_port(addr)
    except TypeError:
        pass
    # Last resort: duck-typing on `host`/`port` attributes.
    if all(hasattr(addr, attr) for attr in ('host', 'port')):
        return (addr.host, addr.port)
    raise ValueError('Unsupported address type "%s"!' % type(addr))
def _common_address_to_host_port(addr):
    """
    Convert a well-known address type to a (host, port) tuple.

    This helper only understands well-known types and raises TypeError
    otherwise; use `address_to_host_port` for the general case.

    Parameters
    ----------
    addr : str, SocketAddress, AgentAddress

    Returns
    -------
    tuple
        A (host, port) tuple formed with the corresponding data.
    """
    if isinstance(addr, SocketAddress):
        return (addr.host, addr.port)
    if isinstance(addr, AgentAddress):
        inner = addr.address
        return (inner.host, inner.port)
    if isinstance(addr, str):
        parts = addr.split(':')
        # "host" alone has no port; "host:1234" (or longer) takes the
        # text before the first colon as host and the last field as port.
        port = None if len(parts) == 1 else int(parts[-1])
        return (parts[0], port)
    raise TypeError('Unsupported address type "%s"!' % type(addr))
def guess_kind(kind):
    """
    Guess if a kind string is an AgentAddressKind or AgentChannelKind.

    Parameters
    ----------
    kind : str
        The AgentAddressKind or AgentChannelKind in string format.

    Returns
    ----------
    AgentAddressKind or AgentChannelKind
        The actual kind type.
    """
    try:
        return AgentAddressKind(kind)
    except ValueError:
        # Not an address kind; let AgentChannelKind validate it (and
        # propagate its ValueError when the string matches neither).
        return AgentChannelKind(kind)
class AgentAddressTransport(str):
    """
    Agent's address transport class. It can be 'tcp', 'ipc' or 'inproc'.
    """

    def __new__(cls, value):
        if value in ('tcp', 'ipc', 'inproc'):
            return super().__new__(cls, value)
        raise ValueError('Invalid address transport "%s"!' % value)
class AgentAddressRole(str):
    """
    Agent's address role class. It can either be ``'server'`` or ``'client'``.
    """

    def __new__(cls, value):
        if value in ('server', 'client'):
            return super().__new__(cls, value)
        raise ValueError('Invalid address role "%s"!' % value)

    def twin(self):
        """
        Return the complementary role: ``'client'`` for ``'server'`` and
        vice versa.

        Returns
        -------
        AgentAddressRole
            The twin role.
        """
        other = 'client' if self == 'server' else 'server'
        return self.__class__(other)
class AgentAddressKind(str):
    """
    Agent's address kind class.

    This kind represents the communication pattern being used by the agent
    address: REP, PULL, PUB...
    """

    TWIN = {
        'REQ': 'REP',
        'REP': 'REQ',
        'PUSH': 'PULL',
        'PULL': 'PUSH',
        'PUB': 'SUB',
        'SUB': 'PUB',
        'PULL_SYNC_PUB': 'PUSH_SYNC_SUB',
        'PUSH_SYNC_SUB': 'PULL_SYNC_PUB',
    }
    ZMQ_KIND_CONVERSION = {
        'REQ': zmq.REQ,
        'REP': zmq.REP,
        'PUSH': zmq.PUSH,
        'PULL': zmq.PULL,
        'PUB': zmq.PUB,
        'SUB': zmq.SUB,
        'PULL_SYNC_PUB': zmq.PULL,
        'PUSH_SYNC_SUB': zmq.PUSH,
    }
    # Kinds that receive and process incoming messages, hence need a handler.
    REQUIRE_HANDLER = ('REP', 'PULL', 'SUB', 'PULL_SYNC_PUB')

    def __new__(cls, kind):
        if kind not in cls.TWIN:
            raise ValueError('Invalid address kind "%s"!' % kind)
        return super().__new__(cls, kind)

    def zmq(self):
        """
        Return the equivalent ZeroMQ socket kind.

        Returns
        -------
        int
        """
        return self.ZMQ_KIND_CONVERSION[self]

    def requires_handler(self):
        """
        Return whether this kind requires a handler for incoming messages
        (i.e. 'REP', 'PULL', 'SUB'...).

        Returns
        -------
        bool
        """
        return self in self.REQUIRE_HANDLER

    def twin(self):
        """
        Return the complementary kind: REQ<->REP, PUSH<->PULL, PUB<->SUB...

        Returns
        -------
        AgentAddressKind
        """
        return self.__class__(self.TWIN[self])
class AgentAddressSerializer(str):
    """
    Agent's address serializer class.

    Each communication channel will have a serializer.  For ``raw`` message
    passing everything must already be bytes, and the programmer is the one
    responsible for the conversion.

    Parameters
    ----------
    serializer_type : str
        Serializer type (i.e.: 'raw', 'pickle', 'cloudpickle', 'dill', 'json').
    """

    SERIALIZER_SIMPLE = ('raw',)
    SERIALIZER_SEPARATOR = ('pickle', 'cloudpickle', 'dill', 'json')

    def __new__(cls, value):
        known = cls.SERIALIZER_SIMPLE + cls.SERIALIZER_SEPARATOR
        if value not in known:
            raise ValueError('Invalid serializer type %s!' % value)
        return super().__new__(cls, value)

    def __init__(self, value):
        # Separator-based serializers need a delimiter between frames.
        self.requires_separator = value in self.SERIALIZER_SEPARATOR
class SocketAddress(object):
    """
    Socket address information consisting on the host and port.

    Parameters
    ----------
    host : str, ipaddress.IPv4Address
        IP address.
    port : int
        Port number.

    Attributes
    ----------
    host : str
        Normalized IP address.
    port : int
        Port number.
    """

    def __init__(self, host, port):
        assert isinstance(
            port, int
        ), 'Incorrect parameter port on SocketAddress; expecting type int.'
        # ip_address() validates the host and normalizes its representation.
        self.host = str(ip_address(host))
        self.port = port

    def __repr__(self):
        """Return the '<host>:<port>' representation of this address."""
        return '%s:%s' % (self.host, self.port)

    def __hash__(self):
        return hash(self.host) ^ hash(self.port)

    def __eq__(self, other):
        if not isinstance(other, SocketAddress):
            return False
        return (self.host, self.port) == (other.host, other.port)
class AgentAddress:
    """
    Agent address information consisting on the transport protocol, address,
    kind and role.

    Parameters
    ----------
    transport : str, AgentAddressTransport
        Agent transport protocol.
    address : str
        Agent address.
    kind : str, AgentAddressKind
        Agent kind.
    role : str, AgentAddressRole
        Agent role.
    serializer : str
        Agent serializer type.

    Attributes
    ----------
    transport : AgentAddressTransport
    address : str, SocketAddress
    kind : AgentAddressKind
    role : AgentAddressRole
    serializer : AgentAddressSerializer
    """

    def __init__(self, transport, address, kind, role, serializer):
        # TCP addresses are normalized into a SocketAddress.
        if transport == 'tcp':
            address = SocketAddress(*address_to_host_port(address))
        self.transport = AgentAddressTransport(transport)
        self.address = address
        self.kind = AgentAddressKind(kind)
        self.role = AgentAddressRole(role)
        self.serializer = AgentAddressSerializer(serializer)

    def __repr__(self):
        """Return the string representation of the AgentAddress."""
        values = (self.transport, self.address, self.kind, self.role,
                  self.serializer)
        return 'AgentAddress(%s, %s, %s, %s, %s)' % values

    def __hash__(self):
        result = hash(self.transport)
        for part in (self.address, self.kind, self.role, self.serializer):
            result ^= hash(part)
        return result

    def __eq__(self, other):
        if not isinstance(other, AgentAddress):
            return False
        return all((
            self.transport == other.transport,
            self.address == other.address,
            self.kind == other.kind,
            self.role == other.role,
            self.serializer == other.serializer,
        ))

    def twin(self):
        """
        Return the twin address of the current one.

        The transport, address and serializer are kept; `kind` and `role`
        change to their corresponding twins.

        Returns
        -------
        AgentAddress
            The twin address of the current one.
        """
        return self.__class__(
            self.transport,
            self.address,
            self.kind.twin(),
            self.role.twin(),
            self.serializer,
        )
class AgentChannelKind(str):
    """
    Agent's channel kind class.

    This kind represents the communication pattern being used by the agent
    channel: ASYNC_REP, STREAM...
    """

    TWIN = {
        'ASYNC_REP': 'ASYNC_REQ',
        'ASYNC_REQ': 'ASYNC_REP',
        'SYNC_PUB': 'SYNC_SUB',
        'SYNC_SUB': 'SYNC_PUB',
    }

    def __new__(cls, kind):
        if kind in cls.TWIN:
            return super().__new__(cls, kind)
        raise ValueError('Invalid channel kind "%s"!' % kind)

    def twin(self):
        """
        Return the complementary kind (e.g. ASYNC_REQ for ASYNC_REP).

        Returns
        -------
        AgentChannelKind
        """
        return self.__class__(self.TWIN[self])
class AgentChannel:
    """
    Communication channel between two agent addresses.

    A channel bundles a receiver-side and a sender-side AgentAddress
    (either may be None) under a single AgentChannelKind; transport and
    serializer are taken from whichever side is present.

    Parameters
    ----------
    kind : AgentChannelKind
        Communication pattern of the channel.
    receiver : AgentAddress
        Receiving-side address (may be None).
    sender : AgentAddress
        Sending-side address (may be None).
    twin_uuid
        UUID of this channel's twin, if already known.
    """

    def __init__(self, kind, receiver, sender, twin_uuid=None):
        self.kind = AgentChannelKind(kind)
        self.receiver = receiver
        self.sender = sender
        # Prefer the receiver's transport/serializer when both sides exist.
        side = receiver if receiver else sender
        self.transport = side.transport
        self.serializer = side.serializer
        self.uuid = unique_identifier()
        self.twin_uuid = twin_uuid
        # Link each present address back to this channel.
        if sender:
            self.sender.channel = self
        if receiver:
            self.receiver.channel = self

    def __repr__(self):
        """
        Return the string representation of the AgentChannel.

        Returns
        -------
        representation : str
        """
        return 'AgentChannel(kind=%s, receiver=%s, sender=%s)' % (
            self.kind,
            self.receiver,
            self.sender,
        )

    def __hash__(self):
        # Combine the three identifying fields, mirroring __eq__.
        return hash(self.kind) ^ hash(self.receiver) ^ hash(self.sender)

    def __eq__(self, other):
        if not isinstance(other, AgentChannel):
            return False
        return (
            self.kind == other.kind
            and self.receiver == other.receiver
            and self.sender == other.sender
        )

    def twin(self):
        """
        Return the twin channel: twin kind, with the sender/receiver
        addresses swapped and twinned themselves.

        Returns
        -------
        AgentChannel
            The twin channel.
        """
        return self.__class__(
            kind=self.kind.twin(),
            receiver=self.sender.twin() if self.sender is not None else None,
            sender=self.receiver.twin() if self.receiver is not None else None,
            twin_uuid=self.uuid,
        )
|
import numpy as np
class MLP:
    """Multi-layer perceptron regressor trained with per-sample (online)
    back-propagation and optional momentum.

    The network has ``len(hidden_layer_sizes)`` hidden layers using
    ``activation`` and a single linear output unit; each layer carries an
    implicit bias input.
    """

    def __init__(self, hidden_layer_sizes=(5,), activation=np.tanh, learning_rate=0.1, max_iter=2000, tol=0.05,
                 momentum=0.9, random_seed=1985):
        """
        Args:
            hidden_layer_sizes (tuple): Number of neurons per hidden layer. Default: (5,).
            activation (callable): Hidden-layer activation function. Default: np.tanh.
            learning_rate (float): Step size used in back-propagation. Default: 0.1.
            max_iter (int): Maximum number of training epochs. Default: 2000.
            tol (float): Stop once the epoch MSE drops below this. Default: 0.05.
            momentum (float or None): Momentum coefficient; None disables it. Default: 0.9.
            random_seed (int): Seed for weight init and shuffling. Default: 1985.
        """
        self.hidden_layer_sizes = hidden_layer_sizes
        self.activation = activation
        self.learning_rate = learning_rate
        self.max_iter = max_iter
        self.tol = tol
        self.momentum = momentum
        self.random_seed = random_seed
        self.weights = None          # list of weight matrices, one per layer
        self._product_sums = None    # pre-activation sums from the last forward pass
        self._outputs = None         # per-layer outputs (bias term appended)
        self._derivatives = None     # weight gradients, output layer first
        self._errors = None          # per-layer error signals, output layer first
        self._inputs = None          # last input sample, with bias appended
        self._velocity = None        # momentum buffers, initialised lazily

    @staticmethod
    def d_tanh(x):
        """Derivative function of tanh."""
        return 1 - (np.tanh(x)) ** 2

    def _initiate_weights(self, input_dimension):
        """Initialise uniform random weights in [-0.5, 0.5).

        The extra row (+1) in each matrix is the bias weight.
        """
        weights = []
        for layer in range(len(self.hidden_layer_sizes) + 1):
            if layer == 0:
                # input -> first hidden layer
                weights.append(np.random.rand(input_dimension + 1, self.hidden_layer_sizes[0]) - 0.5)
            elif layer == len(self.hidden_layer_sizes):
                # last hidden layer -> single output unit
                weights.append(np.random.rand(self.hidden_layer_sizes[-1] + 1, 1) - 0.5)
            else:
                # hidden layer -> next hidden layer
                weights.append(np.random.rand(self.hidden_layer_sizes[layer - 1] + 1, self.hidden_layer_sizes[layer]) - 0.5)
        self.weights = weights

    def _feed_forward(self, x):
        """Propagate one sample from input to output, caching intermediates.

        Args:
            x (array): One sample's feature vector.
        """
        self._outputs = []
        self._product_sums = []
        self._inputs = np.append(x, 1).reshape(1, -1)  # append bias input
        for layer in range(len(self.hidden_layer_sizes) + 1):
            source = self._inputs if layer == 0 else self._outputs[layer - 1]
            product_sum = source @ self.weights[layer]
            self._product_sums.append(product_sum)
            if layer == len(self.hidden_layer_sizes):
                output = product_sum  # linear output unit
            else:
                # Activate and append the bias input for the next layer.
                output = np.append(self.activation(product_sum), 1).reshape(1, -1)
            self._outputs.append(output)

    def predict(self, X):
        """Return a list with one scalar prediction per row of X."""
        y_predict = []
        for observation in range(X.shape[0]):
            self._feed_forward(X[observation, :])
            # BUGFIX: np.asscalar was removed in NumPy 1.23; ndarray.item()
            # is the documented replacement.
            y_predict.append(self._outputs[-1].item())
        return y_predict

    @staticmethod
    def _calculate_mse(y_predict, y_true):
        """Return 0.5 * mean squared error between predictions and targets.

        Args:
            y_predict (sequence): Predicted values.
            y_true (array): True target values.
        """
        mse = 0.5 * np.dot(y_predict - y_true, y_predict - y_true) / len(y_true)
        return mse

    def _back_propagation(self, y):
        """Compute error signals and weight gradients for one sample.

        Fills self._errors / self._derivatives from the output layer backwards
        (index 0 holds the output layer's entries).

        Args:
            y: True target for the sample most recently fed forward.
        """
        y_predict = self._outputs[-1]
        self._errors = []
        self._derivatives = []
        for layer in range(len(self.hidden_layer_sizes), -1, -1):
            if layer == len(self.hidden_layer_sizes):
                error = (y_predict - y)  # linear output: error is the residual
            else:
                # Back-propagate through the activation; drop the bias row of
                # the downstream weight matrix ([:-1, :]) since bias has no input.
                error = self.d_tanh(self._product_sums[layer]) * \
                    (self._errors[len(self.hidden_layer_sizes) - layer - 1] @ self.weights[layer + 1][:-1, :].T)
            if layer == 0:
                derivative = self._inputs.T @ error
            else:
                derivative = self._outputs[layer - 1].T @ error
            self._derivatives.append(derivative)
            self._errors.append(error)

    def _update_weights(self):
        """Apply the gradients from the last back-propagation step.

        BUGFIX: the momentum velocity used to be re-created (zeroed) on every
        call, which silenced the momentum term entirely; it is now initialised
        once and accumulated across updates.
        """
        if self.momentum is not None:
            if self._velocity is None:
                self._velocity = [np.zeros_like(w) for w in self.weights]
            for layer in range(len(self.hidden_layer_sizes) + 1):
                self._velocity[layer] = self.momentum * self._velocity[layer] \
                    - self.learning_rate * self._derivatives[-1 - layer]
                self.weights[layer] += self._velocity[layer]
        else:
            for layer in range(len(self.hidden_layer_sizes) + 1):
                self.weights[layer] -= self.learning_rate * self._derivatives[-1 - layer]

    def _should_stop(self, mse, iters):
        """True once the epoch MSE beats tol or the epoch budget is exhausted."""
        if mse is None:
            return False  # no epoch completed yet
        if mse < self.tol:
            return True
        if iters > self.max_iter:
            return True
        return False

    def fit(self, X, y=None):
        """Train on (X, y) until the epoch MSE drops below tol or max_iter epochs.

        Args:
            X (array): Sample features, one row per sample.
            y (array): Target values.
        """
        np.random.seed(self.random_seed)
        self._initiate_weights(X.shape[1])
        iters = 0
        mse = None
        while not self._should_stop(mse, iters):
            iters += 1
            for observation in range(X.shape[0]):
                x_s, y_s = X[observation, :], y[observation]
                self._feed_forward(x_s)
                self._back_propagation(y_s)
                self._update_weights()
            # Reshuffle each epoch so the online updates see a new order.
            indexes = np.random.permutation(X.shape[0])
            X, y = X[indexes, :], y[indexes]
            y_predict = self.predict(X)
            mse = self._calculate_mse(y_predict, y)
            print(iters, '-iters, mse: ', mse, 'predict: ', y_predict)
if __name__ == "__main__":
    import numpy as np
    # XOR-style toy problem: labels -0.5 / 0.5 over the four corner points.
    testobject = MLP(hidden_layer_sizes=(2, ), tol=0.05, max_iter=5000,
                     random_seed=315, momentum=0.9)
    X = np.array([[-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5], [0.5, 0.5]])
    y = np.array([-0.5, 0.5, 0.5, -0.5])
    testobject.fit(X, y)
|
from array import *
def runRecursive(array):
    """Entry point: sort each row of `array` and concatenate them recursively."""
    return runRecursiveHelper(array, 0)
def runRecursiveHelper(array, index):
    """Return the rows of `array` from `index` on, each sorted, concatenated
    into one flat list.

    BUGFIX: the original returned only the first row for two-row inputs and
    referenced an undefined name `run` in its final branch (NameError); the
    recursion now handles any number of rows uniformly.
    """
    # Base case: past the last row (also covers an empty outer list).
    if index >= len(array):
        return []
    # Sort the current row, then recurse on the remainder.
    return sorted(array[index]) + runRecursiveHelper(array, index + 1)
def runIterative():
    """Sort each row of a hard-coded 2D list and concatenate them (Python 2:
    print statements)."""
    array = [[3, 2, 1], [4, 6, 5], [], [9, 7, 8]]
    print array
    newArray = []
    for row in array:
        if not row:
            # NOTE(review): `pass` is a no-op here; an empty row falls through
            # to sorted(row) anyway (harmless, since sorted([]) == []).
            pass
        temp = sorted(row)
        newArray += temp
    print newArray
def main():
    """Driver: run the iterative flatten-and-sort demo (Python 2 source).

    The triple-quoted block below is scratch/exploration code kept by the
    original author; it is a dead string expression, not executed.
    """
    runIterative()
    '''
    array = [[3, 2, 1], [4, 6, 5], [], [9, 7, 8]]
    print array
    print "Array[0]"
    print array[0]
    print "Array[1]"
    print array[1]
    print "Array[2]"
    print array[2]
    print "Array[3]"
    print array[3]
    print ""
    tempArray = []
    print "Array[0] sorted"
    tempArray = sorted(array[0])
    print tempArray
    print "Array[1] sorted"
    tempArray = sorted(array[1])
    print tempArray
    print "Array[2] sorted"
    tempArray = sorted(array[2])
    print tempArray
    print "Array[3] sorted"
    tempArray = sorted(array[3])
    print tempArray
    print ""
    tempArray = sorted(array[0]) + sorted(array[1]) + sorted(array[2]) + sorted(array[3])
    print tempArray
    #runRecursive(array)
    '''

if __name__ == "__main__":
    main()
|
def slices(source, length):
    """Return every contiguous substring of `source` with the given `length`.

    Raises:
        ValueError: if `source` is empty, `length` < 1, or `length` exceeds
            len(source).
    """
    if source == "":
        raise ValueError("Source series cannot be blank.")
    elif length < 1:
        # BUGFIX: the original only rejected negative lengths, so length == 0
        # slipped through despite the error message promising a minimum of 1.
        raise ValueError("Length of slices cannot be less than 1.")
    elif length > len(source):
        raise ValueError("Source length needs to be longer than your requested slice.")
    source_length = len(source)
    return [
        source[index : index + length] for index in range(source_length - length + 1)
    ]
print(slices("123456", 3))
|
class Person:
    """Simple record of a person with a mutable cash balance."""

    def __init__(self, _name: str, _age: int, _gender: str, _money: float):
        self.name: str = _name
        self.age: int = _age
        self.gender: str = _gender
        self.money: float = _money

    def get_name(self) -> str:
        return self.name

    def get_age(self) -> int:
        return self.age

    def get_gender(self) -> str:
        return self.gender

    def pay_money(self, pay_amount: float) -> float:
        """Deduct `pay_amount` and return the new balance.

        BUGFIX: the original was annotated `-> float` but returned None.
        """
        self.money -= pay_amount
        return self.money

    def recieve_money(self, rec_amount: float) -> float:
        """Add `rec_amount` and return the new balance.

        Misspelled name kept for backward compatibility; `receive_money`
        below is the correctly spelled alias.
        """
        self.money += rec_amount
        return self.money

    # Correctly spelled alias for new callers.
    receive_money = recieve_money
'''
Created on Feb 22, 2016
@author: MADHUSUDAN
'''
def gcd(m, n):
    """Greatest common divisor of m and n via the Euclidean algorithm
    (assumes n != 0)."""
    while m % n != 0:
        m, n = n, m % n
    return n
class Fraction(object):
    '''
    Abstract-data-type demo: a rational number stored as numerator/denominator,
    with arithmetic and comparison operators overloaded (Python 2 source).
    '''
    def __init__(self, top,bottom):
        '''
        Constructor: defines how the data objects are created.
        top is the numerator, bottom the denominator.
        '''
        self.num=top
        self.den=bottom
    def show(self):
        # Python 2 print statement: "num / den".
        print self.num,"/",self.den
    def __str__(self): # override str() so the fraction prints as "num/den"
        return str(self.num)+"/"+str(self.den)
    def __add__(self,other_fraction):
        # Cross-multiply, then reduce by the gcd of the result.
        new_num = self.num * other_fraction.den + self.den * other_fraction.num
        new_den = self.den * other_fraction.den
        common=gcd(new_num,new_den)
        return Fraction(new_num/common,new_den/common)
    def __sub__(self,other_fraction):
        # Same as __add__, with the numerator difference.
        new_num = self.num * other_fraction.den - self.den * other_fraction.num
        new_den = self.den * other_fraction.den
        common=gcd(new_num,new_den)
        return Fraction(new_num/common,new_den/common)
    def __mul__(self,other_fraction):
        # Multiply numerators and denominators, then reduce.
        new_num = self.num * other_fraction.num
        new_den = self.den * other_fraction.den
        common=gcd(new_num,new_den)
        return Fraction(new_num/common,new_den/common)
    def __div__(self,other_fraction):
        # Python 2 division hook: multiply by the reciprocal, then reduce.
        new_num = self.num * other_fraction.den
        new_den = self.den * other_fraction.num
        common=gcd(new_num,new_den)
        return Fraction(new_num/common,new_den/common)
    def __eq__(self,other_function):
        # Compare by cross-multiplication (works for unreduced fractions).
        new_val1 = self.num * other_function.den
        new_val2 = self.den * other_function.num
        return new_val1==new_val2
    def __gt__(self,other_function):
        new_val1 = self.num * other_function.den
        new_val2 = self.den * other_function.num
        return new_val1 > new_val2
    def __lt__(self,other_function):
        new_val1 = self.num * other_function.den
        new_val2 = self.den * other_function.num
        return new_val1 < new_val2
''''Creating the instance of the Fraction class i.e objects and constructor is invoked'''
my_fraction_1 = Fraction(1,2)
my_fraction_2 = Fraction(1,4)
#print my_fraction_1 # without __str__ this would only show the object's address
#my_fraction_1.show()
# Exercise the overloaded operators (Python 2 print statements).
print my_fraction_1 + my_fraction_2
if my_fraction_1==my_fraction_2:
    print "Works :D"
print my_fraction_1 < my_fraction_2
print my_fraction_1 > my_fraction_2
print my_fraction_1 * my_fraction_2
print my_fraction_1 / my_fraction_2
#print 12 % 14
|
from __future__ import division
# Newtons method to find root
def sqtr_n(n):
    """Approximate sqrt(n) with 20 Newton-Raphson iterations.

    BUGFIX: the original started the iteration at n/2, so n == 0 divided by
    zero on the first step; zero and negative inputs are now handled
    explicitly.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("cannot take the square root of a negative number")
    if n == 0:
        return 0
    root = n / 2
    for _ in range(20):
        # Newton step for f(x) = x^2 - n: x <- (x + n/x) / 2.
        root = 1 / 2 * ((root) + (n / root))
    return root
print sqtr_n(4) |
#This file contains all conversion functions we will need for this project
#Summary: decimal -> binary string (no '0b' prefix)
#Precondition: dVal is a non-negative integer
#Postcondition: returns the binary representation of dVal as a string
def convertToBinary(dVal):
    return format(dVal, "b")
#Summary: character -> its ASCII/Unicode code point
#Precondition: ch is a single character
#Postcondition: returns the integer code point of ch
def convertToAsci(ch):
    return ord(ch)
#Summary: binary string -> decimal integer
#Precondition: bVal is a string of '0'/'1' digits
#Postcondition: returns the integer value of bVal interpreted in base 2
def convertToDecimal(bVal):
    return int(bVal, 2)
#Summary: returns the binary number needed with all 0
# Precompute the bit-length of the sample value 10 ("1010" -> 4).
lengthx=len(convertToBinary(10))
def checkEight(lengthx, binNum=""):
    """Left-pad `binNum` with zeros so the total width reaches 8 bits.

    Args:
        lengthx: current bit-length of the binary string.
        binNum: the binary string to pad (new optional parameter).

    BUGFIX: the original read `result` and `binNum` before ever assigning
    them (NameError on every call); `result` is now initialised and the
    binary string is passed in explicitly.
    """
    neededZeros = 8 - lengthx
    result = ""
    for i in range(0, neededZeros):
        result = result + '0'
    binNumber = result + binNum
    return binNumber
|
def binery_search(kumpulan, target):
    """Classic binary search over the sorted sequence `kumpulan`.

    Returns the index of `target`, or -1 when it is absent.
    (Variable names in the original are Indonesian: kiri/kanan/tengah =
    left/right/middle.)
    """
    lo, hi = 0, len(kumpulan) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = kumpulan[mid]
        if value == target:
            return mid
        if value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
# Sample sorted data for exercising binery_search.
d = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
def towerofhanoi(n, a, b, c):
    """Print the moves (source target) that carry n disks from peg `a` to
    peg `c`, using `b` as the spare peg."""
    if n == 1:
        print(a, c)  # a single disk moves directly
    else:
        towerofhanoi(n - 1, a, c, b)  # park n-1 disks on the spare peg
        print(a, c)                   # move the largest disk
        towerofhanoi(n - 1, b, a, c)  # bring the n-1 disks back on top
# Driver: read the disk count; pegs are a (source), b (spare), c (target).
n=int(input())
towerofhanoi(n, 'a', 'b', 'c')
## Tricky question: output as follows
## 1
## 212
## 32123
## 4321234
n = int(input())
i = 1
while(i<=n):
    # leading spaces right-align the row
    space = n - i
    while (space > 0):
        print(end=" ")
        space = space - 1
    # decreasing run: i, i-1, ..., 1 (original comment mislabeled this "increasing")
    j=i
    while(j>0):
        print(j,end="")
        j= j-1
    # increasing run: 2, 3, ..., i
    j=2
    while (j <= i):
        print(j, end="")
        j = j + 1
    print()
    i=i+1
# Pattern: upper half prints digits i..n indented by (i-1) spaces; the lower
# half mirrors it back out.
n = int(input())
for i in range(1,n+1,1):
    for j in range(1, i, 1):
        print(' ',end="")
    for l in range(i, n+1, 1):
        print(l,end="")
    print()
for i in range(1,n,1):
    for j in range(n-i, 1, -1):
        print(' ',end="")
    for l in range(n-i, n+1, 1):
        print(l,end="")
    print()
|
#simple example:
i=1
while i<10:
    print(i)
    i=i+1
else:
    # while-else: the else body runs once when the condition becomes false
    print("this will be printeed at the end only once")
#complex example with Break as well:
i=1
while i<10:
    if i == 5:
        break
    print(i)
    i=i+1
else:
    print("i has reached 5 !!") ## not executed: break skips the loop's else clause
#if you still wanna print at end:
i=1
while i<10:
    if i == 5:
        break
    print(i)
    i=i+1
print("i has reached 5 !!")
def sort(array, limit=3):
    """Print the elements of `array` in sorted order, space-separated,
    assuming every value lies in [0, limit).

    Counting-sort style: for each candidate value, print one copy per
    occurrence in the array.

    Generalized: the original hard-coded the bound 3 (values 0/1/2); it is
    now the `limit` parameter with the same default, so existing callers
    are unaffected.
    """
    for value in range(limit):
        for element in array:
            if element == value:
                print(value, end=" ")
# Driver: read the count; for N > 1 read the values (expected to be 0/1/2)
# and print them in sorted order.
N = int(input())
if N<=1:
    print()
else:
    arr = [int(x) for x in input().split()]
    sort(arr)
# output:
# 1
# 232
# 34543
# 4567654
# Row i: (n-i) spaces, digits i..2i-1 ascending, then 2i-2..i descending.
n = int(input())
for i in range(1,n+1,1):
    for space in range(1,(n+1)-i,1):
        print(' ',end="")
    for j in range(i,i*2,1):
        print(j,end="")
    for j in range(2*i-2,i-1,-1):
        print(j,end="")
    print()
|
#_____________________________# Array intersection !!
def unique_arr(list_1, list_2):
    """Print each element of `list_1` that also occurs in `list_2`
    (multiset intersection; each match in list_2 is consumed once).

    Side effect: matched entries of `list_2` are overwritten with a sentinel
    so duplicates pair up one-to-one (original behavior).

    BUGFIX: the original looped up to the *global* counters N1/N2 instead of
    the actual list lengths, so it only worked inside its own driver script.
    """
    for value in list_1:
        for j in range(len(list_2)):
            if value == list_2[j]:
                # Tombstone the consumed match.
                list_2[j] = 9999999999
                print(value)
                break
# Driver: read both arrays (guarding tiny sizes) and print their intersection.
N1 = int(input())
if N1<=1:
    print()
else:
    li1 = [int(x) for x in input().split()]
    N2 = int(input())
    if N2<=1:
        print()
    else:
        li2 = [int(x) for x in input().split()]
        unique_arr(li1,li2)
def countDuplicate(string):
    """Run-length encode `string` to stdout: runs longer than one are printed
    as <char><count>, single characters verbatim.

    A space sentinel is appended so the final run is flushed; the closing
    print emits that sentinel plus a newline (original behavior preserved).

    Cleanup: removed the unused `newstr` accumulator from the original.
    """
    string += ' '  # sentinel: forces the last real run to be emitted
    count = 1
    for i in range(len(string) - 1):
        if string[i] == string[i + 1]:
            count += 1
        else:
            print(string[i], end="")
            if count > 1:
                print(count, end="")
            count = 1
    print(string[-1])
#main
# NOTE(review): `str` here shadows the builtin of the same name.
str = input()
if len(str) <= 1:
    print()
else:
    countDuplicate(str)
#_____________________________________________________# introduction:
li = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
print(li[2][3])  # row 2, column 3 -> 12
#_________________________________________________# storing data in 2D lists :
li = [[1,2,3,4],[5,6,7,8]]
print(li[0])
type(li[0])  # NOTE(review): result is discarded; presumably meant print(type(li[0]))
li[0][1] = 4
print(li[0])
# The outer list and each row are distinct objects with their own ids.
print(id(li))
print(id(li[0]))
print(id(li[1]))
#_________________________________# rows may hold any type — replacing one with a str:
li[1] = 'parikh' # this is no more a 2D list now !!!
print(li)
# Ability to take multiple forms { METHOD OVERIDING }
# order of print in python : 1.=> lowest class 2.=> parent class => .....=> parent's parent's parent's.......class !
class Vehicle:
    """Base vehicle: public color plus an internal max speed (underscore-
    prefixed, accessed through the setter)."""

    def __init__(self, color, maxSpeed):
        self.color = color
        self._maxSpeed = maxSpeed

    @classmethod
    def getMaxSpeed(cls):
        # Class-wide constant cap, independent of any instance state.
        return 15

    def setMaxSpeed(self, maxSpeed):
        self._maxSpeed = maxSpeed

    def print(self):
        # Intentionally named `print` (shadows the builtin on instances);
        # Car overrides this with an extended version.
        print("Color :", self.color)
        print("MaxSpeed :", self._maxSpeed)
class Car(Vehicle):
    """Vehicle subclass adding a gear count and a convertible flag; overrides
    print() to show all four attributes."""

    def __init__(self, color, maxSpeed, numGears, isConvertible):
        super().__init__(color, maxSpeed)
        self.numGears = numGears
        self.isConvertible = isConvertible

    def print(self):
        # Replaces Vehicle.print entirely. To chain the parent version first,
        # super().print() would be the call; self.print() here would recurse
        # forever because dispatch lands back on this override.
        print("Color :", self.color)
        print("MaxSpeed :", self._maxSpeed)
        print("NumGears :", self.numGears)
        print("IsConvertible :", self.isConvertible)
# Demo: the commented lines exercise Car (child); the live ones exercise Vehicle.
# c = Car("red",15,3,False)
# c.print()
#print()
v = Vehicle("red",18)
v.print()
print()
def linear_search(list, element):
    """Return the index of the first occurrence of `element` in `list`,
    or -1 when it is absent.

    BUGFIX: the original had an unreachable `break` after the `return` and an
    `else: return -1` positioned so the scan could bail out on the first
    non-matching element; the not-found result now only fires after the
    whole list has been examined.
    """
    for i in range(len(list)):
        if list[i] == element:
            return i
    return -1
li = [1,2,3,4,5]
index = linear_search(li,3) # using function linear search
print("index =",index)  # expected: index = 2
#________________________________________________________# new list method !#_______________________________________________________________________________________#
# given a list : check if sorted :
#Base case : list of size 0 or 1 (is by default sorted!) : should return TRUE (induction hypothesis)
# check if [0]>[1] : if yes return FAlSE (list not sorted)
# now to check list of size L-1: sorted or not
def isSorted(arr):
    """Recursively check that `arr` (list or tuple) is in non-decreasing order.

    Base cases: length 0 or 1 is sorted; a descending first pair is not.
    Otherwise the answer is whatever holds for the tail.

    BUGFIX: the original called isSorted on the tail twice (the first result
    was discarded), making the check exponential in len(arr); a single
    recursive call is now returned directly.
    """
    if len(arr) <= 1:
        return True
    if arr[0] > arr[1]:
        return False
    return isSorted(arr[1:])
# Works for tuples as well as lists (only indexing and slicing are used).
a = (1,2,3,4,5,6,9,8)
print(isSorted(a))  # expected: False (9 > 8)
#_______________________________________________________________# square of elements:
li = [1,2,3,4]
#_____________________________# long way:
li_new=[]
for ele in li:
    li_new.append(ele**2)
print(li_new)
#_______________________________________________________________# using comprehension now:
li_new_c=[ele**2 for ele in li]
print(li_new_c)
#___________________________________________________________# squares of even elements only:
li_even_c=[ele**2 for ele in li if ele%2==0]
print(li_even_c)
# basically long 5-6 line codes have been reduced to a single line !!
#_______________________________________________________# multiple conditions, e.g. multiples of 6:
li = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]
li_6=[ele for ele in li if ele%2==0 and ele%3==0] # any number of conditions allowed !!
print(li_6)
#________________________________________________________________# multiple FOR loops (intersection):
li1 = [1,2,3,4,5,6]
li2 = [2,4,6,7]
li_inter = []
for ele in li2:
    for ele1 in li1:
        if ele==ele1:
            li_inter.append(ele)
print(li_inter)
# OR :
li_inter_c = [ele for ele in li1 for ele1 in li2 if ele==ele1]
print(li_inter_c)
#____________________________________________# if multiple of 2 print the square, else the element:
li = [1,2,3,4,5,6]
li_new = [ele**2 if ele%2==0 else ele for ele in li]
print(li_new)
|
# Linear search, script form: read n, the list and the target; print the
# index of the first match or -1.
n = int(input()) # no of elements
li = [int(x) for x in input().split()] # elements in the list
print(li)
ele = int(input()) # element to be searched
isFound = False
for i in range(len(li)): # scan for the first occurrence
    if li[i] == ele:
        print(i)
        isFound = True
        break
if isFound is False:
    print(-1)
|
#_____________________________________________________________# Sum of 1st n natural numbers !_________________________________________________________________________
#concept : E(n) = n + E(n-1) ______where n is the base case and rest is the follow.
def sum_n(n):
    """Sum of the first n natural numbers via E(n) = n + E(n-1), E(0) = 0."""
    return 0 if n == 0 else n + sum_n(n - 1)
# Driver: read n, print 1 + 2 + ... + n.
n = int(input())
print(sum_n(n))
# similarly: factorial with the same recursive pattern
def fact(n):
    """n! computed recursively, with 0! == 1 as the base case."""
    if n == 0:
        return 1
    return n * fact(n - 1)
# Driver: read n, print n!.
n = int(input())
print(fact(n))
# Pattern: first row "1"; each later row i prints 1, then (i-1) copies of 2,
# then a closing 1 (e.g. 1 / 11 / 121 / 1221 ...).
n = int(input())
i=1
x=2
print(1)
while(i<n):
    j=1
    print(1,end="")
    while(j<i):
        print(x,end="")
        j=j+1
    print(1,end="")
    print()
    i=i+1
#______________________________________________# SWAPPING !!
def reverse(list):
    """Reverse `list` in place by swapping symmetric pairs.

    BUGFIX: the original indexed the *global* `li` on both sides of the swap,
    so it only worked when called on that specific global list; it now uses
    the parameter throughout.
    """
    n = len(list)
    for i in range(0, n // 2, 1):
        list[n - i - 1], list[i] = list[i], list[n - i - 1]
# Demo: reverse works in place, so print the mutated list afterwards.
li = [1,2,3,4,5,6,7]
reverse(li)
print(li)
# output : [7, 6, 5, 4, 3, 2, 1]
#___________________________________________# swapping alternate !!
def reverse_alt(list):
    """Swap adjacent pairs of `list` in place and print it space-separated.

    For odd lengths the final element stays put.

    BUGFIX: the original branched on the *global* element count `x` and
    printed the *global* `li`; the parity now comes from the list itself and
    the parameter is printed, so the function is self-contained.
    """
    n = len(list)
    # For odd n, stop one early so the last element is left alone.
    upper = n if n % 2 == 0 else n - 1
    for i in range(0, upper, 2):
        list[i + 1], list[i] = list[i], list[i + 1]
    for item in list:
        print(item, end=" ")
# Driver: read the element count (0 means no elements), then the values.
x = int(input())
if x==0:
    print()
else:
    li = [int(x) for x in input().split()]
    reverse_alt(li)
# Two main concepts in OOPS:
# 1. classes
# 2. Objects
# way to create a class : and a function inside it :
class student:
    """Minimal record holding a student's name and age."""

    def __init__(self, name, age):
        self.age = age
        self.name = name
# Create two instances; __init__ runs once per object.
s1 = student("abc",16)
s2 = student("def",17)
# display s1 and s2 address:
print(s1,s2)
# print type of anything (obj,class,list,string)
print(type(s1))
# print all instance attributes as a dict
print(s1.__dict__)
# check if object has attribute:
print(hasattr(s1,"name")) # checks if s1 has name attribute.
# display the attribute:
print(getattr(s1,"name"))
# delete the attribute you want to remove
delattr(s2,"name")
print(s2.__dict__)
class Solution(object):
    """Valid-parentheses checker (LeetCode 20): a string of ()[]{} is valid
    when every opener is closed by the matching closer in the right order."""

    def isValid(self, s):
        """
        :type s: str
        :rtype: bool
        """
        closer_to_opener = {")": "(", "]": "[", "}": "{"}
        stack = []
        for para in s:
            if para in "([{":
                stack.append(para)
            elif stack and closer_to_opener.get(para) == stack[-1]:
                # Matching closer for the most recent opener.
                stack.pop()
            else:
                # Mismatched closer, closer with empty stack, or a
                # non-bracket character.
                return False
        # BUGFIX: the original only returned True when the stack was empty and
        # otherwise fell off the end returning None; always return a real bool.
        return not stack
# Demo: exercise the checker (return values are discarded here).
ValidParentheses = Solution()
ValidParentheses.isValid("()}}}")
# ValidParentheses.isValid("()[]{}")
# ValidParentheses.isValid("(]")
# ValidParentheses.isValid("(]")
# ValidParentheses.isValid("{[]}")
# -*- coding:utf-8 -*-
from collections import namedtuple
# Problem: a student record stored as a plain tuple forces positional access
# and magic indexes.
student = ('Jim', 16, 'male', 'jim8721@mail.com')
NAME, AGE, SEX, MAIL = range(4)
# NAME = 0
# AGE = 1
# SEX = 2
# EMAIL = 3
# name
print(student[NAME])
# age (only printed for adults; 16 here, so nothing is shown)
if student[1] >= 18:
    print(student[AGE])
# sex
if student[2] == 'male':
    print(student[SEX])
# Solution: namedtuple keeps tuple storage but gives readable attribute access.
Student = namedtuple('Student', ['name', 'age', 'sex', 'email'])
s = Student('Jim', age=16, sex='male', email='jim8721@gmail.com')
s2 = Student(name='JIm', age=16, sex='male', email='jim123@gmail.com')
print(s.name)
print(s2.age)
'''
如何实现可迭代对象和迭代器对象
问题:如果一次抓取所有信息,第一有延时,另外浪费存储空间
方案:使用“用时访问”策略,能够把问题封装到一个对象中,然后进行迭代
解决:
1. 实现可迭代对象 next 方法每次返回一个城市气温
2. 实现可迭代对象 __iter__ 方法 返回一个迭代器对象
'''
from collections import Iterable, Iterator
import requests
class WeatherIterator(Iterator):
    """Iterator that fetches one city's weather per step (lazy, on demand).

    NOTE(review): `from collections import Iterator` was removed in Python
    3.10; on modern interpreters this file needs `collections.abc` — confirm
    the target runtime.
    """

    def __init__(self, cities):
        self.cities = cities
        self.index = 0  # position of the next city to fetch

    def getWeather(self, city):
        # One HTTP request per city; assumes the service responds with
        # {'data': {'forecast': [{'low': ..., 'high': ...}, ...]}} — confirm.
        r = requests.get(u'http://wthrcdn.etouch.cn/weather_mini?city=' + city)
        data = r.json()['data']['forecast'][0]
        return ('%s: %s, %s' % (city, data['low'], data['high']))

    def __next__(self):
        if self.index >= len(self.cities):
            raise StopIteration
        city = self.cities[self.index]
        self.index += 1
        return self.getWeather(city)
class WeatherIterable(Iterable):
    """Re-iterable wrapper: every __iter__ call hands out a fresh
    WeatherIterator over the same city list."""

    def __init__(self, cities):
        self.cities = cities

    def __iter__(self):
        return WeatherIterator(self.cities)
# Demo: iterate the wrapper; each line is fetched lazily, one request per city.
for x in WeatherIterable(['北京', '上海', '广州']):
    print(x)
'''
Problem: how do we parse a simple XML document?

Real-world case: XML is a very common markup language that provides a uniform
way to describe an application's structured data, e.g.:
<?xml version="1.0" ?>
<data>
    <country name="Liechtenstein">
        <rank updated="yes">2</rank>
        <year>2008</year>
        <gdppc>141100</gdppc>
        <neighbor name="Austria" direction="E" />
        <neighbor name="Switzer Land" direction="M" />
    </country>
    <country name="Singapore">
        <rank updated="yes">3</rank>
        <year>2011</year>
        <gdppc>141100</gdppc>
        <neighbor name="Austria" direction="E" />
        <neighbor name="Switzer Land" direction="M" />
    </country>
</data>

Solution: use xml.etree.ElementTree from the standard library; its parse()
function parses an XML document.
'''
from xml.etree.ElementTree import parse
f = open('demo.xml')
et = parse(f)
root = et.getroot() # tag attrib text
for child in root:
    print(child.get('name'))
print(root.find('country'))
print(root.findall('country'))
for e in root.iterfind('country'):
    print(e.get('name'))
print(list(root.iter('rank')))
# Advanced find/findall path syntax:
root.findall('country/*') #\ * matches any child tag
root.findall('.//rank') #\ // all descendants
root.findall('.//rank/..') #\ .. parent element
root.findall('country[@name]') #\ @attrib: has the attribute
root.findall('country[@name="Liechtenstein"]') #\ @attrib="value": attribute equals value
root.findall('country[rank]') #\ tag: has such a child tag
root.findall('country[rank="2"]') #\ tag="text": child tag with given text
root.findall('country[.="2"]') #\ .="text": the element's own text
root.findall('country[1]') #\ position: int, last(), last() - 1
###################################_STACK_USING_OOPS###################################################################
class Stack():
    """LIFO stack backed by a Python list; the top is the end of the list."""

    def __init__(self):
        self.list = []

    def empty(self):
        """True when the stack holds no items."""
        return self.list == []

    def push(self, item):
        """Place `item` on top."""
        self.list.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.list.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.list[-1]

    def print(self):
        print(self.list)

    def size(self):
        """Number of items on the stack."""
        return len(self.list)
#s = Stack()
#s.push(10)
#s.push(20)
#s.push(30)
#print("The top ofthe stack is",s.peek())
#######################################_INFIX_TO_POSTFIX_USING_STACK_###########################################
def infix_to_postfix(s):
    """Convert a space-delimited infix expression to postfix (shunting-yard
    over the module's Stack class).

    Operands are single digits or upper-case letters separated by spaces;
    supported operators: + - * / ^ and parentheses. Returns the postfix
    string with a leading space before each token (original format).
    """
    precedence={'/':2,'*':2,'+':1,'-':1,'(':4,')':4,'^':3}
    lis =list(s.split())
    post_lis =[]
    # NOTE(review): rebinds the parameter `s` to a Stack, shadowing the input.
    s = Stack()
    #print(lis)
    for i in lis:
        #print(i,post_lis)
        # BUGFIX: the original alphabet string 'ABCDEFGHIJKLMNOPQRSTUVWYYZ'
        # omitted 'X' and doubled 'Y', so an operand 'X' fell into the
        # operator branch and raised KeyError.
        if((i in '0123456789')or (i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')): #Operand
            post_lis.append(i)
        else: #Operator
            #Check for the bracket terms
            if(i==')'):
                # Closing bracket: flush back to the matching '('.
                while(s.peek()!='('):
                    post_lis.append(s.pop())
                s.pop() #Removes the '(' operator
            else:
                #Pop all the elements in the stack with equal or higher precedence
                while((s.empty()==False) and (precedence[i]<=precedence[s.peek()])and (s.peek()!='(')):
                    post_lis.append(s.pop())
                s.push(i)
    #The expression is over, so pop all remaining operators into post_lis
    while(s.empty()==False):
        post_lis.append(s.pop())
    #Convert postfix list to postfix string
    post_str =''
    for i in post_lis:
        post_str +=' '+ i
    return(post_str)
# Demo expressions (tokens must be space-separated).
s ="A * B + C * D"
s2 ="( A + B ) * C - ( D - E ) * ( F + G )"
print(infix_to_postfix(s2))
def operation(op1, op2, op):
    """Apply the binary operator named by `op` to op1/op2 and return the
    result as a string; unknown operators yield None (original behavior).

    Note '/' is true division, so its result string may carry a decimal
    point (e.g. '4.0').
    """
    ops = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '/': lambda a, b: a / b,
        '*': lambda a, b: a * b,
        '^': lambda a, b: a ** b,
    }
    fn = ops.get(op)
    if fn is None:
        return None  # unrecognized operator, as in the original
    return str(fn(op1, op2))
def postfix_evaluation(st):
    """Evaluate a space-delimited postfix expression of single-digit operands
    using the module's Stack; returns the result as a string.

    NOTE(review): operation('/') can push a float string like '3.0', which
    would break a later int() pop — only safe when division is the final
    operator, as in the demos below.
    """
    s_eval = Stack()
    lis = list(st.split())
    #print("List is",lis)
    for i in lis:
        if(i in "0123456789"):
            s_eval.push(i)
        else:
            #Operators
            operator = i
            b = s_eval.pop() # The 1st element poped out is the 2nd operand
            a = s_eval.pop() # The 2nd element popped out is the 1st operand
            c = operation(int(a),int(b),operator)
            s_eval.push(c)
    # The final value left on the stack is the result.
    postfix_eval =s_eval.peek()
    return postfix_eval
# Demo: a ready-made postfix string, and an infix string converted first.
str1 ='7 8 + 3 2 + /'
str2 ='5 * 3 ^ ( 4 - 2 )'
s=infix_to_postfix(str2)
print(postfix_evaluation(s))
#########################_BASE_CONVERSION_USING_STACK_#############################################################
def base_conversion(num, base):
    """Print `num` rendered in `base` (2..10) by stacking remainders.

    BUGFIX: num == 0 never entered the division loop, so the original
    printed an empty line; it now prints "0".
    """
    if num == 0:
        print('0')
        return
    st = Stack()
    while(num>0):
        rem = num % base
        st.push(rem)
        num = num // base
    # Pop the remainders back off to get most-significant digit first.
    string = ''
    while(st.empty()==False):
        string += str(st.pop())
    print(string)
# Demo: 42 in base 8 -> "52".
num = 42
base = 8
base_conversion(num,base)
###################################################################################################################
|
import pandas
import json
'''
Boyer Moore String Search implementation in Python
__author__: Pawda
Date: 2016-04-04
'''
# method bad character to find the shift bit. len(ref) == len(read)
def bad_cha(ref,read):
    """Bad-character rule of Boyer-Moore over equal-length windows.

    Scans ref/read from the right; on the first mismatch, returns the shift:
    the distance to the nearest occurrence of ref's mismatched character
    further left in `read`, or enough to move past it entirely. Returns the
    string "success" when every character matches.
    """
    for i in range(1, len(read)+1):
        if not read[-i] == ref[-i]:
            # Look for ref's mismatched character earlier in the pattern.
            for j in range(1,len(read)-i+1):
                if ref[-i] == read[-(i+j)]:
                    return j
            # No occurrence: shift the mismatch position past the pattern.
            return len(read)-i+1
    return "success"
# method good suffix to find the shift bit. len(ref) == len(read)
def good_suffix(ref,read):
    """Good-suffix rule of Boyer-Moore over equal-length windows.

    Returns "success" when read matches ref entirely; otherwise a shift
    derived from where the matched suffix re-occurs inside the pattern.
    """
    suffix = ""
    # Collect the matched suffix from the right; note `i` is deliberately
    # reused after the loop (decremented once on mismatch) as the match length.
    for i in range(1, len(read)+1):
        if read[-i] == ref[-i]:
            suffix = read[-i:]
        else:
            i-=1
            break
    if suffix == read:
        return "success"
    post = read[:-i]
    l_suffix = len(suffix)
    # Case 1: the whole suffix occurs again inside the preceding part.
    for j in range(len(post)-l_suffix+1):
        if j == 0:
            if post[-(j+l_suffix):] == suffix:
                return j+l_suffix
        else:
            if post[-(j+l_suffix):-j] == suffix:
                return j+l_suffix
    # Case 2: some suffix of the suffix is a prefix of the pattern.
    for k in range(l_suffix-1):
        if suffix[-(l_suffix-k-1):] == post[:l_suffix-k-1]:
            return len(read)-(l_suffix-(k+1))
    # Case 3: no re-occurrence anywhere; shift the whole pattern length.
    return len(read)
# this is the main function to find the ***matches*** by BM algorithm (Python 2)
def search(ref,read):
    """Slide `read` along `ref`, shifting by the larger of the bad-character
    and good-suffix rules; prints a list of {'alignment', 'No'} dicts, one
    per match ('No' is the 1-based match position)."""
    r_list = []
    i = 0
    l_read = len(read)
    while i+l_read <= len(ref):
        result = bad_cha(ref[i:l_read+i],read)
        if result == "success":
            n_dict = dict()
            n_dict['alignment'] = read
            n_dict['No'] = i+1
            r_list.append(n_dict)
            # After a full match, jump past the matched window.
            i += l_read
            continue
        else:
            result_gs = good_suffix(ref[i:l_read+i],read)
            # Take whichever rule allows the larger shift.
            result = max(result_gs,result)
            i += result
    print r_list
# main execution of the python program.
if __name__ == "__main__":
    # Each row of mini_twitter.csv carries a JSON document with a 'text'
    # field; search every text for the fixed pattern.
    tw = pandas.read_csv('mini_twitter.csv')
    for each_value in tw.value:
        result = json.loads(each_value)
        pattern = "the"
        search(result['text'],pattern)
|
# Advent of Code 2017
# https://adventofcode.com/2017
# Day 2
# https://adventofcode.com/2017/day/2
"""
As you walk through the door, a glowing humanoid shape yells in your direction. "You there! Your state appears to be
idle. Come help us repair the corruption in this spreadsheet - if we take another millisecond, we'll have to display an
hourglass cursor!"
The spreadsheet consists of rows of apparently-random numbers. To make sure the recovery process is on the right track,
they need you to calculate the spreadsheet's checksum. For each row, determine the difference between the largest value
and the smallest value; the checksum is the sum of all of these differences.
For example, given the following spreadsheet:
5 1 9 5
7 5 3
2 4 6 8
The first row's largest and smallest values are 9 and 1, and their difference is 8.
The second row's largest and smallest values are 7 and 3, and their difference is 4.
The third row's difference is 6.
In this example, the spreadsheet's checksum would be 8 + 4 + 6 = 18.
What is the checksum for the spreadsheet in your puzzle input?
--- Part Two ---
"Great work; looks like we're on the right track after all. Here's a star for your effort." However, the program seems
a little worried. Can programs be worried?
"Based on what we're seeing, it looks like all the User wanted is some information about the evenly divisible values in
the spreadsheet. Unfortunately, none of us are equipped for that kind of calculation - most of us specialize in bitwise
operations."
It sounds like the goal is to find the only two numbers in each row where one evenly divides the other - that is, where
the result of the division operation is a whole number. They would like you to find those numbers on each line, divide
them, and add up each line's result.
For example, given the following spreadsheet:
5 9 2 8
9 4 7 3
3 8 6 5
In the first row, the only two numbers that evenly divide are 8 and 2; the result of this division is 4.
In the second row, the two numbers are 9 and 3; the result is 3.
In the third row, the result is 2.
In this example, the sum of the results would be 4 + 3 + 2 = 9.
What is the sum of each row's result in your puzzle input?
"""
import pandas as pd
import numpy as np
# Paths to the tab-separated puzzle inputs (AoC 2017 day 2).
checksum_input = '02_input.tsv'
test_input_1 = '02_test_input_1.tsv'
test_input_2 = '02_test_input_2.tsv'
def calculate_checksum_1(data):
    """Part 1 checksum: sum over rows of (row max - row min).

    :param data: path to a tab-separated spreadsheet with no header
    :return: integer checksum
    """
    df = pd.read_csv(data, sep='\t', header=None)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement. Vectorized max/min replaces the
    # per-row apply for the same result.
    nums = (df.max(axis=1) - df.min(axis=1)).astype(int)
    return nums.sum()
def calculate_checksum_2(data):
    """Part 2 checksum: sum of each row's evenly-dividing-pair quotient."""
    frame = pd.read_csv(data, sep='\t', header=None)
    row_results = frame.apply(find_nums, axis=1)
    return sum(row_results)
def find_nums(row):
    """Return the quotient of the one evenly-dividing pair in *row*.

    Sorts the values, then for the first pair (small, large) with
    large % small == 0 returns large // small. Returns None when no such
    pair exists (the puzzle guarantees exactly one per row).

    :param row: pandas Series of numbers
    """
    sorted_row = row.sort_values().tolist()
    # The inner enumerate index was unused in the original; iterate the
    # suffix directly.
    for i, small in enumerate(sorted_row):
        for large in sorted_row[i + 1:]:
            if large % small == 0:
                return large // small
    return None
if __name__ == '__main__':
    # Sanity-check both parts against the worked examples, then solve.
    # ------ Part 1 ------ #
    assert calculate_checksum_1(test_input_1) == 18
    # display my solution
    print('Part 1: ', calculate_checksum_1(checksum_input))
    # ------ Part 2 ------ #
    assert calculate_checksum_2(test_input_2) == 9
    # display my solution
    print('Part 2: ', calculate_checksum_2(checksum_input))
|
import requests
from os import path
from pprint import pprint
from collections import Counter
def getPythonSite():
    """Fetch python.org and record how often 'span' occurs in the raw body.

    Writes the count to newFile.txt and prints it. Network I/O.
    """
    r = requests.get('https://www.python.org')
    s = f'span count: {str(r.content).count("span")}'
    print(s)
    # with-statement guarantees the file is closed even if write() raises
    # (the original used open/write/close with no try/finally).
    with open('newFile.txt', 'w') as f:
        f.write(s)
def getMobyDick():
    """Download the Gutenberg text (files/76) once and cache it locally.

    Newlines and carriage returns are flattened to spaces before saving.
    NOTE(review): gutenberg.org/files/76 is 'Adventures of Huckleberry
    Finn', not Moby Dick — confirm the intended URL (Moby Dick is 2701).
    """
    # Skip the download when the cached copy already exists.
    if path.exists("mobyDick.txt"):
        print("Already downloaded!")
        return
    r = requests.get('https://www.gutenberg.org/files/76/76-0.txt')
    text = r.text.replace("\n", ' ').replace("\r", ' ')
    # Removed the stray pprint(dir(f)) debug output and the superseded
    # commented-out open/write/close variant.
    with open('mobyDick.txt', 'w') as f:
        f.write(text)
def readBook(s):
    """Print the 10 most common whitespace-separated words in file *s*,
    plus how often the standalone word ' a ' occurs.

    The original docstring claimed a dictionary was returned; nothing is
    returned (None) — all results are printed.

    :param s: path of the text file to analyse
    """
    with open(s) as f:
        text = f.read()
    words = text.split()
    # Counter gives the frequency table directly.
    c = Counter(words)
    pprint(c.most_common(10))
    print(f"Count a: {text.count(' a ')}")
if __name__ == "__main__":
    # Download the book (cached after the first run), then analyse it.
    # getPythonSite()
    getMobyDick()
    readBook('mobyDick.txt')
|
def spiral(n,a): # passed all public cases and passed all private cases
    """Print the n x n matrix `a` in spiral order on a single line.

    Walks: left column top-to-bottom, bottom row left-to-right, right
    column bottom-to-top, top row right-to-left, then shrinks all four
    boundaries and repeats. `n` and `m` are reused as the shrinking
    bottom/right boundaries.
    """
    r=0 #row start
    c=0 #col start
    m=n #col end
    n=n #row end
    while(r<n and c<m):
        for i in range(r,n): # current leftmost column, top to bottom
            if(r==n-1 and c==m-1):
                # single remaining cell: suppress the trailing space
                print(a[i][c],end="")
            else:
                print(a[i][c],end=" ")
        c += 1
        for i in range(c,m): # bottom row, left to right
            print(a[n-1][i],end=" ")
        n -= 1
        for i in range(n-1, r-1, -1): # rightmost column, bottom to top
            print(a[i][m-1],end=" ")
        m -= 1
        for i in range(m-1 , c-1 , -1): # top row, right to left
            print(a[r][i],end=" ")
        r += 1
# Read the matrix size, then n rows of space-separated integers, and
# print them in spiral order.
n = int(input())
matrix = []
for i in range(n):
    x= input()
    x = [int(i) for i in x.split()] #creates a 1D array of integers from input string of numbers separated by space
    matrix.append(x)
spiral(n,matrix)
|
import random
# Read the array length, then n integers (one per line).
n = int(input())
arr = []
for i in range(0,n):
    ele = int(input())
    arr.append(ele)
def isSorted(arr):
    """Return 1 when arr is in non-decreasing order, otherwise 0."""
    return int(all(arr[k] <= arr[k + 1] for k in range(len(arr) - 1)))
def swap(arr, i, j):
    """Exchange arr[i] and arr[j] in place and return the same list."""
    holder = arr[i]
    arr[i] = arr[j]
    arr[j] = holder
    return arr
# Randomized "swap until sorted": repeatedly pick two random indices and
# swap them only when the swap fixes an out-of-order pair.
list1 = list(range(0,n))
list2 =list(range(0,n))
while( isSorted(arr) != 1):
    i = random.randint(0,len(list1)-1)
    j = random.randint(0,len(list2)-1)
    if(i>j and arr[i] < arr[j]):
        arr = swap(arr,i,j)
    elif(j > i and arr[j] < arr[i]):
        arr = swap(arr,i,j)
# Print space-separated, with no trailing space after the last element.
for i in range(0,n):
    if(i != n-1):
        print(arr[i],end=" ")
    else:
        print(arr[i],end="")
|
# Read five integers from stdin and print their mean (no trailing newline).
total = 0  # renamed from `sum`, which shadowed the builtin of that name
for _ in range(5):
    total = total + int(input())
average = total / 5
print(average, end="")
|
# Module-level state shared by the functions below: how many numbers the
# encoded message holds, and the numbers themselves.
count = int(input('how many numbers are there in your code? '))
codelist = []
def mainloop():
    # NOTE(review): this definition is replaced by a second `def mainloop`
    # near the end of the file, so this version is dead after import.
    count = int(input('how many numbers are there in your code? '))
    codelist = []
    # NOTE(review): `codelist` here is a fresh empty local, so this loop
    # never runs; if it did, pop(len(codelist)) would raise IndexError
    # (valid indices stop at len-1).
    while len(codelist) > 0:
        codelist.pop((len(codelist)))
    entercode(count)
    decode(count)
    finaldecode()
def entercode(count):
    """Read `count` numbers from stdin and append them to the global
    codelist, then echo the list."""
    zero = 0
    while zero < count:
        codelist.append(float(input('enter the next number in your code: ')))
        zero = zero + 1
    print (codelist)
# Collect the message as soon as the module runs.
entercode(count)
def decode(count):
    """Undo the encoding in place: the k-th entry (1-based, scanning from
    the end) becomes round(value ** (k / 3)) as an int."""
    countdc = count
    zero = 0
    # print (count)
    while zero < countdc:
        codelist[(count-1)] = int(round((codelist[(count-1)] ** ((count)/3)),0))
        # print (codelist[count-1])
        # print ('codelist')
        zero = zero + 1
        count = count - 1
def replacenumbers(length):
    """Replace each value 1..26 in the first `length` entries of the
    global codelist with its lowercase letter ('a'..'z'), in place.

    Replaces the original 26-branch if-chain with chr() arithmetic, and
    fixes an infinite loop: the original only decremented its index when
    one of the 26 branches matched, so any entry outside 1..26 made the
    while-loop spin forever.

    :param length: number of leading entries to convert
    """
    for idx in range(length):
        value = codelist[idx]
        # Only convert integer codes in the letter range; anything else
        # (already-converted letters, out-of-range codes) is left alone.
        if isinstance(value, int) and 1 <= value <= 26:
            codelist[idx] = chr(ord('a') + value - 1)
def finaldecode():
    """Ask whether to decode; on 'yes' print the letters, otherwise
    prompt for a new code. Either way, restart via mainloop()."""
    final = input('do you want to decode your message?(yes or no) ')
    if final == 'yes':
        replacenumbers(count)
        # print (codelist)
        finalproduct = ''.join(codelist)
        print (finalproduct)
        mainloop()
    else:
        print ('enter a new code')
        mainloop()
# Top-level driver: decode the numbers read at import time, then prompt.
decode(count)
finaldecode()
def mainloop():
    # Second definition of mainloop: this one wins and drives the program.
    # NOTE(review): count/codelist are locals here, while entercode/decode/
    # replacenumbers keep using the module-level globals — confirm intent.
    count = int(input('how many numbers are there in your code? '))
    codelist = []
    entercode(count)
    decode(count)
    finaldecode()
mainloop()
|
# Equal Sum Partition
# PS: Given an arrays "nums", return whether or not the array can be broken into 2 subsets such that the sum of both the partitions is equal
# eg:
# nums = [1,5,11,5]
# o/p = True
# because [1,5,5] = 11 and [11] = 11
# *********************NOTE**********************
# If the sum of the elements in "nums" is even, then and only then is an equal sum partition possible
# T[len(nums)+1][sums+1]
# NOTE(review): dimensions are hard-coded for the example at the bottom of
# the file (4 nums, target 11 -> 5 x 12); see subset_sum.
t = [[0 for _ in range(12)] for _ in range(5)]
def subset_sum(nums, sums):
    """Return True when some subset of `nums` sums exactly to `sums`.

    Classic subset-sum DP. Builds its own (len(nums)+1) x (sums+1) table
    instead of relying on the module-level `t`, whose hard-coded 5 x 12
    shape only fits the example input and would raise IndexError for
    longer arrays or larger targets.

    :param nums: list of non-negative integers
    :param sums: target sum (>= 0)
    """
    # Base cases: target 0 is always reachable (empty subset); with no
    # elements, only target 0 is reachable.
    table = [[j == 0 for j in range(sums + 1)] for _ in range(len(nums) + 1)]
    for i in range(1, len(nums) + 1):
        for j in range(1, sums + 1):
            if nums[i - 1] <= j:
                # Either take nums[i-1] or leave it.
                table[i][j] = table[i - 1][j - nums[i - 1]] or table[i - 1][j]
            else:
                table[i][j] = table[i - 1][j]
    return table[len(nums)][sums]
def equal_sum_parti(nums):
    """True iff `nums` can be split into two subsets with equal sums."""
    total = sum(nums)
    # An odd total can never split into two equal halves.
    if total % 2 != 0:
        return False
    # Find one subset summing to total//2; its complement then matches,
    # reducing the task to the subset-sum problem.
    return subset_sum(nums, total // 2)
# Demo: [1,5,11,5] splits as [1,5,5] / [11], so this prints True.
nums = [1,5,11,5]
print(equal_sum_parti(nums))
|
# Using recursion
# Reference: https://youtu.be/kvyShbFVaY8
def knapsack(wts, val, W, length):
    """0/1 knapsack by plain recursion.

    Best achievable value using only the first `length` items within
    remaining capacity W. Reference: https://youtu.be/kvyShbFVaY8
    """
    # Base case: no capacity left, or no items left to consider.
    if length == 0 or W == 0:
        return 0
    current_wt = wts[length - 1]
    current_val = val[length - 1]
    # Item too heavy for the remaining capacity: it cannot be taken.
    if current_wt > W:
        return knapsack(wts, val, W, length - 1)
    # Otherwise take the better of including or excluding the item.
    taken = current_val + knapsack(wts, val, W - current_wt, length - 1)
    skipped = knapsack(wts, val, W, length - 1)
    return max(taken, skipped)
# Choice Diagram
wts = [1,3,4,5]
val = [1,4,5,7]
W = 7 # This is capacity of the knapsack
# function should return maximum profit
# (prints 9: weights 3 and 4 fill the capacity for value 4 + 5)
print(knapsack(wts, val, W, len(wts)))
|
# Basic linked list implementation using functions
# Both insertion and deletion
class LinkedList:
    """Singly linked list with basic insert, delete, and reverse ops."""

    def __init__(self):
        self.head = None

    def printll(self):
        """Print the values front-to-back, space separated."""
        temp = self.head
        while (temp):
            print(temp.data, end = " ")
            temp = temp.next

    def append(self, new_data):
        """Insert a node holding new_data at the tail (O(n) walk)."""
        new_node = Node(new_data)
        if self.head==None:
            self.head = new_node
        else:
            temp = self.head
            while temp.next:
                temp = temp.next
            temp.next = new_node

    def insertbeg(self, new_data):
        """Insert a node holding new_data at the head."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def insertbet(self, prev_node, new_data):
        """Insert a node holding new_data right after prev_node."""
        new_node = Node(new_data)
        new_node.next = prev_node.next
        prev_node.next = new_node

    def delnode(self, key):
        """Delete the first node whose data equals key (no-op if absent)."""
        temp = self.head
        # Special case: the node to delete is the head.
        if temp is not None and temp.data == key:
            self.head = temp.next
            temp.next = None  # break the link so the node is fully detached
            return
        prev = None
        while temp is not None:
            if temp.data == key:
                break
            prev = temp
            temp = temp.next
        if temp is None:  # key not found anywhere in the list
            return
        # temp is the node to remove, prev is its predecessor.
        prev.next = temp.next
        temp.next = None

    def delatpos(self, pos):
        """Delete the node at 0-based position pos (no-op if out of range)."""
        if self.head is None:
            return
        temp = self.head
        if pos == 0:
            self.head = temp.next
            temp.next = None
            return
        prev = None
        while pos != 0 and temp is not None:
            prev = temp
            temp = temp.next
            pos -= 1
        # BUGFIX: when pos equals the list length, the loop ends with
        # pos == 0 but temp is None, and the original then crashed on
        # temp.next (AttributeError). Treat that as "position absent" too.
        if pos != 0 or temp is None:
            return
        prev.next = temp.next
        temp.next = None

    def del_LL(self):
        """Drop the whole list; nodes become garbage-collectable."""
        self.head = None

    def reverse(self):
        """Reverse the list in place by re-pointing each next link."""
        if not self.head:
            return None
        current_node = self.head
        prev_node = None
        next_node = None
        while current_node:
            next_node = current_node.next
            current_node.next = prev_node
            prev_node = current_node
            current_node = next_node
        self.head = prev_node


class Node:
    """A single list cell: payload plus next pointer."""

    def __init__(self, data):
        self.data = data
        self.next = None
# Creating Linked List
linkedlist = LinkedList()
# Inserting 4 elements
linkedlist.append(1)
linkedlist.append(2)
linkedlist.append(3)
linkedlist.append(4)
# Inserting 5 at the beginning
linkedlist.insertbeg(5)
# Inserting 18 after the second element
linkedlist.insertbet(linkedlist.head.next, 18)
# Deleting an element from the middle of the linkedlist
linkedlist.delnode(18)
# Deleting element from the start i.e the head referenced element
linkedlist.delnode(5)
# Deleting at a given position
linkedlist.delatpos(7)
# To delete the entire LL
linkedlist.del_LL()
# Prints nothing: the list was just emptied by del_LL().
linkedlist.printll()
|
# Subset Sum Problem reference https://youtu.be/_gPcYovP7wc
# Problem Statement: Given an integer array "nums" and an integer "sums", output where any subset of nums can sum upto to "sum"
# Output: Boolean (True/False)
# eg1:
# nums = [2,3,7,8,10]
# sum = 11
# output: True
#First Step, Let's make the DP Matrix
# t[length+1][w+1] but here: t[length+1][sums+1] where length = len(nums)
# t = [[0 for _ in range(sums+1)] for _ in range(len(nums)+1)]
# here it will be:
# NOTE(review): hard-coded 6 x 12 fits only the example at file bottom
# (5 nums, target 11); see subset_sum.
t = [[0 for _ in range(12)] for _ in range(6)]
def subset_sum(nums, sums):
    """Return True when some subset of `nums` sums exactly to `sums`.

    0/1-knapsack style DP over a boolean table. Allocates its own
    (len(nums)+1) x (sums+1) table instead of the module-level `t`, whose
    hard-coded 6 x 12 shape only fits the example input and would raise
    IndexError for longer arrays or larger targets.

    :param nums: list of non-negative integers
    :param sums: target sum (>= 0)
    """
    # Base cases baked into the initializer: row 0 (no elements) is False
    # everywhere except column 0; column 0 (target 0) is True everywhere,
    # since the empty subset always sums to 0.
    table = [[j == 0 for j in range(sums + 1)] for _ in range(len(nums) + 1)]
    for i in range(1, len(nums) + 1):
        for j in range(1, sums + 1):
            # Can the current number fit into the subproblem's target?
            if nums[i - 1] <= j:
                # Boolean analogue of knapsack's max(): take it or not.
                table[i][j] = table[i - 1][j - nums[i - 1]] or table[i - 1][j]
            else:
                table[i][j] = table[i - 1][j]
    # Answer for the full array and the full target.
    return table[len(nums)][sums]
# Demo: 11 itself is in the array, so this prints True.
nums = [2,3,7,8,11]
sums = 11
print(subset_sum(nums, sums))
|
# -*- coding: utf-8 -*-
__author__ = "Patrick Lehmann"
from itertools import permutations
# Globals shared by main / happy_neighbour / get_happiness below.
persons = set()
happiness = dict()
def main():
    """Parse the day13 input, then brute-force every seating permutation
    (including 'You') for the maximum total happiness."""
    global persons, happiness
    biggest_happiness = 0
    seat_placement = "None"
    with open('../../input/day13.txt', 'r', encoding='utf-8') as f:
        data = f.read()
    for line in data.splitlines():
        # Normalize "A would gain/lose N happiness units by sitting next
        # to B." down to three tokens: name, signed amount, name.
        line = line.replace('would ', '').replace('gain ', '+').replace('lose ', '-')
        line = line.replace('happiness units by sitting next to ', '')
        split = line.split()
        split[0] = split[0].replace(" ", "").replace(".", "")
        split[2] = split[2].replace(" ", "").replace(".", "")
        split[1] = split[1].replace(" ", "")
        # Directed key "A B": A's happiness delta when seated next to B.
        key = split[0] + " " + split[2]
        persons.add(split[0])
        persons.add(split[2])
        happiness[key] = int(split[1])
    # Add myself to the person list. Give
    # me no Happiness factor. so that when I get it
    # I just return a default value of 0 for me
    persons.add("You")
    for neighbour in permutations(persons):
        curr = happy_neighbour(neighbour)
        if curr > biggest_happiness:
            biggest_happiness = curr
            seat_placement = neighbour
    print(seat_placement, biggest_happiness)
def happy_neighbour(data):
    """Total happiness of a circular seating: each person is paired with
    the next one, and the last wraps around to the first."""
    total = 0
    count = len(data)
    for position, person in enumerate(data):
        # Modular index closes the circle for the final seat.
        total += get_happiness(person, data[(position + 1) % count])
    return total
def get_happiness(person1, person2):
    """Mutual happiness of a seated pair, looked up in both directions in
    the global `happiness` map; unknown pairings contribute 0."""
    forward = " ".join((person1, person2))
    backward = " ".join((person2, person1))
    return happiness.get(forward, 0) + happiness.get(backward, 0)
# Script entry point.
if __name__ == "__main__":
    main()
|
__author__ = "Patrick Lehmann"
import heapq
def main():
    """
    Entry point of the program
    """
    # Day 2: reads LxWxH box dimensions and prints both totals.
    with open('../../input/day02.txt') as f:
        data = f.read()
    parse_input(data)
def parse_input(data):
    """Accumulate wrapping-paper and ribbon totals over all LxWxH lines,
    then print both results."""
    total_wrapping_paper = 0
    total_ribbon = 0
    for line in data.splitlines():
        l, w, h = (int(part) for part in line.split("x"))
        total_wrapping_paper += surface(l, w, h) + slack(l, w, h)
        total_ribbon += ribbon(l, w, h)
    print("The total ammount of wrapping paper in sqr Feet is:")
    print(total_wrapping_paper)
    print("The total ammount of ribbons in feet is:")
    print(total_ribbon)
def ribbon(l, w, h):
    """Ribbon length: perimeter of the smallest face plus the bow,
    which equals the box's volume."""
    shortest_two = sorted((l, w, h))[:2]
    perimeter = 2 * (shortest_two[0] + shortest_two[1])
    return perimeter + l * w * h
def surface(l, w, h):
    """Total surface area of an l x w x h box."""
    faces = (l * w, w * h, h * l)
    return 2 * sum(faces)
def slack(l, w, h):
    """Extra paper: the area of the box's smallest face."""
    return min(l * w, w * h, h * l)
# Script entry point.
if __name__ == '__main__':
    main()
|
__author__ = "Patrick Lehmann"
def main():
    """
    Entry point of the program
    """
    # Day 1: parentheses as floors. Part 1 is the final floor; part 2 is
    # the position at which the basement (-1) is first entered.
    with open('../../input/day01.txt') as f:
        data = f.read()
    print("Part 1: ")
    print(answer_part1(data))
    print("Part 2: ")
    print(answer_part2(data))
def answer_part1(data):
    """Final floor: ')' goes down one; any other character goes up one."""
    return sum(-1 if ch == ")" else 1 for ch in data)
def answer_part2(data):
    """Number of characters consumed when floor -1 is first reached.

    If the basement is never entered, the loop falls through and the
    count of all characters is returned (matches the original).
    """
    counter = 0
    floor = 0
    for ch in data:
        # Check before processing, so the basement-entering character is
        # the last one counted.
        if floor == -1:
            break
        floor += -1 if ch == ")" else 1
        counter += 1
    return counter
# Script entry point.
if __name__ == '__main__':
    main()
|
# make sure to run previous codes
# (assumes a DataFrame `df` with at least 'job' and 'age' columns was
# created earlier in the notebook/session)
# Summarize all numeric columns
print(df.describe())
# Summarize all columns
print(df.describe(include='all'))
print(df.describe(include=['object'])) # limit to one or more types
# statistics per group (groupby)
# NOTE(review): groupby().mean() over non-numeric columns raises in
# pandas >= 2.0; may need numeric_only=True — confirm pandas version.
print(df.groupby("job").mean())
print(df.groupby("job")["age"].mean())
print(df.groupby("job").describe(include='all'))
# Groupby in a loop
for grp, data in df.groupby("job"):
    print(grp, data)
import math
class Shape2D:
    """Abstract 2-D shape; subclasses must provide area()."""
    def area(self):
        raise NotImplementedError()


# __init__ is a special method called the constructor
# Inheritance + Encapsulation
class Square(Shape2D):
    """Square defined by its side length."""

    def __init__(self, width):
        self.width = width

    def area(self):
        return self.width ** 2


class Disk(Shape2D):
    """Circle defined by its radius."""

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        return math.pi * self.radius ** 2
# Same area() call dispatches to each concrete class (polymorphism).
shapes = [Square(2), Disk(3)]
# Polymorphism
print([s.area() for s in shapes])
# The base class itself only raises.
s = Shape2D()
try:
    s.area()
except NotImplementedError as e:
    print("NotImplementedError")
# Missing data
# Missing values are often just excluded
# (assumes a DataFrame `users` with a 'height' column exists from earlier)
df = users.copy()
df.describe(include='all') # exclude missing values
# find missing values in a series
df.height.isnull() # True if NaN, False otherwise
df.height.notnull() # False if NaN, True otherwise
df[df.height.notnull()] # only show rows where height is not NaN
df.height.isnull().sum() # count the missing values, returns 2
# find missing values in a DataFrame
df.isnull() # DataFrame of booleans
df.isnull().sum() # calculate the sum of each column
# Strategy 1: drop missing values
df.dropna() # drop a row if ANY values are missing
df.dropna(how='all') # drop a row only if ALL values are missing
# Strategy 2: fill in missing values
df.height.mean()
df = users.copy()
# .ix was deprecated in pandas 0.20 and removed in 1.0; .loc performs the
# same label-based boolean assignment.
df.loc[df.height.isnull(), "height"] = df["height"].mean()
print(df)
import pymongo
###Connection
# Requires a MongoDB server listening on localhost:27017.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["mydatabase"] # Db
mycol = mydb["customers"] #table
# NOTE(review): `list1` (the ascending cursor) is never iterated, and
# `list` shadows the builtin of the same name.
list1 = mycol.find().sort("name") # ascending
list = mycol.find().sort("name", -1) # Descending
for x in list:
    print(x)
|
class Student:
    """A student with a name and a tuple of grades."""

    # The original defined __init__ twice; the second definition silently
    # replaced the first, making the no-argument form dead code. Default
    # values restore both call styles: Student() and Student(name, grades).
    def __init__(self, name="Rolf", grades=(88, 91, 78, 98, 95)):
        self.name = name
        self.grades = grades

    def average(self):
        """Arithmetic mean of the grades."""
        return sum(self.grades) / len(self.grades)
########################WITHOUT ARGUMENT
# student = Student() # Creating object of type Student
# print(student.name)
# print(student.average()) #or (Good Practice)
# print(Student.average(student)) # (Bad Practice)
########################WITH ARGUMENT
# Build two students and print each name with its grade average.
student1 = Student("Rolf", (88, 91, 78, 98, 95))
student2 = Student("Divakar", (55, 34, 76, 55, 99))
print(student1.name)
print(student1.average())
print(student2.name)
print(student2.average())
|
from tkinter import *
from tkinter import messagebox
root = Tk()
root.title("Tis is title")
# showinfo, showwarning, showerror, askyesno, askquestion
def popup():
    """Ask yes/no in a messagebox and add a label reporting the choice."""
    response = messagebox.askyesno("This is title","Here is messagebox")
    # askyesno returns True/False; comparing to 1 works since True == 1.
    if response==1:
        Label(root,text="You clicked yes").pack()
    else:
        Label(root,text="You clicked no").pack()
Button(root,text="Click me",command=popup,padx=5,pady=5).pack(padx=10,pady=10)
root.mainloop()
"""
简单选择排序
基本思想:在要排序的一组数中,选出最小(或者最大)的一个数与第1个位置的数交换;
然后在剩下的数当中再找最小(或者最大)的与第2个位置的数交换,依次类推,直到第n-1个元素(倒数第二个数)和
第n个元素(最后一个数)比较为止。
时间复杂度:O(n*n)
"""
def simple_select_sort(array):
    """In-place ascending selection sort; returns the same list."""
    for pos in range(len(array)):
        # Index of the smallest remaining element; min() returns the
        # first minimum, matching the strict '<' scan of a manual loop.
        smallest = min(range(pos, len(array)), key=array.__getitem__)
        array[pos], array[smallest] = array[smallest], array[pos]
    return array
# Demo run; prints [1, 4, 23, 45].
my_array = [23, 4, 1, 45]
print(simple_select_sort(my_array))
|
# -*- coding: utf-8 -*-
import math
import csv
import copy
from collections import OrderedDict
from fractions import Fraction
# Bucket width (in years) used by generalize_age.
AGE_GRANULARITY = 10
def reidentify_patient(record_hospital):
    """Try to match a hospital record against voters.txt on the
    (gender, age, zipcode) quasi-identifiers.

    Returns (True, "name: condition") on a match, or a bare False when no
    voter matches. NOTE(review): the inconsistent return shape is a trap —
    a caller truth-testing a (False, ...) tuple would see it as truthy.
    """
    gender = record_hospital['gender']
    age = record_hospital['age']
    zipcode = record_hospital['zipcode']
    success = False
    data = ""
    with open('voters.txt') as csvfile:
        # Sniff the CSV dialect from a sample, then rewind and parse.
        dialect = csv.Sniffer().sniff(csvfile.read(1024))
        csvfile.seek(0)
        reader = csv.DictReader(csvfile, dialect=dialect)
        # Check if the data matches the records of voters
        for record in reader:
            if gender == record["gender"] and age == record["age"] and zipcode == record["zipcode"]:
                success=True
                data = record["name"] + ": " + record_hospital["condition"]
    if success:
        return True, data
    else:
        return False
def generalize_gender(gender: str, level: int):
    """Level 0 keeps the gender; any higher level suppresses it to '*'."""
    assert (type(level) is int and level >= 0)
    assert (gender in ['male', 'female'])
    return gender if level == 0 else '*'
def generalize_age(age: int, level: int):
    """Generalize an age: level 0 = exact value, levels 1-2 = a bucket of
    AGE_GRANULARITY * level years, level 3+ = fully suppressed ('*')."""
    assert (type(level) is int and level >= 0)
    assert (type(age) is int and age > 0)
    if level == 0:
        return age
    if level >= 3:
        return '*'
    width = AGE_GRANULARITY * level
    # Floor division gives the lower bucket bound for positive ints.
    lower = width * (age // width)
    return '[%d-%d[' % (lower, lower + width)
#Question 3
def generalize_zipcode(zipcode: str, level: int):
assert (type(level) is int and level >= 0)
assert (type(zipcode)) is str and zipcode != ""
if level == 0:
return zipcode
elif 0 < level <= len(zipcode):
ast = ""
i=0
while i < level:
i += 1
ast += "*"
zipcode = zipcode[:-level]+ast
return zipcode
else:
return "Error: define another value for level"
def generalize_record(record, level_gender, level_age, level_zipcode):
    """Return a generalized deep copy of one hospital record; the input
    record is left untouched."""
    generalized = copy.deepcopy(record)
    generalized['gender'] = generalize_gender(record['gender'], level_gender)
    generalized['age'] = generalize_age(int(record['age']), level_age)
    generalized['zipcode'] = generalize_zipcode(record['zipcode'], level_zipcode)
    return generalized
def generalize_database(records, level_gender, level_age, level_zipcode):
    # This method generalizes the entire hospital_records, applying the
    # same per-attribute levels to every record.
    generalized = []
    for record in records:
        generalized.append(generalize_record(record, level_gender, level_age, level_zipcode))
    return generalized
def compute_anonymity_level(records, quasi_identifiers):
    """k-anonymity of `records` over the given quasi-identifiers: the
    minimum, over all records, of how many records share all of that
    record's quasi-identifier values."""
    n=0
    k = []
    nr = [] # nr = new records = records reduced to the quasi-identifiers
    age = []
    zipcode = []
    # Project each record onto the quasi-identifier columns only.
    for i in range (0, len(records)):
        new_records = OrderedDict()
        for ii in range (len(quasi_identifiers)):
            v = str (quasi_identifiers[ii])
            new_records[quasi_identifiers[ii]] = records.__getitem__(i)[v]
        nr.append(new_records)
    # Get all the keys
    for i in nr:
        l = i.keys()
        l2 = list(l)
    # Walk the reduced records
    for i in range (0, len (nr)):
        #print(len(nr))
        # Compare against the other (reduced) records
        for ii in range (0, len (l2)):
            n = 0
            #print ("{} is {}".format(l2[ii], nr.__getitem__(i)[l2[ii]]))
        for x in range(0, len(nr)):
            # Pairwise comparison of record i against record x
            temp_n = 0
            for ii in range(0, len(l2)):
                # Track the current record's values (side bookkeeping)
                if (l2[ii] == "zipcode"):
                    zipcode = nr.__getitem__(i)[l2[ii]]
                if (l2[ii] == "age"):
                    age = nr.__getitem__(i)[l2[ii]]
                if (l2[ii] == "gender"):
                    gender = nr.__getitem__(i)[l2[ii]]
                if nr.__getitem__(i)[l2[ii]] == nr.__getitem__(x)[l2[ii]]:
                    temp_n+=1
            # NOTE(review): the hard-coded 3 assumes exactly three
            # quasi-identifiers — confirm before reusing other lists.
            if temp_n == 3:
                n+=1
        k.append(n)
    # Return the smallest equivalence-class size
    return min(k)
def compute_distortion(levels, max_levels):
    """Mean of the three per-attribute level ratios, reduced to its
    fractional part via math.modf (exact arithmetic with Fraction)."""
    assert len(max_levels) == len(levels)
    ratio_sum = (Fraction(levels[0], max_levels[0])
                 + Fraction(levels[1], max_levels[1])
                 + Fraction(levels[2], max_levels[2]))
    mean = Fraction(1, len(max_levels)) * ratio_sum
    return math.modf(mean)[0]
if __name__ == '__main__':
    # Load the voter roll, auto-detecting the CSV dialect.
    with open('voters.txt') as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(1024))
        csvfile.seek(0)
        reader = csv.DictReader(csvfile, dialect=dialect)
        voters_records = [record for record in reader]
    # Load the hospital records the same way.
    with open('hospital_records.txt') as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(1024))
        csvfile.seek(0)
        reader = csv.DictReader(csvfile, dialect=dialect)
        hospital_records = [record for record in reader]
    print(hospital_records)
    age = []
    for i in hospital_records:
        age.append(i["age"])
    # Question 1
    # Patient 12 chosen arbitrarily — another patient can be re-identified
    # by changing the index into hospital_records.
    if (reidentify_patient(hospital_records[12])):
        print ("**** Question 1 ****")
        print("Information of the patient: {}".format(reidentify_patient(hospital_records[12])))
        print ("********\n")
    else:
        print("There's no match on this patient, please try another one")
    # Question 3
    # Generalization of a random patient (12)
    print("**** Question 3 ****")
    print (generalize_record(hospital_records[12],1, 1, 2))
    print("********\n")
    # Question 4
    # Anonymity level of a random data set
    # Adding random values in order to compute anonymity level
    liste = []
    liste.append(hospital_records[19])
    liste.append(hospital_records[1])
    liste.append(hospital_records[12])
    liste.append(hospital_records[4])
    qid = ["gender", "age", "zipcode"]
    print("**** Question 4 ****")
    print("The Anonymity level is {}".format(compute_anonymity_level(liste, qid)))
    print("********\n")
    #Question 5
    print("**** Question 5 ****")
    print("The Distortion with the specific values [1, 1, 2] and maximum values [1, 3, 4] is {}".format(compute_distortion([1, 1, 2], [1, 3, 4])))
    print("********\n")
    # Questions 6
    k_target = 5
    level_gender_max = 1
    level_age_max = 3
    level_zipcode_max = 4
    level_gender_opt = 1
    level_age_opt = 1
    level_zipcode_opt = 2
    params_max = (level_gender_max, level_age_max, level_zipcode_max)
    min_d = []
    min_k = []
    data_opt = []
    print("**** Question 6 ****")
    print ("Computing, please wait...")
    # Exhaustive sweep over all generalization levels; keep schemes whose
    # k lands in (5, 400) and report the one with the least distortion.
    for i in range(0,level_gender_max+1):
        for j in range(0,level_age_max+1):
            for m in range(0,level_zipcode_max+1):
                params_opt = (i,j,m)
                #print (params_opt)
                k_opt = compute_anonymity_level(generalize_database(hospital_records, i, j, m), qid)
                #print('Optimal scheme (targeted k=%d): levels(gender,age,zipcode)=%s, distortion=%.2f, k=%d' % (k_target, str(params_opt), compute_distortion(params_opt, params_max), k_opt))
                if 5 < k_opt < 400:
                    data_opt.append('Optimal scheme (targeted k=%d): levels(gender,age,zipcode)=%s, distortion=%.2f, k=%d' % (k_target, str(params_opt), compute_distortion(params_opt, params_max), k_opt))
                    min_d.append(compute_distortion(params_opt, params_max))
    index = min_d.index(min(min_d))
    print("{}".format(data_opt[index]))
|
"""
Simulation of fast-food restaurant
system description should be added
Outputs : 1-mean time for customer being in the system
2-mean customer's waiting time in receiving food
3-mean and maximum of queue length in serving food part
4-mean performance of the servers in ordering and receiving part
5-mean number of customers in the system
Starting State = ...
Authors: Mohammad Sadegh Abolhasani, Abolfazl Tghavi
Starting Date:9 May 2020
Finishing Date:... May 2020
"""
import random
import math
import pandas as pd
###import numpy as np
from operator import itemgetter
# an array for holding the information of all customers attended
customers = []
# defining statistics which should be updated during the simulation
# (accumulators for the five outputs listed in the module docstring)
total_time_customer_in_system = 0
total_num_of_exited_customers = 0
total_time_customer_in_receiving_queue = 0
total_num_of_customers_received_food = 0
serving_food_queue_length = []
total_ordering_server_busy_time = 0
total_receiving_server_busy_time = 0
ordering_servers_rest_time = 0
receiving_servers_rest_time = 0
# defining a class for customers
class Customer:
    # Class-level defaults; index and entering_time are shadowed by the
    # instance attributes set in __init__.
    index = 0
    entering_time = 0
    exit_time = 0
    entering_time_to_receiving_section = 0
    def __init__(self, index, enetering_time):
        # NOTE(review): parameter name 'enetering_time' looks like a typo
        # for 'entering_time'; left as-is since keyword callers may rely
        # on the current spelling.
        self.index = index
        self.entering_time = enetering_time
# initialization of the system states and FEL
def starting_state():
    """Build the initial simulation state, an empty FEL, and both clocks.

    Returns (state, future_event_list, step, clock, Real_clock).
    """
    # Simulation clock: minutes elapsed since the start of the run.
    clock = 0
    # Wall-clock representation with hour/minute parts; starts at 10 AM.
    Real_clock = {"hour": 10, "minute": 0}
    step = 0
    # System state: server/chair availability and queue lengths.
    state = {
        'Ordering_Server_Idle': 5,
        'Ordering_Server_Resting': 0,
        'Ordering_Server_Rest_blocked': 0,
        'Ordering_queue': 0,
        'Receiving_Server_Idle': 2,
        'Receiving_Server_Resting': 0,
        'Receiving_Server_Rest_blocked': 0,
        'Receiving_queue': 0,
        'Serving_Chairs_Idle': 30,
        'serving_queue': 0,
    }
    # The initial future event list starts empty.
    future_event_list = []
    #FEL_maker(future_event_list, , )
    return state, future_event_list, step, clock, Real_clock
# a function for making new event notices and adding them to the FEL
def FEL_maker(future_event_list, keys, values):
    """Append one event notice (dict pairing keys[i] -> values[i]) to the
    future event list, in place."""
    notice = {key: values[position] for position, key in enumerate(keys)}
    future_event_list.append(notice)
|
def reverseInteger(x: int):
    """Return x with its decimal digits reversed, preserving the sign.

    E.g. -123 -> -321, 120 -> 21 (leading zeros vanish via int()).
    """
    digits = str(x)
    sign = '-' if digits[0] == '-' else ''
    # Strip the sign, reverse what remains, then reattach the sign.
    return int(sign + digits.lstrip('-')[::-1])
print(reverseInteger(-123))
# coding: utf-8
# <div align="right">Python 2.7 Jupyter Notebook</div>
#
# # Living labs
# <br>
# <div class="alert alert-warning">
# <b>This notebook should be opened and completed by students completing both the technical and non-technical tracks of this course.</b>
# </div>
#
# ### Your completion of the notebook exercises will be graded based on your ability to:
#
# > **Understand**: Do your comments show evidence that you recall and understand technical concepts?
# # Notebook introduction
#
# Living labs describe the paradigm of working with new ideas and technology, directly engaging with, and observing users while they are living their lives.
#
# While the levels of direct user engagement and co-creation vary between the examples referenced in this section, they share the access to users behavior in response to novel products, content or activity.
# # 1. Living Labs
#
# A living lab can be established using existing infrastructure and data sources. This was shown in the example of the Andorra living lab which made use of CDRs, credit card transactions, and public transportation data. While direct user engagement may be limited, their behaviors - especially their reactions to new products or experimental interventions - can be observed in this setting. Living labs can also be entirely virtual, as is the case in the A/B testing of web applications.
#
# Although the bleeding edge of living lab development is driven by the growing data collection and interaction capabilities that have been enabled by the spread of ubiquitous computing, this is not a prerequisite for big or electronic data. In Video 2 Professor Alex Pentland discusses an example of how Nike leverages living labs to determine which shoes to release within their stores. You can also refer to [Hack-MIT](http://livinglab.mit.edu/hack-mit/) as an example of how to use existing infrastructure, such as WiFi access points, as a simple living lab.
#
# Data visualization can take many forms. You can refer to [this](https://github.com/hariharsubramanyam/mit-wifi-data-vis) example of a visualization of WiFi data. Once you have the basic lab in place, you would generally build your use case around it. Marketers would likely be interested in the density and profiles of individuals in specific areas, and the resulting efficiency of campaigns or other interventions, while city planners may be interested in optimizing flow in public spaces.
#
# All of the data that was used in this course was generated by sophisticated deployments which collected not only **big** data, but also **deep** data.
#
#
# Another recent trend in technology, the "[Internet of Things](http://www.gartner.com/it-glossary/internet-of-things/)" (or "IoT"), holds significant opportunities for data collection and interaction with the environment, whether human or device based. The big shift here is that the end-points are becoming active and they contain computation capabilities, where previous efforts focused on sensors or observations alone.
#
# > **Note**: The presence of computation capabilities at the end-points also opens up opportunities such as software-defined products. Consider activity trackers learning to recognize new activities, or self-driving cars where new features such as parking capabilities can be added with software updates. Once you have gathered data and refined your algorithms, these can potentially be implemented as software-defined products or features (like the one above) can be added using this mechanism.
#
# You can read more about the European Open Living Labs network [here](http://www.openlivinglabs.eu/), and another example of a recent deployment of a living lab using sensor and mobile data, the Amsterdam IoT Living Lab, [here](http://iotlivinglab.com/). Additional information is available [here](http://iotlivinglab.com/amsterdam-iot-living-lab-wiki/) and on this [blog](https://www.yenlo.com/blog/building-the-world-s-biggest-ibeacon-living-lab-with-wso2).
#
# > **Note**:
#
# > Living lab projects typically include a wide variety of stakeholders and partners, including government, academic, and commercial parties. Refer to the goal statement of the Amsterdam Smart City project below as an example:
#
# > **Goal**: The goal of the IoT Living Lab is to provide IoT infrastructure and actionable, Open Data, and developer friendly platforms for emerging IoT innovations. This stimulates the creation of new startups and mobile applications, which in turn make a rapid impact on the local economy.
#
# > (Source: Amsterdam Smart City 2016)
#
# While the focus of this course is social analytics, there are a number of recent technological trends that can add significant value to your future projects. You are encouraged to explore these on your own.
#
# Much of the publicity around big data focuses solely on the volume, or in some cases the format of the data, and many fail to capitalize on existing sources of data that may already be accessible. [Dark data](http://www.gartner.com/it-glossary/dark-data/) refers to data that is already available, but not utilized. IoT also contains a number of relevant concepts. You can read about Gartner's view of the top ten IoT technologies for 2017 and 2018 [here](http://www.gartner.com/newsroom/id/3221818).
# > **Note**:
#
# > Once you better understand the available data sources, as well as the options offered by technological developments, you will be in a much better position to successfully start and complete your social analytics projects.
#
# Refer to the additional links below for more guidance:
# - [The Dark Side of Big Data](http://www.forbes.com/sites/tomfgoodwin/2016/07/14/the-dark-side-of-big-data/#16e565e738a2)
# - [Understanding Dark Data](https://www.linkedin.com/pulse/20140529034348-246665791-understanding-dark-data)
# - [Avoiding The Pitfalls of Dark Data](http://www.forbes.com/sites/centurylink/2015/10/09/watch-your-step-avoiding-the-pitfalls-of-dark-data/#3ce31cf278c4)
# <div class="alert alert-info">
# <b>Exercise 1 Start.</b>
# </div>
#
# ### Instructions
#
# > Arek Stopczynski points out the dangers of fixating on big data and losing perspective, and studying the data instead of the population of interest (and their interactions).
#
# > a) How do "deep data" and "living labs" help us to avoid the pitfalls of only studying available data sets?
#
# > b) List some of the advantages of "living labs" as opposed to "focus groups" or "large scale surveys"?
#
# > **Note**: Your answer should be a short description (two to three sentences) for each of the two questions.
# **a)** If scientifically defined and planned, and responding to ethical and privacy concerns, they give an opportunity to study the behavior of subjects through a combination of data collection channels in a customized way and in real time and space with minimal intrusion on their daily habits and a minimum of influences letting the information come from them directly instead of extracting from rigid available data sets by definition non recent, not always relevant information even for the best proxies, with rarely appropriate ranges, sometimes questionable robustness and reliability. It also allow to control the data collection process, treatment and interpretation to avoid data integrity and privacy issues and make sure that the process if reproducible.
#
# **b)** A few examples of advantages in favor of living labs:
# - Living labs are often multi-channels vs. single or limited channels for focus groups or surveys.
# - Living labs are live and can be run over long periods of time vs. one-time takes for focus groups and surveys.
# - Living labs are direct expression without inferences of users behaviors vs. one time opinions in focus groups and surveys.
# - Living labs can go beyond focus groups and surveys by looking at actual actions of subjects and not only what they say.
# - Living labs can avoid biases introduced by the context, questions or the delivery of questions in focus groups or surveys.
# <br>
# <div class="alert alert-info">
# <b>Exercise 1 End.</b>
# </div>
#
# > **Exercise complete**:
#
# > This is a good time to "Save and Checkpoint".
# ## 1.1 Purpose
# Typical purposes for setting up living labs include:
# * Research;
# * Development; and
# * Production applications.
#
# In commercial context, there are internal and external opportunities for setting up living labs.
#
# ### 1.1.1 External
# Typical uses include marketing and customer insight use cases where the profile and demographics of individuals are used to optimize or create product or service offerings. These insights are typically also relevant in supply chain optimization.
#
# ### 1.1.2 Internal
# A better understanding of social networks in companies can be used in human resources projects, as per the examples introduced in Module 7. You can also refer to the seminal work of Professor Alex Pentland, the [Sociometric Badge study](http://realitycommons.media.mit.edu/badgedataset.html) from 2008, which provides a myriad of insights in this [paper](http://realitycommons.media.mit.edu/download.php?file=Sensible_Organizations.pdf).
#
# Organizations usually have access to large amounts of dark and deep data which can be utilized to get started. These sources of data can also be extended with applications such as Funf to create deep data sets.
#
# > **Note**:
#
# > Data privacy and sovereignty
#
# > It is hard to overemphasize the importance of the data privacy, or even data sovereignty of individuals, due its centrality in building the trust relationship necessary for a living lab to be successful. Please review the privacy course content from Module 6, with special focus on the open Personal Data Store (openPDS) architecture, which strives to provide privacy to the users even when the data is used for internal purposes only.
# <div class="alert alert-info">
# <b>Exercise 2 Start.</b>
# </div>
#
# ### Instructions
#
# > In Video 4, David Shrier discusses the typical stakeholders involved in setting up living labs. Please provide a short summary of the roles played by each of the following parties, referencing both the input (what they require from the other parties) and the output that they deliver to the other stakeholders.
#
# > a) Government
#
# > b) Data partners
#
# > c) Local or global business
#
# > d) Local universities or academic partners
# **a)** Government: under the pre-requisite that the study follow existing local, national and international laws and regulations in data ethics, privacy and use:
# - Local community introduction and engagement support
# - Financing subsidies in setting up and building the living lab
# - Local stakeholders introductions and involvement of governmental, local, national or international organizations and agencies
# - Offer credibility for potential study subjects
# - Help in developing use cases and financing follow-up programs
#
# **b)** Data partners: under the pre-requisite that the study respect client database privacy and use standards:
# - Help in designing the study method and data collection (telecommunication or credit card companies)
# - Help in sampling methodology in regard to available data
# - Can sponsor mobile phones or equipment to gain exposure, test new products/services and gain competitive intelligence through the study (subjects' choices of data or voice plans, deep insights into data consumption habits, etc.), provided the equipment is given with no strings attached
# - Industry expertise and technical support
# - Help in scaling the study on larger range from local to national and even international scale.
#
# **c)** Local or global business: under the pre-requisite that the study can have meaningful insights for them:
# - On the ground support
# - Data analysis support
# - Financial support if no strings attached
#
# **d)** Local universities or academic partners: under the pre-requisite that the study corresponds to their ethics, data privacy and scientific standards:
# - Methodology support
# - Data analysis support
# - Scientific community exposure
# - Scientific credibility
# <br>
# <div class="alert alert-info">
# <b>Exercise 2 End.</b>
# </div>
#
# > **Exercise complete**:
#
# > This is a good time to "Save and Checkpoint".
# <div class="alert alert-info">
# <b>Exercise 3 Start.</b>
# </div>
#
# ### Instructions
#
# > Professor Alex Pentland discussed how many organizations are realizing the value of data as a strategic asset. How does the role of an analyst change when considering strategic analysis as opposed to more traditional data analysis?
#
# > Provide a short description of the typical tasks that you would expect within this new role, and briefly discuss or refer to potential organizational changes (or parties) that the analysts would need to interact with in their new role.
# The strategic analyst supports the company management in the decision making process to shape the future of his company. He focuses more on competitive intelligence localizing and tracking the company in its competitive landscape, looking at market size and growth rate changes and their causation, threats of new entrants or substitute products or services, actionable user and consumer insights that could increase top and bottom line results in creating new products and services, or increase efficiency by reducing churn rates, switching rates, increase repeat customers, boost customers and partners satisfaction, improve marketing campaigns and delivery methods efficiency or explore new payment options.
#
# Duties will include:
# - Strategy formulation for the corporate management team and all its business units (assets and affiliates portfolio management and acquisitive growth)
# - Industry research, analysis and tracking
# - Growth opportunity identification and business case development
# - Pipeline development, specifically in the form of target identification, screening and tracking, and relationship development with potential targets
# - Financial analysis and valuation
# - Management of due diligence teams
# - Business plan creation
# - Deal structuring and negotiation
# - Preparation of preliminary and final presentations to senior management
#
# Interactions should include:
# - Management team
# - Board of directors
# - Heads of units and key business units leaders
# - External consultants
# <br>
# <div class="alert alert-info">
# <b>Exercise 3 End.</b>
# </div>
#
# > **Exercise complete**:
#
# > This is a good time to "Save and Checkpoint".
# # 2. Submit your notebook
#
# Please make sure that you:
# - Perform a final "Save and Checkpoint";
# - Download a copy of the notebook in ".ipynb" format to your local machine using "File", "Download as", and "IPython Notebook (.ipynb)", and
# - Submit a copy of this file to the online campus.
#
# # 3. References
# Amsterdam Smart City. 2016. “IoT Living Lab - Amsterdam Smart City.” Accessed September 5. https://amsterdamsmartcity.com/projects/iot-living-lab.
#
# > **Note**:
#
# > Arek Stopczynski references the Copenhagen Network Study and indicates that the research is ongoing. You can read more about recent developments and additional publications [here](https://sunelehmann.com/2016/08/24/new-paper-in-pnas/).
# In[ ]:
|
#!/usr/bin/env python3
from rearrange import rearrange_name
import unittest
class TestRearrange(unittest.TestCase):
    """Unit tests for rearrange_name ("Last, First" -> "First Last")."""
    def test_basic(self):
        # A simple "Last, First" pair is flipped.
        self.assertEqual(rearrange_name('Guenon, Rene'), 'Rene Guenon')
    def test_null(self):
        # An empty string passes through unchanged.
        self.assertEqual(rearrange_name(''), '')
    def test_double_name(self):
        # Middle initials stay attached to the first-name part.
        self.assertEqual(rearrange_name('Hopper, Grace M.'), 'Grace M. Hopper')
    def test_one_name(self):
        # A single name with no comma is returned as-is.
        self.assertEqual(rearrange_name('Voltaire'), 'Voltaire')
unittest.main()
|
#!/usr/bin/env python3
import re
def rearrange_name(name):
    """Rearrange a "Last, First" name into "First Last".

    Inputs that do not match the "Last, First" shape (a single name,
    an empty string) are returned unchanged.
    """
    # [\w .] covers multi-part name components such as "Grace M."
    match = re.search(r"^([\w .]*), ([\w .]*)$", name)
    if match is None:  # identity check (is None) instead of == None
        return name
    return "{} {}".format(match[2], match[1])
|
# Author: Christopher LeMoss
# Date: 1-30-2021
# Description:
# Homework 3 for CS362 Software Engineering II at Oregon State University.
# Determines whether or not the given year is a leap year.
# Lacks error handling.
year = int(input("Enter year: "))
# Gregorian rule: divisible by 4, except century years, which must also
# be divisible by 400.  Single expression equivalent to the nested ifs.
is_leap_year = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
if is_leap_year:
    print(year, "is a leap year.")
else:
    print(year, "is not a leap year.")
|
class TicTacToe:
    """3x3 tic-tac-toe board with a current-state string.

    NOTE(review): this class is visibly broken — each *_win method and
    make_move contain unreachable code after an unconditional `return True`,
    vertical_win/diagonal_win reference an undefined `row`/use wrong
    indexing, and a stray copy of the interactive game loop (referencing
    the module-level `tic`) sits inside make_move.  The comments below
    flag these spots; the code itself is left untouched.
    """
    def __init__(self):
        # 3x3 board of empty strings; "UNFINISHED" until a win is recorded.
        self._board = [['','',''],['','',''],['','','']]
        self._current_state = "UNFINISHED"
        self._num = 0
    def horizontal_win(self, row, player):
        if row>2 or row<0:
            return False
        # NOTE(review): compares a whole row (a list) to "" — always True,
        # so this guard always rejects; presumably a cell index is missing.
        if self._board[row] != "":
            return False
        if self._current_state != "UNFINISHED":
            return False
        # NOTE(review): overwrites an entire row with the player marker.
        self._board[row] = player
        return True
        # --- unreachable from here down (after the return above) ---
        if self._board[row] == player:
            self._num = self._num + 1
        if self._board[row][0] == player and self._board[row][1] == player and self._board[row][2] == player:
            self._current_state = str(player) + "_Won"
        return True;
    def vertical_win(self, col, player):
        # NOTE(review): `row` is never defined in this method — NameError.
        if row > 2 or row < 0 or col > 2 or col < 0:
            return False
        if self._board[row][col] != "":
            return False
        if self._current_state != "UNFINISHED":
            return False
        self._board[row][col] = player
        return True
        # --- unreachable from here down ---
        if self._board[row] == player:
            self._board[col] = player
            self._num = self._num + 1
        if self._board[0][col] == player and self._board[1][col] == player and self._board[2][col] == player:
            self._current_state = str(player) + "_Won"
        return True
    def diagonal_win(self, row, col, player):
        if row > 2 or row < 0 or col > 2 or col < 0:
            return False
        if self._board[row][col] != "":
            return False
        if self._current_state != "UNFINISHED":
            return False
        self._board[row][col] = player
        return True
        # --- unreachable from here down ---
        if self._board[row] == player:
            self._board[row][col] = player
            self._num = self._num + 1
        if self._board[0][0] == player and self._board[1][1] == player and self._board[2][2] == player:
            self._current_state = str(player) + "_Won"
        elif self._board[0][2] == player and self._board[1][1] == player and self._board[2][0] == player:
            self._current_state = str(player) + "_Won"
        return True
    def make_move(self, row, col, player):
        """Place `player` at (row, col); False if out of range, occupied,
        or the game is over.  NOTE(review): never updates _current_state,
        so no game played through this method can ever end."""
        if row > 2 or row < 0 or col > 2 or col < 0:
            return False
        if self._board[row][col] != "":
            return False
        if self._current_state != "UNFINISHED":
            return False
        self._board[row][col] = player
        return True
        # --- unreachable: a stray copy of the module-level game loop,
        # referencing the global `tic` and undefined row/col ---
        count = 0
        while tic._current_state == "UNFINISHED":
            if count % 2 == 0:
                flag = tic.make_move(int(row), int(col), "X")
            else:
                flag = tic.make_move(int(row), int(col), "O")
        else:
            count = count + 1
        if tic.get_current_state() == "DRAW":
            print("Match is Draw")
        elif tic.get_current_state() == "X_WON":
            print("Player X Won the Match")
        elif tic.get_current_state() == "O_WON":
            print("Player O Won the Match")
    def get_current_state(self):
        # "UNFINISHED" or "<player>_Won" (set only by the *_win methods).
        return self._current_state
    def print_Table(self):
        """Print the board with row/column indices for the players."""
        print("\nPresent Status of TicTacToe board is : \n")
        l = " 0 1 2"
        count=0
        print(l)
        for i in self._board:
            print(str(count)+str(i))
            count=count+1
        print()
# Interactive driver: X and O alternate turns; a rejected move (occupied
# cell, out-of-range index, finished game) is retried until it succeeds.
# NOTE(review): make_move never updates _current_state and no draw is
# detected, so this loop can only end if the class is extended to set the
# state -- confirm against the intended game rules.
tic = TicTacToe()
tic.print_Table()
count = 0
while tic._current_state == "UNFINISHED":
    if count % 2 == 0:
        print("Player-x it's your turn")
        row = input("Enter Row In which You want to insert (0-2) : ")
        col = input("Enter Column In which You want to insert (0-2) : ")
        flag = tic.make_move(int(row), int(col), "X")
        if flag is False:
            tic.print_Table()
            while flag is False:
                print("Move is Not Successful")
                print("Player-x it's your turn")
                row = input("Enter Row In which You want to insert (0-2) : ")
                col = input("Enter Column In which You want to insert (0-2) : ")
                flag = tic.make_move(int(row), int(col), "X")
            tic.print_Table()
        else:
            print("Move is Successful")
            tic.print_Table()
    else:
        print("Player-O it's your turn")
        row = input("Enter Row In which You want to insert (0-2) : ")
        col = input("Enter Column In which You want to insert (0-2) : ")
        flag = tic.make_move(int(row), int(col), "O")
        if flag is False:
            tic.print_Table()
            while flag is False:
                print("Move is Not Successful")
                print("Player-O it's your turn")
                row = input("Enter Row In which You want to insert (0-2) : ")
                col = input("Enter Column In which You want to insert (0-2) : ")
                # Bug fix: the retry previously placed an "X" during O's turn.
                flag = tic.make_move(int(row), int(col), "O")
            tic.print_Table()
        else:
            print("Move is Successful")
            tic.print_Table()
    count = count + 1
if tic.get_current_state() == "DRAW":
    print("Match is Draw")
elif tic.get_current_state() == "X_Won":
    print("Player X Won the Match")
else:
    print("Player O Won the Match")
import mysql.connector

# NOTE(review): the connection targets database 'salon' but the queries read
# 'passengers'/'flights' tables -- confirm the intended database name.
mydb = mysql.connector.connect(host='localhost', user='root', password='', database='salon')
mycursor = mydb.cursor()
# Flight ids that carry more than one passenger.
mycursor.execute('select flight_id FROM passengers GROUP BY flight_id HAVING COUNT(*) > 1')
myresult = mycursor.fetchall()
if myresult:
    print('COUNT Have Records:')
    print('FLIGHT ID')
    print('_______________')
    for x in myresult:
        print(x[0])
else:
    print('No Records In Data Base')
mycursor = mydb.cursor()
# Full flight rows for those multi-passenger flights.
# Bug fix: the subquery previously selected `id` (an arbitrary passenger id
# under GROUP BY) instead of the grouped `flight_id`, so the IN filter
# matched flights against the wrong values.
mycursor.execute('SELECT * FROM flights WHERE id IN ( SELECT flight_id FROM passengers GROUP BY flight_id HAVING COUNT(*) > 1)')
myresult = mycursor.fetchall()
if myresult:
    print('COUNT Have Records:')
    print('FLIGHT ID')
    print('_______________')
    for x in myresult:
        print(x)
else:
    print('No Records In Data Base')
# Validate a Brazilian CPF number by recomputing its two check digits.
cpf = input("Entre com o seu CPF ")
# Keep only the digit characters.  Bug fix: the previous version removed
# items from the list while iterating over it, which skips the element
# right after each removal (e.g. adjacent separators like "." and "-").
cpflist = [digito for digito in cpf if digito.isnumeric()]
# Weights 10..2 for the first check digit, 11..2 for the second.
digito1 = list(range(10, 1, -1))
digito2 = list(range(11, 1, -1))
# First check digit: weighted sum of the first 9 digits, mod 11.
var = sum(digito1[i] * int(cpflist[i]) for i in range(9))
digitof1 = 11 - (var % 11)
if digitof1 > 9:  # remainders 0 and 1 map to check digit 0
    digitof1 = 0
# Second check digit: weighted sum of the first 10 digits, mod 11.
var = sum(digito2[i] * int(cpflist[i]) for i in range(10))
digitof2 = 11 - (var % 11)
if digitof2 > 9:
    digitof2 = 0
if digitof1 == int(cpflist[-2]) and digitof2 == int(cpflist[-1]):
    print("CPF válido")
else:
    print("CPF inválido")
|
# Greedy change-making for a withdrawal, largest denomination first.
dinheiro = [100, 50, 25, 10, 5, 2, 1, 0.5, 0.25, 0.10, 0.05, 0.01]
newvalue = float(input('Qual valor você deseja sacar em reais? '))
print("Você irá sacar:")
# Bug fix: work in integer centavos.  Repeated float // and % accumulate
# binary-representation error (0.10/0.05/0.01 are not exact in floating
# point), which could miscount the small coins.
restante = round(newvalue * 100)
for i in dinheiro:
    valor_cent = round(i * 100)  # denomination in centavos
    x = restante // valor_cent
    if i > 1 and x != 0:
        print(f'{x} nota(s) de R$ {i:.2f}')
    elif i <= 1 and x != 0:
        print(f'{x} moeda(s) de R$ {i:.2f}')
    restante = restante % valor_cent
|
'''
Triangle, pentagonal, and hexagonal numbers are generated by the following formulae:
Triangle Tn=n(n+1)/2 1, 3, 6, 10, 15, ...
Pentagonal Pn=n(3n−1)/2 1, 5, 12, 22, 35, ...
Hexagonal Hn=n(2n−1) 1, 6, 15, 28, 45, ...
It can be verified that T285 = P165 = H143 = 40755.
Find the next triangle number that is also pentagonal and hexagonal.
'''
def triangle_number(n):
    """T(n) = n(n+1)/2."""
    return n * (n + 1) // 2
def pentagonal_number(n):
    """P(n) = n(3n-1)/2."""
    return n * (3 * n - 1) // 2
def hexagonal_number(n):
    """H(n) = n(2n-1)."""
    return n * (2 * n - 1)
limit = 100000
# Pre-compute the next `limit` pentagonal/hexagonal values past the known
# coincidence T285 = P165 = H143, then scan triangle numbers against both
# sets (membership tests are O(1)).
pentagonal_numbers = {pentagonal_number(166 + offset) for offset in range(limit)}
hexagonal_numbers = {hexagonal_number(144 + offset) for offset in range(limit)}
for nt in range(286, 286 + limit):
    candidate = triangle_number(nt)
    if candidate in pentagonal_numbers and candidate in hexagonal_numbers:
        print('T{0} = {1}'.format(nt, candidate))
|
'''
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
'''
def sum_square_difference(n):
    """Return (1 + ... + n)**2 - (1**2 + ... + n**2), exactly.

    Uses integer closed forms throughout.  The previous float polynomial
    (n**4/4 + n**3/6 - ...) relied on round() and loses precision once the
    intermediate values exceed what a float can represent exactly.
    """
    square_of_sum = (n * (n + 1) // 2) ** 2      # (sum 1..n)^2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6  # sum of k^2
    return square_of_sum - sum_of_squares
print(sum_square_difference(100))
'''
Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
'''
import math
import numpy as np
def lattice_paths(width, height):
    """Count monotone lattice paths through a width x height grid.

    A path from the top-left to the bottom-right corner is a sequence of
    `width` right-moves and `height` down-moves, so the count is the
    binomial coefficient C(width + height, width).  This replaces the
    previous O(width * height) numpy dynamic-programming table with the
    exact closed form from the standard library.
    """
    return math.comb(width + height, width)
print(lattice_paths(20,20))
'''
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to the sum of the factorial of their digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
'''
factorials = {'0': 1}
f = 1
for i in range(1, 10):
f *= i
factorials[str(i)] = f
result = set()
for i in range(10, 2540160):
if sum([factorials[x] for x in str(i)]) == i:
result.add(i)
print(result)
|
'''
The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
(Please note that the palindromic number, in either base, may not include leading zeros.)
'''
def is_bin_Palindrome(n):
    """True when n's binary representation (without the '0b' prefix) reads
    the same forwards and backwards."""
    b = bin(n)[2:]  # bin() already returns a str; the str() wrapper was redundant
    return b == b[::-1]
double_base_palindromes = set()
# Single-digit numbers are decimal palindromes by construction; only the
# binary side needs checking.
for i in range(1, 10):
    if is_bin_Palindrome(i):
        double_base_palindromes.add(i)
# Longer decimal palindromes below one million are generated from a left
# half `i` (1..999): mirrored for even length, with an extra middle digit
# for odd length.  This enumerates candidates instead of testing every n.
for i in range(1, 1000):
    # even length: abc -> abccba
    n_even = int(str(i) + str(i)[::-1])
    if is_bin_Palindrome(n_even):
        double_base_palindromes.add(n_even)
    if i < 100:
        for j in range(0, 10):
            # odd length: ab, x -> abxba
            n_odd = int(str(i) + str(j) + str(i)[::-1])
            if is_bin_Palindrome(n_odd):
                double_base_palindromes.add(n_odd)
print('Length:Sum = {0}:{1}'.format(
    len(double_base_palindromes), sum(double_base_palindromes)))
|
# Helper to merge 2 sorted arrays.
def merge(arrA, arrB):
    """Merge two already-sorted lists into one sorted list in O(len(arrA) + len(arrB)).

    The previous version allocated a [0]*n buffer it never used and then
    called sorted() on the concatenation — O(n log n), ignoring the
    precondition that both inputs are sorted.
    """
    merged_arr = []
    ia, ib = 0, 0
    # Two-pointer walk: always take the smaller head.  <= keeps the merge
    # stable (ties favor arrA), matching sorted()'s stable behavior.
    while ia < len(arrA) and ib < len(arrB):
        if arrA[ia] <= arrB[ib]:
            merged_arr.append(arrA[ia])
            ia += 1
        else:
            merged_arr.append(arrB[ib])
            ib += 1
    # One side is exhausted; append the remainder of the other.
    merged_arr.extend(arrA[ia:])
    merged_arr.extend(arrB[ib:])
    return merged_arr
# Merge Sort USING RECURSION.
def merge_sort(arr):
    """Top-down recursive merge sort; returns a sorted list.

    Lists of length 0 or 1 are already sorted and returned as-is.
    """
    if len(arr) > 1:
        mid = len(arr) // 2
        lhs = merge_sort(arr[:mid])
        rhs = merge_sort(arr[mid:])
        arr = merge(lhs, rhs)
    return arr
test_array = [0, 2, 1, 3, 4, 5, 6]
merge_sort(test_array)
# STRETCH: implement an in-place merge sort algorithm
def merge_in_place(arr, start, mid, end):
    """Stub: merge arr[start:mid] and arr[mid:end] in place.

    Currently a no-op that returns arr unchanged.
    """
    # TO-DO
    return arr
def merge_sort_in_place(arr, l, r):
    """Stub: in-place merge sort of arr[l:r]; currently returns arr unchanged."""
    # TO-DO
    return arr
# STRETCH: implement the Timsort function below
# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt
def timsort(arr):
    """Stub for a Timsort implementation; currently returns arr unchanged."""
    return arr
|
# This program creates quizzes with random question order and random wrong
# answers, so that each generated exam differs from the others.
import random
import json
import os
import time
def createquiz(quiznum):
    """Generate `quiznum` state-capital quiz files plus an answer key.

    Loads the state -> capital mapping from capital.json, writes one quiz
    text file per form into a date-stamped folder, and dumps each form's
    answer letters to answers.json in the same folder.
    """
    now = list(time.localtime())  # current date, used to stamp the folder name
    foldername = 'quizes {}-{}-{}'.format(now[0],now[1],now[2])
    with open('capital.json') as f:  # state -> capital dict
        stateCapital = json.load(f)
    if not os.path.exists(foldername):  # create the dated folder if missing
        os.makedirs(foldername)
    state = list(stateCapital.keys())
    answers = {}  # quiz file name -> list of (label, correct letter) pairs
    for i in range(quiznum):
        random.shuffle(state)  # fresh question order for every form
        with open(foldername + '/' +'quizfile {}.txt'.format(i + 1),'w') as f:
            f.write('Name:\n\nDate:\n\nPeriod:\n\n')
            f.write((' ' * 20) + 'State capital quiz (form {})'.format(i + 1))
            f.write('\n\n')
            answers['quizfile %s' %(i+1)] = []
            # 50 questions per form, each with one correct answer and
            # three distractors drawn from the other capitals.
            for qnum in range(50):
                correctAnswer = stateCapital[state[qnum]]
                wrongAnswer = list(stateCapital.values())
                del wrongAnswer[wrongAnswer.index(correctAnswer)]
                wrongAnswer = random.sample(wrongAnswer,3)
                answerOptions = [correctAnswer] + wrongAnswer
                random.shuffle(answerOptions)
                # Bug fix: questions were labelled 0-49 while the answer key
                # used 1-50; number both from 1 so they line up.
                f.write('%s. What is the capital of %s? \n\n' %(qnum + 1,state[qnum]))
                for x in range(4):
                    f.write('\n %s) %s' %('ABCD'[x],answerOptions[x]))
                f.write('\n\n')
                answers['quizfile %s' %(i+1)].append(('Answer for question %s' %(qnum+1),'ABCD'[answerOptions.index(correctAnswer)]))
    with open(foldername + '/answers.json','w') as f:  # answer key for all forms
        json.dump(answers,f)
# Generate ten quiz forms.
try:
    createquiz(10)
except ValueError as v:
    # NOTE(review): createquiz is not shown raising ValueError itself;
    # presumably this guards a data/conversion error -- confirm which
    # call is expected to raise.
    print('error found!',v)
print('done!')
|
##Check a given number/word, whether its a palindrome or not
##Use recursion to do the task
def is_palindrome(num):
    """Recursively test whether num (an int or a string) is a palindrome."""
    text = str(num)
    # Base case: zero or one character is trivially palindromic.
    if len(text) <= 1:
        return True
    # Mismatched end characters rule the candidate out immediately;
    # otherwise recurse on the interior slice.
    if text[0] != text[-1]:
        return False
    return is_palindrome(text[1:-1])
## 4. A palindromic number reads the same both ways.
##Find the largest palindrome made from the product of two 3-digit numbers
##(Hint- Ans is 9009 = 91 × 99 )
def two_pal():
    """Return the largest palindrome that is a product of two 3-digit numbers.

    max() over a generator replaces the previous pattern of accumulating
    every palindromic product in a list (and the non-idiomatic
    `if ... == True` comparison); `j` starts at `i` so each unordered pair
    is considered once.
    """
    return max(i * j
               for i in range(100, 1000)
               for j in range(i, 1000)
               if is_palindrome(i * j))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.