blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
54cc6a11f21f0f20d489aec991ef9f46d562951d | Python | kay-darryl-so/SE3350-Match-TA | /departmentGUI.py | UTF-8 | 5,236 | 2.59375 | 3 | [] | no_license | # GUI for Department
import tkinter as tk
from tkinter import filedialog, ttk
from matchTA import matchTA
def matchTAtoCourse():
    """Replace the main-screen contents with a read-only table of TA->course
    matches computed by matchTA() from "InstructorRanking.csv".

    Relies on the module-global Tk root `ms` created by mainScreen().
    """
    frame = tk.Frame(ms, bg='#f3e6ff')
    frame.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.8)
    label1 = tk.Label(frame, text= 'Match Results', bg='#f3e6ff', font=('Calibri',18))
    label1.place(relx=0.12, rely=0.2, relwidth=0.8, relheight=0.1)
    fn = "InstructorRanking.csv"
    match = matchTA(fn)
    # Two-column table; the implicit tree column "#0" is collapsed to width 0.
    tree = ttk.Treeview(frame)
    tree["columns"]=("Name","Course")
    tree.column("#0", width=0)
    tree.column("Name", width=150, minwidth=150, stretch=tk.NO)
    tree.column("Course", width=250, minwidth=270, stretch=tk.NO)
    tree.heading("Name",text="Name",anchor=tk.W)
    tree.heading("Course", text="Course",anchor=tk.W)
    # assumes matchTA() returns at least four (name, course) pairs -- TODO confirm
    tree.insert(parent="", index="end", iid=0, text="", values=(match[0][0],match[0][1]))
    tree.insert(parent="", index="end", iid=1, text="", values=(match[1][0],match[1][1]))
    tree.insert(parent="", index="end", iid=2, text="", values=(match[2][0],match[2][1]))
    tree.insert(parent="", index="end", iid=3, text="", values=(match[3][0],match[3][1]))
    tree.place(relx=0.3, rely=0.35)
    editButton = tk.Button(frame, text="Edit TA Allocations", command=editAllocations)
    editButton.place(relx=0.35, rely=0.7, relwidth=0.3, relheight=0.1)
def editAllocations():
    """Replace the main-screen contents with an editable TA-allocation table
    (add / remove / select / update rows), seeded from matchTA("sample.csv").

    Publishes `frame`, `tree`, `count`, `nameEntry` and `courseEntry` as
    module globals so the button callbacks (addRecords, removeRecords,
    selectRecord, updateRecord) can reach them.
    """
    global frame
    frame = tk.Frame(ms, bg='#f3e6ff')
    frame.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.8)
    label1 = tk.Label(frame, text= 'Match Results', bg='#f3e6ff', font=('Calibri',18))
    label1.place(relx=0.12, rely=0.2, relwidth=0.8, relheight=0.1)
    global tree
    tree = ttk.Treeview(frame)
    tree["columns"]=("Name","Course")
    tree.column("#0", width=0, stretch=tk.NO)
    tree.column("Name", width=150, minwidth=150, stretch=tk.NO)
    tree.column("Course", width=250, minwidth=270, stretch=tk.NO)
    tree.heading("Name",text="Name",anchor=tk.W)
    tree.heading("Course", text="Course",anchor=tk.W)
    # assumes matchTA() returns at least four (name, course) pairs -- TODO confirm
    match = matchTA("sample.csv")
    tree.insert(parent="", index="end", iid=0, text="", values=(match[0][0],match[0][1]))
    tree.insert(parent="", index="end", iid=1, text="", values=(match[1][0],match[1][1]))
    tree.insert(parent="", index="end", iid=2, text="", values=(match[2][0],match[2][1]))
    tree.insert(parent="", index="end", iid=3, text="", values=(match[3][0],match[3][1]))
    # Next free Treeview item id; addRecords() should use and advance it.
    global count
    count = 4
    tree.place(relx=0.3, rely=0.2)
    #Labels
    nameLabel = tk.Label(frame, text='Name', bg='#f3e6ff')
    nameLabel.place(relx=0.12, rely=0.6, relwidth=0.1, relheight=0.05)
    courseLabel = tk.Label(frame, text='Course', bg='#f3e6ff')
    courseLabel.place(relx=0.5, rely=0.6, relwidth=0.1, relheight=0.05)
    #Entries
    global nameEntry
    global courseEntry
    nameEntry = tk.Entry(frame)
    nameEntry.place(relx=0.12, rely=0.65, relwidth=0.4, relheight=0.05)
    courseEntry = tk.Entry(frame)
    courseEntry.place(relx=0.5, rely=0.65, relwidth=0.4, relheight=0.05)
    #Buttons
    addRecordButton = tk.Button(frame, text='Add Record', command=addRecords)
    addRecordButton.place(relx=0.15, rely=0.725, relwidth=0.2, relheight=0.05)
    removeSelected = tk.Button(frame, text='Remove Selected Record', command=removeRecords)
    removeSelected.place(relx=0.4, rely=0.725, relwidth=0.35, relheight=0.05)
    selectButton = tk.Button(frame, text='Select Record to Update', command=selectRecord)
    selectButton.place(relx=0.15, rely=0.8, relwidth=0.35, relheight=0.05)
    updateButton = tk.Button(frame, text='Save Update', command=updateRecord)
    updateButton.place(relx=0.55, rely=0.8, relwidth=0.2, relheight=0.05)
def addRecords():
    """Append the entry boxes' (name, course) as a new row, then clear them.

    Bug fix: `count` was previously re-bound as a *local* set to 4 on every
    call, so the `count += 1` never persisted and the second add reused
    Treeview iid 4, raising "Item 4 already exists". Use the module-global
    counter initialised by editAllocations() instead.
    """
    global count
    tree.insert(parent="", index="end", iid=count, text="", values=(nameEntry.get(), courseEntry.get()))
    count += 1
    # Clear the entry boxes for the next record.
    nameEntry.delete(0, tk.END)
    courseEntry.delete(0, tk.END)
def removeRecords():
    """Delete every row currently selected in the allocations table."""
    for selected_item in tree.selection():
        tree.delete(selected_item)
def selectRecord():
    """Copy the focused table row's (name, course) into the entry boxes."""
    # Empty both entry widgets before loading the new values.
    nameEntry.delete(0, tk.END)
    courseEntry.delete(0, tk.END)
    focused = tree.focus()
    row_values = tree.item(focused, 'values')
    nameEntry.insert(0, row_values[0])
    courseEntry.insert(0, row_values[1])
def updateRecord():
    """Overwrite the focused row with the entry boxes' current contents."""
    focused = tree.focus()
    new_values = (nameEntry.get(), courseEntry.get())
    tree.item(focused, text='', values=new_values)
def mainScreen():
    """Build the root window with the two top-level actions and start the Tk
    event loop. The root is stored in the module-global `ms` because the
    screen-switching callbacks re-parent their frames onto it."""
    global ms
    ms = tk.Tk()
    ms.title("TA-Course Matching System")
    canvas = tk.Canvas(ms, height=900, width=1200, bg="#ffffff")
    canvas.pack()
    frame = tk.Frame(ms, bg='#f3e6ff')
    frame.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.8)
    label = tk.Label(frame, text="Select a function", bg='#f3e6ff', font=('Calibri',18))
    label.place(relx=0.25, rely=0.3, relwidth=0.5, relheight=0.1)
    TAhours = tk.Button(frame, text="Match TAs to Courses", command=matchTAtoCourse)
    TAhours.place(relx=0.225, rely=0.5, relwidth=0.25, relheight=0.15)
    TAallocations = tk.Button(frame, text="View/Edit TA Allocations", command=editAllocations)
    TAallocations.place(relx=0.525, rely=0.5, relwidth=0.25, relheight=0.15)
    ms.mainloop()
| true |
f5b0fc46d08cb15b4e2a80019eedf0a83fc90190 | Python | rkclark/learning-python | /functions.py | UTF-8 | 206 | 3.65625 | 4 | [] | no_license | def myFunction(arg1, arg2):
print("This is my function", arg1, arg2)
myFunction("hello", "world")
def addNumbers(a=1, b=1):
    """Print the sum of *a* and *b* (each defaulting to 1) and return it."""
    total = a + b
    print(total)
    return total
addNumbers()
# 2
addNumbers(b=5, a=2)  # 7
| true |
bdf3e331d6d11d76609cac22cb1a99dd2f574e31 | Python | radrams/child_malnutrition | /models/underweight.py | UTF-8 | 13,050 | 2.578125 | 3 | [] | no_license | import random
import warnings
import ax as ax
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import scipy.stats as stats
import xgboost
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, roc_curve, auc
from sklearn.model_selection import train_test_split, GridSearchCV, RepeatedStratifiedKFold
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from statsmodels.compat import scipy
import statsmodels.api as sm
import os
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
np.random.seed(42)
random.seed(42)
warnings.filterwarnings('ignore')
cwd = os.getcwd()
path = os.path.join(os.path.dirname(cwd), 'data', 'underweight.xlsx')
############## Read from Excel file ######################################################
# Read the data
df_immu_hyg_underweight = pd.read_excel(path, sheet_name='underweight')
# Print Column names
print(df_immu_hyg_underweight.head())
# print(df_immu_hyg_underweight.columns.ravel())
print('===============================================================')
print(df_immu_hyg_underweight.describe())
print('===============================================================')
print(df_immu_hyg_underweight.isnull().sum())
print('===============================================================')
############ Drop Columns having 'na' count greater than 150 rows =================
max_number_of_nas = 150
df_immu_hyg_underweight = df_immu_hyg_underweight.loc[:, (df_immu_hyg_underweight.isnull().sum(axis=0) <= max_number_of_nas)]
############################################### MISSING VALUES #############6############################################
# Number of missing values
print('===================== Missing Values Summary Before Imputation =====================')
print(df_immu_hyg_underweight.isnull().sum())
print('===============================================================')
############################################### Handling missing values using mean imputation
df_immu_hyg_underweight.fillna(df_immu_hyg_underweight.mean(), inplace=True)
print('===================== Missing Values Summary After Imputation =====================')
print(df_immu_hyg_underweight.isnull().sum())
print('===============================================================')
########################################### Defining input and output columns
X = df_immu_hyg_underweight.loc[:, df_immu_hyg_underweight.columns != 'SH.STA.MALN.ZS']
################# Grouping Data
median_split = df_immu_hyg_underweight['SH.STA.MALN.ZS'].median()
y = pd.cut(df_immu_hyg_underweight['SH.STA.MALN.ZS'], bins=[0, median_split, 100], right=False, labels=['low', 'high'])
y.value_counts().plot(kind='bar')
plt.title("Underweight - Data Split")
plt.xlabel('Classes')
plt.ylabel('Number of Instances')
plt.show()
###################################################################################################
############################################## Distribution plot
f, ax = plt.subplots(1, sharex=True)
mean_value = df_immu_hyg_underweight['SH.STA.MALN.ZS'].mean()
std_value = df_immu_hyg_underweight['SH.STA.MALN.ZS'].std()
sns.distplot(df_immu_hyg_underweight['SH.STA.MALN.ZS'], kde_kws={"label": "Actual distribution"},
fit=stats.norm, fit_kws={"label": "Normal distribution\n(with mean: {:0.2f} and \nvariance: {:0.2f}"
.format(mean_value, std_value), "color":"orange"})
plt.title('Density Plot for Underweight')
plt.xlabel('Underweight %')
plt.ylabel('Density')
plt.legend(loc="upper right")
f.text(x=0.65, y=0.3, transform = ax.transAxes, s="Skewness: {:0.6f}".format(df_immu_hyg_underweight['SH.STA.MALN.ZS'].skew()))
f.text(x=0.65, y=0.25, transform = ax.transAxes, s="Kurtosis: {:0.6f}".format(df_immu_hyg_underweight['SH.STA.MALN.ZS'].kurt()))
plt.show()
############ Q-Q Plot
stats.probplot(df_immu_hyg_underweight['SH.STA.MALN.ZS'], dist="norm", plot=plt)
plt.title("Underweight Q-Q Plot")
plt.show()
########### Pairplot
g = sns.pairplot(df_immu_hyg_underweight, x_vars=X.columns[0:3], y_vars='SH.STA.MALN.ZS', kind="reg")
g.fig.suptitle("Pair Plot - Input Vs Output")
plt.tight_layout()
plt.show()
g = sns.pairplot(df_immu_hyg_underweight, x_vars=X.columns[3:6], y_vars='SH.STA.MALN.ZS', kind="reg")
g.fig.suptitle("Pair Plot - Input Vs Output")
plt.tight_layout()
plt.show()
g = sns.pairplot(df_immu_hyg_underweight, x_vars=X.columns[6:8], y_vars='SH.STA.MALN.ZS', kind="reg")
g.fig.suptitle("Pair Plot - Input Vs Output")
plt.tight_layout()
plt.show()
######################## Remove correlated Features
correlated_features = set()
# calculate Pearson correlation coefficient
corr = df_immu_hyg_underweight.corr()
print(corr['SH.STA.MALN.ZS'].sort_values(ascending=False))
print('===============================================================')
ax = sns.heatmap(
corr,
annot=True,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
);
# fix for mpl bug that cuts off top/bottom of seaborn viz
b, t = plt.ylim() # discover the values for bottom and top
b += 0.5 # Add 0.5 to the bottom
t -= 0.5 # Subtract 0.5 from the top
plt.ylim(b, t) # update the ylim(bottom, top) values
plt.title('Pearson Correlation Matrix')
plt.tight_layout()
plt.show()
for i in range(len(corr .columns)):
for j in range(i):
if abs(corr.iloc[i, j]) > 0.8:
colname = corr.columns[i]
correlated_features.add(colname)
print('Removed Correlated Columns:', correlated_features)
print('===============================================================')
X.drop(labels=correlated_features, axis=1, inplace=True)
########################################################################################################################################
##################### MODELS ######################
classifiers =[]
clf_logit = LogisticRegression()
classifiers.append(clf_logit)
clf_nb = GaussianNB()
classifiers.append(clf_nb)
clf_xgb = xgboost.XGBClassifier()
classifiers.append(clf_xgb)
clf_svm = sklearn.svm.SVC()
classifiers.append(clf_svm)
clf_knn = KNeighborsClassifier(n_neighbors=13)
classifiers.append(clf_knn)
clf_rf = RandomForestClassifier(random_state=0)
classifiers.append(clf_rf)
clf_accuracies = {}
############## Fine-tune KNN
# # try K=1 through K=25 and record testing accuracy
# k_range = range(1, 26)
# # We can create Python dictionary using [] or dict()
# scores = []
# # We use a loop through the range 1 to 26
# # We append the scores in the dictionary
# for k in k_range:
# knn = KNeighborsClassifier(n_neighbors=k)
# clf_knn_optimized = KNeighborsClassifier(n_neighbors=k)
# scores.append(cross_val_score(estimator=clf_knn_optimized, X=X, y=y, cv=10).mean())
# print(scores)
# plt.plot(k_range, scores)
# plt.xlabel('Value of K for KNN')
# plt.show()
################################## Fine-tune Decision Tree
# Define the parameter values that should be searched
# criterion = ['gini', 'entropy']
# max_depth = [4, 6, 8, 12]
# parameters = dict(criterion=criterion, max_depth=max_depth)
#
# # instantiate the grid
# dtc = DecisionTreeClassifier(random_state=0)
# grid = GridSearchCV(dtc, parameters, cv=10, scoring='accuracy')
# # fit the grid with data
# grid.fit(X, y)
# classifiers.append(grid.best_estimator_)
# print(grid.best_estimator_)
########################## Fine-tuned Decision tree using Grid Search
classifiers.append(DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None, criterion='entropy',
max_depth=6, max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, presort='deprecated',
random_state=0, splitter='best'))
############### Function to plot confusion matrix
def plot_cm(model_name, df_cm):
    """Render `df_cm` (a 2x2 confusion matrix, rows = true class) as a
    row-normalised percentage heatmap, annotated with sensitivity and
    specificity.

    Bug fix: sensitivity/specificity were previously computed from the
    module-global `cm` rather than the `df_cm` argument, so the printed
    figures could describe a different classifier than the one plotted.
    """
    plt.figure(figsize = (8,7))
    sns.heatmap(df_cm / df_cm.sum(axis=1)[:, np.newaxis], annot=True, fmt='.2%', cmap='Blues', xticklabels=['low','high'], yticklabels=['low','high'])
    # fix for mpl bug that cuts off top/bottom of seaborn viz
    b, t = plt.ylim()  # discover the values for bottom and top
    b += 0.5           # Add 0.5 to the bottom
    t -= 0.5           # Subtract 0.5 from the top
    plt.ylim(b, t)     # update the ylim(bottom, top) values
    # Row 0 is the positive ('low') class here: TP / (TP + FN).
    sensitivity = df_cm[0, 0] / (df_cm[0, 0] + df_cm[0, 1]) * 100
    print('Sensitivity : ', sensitivity)
    # Row 1: TN / (FP + TN).
    specificity = df_cm[1, 1] / (df_cm[1, 0] + df_cm[1, 1]) * 100
    print('Specificity : ', specificity)
    plt.title('Confusion Matrix - ' + model_name)
    plt.figtext(0.5, 0.01, 'Sensitivity: {:.2f}, Specificity: {:.2f}'.format(sensitivity, specificity))
    plt.xlabel('Predicted label')
    plt.ylabel('True label')
    plt.tight_layout()
    plt.show()
######### Plot Feature Importance
def plot_feature_importance(model):
    """Fit `model` on the module-level X, y and plot its sorted feature
    importances as a horizontal bar chart.

    Models without a `feature_importances_` attribute (e.g. SVC, KNN,
    LogisticRegression) are skipped. The original swallowed every exception
    silently; keep the best-effort behaviour but say why a model was skipped.
    """
    try:
        model.fit(X, y)
        importance = model.feature_importances_
        features = X.columns
        indices = np.argsort(importance)
        plt.title('Feature Importance - ' + model.__class__.__name__)
        plt.barh(range(len(indices)), importance[indices], color='steelblue', align='center')
        plt.yticks(range(len(indices)), [features[i] for i in indices])
        plt.xlabel('Relative Importance')
        plt.tight_layout()
        plt.show()
    except Exception as exc:
        # Best-effort: keep the evaluation pipeline running for models that
        # cannot report importances, but report the reason instead of hiding it.
        print('Skipping feature importance for %s: %s' % (model.__class__.__name__, exc))
#############################################################
# Calculate cross-validation scores
for clf in classifiers:
all_accuracies = cross_val_score(estimator=clf, X=X, y=y, cv=10)
print("Average accuracy of %s is %s"%(clf.__class__.__name__, all_accuracies.mean()))
print('Accuracy of each fold: ', all_accuracies)
y_pred = cross_val_predict(clf, X, y, cv=10)
accuracy = accuracy_score(y, y_pred)
print('Accuracy of the current fold: ', accuracy)
clf_accuracies[clf.__class__.__name__] = accuracy
print('===============================================================')
# print the sorted classifier accuracies
sorted_accuracies = sorted(clf_accuracies.items(), key=lambda item: item[1], reverse=True)
print('==================== Sorted Accuracies =================')
for k, v in sorted_accuracies:
print(k, " ", v)
print('===============================================================')
top_3_accuracies = dict(sorted_accuracies[:3])
#### Calculate Confusion matrix and Feature Importance for Top 3 classifiers
for clf in classifiers:
if clf.__class__.__name__ in top_3_accuracies.keys():
y_pred = cross_val_predict(clf, X, y, cv=10)
cm = confusion_matrix(y, y_pred)
print('Confusion matrix for ', clf.__class__.__name__ , ': ', cm)
plot_cm(clf.__class__.__name__, cm)
plot_feature_importance(clf)
print(classification_report(y, y_pred))
print('===============================================================')
####################### Compute ROC curve and area the curve
## ROC Curve for SVC
probas_ = cross_val_predict(sklearn.svm.SVC(probability=True), X, y, cv=10, method="predict_proba")
fpr_svc, tpr_svc, thresholds_svc = roc_curve(y, probas_[:, 1], pos_label='low')
roc_auc_svc = auc(fpr_svc, tpr_svc)
print("Area under the ROC curve for SVC: %f" % roc_auc_svc)
# Plot ROC curve
plt.clf()
plt.plot(fpr_svc, tpr_svc, "b:", label='SVC (AUC = %0.2f)' % roc_auc_svc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC - Underweight')
plt.legend(loc="lower right")
######### ROC Curve Decision Tree
y_prob_pred = cross_val_predict(DecisionTreeClassifier(random_state=0), X, y, cv=10, method="predict_proba")
fpr_dt, tpr_dt, thresholds_dt = roc_curve(y, y_prob_pred[:, 1], pos_label='low')
roc_auc_dt = auc(fpr_dt, tpr_dt)
print("Area under the ROC curve for Descision Tree: %f" % roc_auc_dt)
plt.plot(fpr_dt, tpr_dt, linestyle=':', color='green', linewidth=2, label='Decision Tree (AUC = %0.2f)' % roc_auc_dt)
plt.legend(loc="lower right")
################ ROC Curve KNN
y_prob_pred = cross_val_predict(KNeighborsClassifier(n_neighbors=13), X, y, cv=10, method="predict_proba")
fpr_knn, tpr_knn, thresholds_knn = roc_curve(y, y_prob_pred[:, 1], pos_label='low')
roc_auc_knn = auc(fpr_knn, tpr_knn)
print("Area under the ROC curve for KNN: %f" % roc_auc_knn)
plt.plot(fpr_knn, tpr_knn, linestyle=':', color='brown', linewidth=2, label='KNN (AUC = %0.2f)' % roc_auc_knn)
plt.legend(loc="lower right")
plt.show()
##################################
| true |
cded8596d09005c16bd98853ec51ed79e710c7f4 | Python | Orodef/Python | /Contar Caracteres.py | UTF-8 | 865 | 3.359375 | 3 | [] | no_license | '''
Alejandro Hernández Lobo 2017157163
Grupo
'''
#Reto 1
def contarCaracteres(cadena):
    """Count case-insensitive character occurrences in `cadena`.

    Returns a list of [character, count] pairs with characters upper-cased,
    ordered by first appearance, e.g. "aAb" -> [['A', 2], ['B', 1]].

    Replaces the original O(n^2) rescan-per-character with a single O(n)
    pass over an insertion-ordered dict (order guaranteed since Python 3.7).
    """
    conteo = {}
    for caracter in cadena:
        clave = caracter.upper()
        conteo[clave] = conteo.get(clave, 0) + 1
    return [[clave, cantidad] for clave, cantidad in conteo.items()]
def yaAparecio(letra, usados):
    """Return True if `letra` equals any element of `usados` upper-cased.

    (`letra` is expected already upper-cased by the caller.) Replaces the
    manual while-loop with the idiomatic any()."""
    return any(letra == usado.upper() for usado in usados)
| true |
8d980a2df24b1e90f642f244f385896348e20626 | Python | edzai/edx-chat-bot | /edx_chat_bot/limbo_plugins/room_recommender.py | UTF-8 | 1,754 | 2.546875 | 3 | [] | no_license | """Listens to all messages and recommends a room that may be better for that message"""
import pprint
import re
# This weirdness is because Limbo adds this directory to sys.path :(
try:
from helpers import dm_user
except ImportError:
from .helpers import dm_user
# Slack channel name -> metadata; `id` is the Slack channel ID used to build
# <#id|name> links in the bot's replies.
CHANNELS = {
    'ecommerce': {
        'id': 'C0WL6SPRA',
    },
    'ops': {
        'id': 'C08B4LZEZ',
    },
}
# (regex of trigger keywords, action dict naming the CHANNELS entry to
# recommend). All patterns are lower-case.
OPEN_EDX_ROOM_PATTERNS = [
    ('ecom|ecommerce|paypal|stripe|braintree|payment processor', {
        'channel': 'ecommerce',
    }),
    ('azure|docker|ubuntu|ansible', {
        'channel': 'ops',
    }),
]
def room_recommender(text):
    """Scan `text` for topic keywords; return (matched_keyword, channel_name)
    for the first pattern that hits, or None when nothing matches."""
    for keyword_regex, actions in OPEN_EDX_ROOM_PATTERNS:
        hit = re.search(keyword_regex, text)
        if hit is None:
            continue
        return (hit.group(0), actions['channel'])
# Cached identity of the bot itself, filled in on the first message.
MY_INFO = None
def on_message(msg, server):
    """Called when a message happens in a channel the bot is in.

    msg: dict (Slack RTM event)
    server: Limbo server object

    If the message text (or an uploaded file's preview) mentions a topic with
    a better-suited channel, DM the author a recommendation.
    """
    global MY_INFO
    if MY_INFO is None:
        MY_INFO = server.slack.login_data['self']
        # MY_INFO['id']
    pprint.pprint(msg)
    text = msg.get("text", "").lower()
    # Bug fix: lower-case the file preview too — every pattern in
    # OPEN_EDX_ROOM_PATTERNS is lower-case, so a mixed-case preview could
    # previously never match.
    text += msg.get("file", {}).get("preview", "").lower()
    recommendation = room_recommender(text)
    if recommendation:
        trigger_string, room_name = recommendation
        room_id = CHANNELS[room_name]['id']
        response_text = "Hi, I noticed you were talking about “{trigger_string}”\n You may have better luck posting this in <#{room_id}|{room_name}>"
        response_msg = response_text.format(
            trigger_string=trigger_string,
            room_id=room_id,
            room_name=room_name
        )
        dm_user(server, msg.get('user'), response_msg)
| true |
433addc00580bfbba51198831bff1ea1be08e40d | Python | CLNersesian/Thinkful-projects | /stats.py | UTF-8 | 1,964 | 3.625 | 4 | [] | no_license | import pandas as pd
# NOTE: Python 2 syntax (print statements) — run under python2.
# Region-level alcohol/tobacco spending figures for Great Britain embedded
# as CSV-style text: Region, Alcohol, Tobacco.
data = '''Region, Alcohol, Tobacco
North, 6.47, 4.03
Yorkshire, 6.13, 3.76
Northeast, 6.19, 3.77
East Midlands, 4.89, 3.34
West Midlands, 5.63, 3.47
East Anglia, 4.52, 2.92
Southeast, 5.89, 3.20
Southwest, 4.79, 2.71
Wales, 5.27, 3.53
Scotland, 6.08, 4.51
Northern Ireland, 4.02, 4.56'''
data = data.splitlines() ## split string on newlines or use data.split('\n')
## split each list item by commas into a list comprehension
data = [i.split(', ') for i in data]
## create pandas dataframe
column_names = data[0] ## first row
data_rows = data[1::] ## all subsequent rows of data
df = pd.DataFrame(data_rows, columns=column_names)
# NOTE(review): `import scipy.stats` is redundant given the following
# `from scipy import stats`; only `stats` is used below.
import scipy.stats
from scipy import stats
## convert Alcohol and Tobacco columns to float
df['Alcohol'] = df['Alcohol'].astype(float)
df['Tobacco'] = df['Tobacco'].astype(float)
# The twelve expressions below compute descriptive statistics but discard
# the results (useful only in an interactive session); the printed summary
# at the bottom recomputes everything it reports.
df['Alcohol'].mean()
df['Alcohol'].median()
stats.mode(df['Alcohol'])
df['Tobacco'].mean()
df['Tobacco'].median()
stats.mode(df['Tobacco'])
max(df['Alcohol']) - min(df['Alcohol'])
df['Alcohol'].std()
df['Alcohol'].var()
max(df['Tobacco']) - min(df['Tobacco'])
df['Tobacco'].std()
df['Tobacco'].var()
## Challenge
print "Mean values for Alcohol and Tobacco in Great Britian, respectively:", df['Alcohol'].mean(), df['Tobacco'].mean()
print "Median values for Alcohol and Tobacco in Great Britian, respectively:", df['Alcohol'].median(), df['Tobacco'].median()
print "The Mode for the Alcohol and Tobacco in Great Britian, respectively ", stats.mode(df['Alcohol'])[0][0], stats.mode(df['Tobacco'])[0][0]
print "Range values for Alcohol and Tobacco in Great Britian, respectively:", max(df['Alcohol']) - min(df['Alcohol']), max(df['Tobacco']) - min(df['Tobacco'])
print "Variance values for Alcohol and Tobacco in Great Britian, respectively:", df['Alcohol'].var(), df['Tobacco'].var()
print "Standard deviation values for Alcohol and Tobacco in Great Britian, respectively:", df['Alcohol'].std(), df['Tobacco'].std()
| true |
9435ebc7ec0275602591b4c9978de4e83e57f20e | Python | stellatigre/web-platform-tests | /webdriver/navigation/back.py | UTF-8 | 749 | 2.59375 | 3 | [] | no_license | import unittest
import sys
import os
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class BackTest(base_test.WebDriverBaseTest):
    """Checks that driver.go_back() restores the previously visited page."""

    def test_back(self):
        """Visit a static start page, navigate away, go back, and verify the
        original page body is shown again."""
        self.driver.get(self.webserver.where_is('navigation/res/backStart.html'))
        start_text = self.driver.find_element_by_css("body").text
        self.driver.get(self.webserver.where_is('navigation/res/backNext.html'))
        next_text = self.driver.find_element_by_css("body").text
        self.assertNotEqual(start_text, next_text)
        self.driver.go_back()
        after_back_text = self.driver.find_element_by_css("body").text
        self.assertEqual(start_text, after_back_text)
if __name__ == '__main__':
unittest.main()
| true |
8708ee9b15bcb1827aadbf79d70f992ed09bb598 | Python | Markus-Zlabinger/question-annotator | /src/question.py | UTF-8 | 244 | 2.546875 | 3 | [] | no_license | class Question:
qid = None
text = None
text_tokens = None
def __init__(self, qid, text):
self.qid = qid
self.text = text
#self.text_tokens = text_tokens
def __repr__(self):
return self.text
| true |
b3ea530280a1d67c79c8d23d2a2ab74e6d935cd0 | Python | Barrydian/Master_project_OCT_DCGAN | /resize_img.py | UTF-8 | 767 | 2.703125 | 3 | [] | no_license | import os, shutil
import cv2
def resize_img(src_database, target_database, width, height, channel):
    """Resize every image in *src_database* into *target_database*.

    The target directory is deleted and recreated from scratch. `channel`
    selects the colour mode: 1 -> grayscale, 3 -> BGR colour (other values
    leave `img` unset and will raise, matching the original behaviour).
    Silently does nothing when the source directory is missing.
    """
    if os.path.exists(src_database):
        if os.path.exists(target_database):
            shutil.rmtree(target_database)
        os.mkdir(target_database)
        for each in os.listdir(src_database):
            if channel == 1:
                img = cv2.imread(os.path.join(src_database, each), cv2.IMREAD_GRAYSCALE)
            elif channel == 3:
                img = cv2.imread(os.path.join(src_database, each), cv2.IMREAD_COLOR)
            img = cv2.resize(img, (width, height))
            cv2.imwrite(os.path.join(target_database, each), img)
            print(os.path.join(target_database, each))
    # NOTE(review): printed even when the source directory did not exist.
    print(' --- Images resizing done ... ')
98d62a5e7e99c6e7f3b91f220c3aa92d8fe06a73 | Python | 99002646/Genesis | /DAS/codes/m_cal.py | UTF-8 | 3,134 | 2.515625 | 3 | [] | no_license | import numpy as np
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import pi
from scipy.signal import butter, lfilter
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import freqz
from scipy.ndimage import gaussian_filter1d
import math
from numpy.compat.py3k import long
fs = 5000.0
lowcut = 40.0
highcut = 70.0
o = 3
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter.

    The cut-off frequencies are normalised to the Nyquist rate before being
    handed to scipy.signal.butter; returns the (b, a) transfer-function
    coefficient arrays.
    """
    nyquist = 0.5 * fs
    normalized_band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, normalized_band, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Band-pass filter `data` using the design from butter_bandpass()."""
    numerator, denominator = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(numerator, denominator, data)
# Your Parameters
input_volt=230
amp = 1.414*input_volt # (Amplitude)
f = 50 # (Frequency)
fs = 5000 # (Sample Rate)
T = 1/f
Ts = 1/fs
harmonic_amp1=0.05*amp
harmonic_amp2=0.05*amp
attenuation_factor=(amp+harmonic_amp1+harmonic_amp2)/2.5
# Select if you want to display the sine as a continous wave
# True = Continous (not able to zoom in x-direction)
# False = Non-Continous (able to zoom)
continous = True
x = np.arange(fs)
#print(x)
y= [ ((amp*np.sin(2*np.pi*f * (i/fs)))+(0.05*amp*np.sin(6*pi*f * (i/fs)))+(0.05*amp*np.sin(12*pi*f * (i/fs))))*(1/attenuation_factor) for i in x ]
adc=[((y[i]/5)*65536)+(32768) for i in x]
# The band-pass filter and the Gaussian smoother operate on the whole `adc`
# signal at once and their inputs never change, so the original loop that
# recomputed the identical `op1`/`op2` arrays once per sample did O(n)
# redundant full-signal passes. One pass produces the same final values.
op1 = 32768 + butter_bandpass_filter(adc, lowcut, highcut, fs, order=o)
op2 = gaussian_filter1d(op1, 4)
class Scope(object):
    """Scrolling oscilloscope-style plot driven by FuncAnimation: each
    update() call appends one sample to a Line2D; in continuous mode the
    x-axis window pans to follow the newest data."""
    def __init__(self, ax, maxt=2*T, dt=Ts):
        # maxt: visible time window (two mains periods by default);
        # dt: spacing between samples (module-level sample period Ts).
        self.ax = ax
        self.dt = dt
        self.maxt = maxt
        self.tdata = [0]
        self.ydata = [0]
        self.line = Line2D(self.tdata, self.ydata)
        self.ax.add_line(self.line)
        self.ax.set_ylim(-440,440)
        self.ax.set_xlim(0, self.maxt)
    def update(self, y):
        # Called by FuncAnimation with the next sample yielded by sineEmitter().
        lastt = self.tdata[-1]
        if continous :
            # Pan the x-axis once the trace outgrows the initial window.
            if lastt > self.tdata[0] + self.maxt:
                self.ax.set_xlim(lastt-self.maxt, lastt)
        t = self.tdata[-1] + self.dt
        self.tdata.append(t)
        self.ydata.append(y)
        self.line.set_data(self.tdata, self.ydata)
        return self.line,
mode = 1 # 1 For AC, 0 DC
RangeAC = attenuation_factor*2.5*2
RangeDC = attenuation_factor*5
inputs = 32768
op = 0
if(mode == 0):
opc = [(op1[i]/65536)*RangeDC for i in x]
if(mode == 1):
opc = [((op1[i]-32768)/65536)*RangeAC for i in x]
def sineEmitter():
    """Yield successive converted-voltage samples (module-level `opc`) for
    the animation, printing each one as it is produced."""
    for sample_index in x:
        print(opc[sample_index])
        yield (opc[sample_index])
fig, ax = plt.subplots()
scope = Scope(ax)
# pass a generator in "sineEmitter" to produce data for the update func
ani = animation.FuncAnimation(fig, scope.update, sineEmitter, interval=10,
blit=True)
plt.show() | true |
a7ae1a7790b131bb8bf6ca3f1c9cdc77e915ab5a | Python | Xogiga/CPOA_INEC_SAVIGNY_VALADE | /cannes_accomodation/accomodation_website/validators.py | UTF-8 | 1,572 | 3.265625 | 3 | [
"MIT"
] | permissive | import datetime
from wtforms import DateField
from wtforms.validators import ValidationError
class TimeCondition:
    """Base for date validators: holds either a concrete datetime.date or the
    name of a form DateField to resolve lazily at validation time."""

    def __init__(self, date: 'datetime.date or DateField'):
        self.date = date

    @staticmethod
    def date_from_field(datefield, form):
        """Look up `datefield` on `form` and return its datetime.date value."""
        try:
            resolved = form[datefield].data
        except KeyError:
            raise ValidationError(f'Invalid field name {datefield}')
        if isinstance(resolved, datetime.date):
            return resolved
        raise TypeError('Not a DateField')

    def check_date(self, form):
        """Ensure self.date is a datetime.date, resolving a field name if needed."""
        if isinstance(self.date, datetime.date):
            return
        self.date = self.date_from_field(self.date, form)
class Before(TimeCondition):
    """Validator requiring the validated date to be on or before `date`."""

    def __init__(self, date, message=None):
        super().__init__(date)
        self.message = message or f'The chosen date must be before the {date}'

    def __call__(self, form, field):
        self.check_date(form)
        if not field.data <= self.date:
            raise ValidationError(self.message)
class After(TimeCondition):
def __init__(self, date, message=None):
super().__init__(date)
if not message:
message = f'The chosen date must be after the {date}'
self.message = message
def __call__(self, form, field):
self.check_date(form)
if field.data < self.date:
raise ValidationError(self.message)
| true |
80071d02703eadbcfea0f61f470bd9554140e52c | Python | iayoung85/2ndsandbox | /testweekpropscorer.py | UTF-8 | 1,153 | 2.859375 | 3 | [] | no_license | #scores a proposed set of group assignments based on how many times each pairing has been used in the past
def week_prop_scorer(proposedpairs, all_groups):
    """Score a proposed set of group assignments: count how many historical
    pairings in `all_groups` reappear (in either member order) among
    `proposedpairs`. Lower scores mean fewer repeated pairings.

    Pairs are looked up as frozensets, replacing the original
    O(len(all_groups) * len(proposedpairs)) list scan with O(n + m).
    """
    proposed_lookup = {frozenset(pair) for pair in proposedpairs}
    return sum(1 for group in all_groups if frozenset(group) in proposed_lookup)
test=week_prop_scorer([['e', 'b'], ['a', 'h'], ['f', 'c'], ['g', 'd']],[['a', 'd'], ['c', 'b'], ['a', 'b'], ['d', 'c'], ['a', 'c'], ['b', 'd'], ['b', 'd'], ['a', 'c'], ['b', 'e'], ['a', 'd'], ['c', 'f'], ['d', 'join another group'], ['b', 'f'], ['c', 'g'], ['a', 'e'], ['c', 'd'], ['e', 'g'], ['a', 'f'], ['b', 'h'], ['a', 'b'], ['c', 'e'], ['d', 'f'], ['h', 'g'], ['e', 'd'], ['a', 'g'], ['b', 'c'], ['f', 'h']]
)
print(test)
negtest=week_prop_scorer([['f', 'e'], ['h', 'c'], ['g', 'b'], ['a', 'd']],[['a', 'd'], ['c', 'b'], ['a', 'b'], ['d', 'c'], ['a', 'c'], ['b', 'd'], ['b', 'd'], ['a', 'c'], ['b', 'e'], ['a', 'd'], ['c', 'f'], ['d', 'join another group'], ['b', 'f'], ['c', 'g'], ['a', 'e'], ['c', 'd'], ['e', 'g'], ['a', 'f'], ['b', 'h'], ['a', 'b'], ['c', 'e'], ['d', 'f'], ['h', 'g'], ['e', 'd'], ['a', 'g'], ['b', 'c'], ['f', 'h']]
)
print(negtest) | true |
0dd05ad56cde3168eb7d0f3f602cb02395137eeb | Python | zm-git-dev/Bioinfo-pipelines | /bioinfo_training/exercises_lecture1-7/day03/test004/test002.py | UTF-8 | 154 | 3.15625 | 3 | [] | no_license |
f = open('test001.txt', 'r')
letters = []
for line in f:
line.strip("\n")
letters.append(line)
answer = letters[0][0]
print(answer)
f.close() | true |
5c692a98e4d1732d649e1c50c66d5f1e71f5bb2b | Python | delkind-dnsf/Malicious-URL-and-DGA-Domain-Detection-using-Deep-Learning | /Malicious-URL-Detection/data/malware_preprocess.py | UTF-8 | 783 | 2.84375 | 3 | [] | no_license | import pandas as pd
from sklearn.utils import shuffle
df_malware = pd.read_csv("./malware/url_haus_only_url.txt", delimiter='\n', header=None)
malware_labels = []
malware_labels_str = []
malware_urls = []
# Iterate the URL column directly; the original kept a manual parallel
# index `z` into a second copy of the same list.
for url in df_malware[0].tolist():
    # Keep only URLs of at most 100 characters.
    if len(url) < 101:
        malware_labels.append(1)
        malware_labels_str.append('malware')
        malware_urls.append(url)
# Data columns("url", "label", "class")
malware_archive = pd.DataFrame(columns=['url'])
malware_archive['url'] = malware_urls
malware_archive['label'] = malware_labels_str
malware_archive['class'] = malware_labels
malware_archive.to_csv("./malware_label.csv", mode='w', index=False)
print(malware_archive) | true |
d94ce5778217c61208f5e21dd29e9b37c64cb598 | Python | luiscape/hdxscraper-wfp-mvam | /collector/classes/mvam.py | UTF-8 | 1,911 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
MVAM CLASS:
---------------
Defines mVAM class as a representation of the
mVAM public API.
'''
import requests
import datetime
from collector.utilities.item import item
class mVAM:
    '''
    Represents the WFP mVAM public monitoring API.

    Attributes:
        tables: table names exposed by the API.
        table: the table this instance queries.
        page_limit: maximum number of result pages fetched per query.
        metadata: endpoint URL, construction time, table name and the
            record count of the most recent query.
    '''
    def __init__(self, table='pblStatsSum', page_limit = 10*4):
        self.tables = ['pblStatsSum', 'pblStatsSum4Maps']

        if table not in self.tables:
            # Bug fix: the original never substituted the table name into
            # the '%s' placeholder, so the message printed literally.
            raise ValueError('Table %s does not exist.' % table)

        self.table = table
        self.page_limit = page_limit
        self.metadata = {
            'url': 'http://vam.wfp.org/mvam_monitoring/api.aspx',
            'time': datetime.datetime.now().isoformat(),
            'table': table,
            'records': 0
            }

    def query(self, statement=None):
        '''
        Fetch up to `page_limit` pages of records from the mVAM API.

        Args:
            statement: optional WHERE clause; when None (or empty) all
                available records are fetched.

        Returns:
            List of record dicts decoded from the API's JSON responses.
            Also updates self.metadata['records'] with the count.
        '''
        if statement is None:
            print('%s No statement provided. Fetching all available records.' % item('warn'))

        results = []
        # Page through the API; an empty page signals the end of the data.
        # (The original duplicated this loop for the with/without-statement
        # cases; the only difference was the optional 'where' field.)
        for page in range(0, self.page_limit):
            print('%s Collecting %s page %s' % (item('bullet'), self.table, page) )

            payload = {'table': self.table, 'page': page}
            if statement:
                payload['where'] = statement

            r = requests.post(self.metadata['url'], data=payload)
            if len(r.json()) == 0:
                break
            else:
                results += r.json()

        self.metadata['records'] = len(results)
        return(results)
| true |
f12e30751d69c97d29c5ebb5635a1e48647e1101 | Python | lhd-Sakura/starrobot_ws | /src/starrobot/starrobot_teleop/src/teleop_twist_servo.py | UTF-8 | 2,109 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
import roslib; roslib.load_manifest('teleop_twist_keyboard')
import rospy
from starrobot_msgs.msg import Servo
import sys, select, termios, tty
msg = """
Reading from the keyboard and Publishing to Servo!
---------------------------
Moving around:
w
s
x
t : up (+z)
b : down (-z)
anything else : stop
q/z : increase/decrease max speeds by 10%
w/x : increase/decrease only linear speed by 10%
e/c : increase/decrease only angular speed by 10%
CTRL-C to quit
"""
# Key -> (x, y, z, th) motion tuples, following the teleop_twist_keyboard
# convention (i/j/k/l-style layout, uppercase = strafe, t/b = vertical).
# NOTE(review): the usage banner above advertises w/s/x for movement, but
# those keys only appear in speedBindings below; the main loop looks
# movement keys up in this dict — confirm the intended key map.
moveBindings = {
        'i':(1,0,0,0),
        'o':(1,0,0,-1),
        'j':(0,0,0,1),
        'l':(0,0,0,-1),
        'u':(1,0,0,1),
        ',':(-1,0,0,0),
        '.':(-1,0,0,1),
        'm':(-1,0,0,-1),
        'O':(1,-1,0,0),
        'I':(1,0,0,0),
        'J':(0,1,0,0),
        'L':(0,-1,0,0),
        'U':(1,1,0,0),
        '<':(-1,0,0,0),
        '>':(-1,-1,0,0),
        'M':(-1,1,0,0),
        't':(0,0,1,0),
        'b':(0,0,-1,0),
    }

# Key -> (linear-scale, angular-scale) multipliers applied to `speed`
# in the main loop (q/z scale both, w/x linear only, e/c angular only).
speedBindings={
        'q':(1.1,1.1),
        'z':(.9,.9),
        'w':(1.1,1),
        'x':(.9,1),
        'e':(1,1.1),
        'c':(1,.9),
    }
def getKey():
    """Block-free read of a single keypress from stdin.

    Puts the terminal into raw mode, polls stdin (zero timeout), reads one
    character, then restores the saved terminal attributes (`settings`,
    captured at startup) before returning the character.
    """
    fd = sys.stdin.fileno()
    tty.setraw(fd)
    select.select([sys.stdin], [], [], 0)
    ch = sys.stdin.read(1)
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return ch
# Main teleop loop: read keypresses and publish starrobot Servo messages.
# NOTE(review): Python 2 print statements and ROS APIs — this script targets
# a ROS / Python 2 environment.
if __name__=="__main__":
    # Save terminal attributes so getKey() can restore them after raw reads.
    settings = termios.tcgetattr(sys.stdin)

    pub = rospy.Publisher('servo', Servo, queue_size = 1)
    rospy.init_node('teleop_twist_servo')

    # Base speed, scaled multiplicatively by speedBindings below.
    speed = rospy.get_param("~speed", 0.5)
    servo_angle = 0.0
    status = 0  # counts speed changes; banner is re-printed every 15

    try:
        print msg
        while(1):
            key = getKey()
            print key
            if key in moveBindings.keys():
                # NOTE(review): `x` is assigned but never used — the Servo
                # message below is hard-coded to 0.1 regardless of the key.
                # Confirm whether moveBindings should drive servo_msg.
                x = moveBindings[key][0]
            elif key in speedBindings.keys():
                speed = speed * speedBindings[key][0]
                print speed
                if (status == 14):
                    print msg
                status = (status + 1) % 15
            else:
                # Ctrl-C (ETX) exits the loop; any other key is ignored.
                if (key == '\x03'):
                    break

            servo_msg = Servo()
            servo_msg.servo1 = 0.1
            pub.publish(servo_msg)

    finally:
        # On exit publish a final message and restore the terminal.
        servo_msg = Servo()
        # NOTE(review): servo1 is assigned four times in a row; presumably
        # servo1..servo4 were intended — confirm against the Servo msg
        # definition in starrobot_msgs.
        servo_msg.servo1 = 0.2
        servo_msg.servo1 = 0.2
        servo_msg.servo1 = 0.2
        servo_msg.servo1 = 0.2
        pub.publish(servo_msg)

        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
| true |
95772c0488cbf699930432d8c25b12e0d2146624 | Python | desertfireballnetwork/particle_filter | /main_MPI.py | UTF-8 | 58,157 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python
"""
=============== Meteoroid Trajectory Analysis ===============
============ using a bootstrap particle filter ==============
Created on Mon Jul 16 2017
@author: Eleanor Kate Sansom
Uses particle filter methodology outlined in Arulampalam et al.
(2002) -- A tutorial on particle filters for online nonlinear/
non-Gaussian Bayesian tracking (doi: 10.1109/78.978374).
## Requirements:
- From Triangulation folder: trajectory_utilities.py,
dfn_utils.py
- From Brightflight folder: bf_functions_3d.py,
ballistic_params.py
- From Brightflight/particle_filters folder: nrlmsise_00.py,
nrlmsise_00_data.py,
nrlmsise_00_header.py
## Running the program:
run in commandline as
$ mpirun -n <#nodes> python main_MPI.py <userinputs>
There are three run options depending on combination of data
available.
(1) 1D particle filter: 1D analysis on a single,
pre-triangulated trajectory
file with X_geo, Y_geo and
Z_geo information.
(2) 3D particle filter, cartesian: 3D analysis on single or
multiple pre-triangulated
files with X_geo, Y_geo and
Z_geo information.
(3) 3D particle filter, rays: 3D analysis on calibrated
astrometric observations
(altitude and azimuth data
required)
Inputs: required:
-i --run_option: select from (1) (2) or (3) described
above.
-d --inputdirectory: input directory of folder
containing files with extension .ecsv
-p --numparts: number of particles to run, testing run
100; quick run try 1000; good run try 10,000;
magnus 100,000+
optional:
-m --mass_option: Specify how you would like entry masses
to be initiated. (1) calculated using ballistic
coefficient in metadata; (2) using random logarithmic
distribution; (3) random uniform distribution.
Default is 3
-s --pse: specify if you would like to use exclusively
start (S) or end (E) points, or use both (B).
Default uses ends
-l --luminosity_weighting: weighting for luminosity
-M0 --m0_max: if using -m 2 or 3, specify a maximum
initial mass. Default is 2000",default=2000.)
-c --comment: add a version name to appear in saved
file titles. If unspecified, _testing_ is used.
-k --save_kml: save observation rays as kmls
-o --old: previous .fits file
to be tested:
-f --fragment: If this is a fragment run, it will
increase covariance values at the specified
times given by -t option
-t --fragment_start: If this is a fragment run, give
starting time (seconds in UTC, not event relative)
Output: HDU table fits file.
HDU table[0] is the primary and holds information on
all other indices and does not contain data
access this info using table.info().
See http://docs.astropy.org/en/stable/io/fits/
for more info.
Table at each time step is saved as a new index in HDU
table.
access last timestep table using table[-1].data
Each table is in the format:
#-------------------------------------------------------
column index KEY:
            0 : 'X_geo' - X position in ECEF (m)
            1 : 'Y_geo' - Y position in ECEF (m)
            2 : 'Z_geo' - Z position in ECEF (m)
            3 : 'X_geo_DT' - X velocity (dx/dt) in ECEF (m/s)
            4 : 'Y_geo_DT' - Y velocity (dy/dt) in ECEF (m/s)
            5 : 'Z_geo_DT' - Z velocity (dz/dt) in ECEF (m/s)
            36: 'X_eci' - X position in ECI (m)
            37: 'Y_eci' - Y position in ECI (m)
            38: 'Z_eci' - Z position in ECI (m)
            39: 'X_eci_DT' - X velocity (dx/dt) in ECI (m/s)
            40: 'Y_eci_DT' - Y velocity (dy/dt) in ECI (m/s)
            41: 'Z_eci_DT' - Z velocity (dz/dt) in ECI (m/s)
6 : 'mass' - mass (kg)
7 : 'cd' - drag coefficient (aerodynamic => 2 * gamma; see Bronshten 1976)
8 : 'A' - shape coefficient,
A = cross sectional surface area / volume^(2/3)
9 : 'kappa' - shape-density coefficient,
kappa = cd * A / density^(2/3)
10: 'sigma' - ablation coefficient
11: 'mag_v' - absolute visual magnitude
12: 'tau' - luminous efficiency parameter
13: 'Q_x' - variance of process noise for X position
14: 'Q_y' - variance of process noise for Y position
15: 'Q_z' - variance of process noise for Z position
16: 'Q_v_x' - variance of process noise for X velocity
17: 'Q_v_y' - variance of process noise for Y velocity
18: 'Q_v_z' - variance of process noise for Z velocity
19: 'Q_m' - variance of process noise for mass
20: 'Q_cd' - variance of process noise for drag coefficient (unused)
21: 'Q_cl' - variance of process noise for lift coefficient (unsued)
22: 'Q_k' - variance of process noise for kappa
23: 'Q_s' - variance of process noise for sigma
24: 'Q_tau' - variance of process noise for luminous efficiency
25: 'brightness' - luminous intensiy
26: 'rho' - initial density (kg/m3)
27: 'parent_index'- index of parent particle (t-1)
28: 'orig_index' - index of original particle assigned in dim.Initialise() (t0)
29: 'weight' - combined position and luminous weighting (assigned in main)
30: 'D_DT' - magnitude of velocity vector: norm(vel_x, vel_y, vel_z)
31: 'latitude' - latitude (degrees)
32: 'longitude' - longitude (degrees)
33: 'height' - height (m)
34: 'lum_weight' - luminous weighting
35: 'pos_weight' - position weighting
additional columns include time (relative to event t0)
and datetime
Still TODO:
- Intensity calculation - there are 3 ways of doing it...
- 1D pf errors are being set within the weighting function
rather than being passed from inputs
"""
# import modules used by all dims
# general
from math import *
import copy, random
import sys, os, argparse, glob
import contextlib
# science
import numpy as np
import scipy
import scipy.integrate
# Astropy
from astropy.table import Table, Column, join, hstack
import astropy.units as u
from astropy.io import fits
from astropy.time import Time, TimeDelta
# own
import bf_functions_3d as bf
from nrlmsise_00_header import *
from nrlmsise_00 import *
import dfn_utils
import bf_functions_3d as bf
import trajectory_utilities as tu
#import multiprocessing
from mpi4py import MPI
def ParticleFilterParams(fix_params=False):
    """Return the particle filter tuning parameters.

    Args:
        fix_params: when True, zero the process noise on the last four
            state parameters (sigma, kappa, brightness, tau) so they stay
            fixed through the filter run.

    Returns:
        Q_c: continuous-time process noise variances (numpy row vector),
            squared from the standard deviations listed below.
        Q_c_frag: process noise variances (list) used at reinitialisation
            when the fragmentation option is active (higher for mass and
            velocities).
        P: initial state variances (position and velocity only) used to
            initialise the gaussian spread of particles.
        range_params: other dynamic equation parameter ranges for the
            initiation of particles.

    Changes from the original: the unused locals `P2` and
    `particle_choices` were removed; the hard-coded 100-element
    `random_meteor_type` literal is built from the 80/11/2/5/2 split its
    own commented-out generator documented.
    """
    # Q_c: time-continuous covariance (model error), given as standard
    # deviations in the order
    # [x, y, z, vel_x, vel_y, vel_z, mass, cd, cl, sigma, kappa, brightness, tau]
    Q_c = [10., 2., 2.,
           150., 50., 50.,
           5., 0, 0,
           1e-3, 1e-10, 0., 0.0001]

    # Printed as std-devs (before squaring), matching the original output.
    print('Qc values used:', Q_c)
    Q_c = np.asarray([i**2 for i in Q_c])

    # Q_c_frag is used at reinitialisation if the fragmentation option is on.
    Q_c_frag = [0., 0., 0.,
                0.02, 0.02, 0.02,
                0.5, 0, 0,
                2e-3, 5e-9, 0., 0.]
    Q_c_frag = [i**2 for i in Q_c_frag]

    # P: starting uncertainty to initialise the gaussian spread of particles,
    # in the form [x_cov, y_cov, z_cov, vel_x_cov, vel_y_cov, vel_z_cov].
    P = [50., 50., 50., 250., 250., 250.]

    # Shape parameter range: A for a sphere is 1.21; a rounded brick ~1.8.
    A_min = 1.21
    A_max = 3.0

    # Luminous efficiency coefficient range.
    tau_min = 0.0001
    tau_max = 0.1

    # Typical meteorite densities (kg/m3) and std-devs per type:
    # [chondrite, achondrite, stony-iron, iron, cometary]
    pm_mean = [3000, 3100, 4500, 7500, 850]
    pm_std = [420, 133, 133, 167, 117]

    # Density type indices drawn according to observed fall statistics:
    # 80% chondrite (0), 11% achondrite (1), 2% stony-iron (2),
    # 5% iron (3), 2% cometary (4).
    random_meteor_type = [0]*80 + [1]*11 + [2]*2 + [3]*5 + [4]*2

    range_params = [A_min, A_max, pm_mean, pm_std, random_meteor_type, tau_min, tau_max]

    if fix_params:
        # Freeze sigma, kappa, brightness and tau process noise.
        Q_c[-4:] = [0., 0., 0., 0.]
        Q_c_frag[-4:] = [0., 0., 0., 0.]

    return Q_c, Q_c_frag, P, range_params
if __name__ == '__main__':
# Define MPI message tags
READY, START, DONE, EXIT = 0, 1, 2, 3
# Initializations and preliminaries
comm = MPI.COMM_WORLD # get MPI communicator object
size = comm.size # total number of processes
rank = comm.Get_rank() # rank of this process
status = MPI.Status() # get MPI status object
if rank ==0:
parser = argparse.ArgumentParser(description='Run particle filter on raw camera files.')
#inputgroup = parser.add_mutually_exclusive_group(required=True)
parser.add_argument("-d","--inputdirectory",type=str,
help="input directory of folder containing triangulated files with extension .ecsv", required=True)
parser.add_argument("-p","--numparts",type=int,
help="number of particles to run. Must be an integer.", required=True)
parser.add_argument("-i","--run_option",type=int,
help="would you like to run \n(1) 1D analysis on a single, pre-triangulated trajectory file, \n(2) 3D analysis on multiple pre-triangulated files, \n(3) 3D analysis on calibrated raw observations in ECI, \n(4) 3D analysis on pointwise data",required=True)
parser.add_argument("-fix","--fix_para",action="store_true",
help="fix shape and density to broick and chondrite?",default=False)
parser.add_argument("-c","--comment",type=str,
help="add a version name to appear in saved file titles. If unspecified, _testing_ is used.",default='testing')
parser.add_argument("-f","--fragment",action="store_true",
help="If this is a fragment run",default=False)
parser.add_argument("-t","--fragment_start",type=float, nargs='+',
help="If this is a fragment run, give starting time (seconds in UTC, not event relative)",default=[])
parser.add_argument("-k","--save_kml",action="store_true",
help="save observation kmls?",default=False)
parser.add_argument("-s","--pse",type=str,
help="use start or end points? Type S or E or B for both. Default uses both",default='B')
parser.add_argument("-m","--mass_option",type=int,
help="initial masses to be calculated using ballistic coefficient -- 1; using random logarithmic distribution --2; random uniform distribution --3. Default is 3",default=3)
parser.add_argument("-M0","--m0_max",type=float,
help="if using -m 2 or 3, specify a maximum initial mass. Default is 2000",default=2000.)
#parser.add_argument("-a","--alpha",type=int,
# help="specify a camera number to use alpha values from. Default will be the smallest number. Numbers only, eg. DFNSMALL_25, -a 25",default=0)
parser.add_argument("-o","--old",type=str,
help="previous .fits file",default='')
parser.add_argument("-l","--luminosity_weighting",type=float,
help="weighting for luminosity",default=0.)
parser.add_argument("-r","--time_reverse",action="store_true",
help="would you like to run in reverse?",default=False)
#parser.add_argument("-a","--alpha",type=int,
args = parser.parse_args()
dim = int(args.run_option)
#alpha_cam = int(args.alpha)
mass_opt = int(args.mass_option)
m0_max = float(args.m0_max)
prev_file = args.old
reverse = args.time_reverse
fix_params = args.fix_para
import trajectory_utilities as tu
## TIMER
## if you would like to time processing clocktime of this code,
## uncomment all lines with '##TIMER'
# import timeit
# t_start = timeit.default_timer()
if dim==1:
import geo_1d as df
elif dim==2 or dim==4:
import geo_3d as df
# elif dim == 4:
# import geo_3d_eci as df
elif dim==3:
import full_3d_ECI as df
# number of particles
num_parts = args.numparts
# how is initial mass going to be initiated?
mass_opt = int(args.mass_option)
# what is max initial mass if using mass opt 2 or 3
m0_max = float(args.m0_max)
# are we reloading an old run that terminated early?
prev_file = args.old
# not sure fragment runs work yet.
if args.fragment and not args.fragment_start:
print("if you're running fragments, you need a fragmentation time also. see -help")
sys.exit()
# fragment run?
fragmentation = args.fragment
# times of defined fragmentation times
t_frag = args.fragment_start
# save observation rays as kml?
kmls = args.save_kml
if dim==4:
kmls = False
# use start/end points or both?
if args.pse == 'S' or args.pse == 's':
pse = 'S'
elif args.pse == 'E' or args.pse == 'e':
pse = 'E'
elif args.pse == 'B' or args.pse == 'b':
pse = 'both'
else:
print('-s input invalid, running with ends')
pse = 'E'
# weighting of luminosity observations
lum_weighting_coef = args.luminosity_weighting
# output name defined by inputs
lum_str = '%.2f' % lum_weighting_coef
lum_str = lum_str.replace('.', 'p')
version = 'testing_' + str(dim) +'d_'+ pse + '_' + lum_str if args.comment == '' else '_' + args.comment + '_' + str(dim) + 'd_' + pse + '_' + lum_str
## check if the input directory given exists and pull all ecsv files, and extract data
if (os.path.isdir(args.inputdirectory)):
working_dir = args.inputdirectory
# list all altaz files in this directory
all_ecsv_files = glob.glob(os.path.join(working_dir,"*.ecsv"))
ecsv = [f for f in all_ecsv_files if '_CUT' not in f]
#ecsv = [f for f in all_ecsv_files if 'CUT_TOP' in f]
#ecsv = [f for f in all_ecsv_files if 'CUT_BO' in f]
filenames = []
# check if ecsv files have times and use only those that do
if dim == 4:
for f in ecsv:
if "LeastSquare" in f:
filenames = str(f)
else:
print(f, 'is not a LeastSquare triangulated file')
else:
for f in ecsv:
if "notime" not in f and "LeastSquare" not in f:
filenames.append(str(f))
else:
print(f, 'does not contain timing data and will not be used')
n_obs = len(filenames)
## get data depending on particle filter flavour
# 1D filter:
if dim == 1:
[data, date_info, eci_bool] = bf.DFS_Fireball_Data(filenames, pse, reverse)
x0 = data['dist_from_start'][0]
v0 = data['D_DT'][1] # currently first data point has a nan velocity, so use second as close approximation.
out_name = data['datetime'][0].split('T')[0].replace('-','') + '_1D'
else:
if dim==2 or dim==4: # 3D cartesian
## t0 is start of filter, T0 is start of fireball
if dim==2:
data, t0, T0, eci_bool = bf.Geo_Fireball_Data(filenames, pse, reverse)
out_name = data['datetime'][0].split('T')[0].replace('-','') + '_3Dtriangulated'
# if n_obs>1:
# [x0, v0, x0_err, v0_err, date_info] = bf.RoughTriangulation(data, t0, reverse, eci_bool)
# if reverse: date_info = np.append(date_info, Time(data['datetime'][0], format='isot', scale='utc'))
# else: date_info = np.append(date_info, T0)
# else:
yday = T0.yday.split(':')
y = float(yday[0])
d = float(yday[1])
s = float(yday[2]) * 60 * 60 + float(yday[3]) * 60 + float(yday[4])
t_stack = np.vstack((y, d, s))
data.sort('time')
if 'X_eci' in data.colnames:
print('Running in ECI')
[x0, v0, date_info] = [[data['X_eci'][0], data['Y_eci'][0], data['Z_eci'][0]],
[(data['X_eci'][2] - data['X_eci'][0])/(data['time'][2] - data['time'][0]),
(data['Y_eci'][2] - data['Y_eci'][0])/(data['time'][2] - data['time'][0]),
(data['Z_eci'][2] - data['Z_eci'][0])/(data['time'][2] - data['time'][0])],
t_stack]
else:
print('Running in ECEF')
[x0, v0, date_info] = [[data['X_geo'][0], data['Y_geo'][0], data['Z_geo'][0]],
[(data['X_geo'][2] - data['X_geo'][0])/(data['time'][2] - data['time'][0]),
(data['Y_geo'][2] - data['Y_geo'][0])/(data['time'][2] - data['time'][0]),
(data['Z_geo'][2] - data['Z_geo'][0])/(data['time'][2] - data['time'][0])],
t_stack]
if reverse: date_info = np.append(date_info, Time(data['datetime'][0], format='isot', scale='utc'))
else: date_info = np.append(date_info, T0)
if dim==4:
data, t0, T0, eci_bool = bf.Geo_Fireball_Data_newtriang(filenames, pse, reverse)
yday = T0.yday.split(':')
y = float(yday[0])
d = float(yday[1])
s = float(yday[2]) * 60 * 60 + float(yday[3]) * 60 + float(yday[4])
t_stack = np.vstack((y, d, s))
# data.sort('time')
# if reverse:
# data.reverse()
out_name = data['datetime'][0].split('T')[0].replace('-','') + '_3Dtriangulated'
[x0, v0, date_info] = [[data['X_eci'][0], data['Y_eci'][0], data['Z_eci'][0]],
[data['DX_DT_eci'][0], data['DY_DT_eci'][0], data['DZ_DT_eci'][0]],
t_stack]
if reverse: date_info = np.append(date_info, Time(data['datetime'][0], format='isot', scale='utc'))
else: date_info = np.append(date_info, T0)
elif dim==3 : # 3D rays
if n_obs>1:
## t0 is start of filter, T0 is start of fireball
data, t0, T0, eci_bool = bf.Geo_Fireball_Data(filenames, pse, reverse)
out_name = data['datetime'][0].split('T')[0].replace('-','') + '_3Draw'
data.sort('time')
if reverse:
data.reverse()
[x0, v0, x0_err, v0_err, date_info] = bf.RoughTriangulation(data, t0, reverse, eci_bool)
data.sort('time')
if reverse:
data.reverse()
if reverse: date_info = np.append(date_info, Time(data['datetime'][0], format='isot', scale='utc'))
else: date_info = np.append(date_info, T0)
else:
print('you need at least two camera files for your choice of -i option.')
exit(1)
else:
print('invalid run option key -i')
exit(1)
## define list of unique timesteps and their locations in data table
data.sort('time')
if reverse:
data.reverse()
data_times = data['time']
data_t = np.array([[float(data_times[0])], [str(data['datetime'][0])], [int(0)]])
for i in range(1, len(data_times)):
if data_times[i] != float(data_t[0, -1]):
data_t = np.hstack((data_t, [[float(data_times[i])], [str(data['datetime'][i])], [int(i)]]))
# print(data[0], data[-1], x0, np.linalg.norm(v0))
else:
print('not a valid input directory')
exit(1)
## distinguish between fragmentation filter runs and simple run, create output directory.
if fragmentation:
print('this will be a fragmentation run')
# create output directory
out_name = out_name + "_frag_"
if not os.path.exists(os.path.join(working_dir ,"outputs", out_name)):
os.makedirs(os.path.join(working_dir ,"outputs", out_name))
## save times given for fragmentation events
# and adds an extra index at the end which is beyond last iteration
# so line 588 if statement will work after frag event has happened.
for i in range(len(t_frag)+1):
if i<len(t_frag):
t_frag[i] = (np.abs(np.array(float(data_t[0, :]))-t_frag[i])).argmin()
else:
t_frag = np.hstack([t_frag, len(data_t[0, :])+1])
print(t_frag)
else:
## create output directory
if not os.path.exists(os.path.join(working_dir,"outputs",out_name)):
os.makedirs(os.path.join(working_dir,"outputs",out_name))
## no fragmentation times are given
t_frag = [len(data_t[0])+1]
## save raw data for visualisation after
if eci_bool:
eci_name = 'eci'
else:
eci_name = 'ecef'
name= os.path.join(working_dir ,"outputs", out_name, out_name +version +'_' + eci_name + '_mass'+ str(mass_opt) + '_inputdata.csv')
data.write(name, format='ascii.csv', delimiter=',')
## save kmls of observation vectors from all cameras
if kmls:
bf.RawData2KML(filenames, pse)
bf.RawData2ECEF(filenames, pse)
## defining number of particles to run
n = int(num_parts/(comm.size)) +1 # number of particles for each worker to use
N = n * comm.size # total number of particles (n*24)
## info to share with ranks:
T = len(data_t[0]) # number of timesteps
p_all = np.empty([N, 42])
## empy space for effectiveness array
n_eff_all = np.zeros(T) # empy space for effectiveness array
f = 0 # index of fragmentation event (incremented in frag section at end of function)
l_weight = False # if low weright, use fragmentation scattering
[Q_c, Q_c_frag, P, range_params] = ParticleFilterParams()
## grouping everything to send
alpha = data['alpha'][0]
init_info = [version, T, n, N, data_t, data, x0, v0, out_name, f, t_frag, dim, l_weight, alpha, mass_opt, m0_max, reverse, date_info, eci_bool, eci_name, fix_params]
## send it all ranks
for i in range(1, size):
comm.send(init_info, dest = i)
## TIMER
#t_1 = timeit.default_timer()-t_start
#print('time to initialise code', t_1)
else:
[version, T, n, N, data_t, data, x0, v0, out_name, f, t_frag, dim, l_weight, alpha, mass_opt, m0_max, reverse, date_info, eci_bool, eci_name, fix_params] = comm.recv(source=0)
if dim == 1:
import geo_1d as df
elif dim == 2 or dim == 4:
import geo_3d as df
elif dim == 3:
import full_3d_ECI as df
[Q_c, Q_c_frag, P, range_params] = ParticleFilterParams()
p_all = None
#########################################################
## all ranks get an empty working array
p_working = np.empty([n, 42])#, dtype='O')
comm.Barrier()
#---------------------------------------------------------
# SIS Particle filter
#---------------------------------------------------------
################# Step 1 - Initialisaton #################
## master scatters initial empty array to all ranks to be filled for t0
comm.Scatterv(p_all, p_working, root=0)
for i in range(n):
p_working[i, :] = df.Initialise(x0, v0, rank*n+i, rank*n+i, N, P, range_params, alpha, date_info, mass_opt, m0_max, data['gamma'][0], eci_bool, fix_params)
comm.Gatherv( p_working, p_all, root=0)
comm.Barrier()
############## Master saves initial timestep ###########
if rank ==0:
## TIMER
#t_2 = timeit.default_timer()-t_start - t_1
#print('time to initialise particles', t_2)
if prev_file != '':
# if -o option used, this overwrites table just initialised with previous file given
# p_all = np.empty([N, 42])#, dtype='O')
name = prev_file
results_list = fits.open(prev_file)
name_end = prev_file.replace(".fits", "_end.fits")
results_prev_open = fits.open(name_end)
results_prev = results_prev_open[1].data #results_prev = results_list[-1].data
temp_time = [float(x) for x in data_t[0]]
t0 = (np.abs(temp_time-results_prev['time'][0])).argmin() +1
results_prev = Table(results_prev)
results_prev.remove_columns(['datetime', 'time'])
p_all = np.vstack([ results_prev['X_geo'].data,
results_prev['Y_geo'].data,
results_prev['Z_geo'].data,
results_prev['X_geo_DT'].data,
results_prev['Y_geo_DT'].data,
results_prev['Z_geo_DT'].data,
results_prev['mass'].data,
results_prev['cd'].data,
results_prev['A'].data,
results_prev['kappa'].data,
results_prev['sigma'].data,
results_prev['mag_v'].data,
results_prev['tau'].data,
results_prev['Q_x'].data,
results_prev['Q_y'].data,
results_prev['Q_z'].data,
results_prev['Q_v_x'].data,
results_prev['Q_v_y'].data,
results_prev['Q_v_z'].data,
results_prev['Q_m'].data,
results_prev['Q_cd'].data,
results_prev['Q_cl'].data,
results_prev['Q_k'].data,
results_prev['Q_s'].data,
results_prev['Q_tau'].data,
results_prev['brightness'].data,
results_prev['rho'].data,
results_prev['parent_index'].data,
results_prev['orig_index'].data,
results_prev['weight'].data,
results_prev['D_DT'].data,
np.deg2rad(results_prev['latitude']).data,
np.deg2rad(results_prev['longitude']).data,
results_prev['height'].data,
results_prev['lum_weight'].data,
results_prev['pos_weight'].data,
results_prev['X_eci'].data,
results_prev['Y_eci'].data,
results_prev['Z_eci'].data,
results_prev['X_eci_DT'].data,
results_prev['Y_eci_DT'].data,
results_prev['Z_eci_DT'].data])
p_all = copy.deepcopy(np.asarray(p_all).T)
# if number of particles no longer matches the number of cores to run, add blank lines
if n != len(p_all[0])/comm.size:
p_all = np.vstack([p_all, np.zeros([abs(n * comm.size - len(p_all)), len(p_all[0])])])
## initialise output table
results_end = fits.PrimaryHDU()
results_end.writeto(name_end, clobber=True)
else:
initialise = np.hstack((p_all,
np.ones((N,1))* float(data_t[0,0]),
np.array([data_t[1,0] for i in range(N)]).reshape(-1, 1)))
## initialise output table
results = fits.PrimaryHDU()
name= os.path.join(working_dir ,"outputs", out_name, out_name + version + '_' + eci_name + '_mass'+ str(mass_opt) + "_outputs.fits")
results.writeto(name, clobber=True)
## create first HDU table and save
results = fits.BinTableHDU.from_columns([fits.Column(name='time', format='D', array=initialise[:, 42]),
fits.Column(name='datetime', format='25A', array=initialise[:, 43]),
fits.Column(name='X_geo', format='D', array=initialise[:, 0]),
fits.Column(name='Y_geo', format='D', array=initialise[:, 1]),
fits.Column(name='Z_geo', format='D', array=initialise[:, 2]),
fits.Column(name='X_geo_DT', format='D', array=initialise[:, 3]),
fits.Column(name='Y_geo_DT', format='D', array=initialise[:, 4]),
fits.Column(name='Z_geo_DT', format='D', array=initialise[:, 5]),
fits.Column(name='X_eci', format='D', array=initialise[:, 36]),
fits.Column(name='Y_eci', format='D', array=initialise[:, 37]),
fits.Column(name='Z_eci', format='D', array=initialise[:, 38]),
fits.Column(name='X_eci_DT', format='D', array=initialise[:, 39]),
fits.Column(name='Y_eci_DT', format='D', array=initialise[:, 40]),
fits.Column(name='Z_eci_DT', format='D', array=initialise[:, 41]),
fits.Column(name='mass', format='D', array=initialise[:, 6]),
fits.Column(name='cd', format='D', array=initialise[:, 7]),
fits.Column(name='A', format='D', array=initialise[:, 8]),
fits.Column(name='kappa', format='D', array=initialise[:, 9]),
fits.Column(name='sigma', format='D', array=initialise[:, 10]),
fits.Column(name='mag_v', format='D', array=initialise[:, 11]),
fits.Column(name='tau', format='D', array=initialise[:, 12]),
fits.Column(name='Q_x', format='D', array=initialise[:, 13]),
fits.Column(name='Q_y', format='D', array=initialise[:, 14]),
fits.Column(name='Q_z', format='D', array=initialise[:, 15]),
fits.Column(name='Q_v_x', format='D', array=initialise[:, 16]),
fits.Column(name='Q_v_y', format='D', array=initialise[:, 17]),
fits.Column(name='Q_v_z', format='D', array=initialise[:, 18]),
fits.Column(name='Q_m', format='D', array=initialise[:, 19]),
fits.Column(name='Q_cd', format='D', array=initialise[:, 20]),
fits.Column(name='Q_cl', format='D', array=initialise[:, 21]),
fits.Column(name='Q_k', format='D', array=initialise[:, 22]),
fits.Column(name='Q_s', format='D', array=initialise[:, 23]),
fits.Column(name='Q_tau', format='D', array=initialise[:, 24]),
fits.Column(name='brightness', format='D', array=initialise[:, 25]),
fits.Column(name='rho', format='D', array=initialise[:, 26]),
fits.Column(name='parent_index', format='D', array=initialise[:, 27]),
fits.Column(name='orig_index', format='D', array=initialise[:, 28]),
fits.Column(name='weight', format='D', array=initialise[:, 29]),
fits.Column(name='D_DT', format='D', array=initialise[:, 30]),
fits.Column(name='latitude', format='D', array=[np.rad2deg(float(x)) for x in initialise[:, 31]]),
fits.Column(name='longitude', format='D', array=[np.rad2deg(float(x)) for x in initialise[:, 32]]),
fits.Column(name='height', format='D', array=initialise[:, 33]),
fits.Column(name='lum_weight', format='D', array=initialise[:, 34]),
fits.Column(name='pos_weight', format='D', array=initialise[:, 35])])
results_list = fits.open(name, mode= 'append')
results_list.append(results)
results_list[-1].name='time_0'
results_list.writeto(name, clobber=True)
print('data saved to ', name)
# as no previous file was given, the first prediction step will be index = 1 in the time steps.
t0 = 1
## TIMER
#t_3 = timeit.default_timer()-t_start - t_2
#print('time to initialise table', t_3)
for i in range(1, size):
comm.send(t0, dest = i)
else:
t0 = comm.recv(source=0)
comm.Barrier()
################# ALL ####################################
## performing iterative filter
for t in range(t0, T):
## everyone gets time
tk = float(data_t[0, t])
tkm1 = float(data_t[0, t-1])
t_end = False
## does time = time of user defined fragmetation event
if t == t_frag[f]:
frag = True
f +=1
## if low weighting, use fragmentation covariance to scatter particles more.
elif l_weight:
frag = True
else:
frag = False
## if this is the final timestep, prediction step allows mass to go to 0.
if t == T-1:
t_end = True
############ Master gets observation data ################
if rank ==0:
print('iteration is: ', t, 'of', T-1, 'at time:', tk)
# find the indices of the data in the data table that correspond to the current time
obs_index_st = int(data_t[2, t])
obs_index_ed = int(data_t[2, int(t+1)]) if len(data_t[0])> t+1 else int(len(data))
# determine if there are luminosity values available
lum_info = []
for i in range(obs_index_st, obs_index_ed):
#print(data['magnitude'][i])
if data['magnitude'][i] <50: # if 'luminosity' in data.colnames:
lum_info.append([data['magnitude'][i]])
lum_info.append([data['mag_error'][i]])
if dim == 1: # 1D filter
obs_info = np.zeros((obs_index_ed - obs_index_st, 2))
for i in range(0, obs_index_ed-obs_index_st):
obs_info[i,:] = [data['dist_from_start'][i+obs_index_st], data['pos_err'][i+obs_index_st]]
fireball_info= [data['Lat_rad'][obs_index_st], data['Lon_rad'][obs_index_st], data['height'][obs_index_st], date_info[0], date_info[1], date_info[2]+tk, data['g_sin_gamma'][obs_index_st]]
elif dim == 2 or dim == 4: # 3D cardesian
obs_info = np.zeros((obs_index_ed - obs_index_st, 6))
if eci_bool:
for i in range(0, obs_index_ed-obs_index_st):
obs_info[i,:] = [data['X_eci'][i+obs_index_st], data['Y_eci'][i+obs_index_st], data['Z_eci'][i+obs_index_st], data['R_X_eci'][i+obs_index_st], data['R_Y_eci'][i+obs_index_st], data['R_Z_eci'][i+obs_index_st]]
else:
for i in range(0, obs_index_ed-obs_index_st):
obs_info[i,:] = [data['X_geo'][i+obs_index_st], data['Y_geo'][i+obs_index_st], data['Z_geo'][i+obs_index_st], data['R_X_geo'][i+obs_index_st], data['R_Y_geo'][i+obs_index_st], data['R_Z_geo'][i+obs_index_st]]
fireball_info= [0, 0, 0, date_info[0], date_info[1], date_info[2]+tk, date_info[3], tk]
elif dim == 3: # 3D rays
obs_info = np.zeros((obs_index_ed - obs_index_st, 7))
for i in range(0, obs_index_ed-obs_index_st):
##use table errors
obs_info[i,:] = [data['azimuth'][i+obs_index_st], data['altitude'][i+obs_index_st], data['obs_lat'][i+obs_index_st], data['obs_lon'][i+obs_index_st], data['obs_hei'][i+obs_index_st], data['R_azi'][i+obs_index_st], data['R_alt'][i+obs_index_st]]
## use double table errors
# obs_info[i,:] = [data['azimuth'][i+obs_index_st], data['altitude'][i+obs_index_st], data['obs_lat'][i+obs_index_st], data['obs_lon'][i+obs_index_st], data['obs_hei'][i+obs_index_st], data['R_azi'][i+obs_index_st]*2, data['R_alt'][i+obs_index_st]*2]
## use 0.1 degrees
# obs_info[i,:] = [data['azimuth'][i+obs_index_st], data['altitude'][i+obs_index_st], data['obs_lat'][i+obs_index_st], data['obs_lon'][i+obs_index_st], data['obs_hei'][i+obs_index_st], data['R_UV'][i+obs_index_st], data['R_UV'][i+obs_index_st]]
fireball_info= [0, 0, 0, date_info[0], date_info[1], date_info[2]+tk, date_info[3], tk]
for i in range(1, size):
comm.send([obs_info, lum_info, frag, fireball_info], dest = i)
## TIMER
#t_pfstart = timeit.default_timer()
else:
[obs_info, lum_info, frag, fireball_info] = comm.recv(source=0)
############# Step 2 - Predict and update ##################
## master sends particles to ranks to perform 'forward step' which includes
## non-linear integration of state, model covariance and then calculates particle
## likelihood. These are sent back to master.
comm.Barrier()
comm.Scatterv(p_all, p_working, root=0)
## each rank loops though their array of objects performing 'forward step' which includes
# non-linear integration of state, model covariance and then calculates particle likelihood
if frag:
for i in range(n):
p_working[i, :] = df.particle_propagation(p_working[i], 2/3., tkm1, tk, fireball_info, obs_info, lum_info, rank*n+i, N, frag, t_end, Q_c_frag, m0_max, reverse, eci_bool, fix_params)
else:
for i in range(n):
p_working[i, :] = df.particle_propagation(p_working[i], 2/3., tkm1, tk, fireball_info, obs_info, lum_info, rank*n+i, N, frag, t_end, Q_c, m0_max, reverse, eci_bool, fix_params)
comm.Gatherv( p_working, p_all, root=0)
########## Master calculates weights and resamples ########
if rank ==0:
print('master collected all ')
## TIMER
#t_4 = timeit.default_timer()-t_pfstart
#print('time to integrate', t_4)
## if you want to turn resampling on/off... do it here
if t_end:
resamp = False
else:
resamp = True
#####################
# resampling for log weights calculated in particle_propagation:
if resamp:
w = np.empty([8, N])
## 'w' - is an array for the weight calculations.
## Row indices are: [pos_weight,
## lum_weight
## normalised pos_weight,
## normalised lum_weight,
## combined normalised weight,
## exp of combined normalised weight,
## cumulative weight,
## col in p_all array (depreciated)]
for i in range(N):
## first set any NAN weightings to approx = 0
if np.isnan(p_all[i, 35]):
p_all[i, :] = p_all[i, :] * 0.
p_all[i, 35] = -5000.
elif np.isnan(p_all[i, 34]):
p_all[i, :] = p_all[i, :] * 0.
p_all[i, 34] = -5000.
## fill in 'w' with position and luminous weightings and particle index
w[:, i] = np.array([p_all[i, 35], p_all[i, 34],0., 0., 0., 0., 0., i]).T
mx_p = max(w[0,:])
mx_l = max(w[1,:])
weights_sum_p = np.log(np.sum([ np.exp(i - mx_p) for i in w[0, :]])) + mx_p
weights_sum_l = np.log(np.sum([ np.exp(i - mx_l) for i in w[1, :]])) + mx_l
#weights_sqr_sum = sum(w[0, :]**2)
print(mx_p, mx_l, weights_sum_p, weights_sum_l)
# l_weight = False
w[2, :] = w[0, :] - weights_sum_p # fill in normalised sum in log space
w[3, :] = w[1, :] - weights_sum_l # fill in normalised sum in log space
##TODO: this is where luminosity relative weighting should be adjusted!
#w[4, :] = w[2, :] + (w[3, :] * 0.)
#w[4, :] = w[2, :] + (w[3, :] * 1.)
#w[4, :] = w[2, :] + (w[3, :] * 1/5.)
# w[4, :] = w[2, :] + (w[3, :] * 1/10.)
#w[4, :] = w[2, :] + (w[3, :] * 1/50. )
#w[4, :] = w[2, :] + (w[3, :] * 1/100.)
# w[4, :] = w[2, :] + (w[3, :] * 1/1000. )
# w[4, :] = [np.log(np.exp(w[2, i]) * np.exp(w[3, i]) * lum_weighting_coef) for i in range(N)]
# print(w[4, 0], np.exp(w[2, 0]), np.exp(w[3, 0]) , lum_weighting_coef)
# print(w[2, 0] , w[3, 0])
# print(w)
w[4, :] = w[2, :]
mx = max(w[4,:])
weights_sum = np.log(np.sum([ np.exp(i - mx) for i in w[4, :]])) + mx
# print(mx, weights_sum)
w[4, :] = w[4, :] - weights_sum
p_all[:, 35] = w[2, :]
p_all[:, 34] = w[3, :]
p_all[:, 29] = w[4, :]
w[5, :] = [exp(i) for i in w[4,:]] # take exp of normalised sum
w[6, :] = np.cumsum(w[5, :]) # fill in cumulative sum
## calculate particle effectiveness for degeneracy
n_eff = 1/np.sum(w[5, :]**2)
#n_eff_all[t] = n_eff
print('sum of weights: ', weights_sum)
print('effectiveness: ', n_eff/ N * 100, '%')
## resampling
# print(w)
draw = np.random.uniform(0, 1 , N)
index = np.searchsorted(w[6, :], draw, side='right')
# print(w[7], index,N)
p2 = np.asarray([p_all[int(w[7, index[j]])] for j in range(N)]) # saved in a new array so that nothing is overwritten.
# print(p2)
# print(w)
#p2[:, 29] = np.asarray([w[4, w[7, index[j]]] for j in range(N)]) 3 should do the same as line "p_all[:, 29] = w[4, :]"
mx = max(p2[:, 29])
weights_sum = np.log(np.sum([ np.exp(i - mx) for i in p2[:, 29]])) + mx
p2[:, 29] = [exp(i-weights_sum) for i in p2[:, 29]]
#
p_all = np.asarray(copy.deepcopy(p2))
avg_vel = 0.
avg_mass = 0.
avg_kappa = 0.
avg_sigma = 0.
avg_mag = 0.
for i in range(N):
## if printing averages to terminal,, uncomment next 6 lines:
avg_vel += np.linalg.norm([p_all[i, 3], p_all[i, 4], p_all[i, 5]]) * p_all[i, 29]
avg_mass += p_all[i, 6] * p_all[i, 29]
avg_kappa += p_all[i, 9] * p_all[i, 29]
avg_sigma += p_all[i, 10] * p_all[i, 29]
avg_mag += p_all[i, 11] * p_all[i, 29]
print('mean velocity: ', avg_vel )
print('mean mass: ', avg_mass)
print('mean kappa: ', avg_kappa)
print('mean sigma: ', avg_sigma * 1e6)
print('mean M_v: ', avg_mag)
print('observed M_vs: ', lum_info)
## TIMER
#t_5 = timeit.default_timer()-t_pfstart-t_4
#print('time to resample', t_5)
# save resulting table in HDU fits file
p_out = np.hstack((p_all,
np.ones((N,1))*tk,
np.array([data_t[1,t] for i in range(N)]).reshape(-1, 1)))
results = fits.BinTableHDU.from_columns([fits.Column(name='time', format='D', array=p_out[:, 42]),
fits.Column(name='datetime', format='25A', array=p_out[:, 43]),
fits.Column(name='X_geo', format='D', array=p_out[:, 0]),
fits.Column(name='Y_geo', format='D', array=p_out[:, 1]),
fits.Column(name='Z_geo', format='D', array=p_out[:, 2]),
fits.Column(name='X_geo_DT', format='D', array=p_out[:, 3]),
fits.Column(name='Y_geo_DT', format='D', array=p_out[:, 4]),
fits.Column(name='Z_geo_DT', format='D', array=p_out[:, 5]),
fits.Column(name='X_eci', format='D', array=p_out[:, 36]),
fits.Column(name='Y_eci', format='D', array=p_out[:, 37]),
fits.Column(name='Z_eci', format='D', array=p_out[:, 38]),
fits.Column(name='X_eci_DT', format='D', array=p_out[:, 39]),
fits.Column(name='Y_eci_DT', format='D', array=p_out[:, 40]),
fits.Column(name='Z_eci_DT', format='D', array=p_out[:, 41]),
fits.Column(name='mass', format='D', array=p_out[:, 6]),
fits.Column(name='cd', format='D', array=p_out[:, 7]),
fits.Column(name='A', format='D', array=p_out[:, 8]),
fits.Column(name='kappa', format='D', array=p_out[:, 9]),
fits.Column(name='sigma', format='D', array=p_out[:, 10]),
fits.Column(name='mag_v', format='D', array=p_out[:, 11]),
fits.Column(name='tau', format='D', array=p_out[:, 12]),
fits.Column(name='Q_x', format='D', array=p_out[:, 13]),
fits.Column(name='Q_y', format='D', array=p_out[:, 14]),
fits.Column(name='Q_z', format='D', array=p_out[:, 15]),
fits.Column(name='Q_v_x', format='D', array=p_out[:, 16]),
fits.Column(name='Q_v_y', format='D', array=p_out[:, 17]),
fits.Column(name='Q_v_z', format='D', array=p_out[:, 18]),
fits.Column(name='Q_m', format='D', array=p_out[:, 19]),
fits.Column(name='Q_cd', format='D', array=p_out[:, 20]),
fits.Column(name='Q_cl', format='D', array=p_out[:, 21]),
fits.Column(name='Q_k', format='D', array=p_out[:, 22]),
fits.Column(name='Q_s', format='D', array=p_out[:, 23]),
fits.Column(name='Q_tau', format='D', array=p_out[:, 24]),
fits.Column(name='brightness', format='D', array=p_out[:, 25]),
fits.Column(name='rho', format='D', array=p_out[:, 26]),
fits.Column(name='parent_index', format='D', array=p_out[:, 27]),
fits.Column(name='orig_index', format='D', array=p_out[:, 28]),
fits.Column(name='weight', format='D', array=p_out[:, 29]),
fits.Column(name='D_DT', format='D', array=p_out[:, 30]),
fits.Column(name='latitude', format='D', array=[np.rad2deg(float(x)) for x in p_out[:, 31]]),
fits.Column(name='longitude', format='D', array=[np.rad2deg(float(x)) for x in p_out[:, 32]]),
fits.Column(name='height', format='D', array=p_out[:, 33]),
fits.Column(name='lum_weight', format='D', array=p_out[:, 34]),
fits.Column(name='pos_weight', format='D', array=p_out[:, 35])])
results_list.append(results)
results_list[-1].name='time_'+str(t)
results_list.writeto(name, clobber=True)
print('data saved to ', name)
# save this table in its own righ as an 'end' file in case code is interrupted.
# this means only this will need to be read in rather than trying to extract
# end table only from large HDU file
name_end = name.replace(".fits", "_end.fits")
results.writeto(name_end, clobber=True)
# end this iteration
print("now I've done collective things, start again. end timestep #", t, "at time ", tk, "secs")
## TIMER
#t_6 = timeit.default_timer()-t_pfstart-t_5
#print('time to resample', t_6)
comm.Barrier() ## all ranks are held while master performs resampling.
print("we're all happy workers :-). Now saving all data to one table")
comm.Barrier()
########## Master saves table with all particles ########
## master extracts all HDU tables and appends them to one large output table
## for plotting together.
if rank==0:
tabs = fits.open(name)
nrows = int(N *T)
all_results = fits.BinTableHDU.from_columns(tabs[1].columns, nrows=nrows)
for colname in tabs[1].columns.names:
for i in range(2,T+1):
j = i-1
all_results.data[colname][N*j:N*j+N] = tabs[i].data[colname]
name= os.path.join(working_dir ,"outputs", out_name, out_name + version + '_' + eci_name + '_mass'+ str(mass_opt) + '_outputs_all.fits')
all_results.writeto(name, clobber=True)
print('data saved to ', name)
## saves a table of means for each timestep
mean_results = fits.BinTableHDU.from_columns(tabs[1].columns, nrows=T)
for colname in tabs[1].columns.names:
for i in range(2, T+1):
if colname != 'time' and colname != 'datetime':
col_data = sum(tabs[i].data[colname] * tabs[i].data['weight'])
mean_results.data[colname][i-1] = col_data
else:
col_data = tabs[i].data[colname]
mean_results.data[colname][i-1] = col_data[0]
name= os.path.join(working_dir ,"outputs", out_name, out_name + version + '_' + eci_name + '_mass'+ str(mass_opt) + '_outputs_mean.fits')
mean_results.writeto(name, clobber=True)
print('mean data saved to ', name)
#------------------------------------------------------------------------------
# hack to fix lsoda problem
#------------------------------------------------------------------------------
#
def fileno(file_or_fd):
    """Return the OS-level file descriptor for a file object or a raw fd.

    Accepts either an object exposing ``.fileno()`` or an integer
    descriptor; anything else raises ``ValueError``.
    """
    get_fd = getattr(file_or_fd, 'fileno', None)
    descriptor = get_fd() if get_fd is not None else file_or_fd
    if not isinstance(descriptor, int):
        raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
    return descriptor
@contextlib.contextmanager
def stdout_redirected(to=os.devnull, stdout=None):
    """Temporarily redirect ``stdout`` at the file-descriptor level.

    Unlike rebinding ``sys.stdout``, duplicating the descriptor with
    ``os.dup2`` also silences output written straight to fd 1 by C
    extensions (the surrounding comments say this is a workaround for
    LSODA solver chatter).  ``to`` may be an open file, a raw integer
    descriptor, or a filename.

    https://stackoverflow.com/a/22434262/190597 (J.F. Sebastian)
    """
    if stdout is None:
        stdout = sys.stdout
    stdout_fd = fileno(stdout)
    # copy stdout_fd before it is overwritten
    #NOTE: `copied` is inheritable on Windows when duplicating a standard stream
    with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
        stdout.flush()  # flush library buffers that dup2 knows nothing about
        try:
            os.dup2(fileno(to), stdout_fd)  # $ exec >&to
        except ValueError:  # filename
            # `to` had no usable descriptor: treat it as a path and open it.
            with open(to, 'wb') as to_file:
                os.dup2(to_file.fileno(), stdout_fd)  # $ exec > to
        try:
            yield stdout  # allow code to be run with the redirected stdout
        finally:
            # restore stdout to its previous value
            #NOTE: dup2 makes stdout_fd inheritable unconditionally
            stdout.flush()
            os.dup2(copied.fileno(), stdout_fd)  # $ exec >&copied
########## End of particle filter code ########
| true |
755cf8dd430dbb6cd6204923ed63ecf62b1f19c7 | Python | vgucsd/cubesat | /lsdo_cubesat/communication/Earth_spin_comp.py | UTF-8 | 2,164 | 2.859375 | 3 | [] | no_license | import numpy as np
from openmdao.api import ExplicitComponent
class EarthSpinComp(ExplicitComponent):
    """Earth attitude quaternion ``q_E`` over time.

    Writes only rows 0 (scalar part) and 3 of the 4 x num_times output —
    a rotation about a single axis.  The angle grows at pi/86400 rad/s,
    which is presumably the quaternion half-angle of one revolution per
    day — confirm against the frame conventions used elsewhere.
    """
    def initialize(self):
        self.options.declare('num_times', types=int)
        # self.options.declare('launch_date', types=float)

    def setup(self):
        num_times = self.options['num_times']
        # launch_date = self.options['launch_date']
        self.add_input('comm_times', shape=num_times, units='s')
        self.add_output(
            'q_E',
            shape=(4, num_times),
            units=None,
            desc='Quaternion matrix in Earth-fixed frame over time')
        # Sparse Jacobian pattern: entry (row, t) of q_E depends only on
        # comm_times[t], so each of the 4 rows maps onto the same columns.
        jac_cols = np.tile(np.arange(0, num_times), 4)
        jac_rows = np.arange(0, 4 * num_times)
        self.declare_partials('q_E', 'comm_times', rows=jac_rows, cols=jac_cols)

    def compute(self, inputs, outputs):
        times = inputs['comm_times']
        rate = np.pi / 3600.0 / 24.0
        angle = rate * times
        outputs['q_E'][0, :] = np.cos(angle)
        outputs['q_E'][3, :] = -np.sin(angle)

    def compute_partials(self, inputs, partials):
        num_times = self.options['num_times']
        times = inputs['comm_times']
        rate = np.pi / 3600.0 / 24.0
        angle = rate * times
        # Derivatives of rows 0 and 3 w.r.t. time; rows 1 and 2 stay zero.
        dq_dt = np.zeros((4, num_times))
        dq_dt[0, :] = -np.sin(angle) * rate
        dq_dt[3, :] = -np.cos(angle) * rate
        partials['q_E', 'comm_times'] = dq_dt.flatten()
if __name__ == '__main__':
    # Quick self-check: build a one-component model, run it, and verify
    # the analytic partials against finite differences.
    from openmdao.api import Problem, Group
    from openmdao.api import IndepVarComp
    num_times = 30
    group = Group()
    comp = IndepVarComp()
    # NOTE(review): this provides an output named 'times', but the
    # component's input is 'comm_times' — the promoted names do not
    # match, so the input is likely left at its default. Confirm intent.
    comp.add_output('times', val=np.arange(num_times))
    group.add_subsystem('Inputcomp', comp, promotes=['*'])
    group.add_subsystem('EarthSpinComp',
                        EarthSpinComp(num_times=num_times),
                        promotes=['*'])
    prob = Problem()
    prob.model = group
    prob.setup(check=True)
    prob.run_model()
    prob.model.list_outputs()
    prob.check_partials(compact_print=True)
| true |
5f8ca3dda08da1335a4e8976e1a00be59689bbc5 | Python | Athiramolsali/ScreeningAnswers | /Answer5.py | UTF-8 | 205 | 3.328125 | 3 | [] | no_license | #Answer5
# Sample search term; note fname() below takes its own parameter `s`,
# so this module-level value is not read by the function.
s = 'String'
def fname(s, filename='myfile.txt'):
    """Report whether the string ``s`` occurs in a text file.

    Fixes the original, which shadowed and ignored its parameter and
    always searched for the literal ``'String'``. The filename is now a
    backward-compatible parameter, and the result is returned so callers
    can use it programmatically (the prints are kept for compatibility).

    Args:
        s: substring to search for.
        filename: path of the file to scan (default ``'myfile.txt'``).

    Returns:
        True if ``s`` was found in the file, False otherwise.
    """
    with open(filename) as myfile:
        found = s in myfile.read()
    if found:
        print('String is found in file')
    else:
        print('Not found')
    return found
c41c4438e6bff2857190a054e3476ac713e41c5b | Python | thenicopixie/holbertonschool-higher_level_programming | /0x0B-python-input_output/3-write_file.py | UTF-8 | 419 | 3.9375 | 4 | [] | no_license | #!/usr/bin/python3
""" Module that writes a string to a test file and returns the number
of character written"""
def write_file(filename="", text=""):
"""Write a string to a file. Return the number of
characters written.
Args:
filename - file to write to
text - text to write in file
"""
with open(filename, mode="w", encoding="utf-8") as a_file:
return a_file.write(text)
| true |
ce3f883b034775d121c96f9283fb9879f66966cc | Python | chriztaka1997/eular-practice-problem | /problem1-10/problem7.py | UTF-8 | 734 | 4.25 | 4 | [] | no_license | ###########################
#
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13,
# we can see that the 6th prime is 13.
#
# What is the 10 001st prime number?
#
###########################
def problem7():
number = 10001
def is_prime(num):
if num > 1:
for i in range(2, num):
if num % i == 0:
return False
return True
else:
return False
if number == 1:
return 2
if number == 2:
return 3
num_of_prime = 2
current_int = 4
while num_of_prime != number:
if is_prime(current_int):
num_of_prime += 1
current_int += 1
return current_int - 1
print(problem7())
| true |
dea8caa80e8be54d0e2f3895e18fc8bb8209e560 | Python | chasedenecke/imp3 | /q3.py | UTF-8 | 7,632 | 2.671875 | 3 | [] | no_license |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import seaborn as sns
import math
sns.set()
import sys
import numpy as np
import matplotlib.pyplot as plt
# Function returns two subsets of a data set.
# @param split A % to split the data on.
# @param dataset A dataset.
def DataSplit(split, dataset):
    """Split ``dataset`` in two at the fraction ``split``.

    Returns a (tail, head) pair of ``torch.utils.data.Subset`` objects:
    ``head`` holds the first ``split`` fraction of the samples and
    ``tail`` holds the remainder.
    """
    total = len(dataset)
    # Index at which the dataset is cut in two.
    cut = int(np.floor(split * total))
    all_indices = list(range(total))
    head = torch.utils.data.Subset(dataset, all_indices[:cut])
    tail = torch.utils.data.Subset(dataset, all_indices[cut:])
    return tail, head
# Select the GPU when available; tensors are moved to `device` during training.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
print('Using PyTorch version:', torch.__version__, ' Device:', device)
batch_size = 32
# For gray scaling.
transform = transforms.Compose([
    transforms.Grayscale(),
    transforms.ToTensor()
])
print("Loading data set.")
train_dataset = datasets.CIFAR10('./data',
                                 train=True,
                                 download=True,
                                 transform=transform)
test_dataset = datasets.CIFAR10('./data',
                                train=False,
                                download=True,
                                transform=transform)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size = batch_size,
                                          shuffle=True)
print("Generating validation sample")
# DataSplit returns (tail, head): the last 20% of the training set becomes
# the validation sample, the first 80% the remaining training pool.
validation_sampler, train_sampler = DataSplit(0.8, train_dataset)
validation_loader = torch.utils.data.DataLoader(dataset=validation_sampler,
                                                batch_size = batch_size,
                                                shuffle=True)
print("Generating 5 testing samples.")
training_data = []
# Frequency of the split to produce 4 even sets.
split =[0.75, 0.66667, 0.5]
print(len(train_sampler))
# Successive splits carve the training pool into four equal-sized subsets.
for i in range(0, 3):
    subset, train_sampler = DataSplit(split[i], train_sampler)
    training_data.append(subset)
training_data.append(train_sampler)
for i, elem in enumerate(training_data):
    print("traning set ", i + 1 ," size: ", len(elem))
train_loader = []
for elem in training_data:
    tmp = torch.utils.data.DataLoader(dataset=elem,
                                      batch_size = batch_size,
                                      shuffle=True)
    train_loader.append(tmp)
print("\n")
# Peek at one batch from each loader to confirm tensor shapes and dtypes.
for i, loader in enumerate(train_loader):
    print("Dataset: ", i + 1)
    for (X_train, y_train) in loader:
        print('X_train:', X_train.size(), 'type:', X_train.type())
        print('y_train:', y_train.size(), 'type:', y_train.type())
        print("\n")
        break
class Net(nn.Module):
    """Two-layer MLP for 32x32 grayscale images: 1024 -> 100 -> 10.

    Emits per-class log-probabilities via log_softmax.  The attribute
    names (``fc1``, ``fc1_drop``, ``fc3``) are part of the public
    surface: ``Generate`` replaces ``fc1_drop`` to sweep dropout.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(32 * 32, 100)
        self.fc1_drop = nn.Dropout(.25)
        self.fc3 = nn.Linear(100, 10)

    def forward(self, x):
        # Flatten each image, apply the hidden layer with ReLU + dropout,
        # then score the 10 classes in log space.
        flat = x.view(-1, 32 * 32)
        hidden = self.fc1_drop(F.relu(self.fc1(flat)))
        return F.log_softmax(self.fc3(hidden), dim=1)
# Module-level training objects shared by train()/validate()/Generate().
model = Net().to(device)
lr = .01
optimizer = torch.optim.SGD(model.parameters(), lr, momentum=0.5, weight_decay=0)
criterion = nn.CrossEntropyLoss()
print(model)
def train(losst, acct, epoch, loader, log_interval=200):
    """Run one training epoch over ``loader``.

    Appends the epoch's mean per-batch loss to ``losst``.  Relies on the
    module-level ``model``, ``device``, ``optimizer`` and ``criterion``.
    ``acct`` is never written here — presumably kept for signature
    symmetry with validate(); confirm before removing.
    """
    # Set model to training mode
    model.train()
    train_loss = 0
    # Loop over each batch from the training set
    for batch_idx, (data, target) in enumerate(loader):
        # Copy data to GPU if needed
        data = data.to(device)
        target = target.to(device)
        # Zero gradient buffers
        optimizer.zero_grad()
        # Pass data through the network
        output = model(data)
        # Calculate loss
        loss = criterion(output, target)
        train_loss += loss.data.item()
        # Backpropagate
        loss.backward()
        # Update weights
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx / len(loader), loss.data.item()))
    # Average the accumulated loss over the number of batches.
    train_loss /= len(loader)
    losst.append(train_loss)
def validate(loss_vector, accuracy_vector, loader):
    """Evaluate the module-level ``model`` on ``loader``.

    Appends the mean per-batch loss to ``loss_vector`` and the accuracy
    percentage (a tensor) to ``accuracy_vector``, and prints a summary.
    """
    model.eval()
    val_loss, correct = 0, 0
    for data, target in loader:
        data = data.to(device)
        target = target.to(device)
        output = model(data)
        val_loss += criterion(output, target).data.item()
        pred = output.data.max(1)[1] # get the index of the max log-probability
        correct += pred.eq(target.data).cpu().sum()
    val_loss /= len(loader)
    loss_vector.append(val_loss)
    accuracy = 100. * correct.to(torch.float32) / len(loader.dataset)
    accuracy_vector.append(accuracy)
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        val_loss, correct, len(loader.dataset), accuracy))
def Generate(lossv, accv, losst, acct, epochs, d=0.2, m = 0.5, wd=0):
    """Train the global model with the given hyper-parameters, then test it.

    ``d``, ``m`` and ``wd`` are the dropout probability, SGD momentum and
    weight decay.  Trains ``epochs`` epochs on each loader in the global
    ``train_loader`` list, validating after every epoch, then evaluates
    once on the test set and returns the one-element test-accuracy list.
    """
    # BUG FIX: the previous revision bound the new SGD optimizer to a
    # *local* name, so train() kept stepping the module-level optimizer
    # and the requested momentum/weight decay were silently ignored.
    global optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr, momentum=m, weight_decay=wd)
    model.fc1_drop = nn.Dropout(d)
    test_loss = []
    test_acc = []
    print("Dropout is: ", d)
    print("Momentum is:", m)
    print("Weight Decay:", wd)
    for i, dataset in enumerate(train_loader):
        print("Using dataset ", i + 1)
        for epoch in range(1, epochs + 1):
            train(losst, acct, epoch, dataset)
            validate(lossv, accv, validation_loader)
    validate(test_loss, test_acc, test_loader)
    return test_acc
epochs = 5
StateStack = []
# Hyper-parameter sweeps: one Generate() run per value.
dropout_list = [.75, 0.5, 0.25, 0.125]
momentum_list = [1, 0.5, 0.25, 0.1]
weight_decay_list = [0.6, .3, .2, .1]
lossv, accv = [], []
losst, acct = [], []
print("sys.argv[1] = ", sys.argv[1])
# First run uses dropout/momentum/weight-decay taken from the command line.
x = Generate(lossv, accv, losst, acct, epochs, float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3]))
# Temp test plot.
plt.subplot(3,1,1)
#plt.figure(figsize=(5,3))
plt.plot(np.arange(1,(epochs*4)+1), lossv, 'b-')
plt.ylabel('test accuracy')
plt.xlabel("epoch")
# Sweep dropout with the other hyper-parameters at their defaults.
for d in dropout_list:
    x = Generate(lossv, accv, losst, acct, epochs, d)
    StateStack.append(x)
# Temp test plot.
plt.subplot(3,1,1)
#plt.figure(figsize=(5,3))
plt.plot(dropout_list, StateStack, 'b-')
plt.ylabel('test accuracy')
plt.xlabel("dropout")
StateStack.clear() #Empty the list containing accuracy
lossv.clear()
accv.clear()
losst.clear()
acct.clear()
# Sweep momentum.
for m in momentum_list:
    x = Generate(lossv, accv, losst, acct, epochs, m=m)
    StateStack.append(x)
# Temp test plot.
# NOTE(review): the x-axis below reuses dropout_list for the momentum and
# weight-decay sweeps — likely should be momentum_list / weight_decay_list.
plt.subplot(3,1,2)
#plt.figure(figsize=(5,3))
plt.plot(dropout_list, StateStack, 'b-')
plt.ylabel('test accuracy')
plt.xlabel("momentum")
StateStack.clear() #Empty the list containing accuracy
lossv.clear()
accv.clear()
losst.clear()
acct.clear()
# Sweep weight decay.
for wd in weight_decay_list:
    x = Generate(lossv, accv, losst, acct, epochs, wd=wd)
    StateStack.append(x)
# Temp test plot.
plt.subplot(3,1,3)
#plt.figure(figsize=(5,3))
plt.plot(dropout_list, StateStack, 'b-')
plt.ylabel('test accuracy')
plt.xlabel("weight decay")
plt.subplots_adjust(hspace=0.5)
try:
    plt.show()
except:
    print("Cannot show graph")
print("saving graph in p1")
plt.savefig('p1.png')
| true |
a4e6f5bf76bdb62d37a7dbdba7fcf936a6292ba0 | Python | PRemmen/TEASER | /teaser/logic/buildingobjects/buildingphysics/material.py | UTF-8 | 10,025 | 2.71875 | 3 | [
"MIT"
] | permissive | # created June 2015
# by TEASER4 Development Team
import re
import uuid
import teaser.data.input.material_input_json as material_input
import teaser.data.output.material_output as material_output
class Material(object):
    """Material of a building element layer.

    This class holds the physical properties of a material used for a
    building element layer.

    Parameters
    ----------
    parent : Layer
        The parent class of this object, the layer the material
        belongs to. Allows for better control of hierarchical structures.
        If not None this adds this Material to Layer.material.
        Default is None

    Attributes
    ----------
    name : str
        Name of material (sanitised to ASCII letters and digits)
    density : float [kg/m3]
        Density of material
    thermal_conduc : float [W/(m*K)]
        Thermal conductivity of material
    heat_capac : float [kJ/(kg*K)]
        Specific heat capacity of material
    solar_absorp : float [-]
        Coefficient of absorption of solar short wave
    ir_emissivity : float [-]
        Coefficient of longwave emissivity of material
    transmittance : float [-]
        Coefficient of transmittance of material
    thickness_default : float [m]
        Default value for material thickness
    thickness_list : list
        List of usual values for material thickness, float [m]
    material_id : str(uuid)
        UUID of material, this is used to have similar behaviour like
        foreign key in SQL data bases for use in TypeBuildingElements
        and Material json
    """

    def __init__(self, parent=None):
        """Constructor of Material."""
        self.parent = parent
        self._name = ""
        self._density = 0.0
        self._thermal_conduc = 0.0
        self._heat_capac = 0.0
        self._solar_absorp = 0.0
        if parent is not None:
            # Windows get no default short-wave absorption; every other
            # element type defaults to 0.7.
            if type(self.parent.parent).__name__ != "Window":
                self._solar_absorp = 0.7
        self._ir_emissivity = 0.9
        self._transmittance = 0.0
        self._thickness_default = 0.0
        self._thickness_list = []
        self.material_id = str(uuid.uuid1())

    def load_material_template(self, mat_name, data_class=None):
        """Material loader.

        Loads the Material specified in the json.

        Parameters
        ----------
        mat_name : str
            Code list entry of the material to load.
        data_class : DataClass()
            DataClass containing the bindings for TypeBuildingElement and
            Material. Defaults to the data class of the project this
            material belongs to (self.parent.parent.parent.parent.data).
        """
        if data_class is None:
            data_class = self.parent.parent.parent.parent.data
        material_input.load_material(material=self,
                                     mat_name=mat_name,
                                     data_class=data_class)

    def save_material_template(self, data_class):
        """Material saver.

        Saves the Material in the json.

        Parameters
        ----------
        data_class : DataClass()
            DataClass containing the bindings for TypeBuildingElement and
            Material. Defaults to the data class of the project this
            material belongs to when None is passed.
        """
        if data_class is None:
            data_class = self.parent.parent.parent.parent.data
        material_output.save_material(material=self, data_class=data_class)

    def modify_material_template(self, data_class):
        """Material modifier.

        Modifies the Material specified in the json.

        Parameters
        ----------
        data_class : DataClass()
            DataClass containing the bindings for TypeBuildingElement and
            Material. Defaults to the data class of the project this
            material belongs to when None is passed.
        """
        if data_class is None:
            data_class = self.parent.parent.parent.parent.data
        material_output.modify_material(material=self, data_class=data_class)

    @property
    def material_id(self):
        return self.__material_id

    @material_id.setter
    def material_id(self, value):
        self.__material_id = value

    @property
    def parent(self):
        return self.__parent

    @parent.setter
    def parent(self, value):
        if value is not None:
            ass_error_1 = "Parent has to be an instance of a layer"
            assert type(value).__name__ == "Layer", ass_error_1
            self.__parent = value
            # Register this material on its layer (foreign-key style link).
            self.__parent.material = self
        else:
            self.__parent = None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        try:
            text = value if isinstance(value, str) else str(value)
        except ValueError:
            print("Can't convert name to string")
            return
        # Keep ASCII letters and digits only. The previous pattern
        # '[^a-zA-z0-9]' used the range 'A-z', which wrongly admitted
        # '[', '\\', ']', '^', '_' and '`'.
        self._name = re.sub('[^a-zA-Z0-9]', '', text)

    @property
    def thermal_conduc(self):
        return self._thermal_conduc

    @thermal_conduc.setter
    def thermal_conduc(self, value):
        if value is None:
            return
        try:
            value = float(value)
        except (TypeError, ValueError):
            raise ValueError("Can't convert thermal conduction to float")
        self._thermal_conduc = value
        # A changed conductivity invalidates the element's U-value; only
        # recompute when the element has everything the calculation needs.
        if self.parent is not None and self.parent.parent is not None:
            element = self.parent.parent
            if (self.parent.thickness is not None and
                    element.inner_convection is not None and
                    element.inner_radiation is not None and
                    element.area is not None):
                element.calc_ua_value()

    @property
    def density(self):
        return self._density

    @density.setter
    def density(self, value):
        if value is None or isinstance(value, float):
            self._density = value
        else:
            try:
                self._density = float(value)
            except (TypeError, ValueError):
                raise ValueError("Can't convert density to float")

    @property
    def heat_capac(self):
        return self._heat_capac

    @heat_capac.setter
    def heat_capac(self, value):
        if value is None or isinstance(value, float):
            self._heat_capac = value
        else:
            try:
                self._heat_capac = float(value)
            except (TypeError, ValueError):
                raise ValueError("Can't convert heat capacity to float")

    @property
    def solar_absorp(self):
        return self._solar_absorp

    @solar_absorp.setter
    def solar_absorp(self, value):
        if value is None:
            # Fall back to the generic opaque-surface default.
            self._solar_absorp = 0.7
        elif isinstance(value, float):
            self._solar_absorp = value
        else:
            try:
                self._solar_absorp = float(value)
            except (TypeError, ValueError):
                raise ValueError("Can't convert solar absorption to float")

    @property
    def ir_emissivity(self):
        return self._ir_emissivity

    @ir_emissivity.setter
    def ir_emissivity(self, value):
        if value is None:
            # Fall back to the default long-wave emissivity.
            self._ir_emissivity = 0.9
        elif isinstance(value, float):
            self._ir_emissivity = value
        else:
            try:
                self._ir_emissivity = float(value)
            except (TypeError, ValueError):
                raise ValueError("Can't convert emissivity to float")

    @property
    def transmittance(self):
        return self._transmittance

    @transmittance.setter
    def transmittance(self, value):
        if value is None or isinstance(value, float):
            self._transmittance = value
        else:
            try:
                self._transmittance = float(value)
            except (TypeError, ValueError):
                raise ValueError("Can't convert transmittance to float")

    @property
    def thickness_default(self):
        return self._thickness_default

    @thickness_default.setter
    def thickness_default(self, value):
        if value is None:
            return
        try:
            # The previous revision converted the value but never stored
            # it, so non-float inputs were silently dropped.
            self._thickness_default = float(value)
        except (TypeError, ValueError):
            raise ValueError("Can't convert thickness to float")

    @property
    def thickness_list(self):
        return self._thickness_list

    @thickness_list.setter
    def thickness_list(self, value):
        if value is None:
            self._thickness_list = []
            return
        converted = []
        for entry in value:
            try:
                # The previous revision called float() on the whole list
                # instead of each entry, so any non-float entry raised.
                converted.append(float(entry))
            except (TypeError, ValueError):
                raise ValueError(
                    "Can't convert entry of thickness_list to float")
        self._thickness_list = converted
| true |
302805be28ba41e7973346d7dc58fc8543fe776f | Python | skreynolds/uta_cse_1309x | /class_work_problems/find_gcd.py | UTF-8 | 238 | 2.890625 | 3 | [] | no_license | def find_gcd(some_list):
    def divisor(number, L):
        # True when `number` divides every element of L without remainder.
        for e in L:
            if e%number:
                return False
        return True
    # Despite the name, this returns ALL common divisors of the elements,
    # from 1 up to the smallest element (the last entry is the GCD).
    return [num for num in range(1,min(some_list)+1) if divisor(num, some_list)]
| true |
5c7c9493d4a808fd24740ca78c1e7b5f271d79c1 | Python | soldier9527/python | /0523/dict.py | UTF-8 | 221 | 3.25 | 3 | [] | no_license | pdict = {
'Michael':95,
'Bob':70,
'Lucy':100
}
# Lookup demos: direct indexing, membership test, and .get() with defaults.
print(pdict)
print(pdict['Bob'])
# print(pdict['haha'])  # commented out: would raise KeyError for a missing key
print('haha' in pdict)
print(pdict.get('haha'))      # .get() returns None for a missing key
print(pdict.get('haha2',-1))  # .get() returns the fallback -1 here
print(pdict.get('Bob',-1)) | true |
0a759cc33b5b28df9670f107ceb36d3adee45c5a | Python | ManasaPola/Coding-Exercise | /UnivalSubtrees.py | UTF-8 | 1,336 | 3.765625 | 4 | [] | no_license | class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def insert(self, data):
# Compare the new value with the parent node
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
elif data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
def PrintTree(self):
if self.left:
self.left.PrintTree()
print(self.data),
if self.right:
self.right.PrintTree()
def main():
root = Node(0)
root.insert(1)
root.insert(0)
root.insert(1)
root.insert(1)
root.insert(0)
root.PrintTree()
# print(dfs(root))
def dfs(root) -> int:
count = 0
if root:
dfs(root.left)
if root.left is None and root.right is None:
count += 1
elif root.val == root.left.val and root.val == root.right.val:
count += 1
dfs(root.right)
return count
if __name__ == "__main__":
main() | true |
c22d6abbbfad189a1523eae8ec663a181b69b1f0 | Python | ardeshireshghi/python-algo | /factorial.py | UTF-8 | 894 | 3.953125 | 4 | [] | no_license | import re
def pow(number, pwr):
if pwr == 1:
return number
return number * pow(number, pwr - 1)
def factorial(number):
if number <= 1:
return 1
return number * factorial(number - 1)
def max_items(items):
if len(items) == 2:
return items[0] if items[0] > items[1] else items[1]
other_items_max = max_items(items[1:])
return other_items_max if other_items_max > items[0] else items[0]
def find_primes(number):
result = []
for i in range(2, number + 1):
if all(i % d > 0 for d in range(2, i)):
result.append(i)
return result
# print(find_primes(20))
# print(pow(2, 4))
# print(factorial(100))
# print(max_items([1,2,3,6,20,10, 56, 100, 7]))
def is_palindrome(value):
striped_value = re.sub(r'\W+', '', value.lower())
return striped_value[::-1] == striped_value
print(is_palindrome('level'))
| true |
490c0ce4caff9c7d3b269dcf252a38e3a8257af2 | Python | Meaha7/dsa | /arrays/leetcode/product-of-array-except-self-238.py | UTF-8 | 1,154 | 3.390625 | 3 | [] | no_license | # T=n,S=1
def x(nums):
n = len(nums)
zeros = zeroIndex = 0
product = 1
for i, val in enumerate(nums):
if val == 0:
zeros += 1
zeroIndex = i
if zeros > 1:
return [0] * n
else:
product *= val
res = [0] * n
if zeros == 1:
res[zeroIndex] = product
return res
return [product // val for val in nums]
# T=n,S=n
def y(nums):
n = len(nums)
res, left, right = [1] * n, [1] * n, [1] * n
for i in range(1, n):
left[i] = left[i - 1] * nums[i - 1]
for i in range(n - 2, -1, -1):
right[i] = right[i + 1] * nums[i + 1]
for i in range(n):
res[i] = left[i] * right[i]
return res
# T=n,S=1
def z(nums):
n = len(nums)
res = [1] * n
for i in range(1, n):
res[i] = res[i - 1] * nums[i - 1]
right = 1
for i in range(n - 1, -1, -1):
res[i] *= right
right *= nums[i]
return res
for nums in [
[1, 2, 3, 0, 5, 6, 0],
[-1, 1, 0, -3, 3],
[4, 5, 1, 8, 2],
[1, 2, 3, 4]
]:
print(x(nums))
print(y(nums))
print(z(nums))
print()
| true |
876949b5dcb4bd3c9d7fdcc81098d55881f8ef58 | Python | DejaVuDi/ECE_276B | /HW3/Q3_SARSA.py | UTF-8 | 3,488 | 2.65625 | 3 | [] | no_license | import gym
import numpy as np
import itertools
import pandas as pd
import pickle
from collections import defaultdict,namedtuple
from matplotlib import pyplot as plt
# reference: https://learning.oreilly.com/library/view/reinforcement-learning-with/9781788835725/ffd21cf7-d907-45e6-a897-8762c9a20f2d.xhtml
# reference: https://github.com/dennybritz/reinforcement-learning
class Planner:
def __init__(self):
self.lr = 1
self.discount_factor = 0.99
self.episode = 200000
self.eps = 0.05
self.n_states = 40
self.min_lr = 0.005
self.alpha = 0.5
def policy(self,env,Q,state):
A = np.ones(env.action_space.n, dtype=float) * self.eps / env.action_space.n
action = np.argmax(Q[tuple(state)])
A[action] += (1-self.eps)
return A
def discretized_state(self,env,obs):
state = np.array([0,0])
env_low = env.observation_space.low
env_high = env.observation_space.high
env_dx = (env_high-env_low)/self.n_states
state[0] = int((obs[0] - env_low[0])/env_dx[0])
state[1] = int((obs[1] - env_low[1])/env_dx[1])
return state
def rollout(self, env, policy=None, render=False):
Q = defaultdict(lambda: np.zeros(env.action_space.n))
stats = []
best_reward = np.float('-inf')
best_episode = 0
best_stats = []
for i_episode in range(self.episode):
traj = []
t = 0
done = False
total_reward = 0
alpha = max(self.min_lr,self.lr*(self.discount_factor**(i_episode//100)))
c_state = env.reset()
state = self.discretized_state(env,c_state)
for j in itertools.count():
action_prob = self.policy(env,Q,state)
action = np.random.choice(np.arange(len(action_prob)), p=action_prob)
# if render:
# env.render()
n_state, reward, done, _ = env.step(action)
n_state = self.discretized_state(env,n_state)
traj.append((state, action, reward))
total_reward += reward
state = n_state
if done:
stats.append((i_episode,total_reward))
if best_reward < total_reward:
best_reward = total_reward
best_episode = i_episode
best_stats.append((best_episode,best_reward))
break
for i in range(len(traj)-1):
state = traj[i][0]
action = traj[i][1]
n_state = traj[i+1][0]
n_action = traj[i+1][1]
reward = traj[i][2]
td_delta = reward + self.discount_factor * Q[tuple(n_state)][n_action] - Q[tuple(state)][action]
Q[tuple(state)][action] += alpha * td_delta
if i_episode%5000 == 0:
print(i_episode)
if total_reward != -200:
print("Episode {} completed with total reward {} with alpha {}".format(i_episode,total_reward,alpha))
env.close()
# self.plot_stats(stats)
return traj,stats,best_stats
env = gym.make('MountainCar-v0')
env.seed(0)
np.random.seed(0)
planner = Planner()
traj,stats,best_stats = planner.rollout(env, policy=np.random.choice(env.action_space.n)) | true |
fc7e56fe3d3ca844fbffe2eb831fe28a19b3a4dc | Python | afterloe/raspberry-auto | /gpio/py/clb-ai.py | UTF-8 | 943 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding=utf-8
import RPi.GPIO as GPIO
import time
R, G = 5, 6
Buzzer = 17
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(R, GPIO.OUT)
GPIO.setup(G, GPIO.OUT)
GPIO.setup(Buzzer, GPIO.OUT)
global Buzz
Buzz = GPIO.PWM(Buzzer, 440)
Buzz.start(50)
pwmR = GPIO.PWM(R, 70)
pwmG = GPIO.PWM(G, 70)
pwmR.start(0)
pwmG.start(0)
try:
t = 0.01
while True:
for i in range(0, 71):
pwmG.ChangeDutyCycle(70)
Buzz.ChangeFrequency(500 - i)
pwmR.ChangeDutyCycle(70 - i)
print(i)
time.sleep(t)
for i in range(70, -1, -1):
pwmG.ChangeDutyCycle(0)
Buzz.ChangeFrequency(500 + i)
pwmR.ChangeDutyCycle(70 - i)
print(i - 1000)
time.sleep(t)
except KeyboardInterrupt:
Buzz.ChangeFrequency(0)
pwmR.stop()
pwmG.stop()
GPIO.cleanup()
| true |
9c658db3be57d263b572bdd019d11ded4fdc3fea | Python | OpenMeasurement/virtual_ID_research | /datagen.py | UTF-8 | 6,253 | 2.546875 | 3 | [
"MIT"
] | permissive | # MIT License
# Copyright (c) 2020 PrivacyAmp
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_colwidth', 1000)
from numpy.random import random
import pyspark.sql.functions as F
import pyspark.sql.types as spark_types
from pyspark.sql import Window, SparkSession
from pyspark import StorageLevel
from pyspark.sql.types import ArrayType, IntegerType, StringType, DoubleType
###################################################################
### Functions generating synthetic data based on a census table ###
###################################################################
# Inverse probability distribution transformation
@F.udf(returnType=ArrayType(DoubleType()))
def udf_rates(m, population, alpha) :
return (m * ((1- np.arange(0, 1, 1/population)) ** (-1/alpha) - 1)).tolist()
@F.udf(returnType=DoubleType())
def udf_sum(rates) :
return np.sum(np.array(rates, dtype=float))
# Function to make a random choice of impressions based on rate
@F.udf(returnType=IntegerType())
def udf_impressions(rate_impressions):
r = np.random.random()
n = int(rate_impressions // 1)
rem = rate_impressions % 1
if r > rem :
return n
else :
return n + 1
@F.udf(returnType=ArrayType(IntegerType()))
def udf_ts_list(start, end, n):
return np.random.randint(start, end, n).tolist()
def gen_people_table_udf(df_census, demo_cols, m=1.0, alpha=20) :
"""
Starting from a census file, generate the population and assign rates according
to the power-law or Lomax (Pareto type II) distribution).
-- version using a udf function --
Returns a table of people with demographics and rate of impression generation.
"""
population_total = df_census.agg(F.sum("population")).collect()[0][0]
df_people = (
df_census
.withColumn("ratio", F.col("population")/F.lit(population_total))
.withColumn("m", F.lit(m))
.withColumn("alpha", F.lit(alpha))
.withColumn("rates", udf_rates(F.col("m"), F.col("population"), F.col("alpha")))
.withColumn("rate", F.explode(F.col("rates")))
.withColumn("user_id", F.row_number().over(Window.orderBy(*demo_cols, F.col("rate"))))
.select(
"user_id",
*demo_cols,
"population",
"ratio",
"rate")
)
return df_people
def gen_people_table(df_census, demo_cols, m=1.0, alpha=20) :
"""
Starting from a census file, generate the population and assign rates according
to the power-law or Lomax (Pareto type II) distribution).
Returns a table of people with demographics and rate of impression generation.
"""
population_total = df_census.agg(F.sum("population")).collect()[0][0]
max_population = df_census.agg(F.max("population")).collect()[0][0]
spark = SparkSession.builder.getOrCreate()
spark_range = spark.range(max_population - 1)
df_people = (
df_census
.crossJoin(F.broadcast(spark_range))
.where("id < population")
.withColumn("ratio", F.col("population")/F.lit(population_total))
.withColumn("m", F.lit(m))
.withColumn("alpha", F.lit(alpha))
.withColumn("rate", F.col("m") * (
(F.lit(1) - F.col("id") / F.col("population"))**(-F.lit(1)/F.col("alpha")) - F.lit(1))
)
.withColumn("user_id", F.row_number().over(Window.orderBy(*demo_cols, F.col("rate"))))
.select(
"user_id",
*demo_cols,
"population",
"ratio",
"rate")
)
return df_people
def add_n_impressions(df_people, I, population_total) :
"""
Assign a certain number of impressions for each person based on their rates.
Note that this is a probabilistic process.
"""
sum_rates = df_people.agg(F.sum("rate")).collect()[0][0]
df = (
df_people
.withColumn("rate", F.col("rate") * F.lit(population_total/sum_rates))
.withColumn("rate_impressions", F.col("rate") * I/population_total)
.withColumn("n_impressions", udf_impressions(F.col("rate_impressions")))
.cache()
.where("n_impressions > 0")
)
return df
def gen_synthetic_impressions(df_people_n, start_ts, end_ts, demo_cols) :
"""
Given a `df_people_n` dataframe of people with a column `n_impressions`
that indicates the total number of impressions for that person, generate
and impression table and randomly assign the timestamps.
"""
df_impressions = (
df_people_n
.withColumn("timestamp_list",
udf_ts_list(
F.unix_timestamp(F.lit(start_ts)),
F.unix_timestamp(F.lit(end_ts)),
F.col("n_impressions"))
)
.withColumn("timestamp_int", F.explode(F.col("timestamp_list")))
.withColumn("timestamp", F.from_unixtime(F.col("timestamp_int")))
.select(
"user_id",
*demo_cols,
"timestamp_int",
"timestamp"
)
.sort("timestamp")
)
return df_impressions | true |
291517c451ddde7000f8c72d06bea861e1f43358 | Python | Adwi11/mask-detection | /Maskdetec.py | UTF-8 | 5,353 | 2.71875 | 3 | [] | no_license | import cv2
import os
import numpy as np
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Activation,Flatten,Dropout,Conv2D,MaxPooling2D
import sklearn.model_selection
import pickle
from keras.callbacks import ModelCheckpoint
from sklearn.preprocessing import LabelBinarizer
import tensorflow
from keras.utils import to_categorical
data_path='C://Users//Adwit//Desktop//Projectdata'
categories=os.listdir(data_path) #putting the two categories in a list known as categories
labels=[i for i in range(len(categories))] #putting 0,1 in a list labels to show the position of file and it will be the key in dict
label_dict=dict(zip(categories,labels)) #putting the two lists together with zip and creatiing a key value scenario to make a dict
img_size=100
data=[]
target=[]
for category in categories:
folder_path = os.path.join(data_path, category) #mostly goes into the file project date and in one iteration opens the file inside it and next iteration the next one
img_names=os.listdir(folder_path)
for img_name in img_names:
img_path=os.path.join(folder_path,img_name)
img=cv2.imread(img_path) #this is basically accessing each image in each folder of the main'Projectdata'
try:
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
resized=cv2.resize(gray,(img_size,img_size))
data.append(resized)
target.append(label_dict[category]) #appends 0 or 1 according to which image it is with or without (as the value in dict is 0 or 1 and key is with mask or without mask)
except Exception as e:
print("Exception occured:",e) #we put try and cath sincr sometimes computer is unable to read images
data=np.array(data)/255 #making the list into an array and dividing every pixel by 255 to make computation easier
data=np.reshape(data,(data.shape[0],img_size,img_size,1)) #we make the img into 4 dimensional array since cnn only accepts that,1 is there since the img is gray
target=np.array(target)
#new_target=np_utils.to_categorical(target) #since the last layer of our cnn will be 2 neurons and wwill choose one category hence our target is categorical
np.save('data',data) #saving
#np.save('target',new_target)
lb = LabelBinarizer()
target = lb.fit_transform(target)
target = to_categorical(target)
'''data_train, data_test, target_train, target_test = sklearn.model_selection.train_test_split(data, target,test_size=0.1) # splits 10% of data away
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=data.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(50, activation="relu"))
model.add(Dense(2, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
checkpoint= ModelCheckpoint('model-{epoch:03d}.model',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')
history=model.fit(data_train,target_train,epochs=20,callbacks=[checkpoint],validation_split=0.2)
results=model.evaluate(data_test,target_test)
model.save("model.h5")'''
model= keras.models.load_model("model.h5")
video= cv2.VideoCapture("C://Users//Adwit//Desktop//Test4.mp4 ")
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
color_dict={0:(0,255,0),1:(0,0,255)}
while True:
check,frame=video.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # to make the frame captured gray
gray = cv2.GaussianBlur(gray, (21, 21), 0) # to make the motion detection more efficient
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=7)
for x,y,w,h in faces:
region_o_inter=gray[y:y+w,x:x+w] #now to send our region of interest which is the face into the model cnn
resized=cv2.resize(region_o_inter,(100,100))
normalized=resized/255
reshaped=np.reshape(normalized,(1,100,100,1))
result=model.predict(reshaped)
print(result)
label=np.argmax(result,axis=1)[0]
print(color_dict[label])
cv2.rectangle(frame,(x,y),(x+w,y+h),color_dict[label],2) #main rectangle around face
cv2.rectangle(frame,(x,y-40),(x+w,y),color_dict[label],-1) #for the triangle in which we write mask or without mask
#cv2.putText(frame,label_dict[label],(x,y-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2)
frame2=frame[y:y+w,x:x+w]
cv2.imshow("tst", frame2)
cv2.imshow("main",frame)
key = cv2.waitKey(1) # every 1 millisecond it switches to a new frame , waitKey(0)is used to close the current frame at the moment user presses a key
if key == ord('q'):
break
cv2.destroyAllWindows()
| true |
353c45e560be88b2f4999899221d2488390f8a3b | Python | Karthik777/ImageClassifier | /ClassifyImage.py | UTF-8 | 993 | 2.546875 | 3 | [] | no_license | __author__ = 'srlaxminaarayanan'
import io
import os
from PIL import Image
from connection import ClassifyImages as CI
import glob
path = '/Users/srlaxminaarayanan/Pictures'
imagePath = '/Users/srlaxminaarayanan/Pictures/images'
def ProcessResult(imagepath, result):
if(os.path.exists("Output.txt")):
with open("Output.txt", "a") as text_file:
text_file.write("\n file: %s \n classification: \n %s \n" % (imagepath, ', '.join(result)))
else:
with open("Output.txt", "w+") as text_file:
text_file.write("file: %s \n classification: \n %s" % (imagepath, ', '.join(result)))
def ClassifyImages():
for file in glob.iglob(os.path.join(imagePath, "*.tif")):
if file != '.DS_Store':
imagepath = os.path.join(imagePath,file)
image = Image.open(imagepath)
classify_images= CI()
ProcessResult(imagepath,classify_images.classify_image(image))
if __name__ == '__main__':
ClassifyImages() | true |
f3a27a0c1bcc6f08d7e2d039ad1249f1db8af718 | Python | ynntech/PracticeofDL | /perceptron.py | UTF-8 | 406 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#単純なパーセプトロン
import numpy as np
np.random.seed(0)
w = np.random.normal(0., 1, (3))
xs = np.array([[0,0], [0,1], [1,0], [1,1]], dtype=np.float32)
ts = np.array([[-1], [-1], [-1], [1]], dtype=np.float32)
print("weight>>",w)
_xs = np.hstack([xs,[[1] for _ in range(4) ]])
for i in range(4):
ys = np.dot(w,_xs[i])
print ("in >>", _xs[i], "y >>", ys)
| true |
51213b5465cde0c400c2e5b18f5e33da75353af3 | Python | mason-fish/holberton_projects | /airbnb_clone/api/app/views/review.py | UTF-8 | 5,308 | 2.59375 | 3 | [] | no_license | from flask import jsonify
from flask_json import request
from app.models.user import User
from app.models.review import Review
from app.models.place import Place
from app.models.review_user import ReviewUser
from app.models.review_place import ReviewPlace
from app import app
from return_styles import ListStyles
@app.route('/reviews', methods=['GET'], strict_slashes=False)
def all_reviews():
try:
query = Review.select()
return ListStyles.list(query, request), 200
except:
return jsonify({'code': 404, 'msg': "table does not yet exist in db"}), 404
'''
route /users/<user_id>/reviews:
GET: list of all reviews for a user (selected by user_id) => reviews received to the user
POST: create a new review from POST data parameters on the selected user (from user should be set by ID).
'''
@app.route('/users/<user_id>/reviews', methods=['GET', 'POST'], strict_slashes=False)
def user_reviews(user_id):
try:
User.select().where(User.id == user_id).get()
except User.DoesNotExist:
return jsonify({'code': 404, 'msg': "There is no user with this id."}), 404
if request.method == 'GET':
query = (Review
.select()
.join(ReviewUser)
.where(ReviewUser.user == user_id))
return ListStyles.list(query, request), 200
elif request.method == 'POST':
try:
if request.form['message'] is None or request.form['user'] is None:
return jsonify({'code': 404, 'msg': "Missing required data."}), 404
except KeyError:
return jsonify({'code': 400, 'msg': "Missing parameter."}), 400
try:
review = Review()
[setattr(review, key, value) for (key, value) in request.form.items() if key != "created_at" and key != "updated_at"]
review.save()
ReviewUser.create(user=user_id, review=review.id)
return jsonify(review.to_dict()), 201
except:
return jsonify({"code": 409, "msg": "user from data does not exist in db"}), 409
'''
route /users/<user_id>/reviews/<review_id>:
GET: review with id = review_id
DELETE: delete review with id = review_id
'''
@app.route('/users/<user_id>/reviews/<review_id>', methods=['GET', 'DELETE'], strict_slashes=False)
def user_reviews_id(user_id, review_id):
try:
query_one = ReviewUser.select().where(ReviewUser.user == user_id, ReviewUser.review == review_id).get()
except:
return jsonify({"code": 404, "msg": "record does not exist in db, check user id and review id"}), 404
if request.method == "GET":
query = Review.get(Review.id == review_id)
return jsonify(query.to_dict()), 200
elif request.method == "DELETE":
query_two = Review.get(Review.id == review_id)
query_one.delete_instance()
query_two.delete_instance()
return jsonify({"code": 200, "msg": "success"}), 200
'''
route /places/<place_id>/reviews:
GET: list of all reviews for a place (selected by place_id)
POST: create a new review from POST data parameters on the selected place (from user should be set by ID).
'''
@app.route('/places/<place_id>/reviews', methods=['GET', 'POST'], strict_slashes=False)
def place_reviews(place_id):
try:
Place.select().where(Place.id == place_id).get()
except Place.DoesNotExist:
return jsonify({'code': 404, 'msg': "There is no place with this id."}), 404
if request.method == 'GET':
query = (Review
.select()
.join(ReviewPlace)
.where(ReviewPlace.place == place_id))
return ListStyles.list(query, request), 200
elif request.method == 'POST':
try:
if request.form['message'] is None or request.form['user'] is None:
return jsonify({'code': 404, 'msg': "Missing required data."}), 404
except KeyError:
return jsonify({'code': 400, 'msg': "Missing parameter."}), 400
try:
review = Review()
[setattr(review, key, value) for (key, value) in request.form.items() if key != "created_at" and key != "updated_at"]
review.save()
ReviewPlace.create(place=place_id, review=review.id)
return jsonify(review.to_dict()), 201
except:
return jsonify({"code": 409, "msg": "error creating record, check input data"}), 409
'''
route /places/<place_id>/reviews/<review_id>:
GET: review with id = review_id
DELETE: delete review with id = review_id
'''
@app.route('/places/<place_id>/reviews/<review_id>', methods=['GET', 'DELETE'], strict_slashes=False)
def place_reviews_id(place_id, review_id):
try:
query_one = ReviewPlace.select().where(ReviewPlace.place == place_id, ReviewPlace.review == review_id).get()
except:
return jsonify({"code": 404, "msg": "record does not exist in db, check place id and review id"}), 404
if request.method == "GET":
query = Review.select().where(Review.id == review_id).get()
return jsonify(query.to_dict()), 200
elif request.method == "DELETE":
query_two = Review.get(Review.id == review_id)
query_one.delete_instance()
query_two.delete_instance()
return jsonify({"code": 200, "msg": "success"}), 200
| true |
6d5415d5b4b84d94a571913255991c62e88ac767 | Python | A-scale/hangman | /c.py | UTF-8 | 2,908 | 3.71875 | 4 | [] | no_license | class Card:
cards = [None, None, '2', '3', '4', '5', '6', '7', '8', '9', '10',
'Jack', 'Queen', 'King', 'Ace']
suits = ['hart', 'diamond', 'spade', 'crab']
def __init__ (self, number, suit):
self.number = number
self.suit = suit
def __lt__(self, other):
if self.number < other.number:
return True
elif self.number == other.number:
if self.suit < other.suit:
return True
else :
return False
else :
return False
def __gt__(self, other):
if self.number > other.number:
return True
elif self.number == other.number:
if self.suit > other.suit:
return True
else:
return False
else:
return False
def __repr__(self):
return '{} of {}'.format(self.cards[self.number], \
self.suits[self.suit])
from random import shuffle
class Deck:
def __init__(self):
self.yamahuda = []
for i in range (2, 15):
for j in range (4):
self.yamahuda.append(Card(i, j))
shuffle(self.yamahuda)
def rm_card(self):
if len (self.yamahuda) == 0:
return
return self.yamahuda.pop()
class Player:
def __init__(self, name):
self.name = name
self.wins = 0
class Game:
def __init__(self):
self.p1 = Player(input('Type your name:'))
self.p2 = Player(input('Type another name:'))
self.deck = Deck()
def game_start(self):
while True:
n = input('Type space. You want stop game, q:')
if n == 'q':
print('Game set')
break
elif n == ' ':
self.p1card = self.deck.rm_card()
self.p2card = self.deck.rm_card()
print('{} draw {}.\n{} draw {}'\
.format(self.p1.name, self.p1card, self.p2.name, \
self.p2card))
if self.p1card > self.p2card:
self.p1.wins += 1
print(self.p1.name, ' is win!')
elif self.p1card < self.p2card:
self.p2.wins += 1
print(self.p2.name, ' is win!')
message = '{} gets {} points.\n{} gets {} points.'\
.format(self.p1.name, self.p1.wins, \
self.p2.name, self.p2.wins)
if self.p1.wins == self.p2.wins:
print(message)
elif self.p1.wins > self.p2.wins:
print(message + '\n{} win!'.format(self.p1.name))
elif self.p1.wins < self.p2.wins:
print(message + '\n{} win!'.format(self.p2.name))
game1 = Game()
game1.game_start()
| true |
acbd84b6538ada2f20b2953e30b42c2862f7feda | Python | shrivanshh/LearningPython | /src/Tuples.py | UTF-8 | 367 | 4.15625 | 4 | [] | no_license | myTuples = (1, 2, 'three', 4, 5, 2, 3, 4, 5, 2, 2, 2, 2)
print myTuples
# Check Length
print len(myTuples)
print myTuples[2]
# Slicing is also supported
print myTuples[-1]
# Get index of
print myTuples.index('three')
# First index of first occurence will be returned
print myTuples.index(4)
# Count the number of times the value appears
print myTuples.count(2)
| true |
4c0a7b7a671fcc45573ecfaae304853faac73893 | Python | ialak/Digital-Speech-Recognizer | /0.3.7/src/mfccCooker.py | UTF-8 | 777 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | from MFCC import mfcc
from os.path import join,split
from glob import glob
from SignalProcessor import Signal
MAIN_DIR = '../'
WAVE_FOLDER = MAIN_DIR + 'wav/'
COOKED_FOLDER = WAVE_FOLDER + 'single-cooked/'
MFCC_DIR = MAIN_DIR + 'mfcc/single/'
def cook():
for wavfilename in glob(join(COOKED_FOLDER,'*.wav')):
print('Cooking ' + split(wavfilename)[1])
signal = Signal(wavfilename)
feat = mfcc(signal.data,signal.rate)
outname = MFCC_DIR + split(wavfilename)[1].replace('wav','txt')
out = open(outname,'w')
# out.write(str(len(feat))+' '+str(len(feat[0]))+'\n')
for i in range(len(feat)):
out.write(str(list(feat[i])).strip('[]').replace(' ','') + '\n')
out.close()
print('Done')
cook()
| true |
d4ee5fd1d9bc8194429b76e913d574274e0a5676 | Python | gvacaliuc/comp540-term-project | /deton8/main.py | UTF-8 | 5,765 | 2.703125 | 3 | [] | no_license | """
deton8 -- Detector of Nuclei
Main script to train and test the model.
"""
import argparse
import sys
import yaml
from skimage.morphology import disk
from sklearn.linear_model import PassiveAggressiveRegressor, SGDRegressor
from .features import BasisTransformer, extend_features
from .models import MiniBatchRegressor, UNet
from .processing import Preprocesser
from .utils import NucleiDataset
parser = argparse.ArgumentParser()
parser.add_argument(
"command",
choices=["train", "test", "generate_config"],
type=str,
help="""The command to run.
train: trains all models given a configuration
test: tests on a directory and builds a submission, saving it
to --csv if provided. If masks are present then will
also report score.
generate_config: generates a default training configuration."""
)
parser.add_argument(
"--config",
dest="config_file",
type=str,
default=None,
help="YAML configuration file to load if train/test/submit, else ")
parser.add_argument(
"--csv",
dest="submission_csv",
type=str,
default=None,
help="If 'submit' is called, the submission csv is saved here.")
def run(config, train=True):
"""
Trains our pipeline according to the configuration provided.
"""
train_dir = config["train_dir"]
val_dir = config["val_dir"]
print("Reading in data...")
train_data = NucleiDataset(train_dir).load()
val_data = NucleiDataset(val_dir).load()
x_train = train_data.images_
y_train = train_data.masks_ # value in 0, 1, ..., n
y_train_bin = (y_train > 0).astype(y_train.dtype) # value in {0, 1}
x_val = val_data.images_
y_val = val_data.masks_
y_val_bin = (y_val > 0).astype(y_val.dtype)
print("Preprocessing data...")
preprocesser = Preprocesser()
x_train_pre = preprocesser.fit_transform(x_train)
x_val_pre = preprocesser.transform(x_val)
bilateral_d = 2
bilateral_sigma_color = 75
bilateral_sigma_space = 75
equalize_hist_clip_limit = 0.03
dialation_kernel = disk(radius=3)
dialation_iters = 1
print("Transforming data...")
print(x_train_pre.min())
print(x_train_pre.max())
print(x_val_pre.min())
print(x_val_pre.max())
transformer = BasisTransformer(
bilateral_d=bilateral_d,
bilateral_sigma_color=bilateral_sigma_color,
bilateral_sigma_space=bilateral_sigma_space,
equalize_hist_clip_limit=equalize_hist_clip_limit,
dialation_kernel=dialation_kernel,
dialation_iters=dialation_iters)
x_train_feat = transformer.fit_transform(x_train_pre)
x_val_feat = transformer.fit_transform(x_val_pre)
sgd_params = {
"regressor":
SGDRegressor(
penalty='elasticnet', l1_ratio=0.11, max_iter=5, tol=None),
"batch_size":
1000,
"num_iters":
25000,
}
pa_params = {
"regressor": PassiveAggressiveRegressor(C=.2, max_iter=5, tol=None),
"batch_size": 1000,
"num_iters": 25000,
}
sgd = MiniBatchRegressor(**sgd_params)
pa = MiniBatchRegressor(**pa_params)
print("Fitting linear models...")
sgd.fit(x_train_feat, y_train_bin)
pa.fit(x_train_feat, y_train_bin)
x_train_extended = extend_features(x_train_feat, sgd, pa)
x_val_extended = extend_features(x_val_feat, sgd, pa)
# Now we train UNet
numchannels = x_train_extended.shape[-1]
unet_config = {
"numchannels": numchannels,
"epochs": 50,
"callbacks": [],
"weights": none
}
unet = UNet(**unet_config)
if unet_config["weights"] is not None:
unet.load_weights(unet_config["weights"])
print("Fitting UNet...")
unet.fit(x_train_extended, y_train_bin, x_val_extended, y_val_bin)
# begin inference and print out test scores
x_train_pred = unet.predict(x_train_extended)
x_val_pred = unet.predict(x_val_extended)
segmenter_params = {"nms_min_distance": 3, "watershed_line": True}
segmenter = NucleiSegmenter(**segmenter_params)
print("Segmenting nuclei...")
train_components = segmenter.fit_transform(x_train_pred, x_train_pre)
val_components = segmenter.fit_transform(x_val_pred, x_val_pre)
def get_default_config():
return {
"train_dir": "",
"val_dir": "",
"test_dir": "",
"feature_params": {
"bilateral_d": 2,
"bilateral_sigma_color": 75,
"bilateral_sigma_space": 75,
"equalize_hist_clip_limit": 0.03
},
"sgd_params": {
"regressor__penalty": "elasticnet",
"regressor__l1_ratio": 0.11,
"batch_size": 1000,
"num_iters": 25000
},
"pa_params": {
"regressor__C": 0.2,
"batch_size": 1000,
"num_iters": 25000
},
"unet_params": {
"weights": "",
"architecture": "",
"epochs": 50
},
"segmenter_params": {
"nms_min_distance": 3,
"watershed_line": True
}
}
def main():
"""
Entry point of script.
"""
parsed = parser.parse_args()
if parsed.command == "generate_config" and parsed.config_file:
with open(parsed.config_file, "w") as config_file:
config_file.write(
yaml.dump(get_default_config(), default_flow_style=False))
config = get_default_config()
if parsed.config_file:
with open(parsed.config_file, "r") as config_file:
config.update(yaml.load(config_file.read()))
train = parsed.command == "train"
run(config, train=train)
| true |
75bea52464a201d5a0b9ee30bde6e504ab8ec625 | Python | mardink/RaspberryPiRobot | /Robot/remoteServerRoboteigen.py | UTF-8 | 8,359 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: Latin-1
# Load library functions we want
import os
import sys
import time
import pygame
import SocketServer
import RPi.GPIO as GPIO
#GPIO.setmode(GPIO.BCM) #used for chip numbering
GPIO.setmode(GPIO.BOARD) #used for board numbering, better when using multiple types of pi
GPIO.setwarnings(False)
GPIO.cleanup()
# Settings
Led_blue = 13 #Pin 13 on the board or 21 BCM rev1
Led_red = 11 #Pin 11 on the board or 17 BCM rev1
Buzzer = 15 #Pin 15 on the board or 22 BCM rev1
Button_left = 7 #Pin 7 on the board or 4 BCM rev1
Button_right = 12 #Pin 12 on the board or 18 BCM rev1
Servo_pan = 22 #Pin 22 on the board or 25 BCM rev1
Servo_tilt = 18 #Pin 18 on the board or 24 BCM rev1
Sonar = 8 #Pin 8 on the board or 14 BCM rev1
MotorA_dir = 26 #Pin 26 on the board or 7 BCM rev1
MotorA_speed = 24 #Pin 24 on the board or 8 BCM rev1
MotorB_dir = 21 #Pin 21 on the board or 9 BCM rev1
MotorB_speed = 19 #Pin 19 on the board or 10 BCM rev1
#Set the gpio ports
GPIO.setup(Button_right, GPIO.IN)
GPIO.setup(Button_left, GPIO.IN)
GPIO.setup(MotorB_speed, GPIO.OUT)
GPIO.setup(MotorB_dir, GPIO.OUT)
GPIO.setup(MotorA_speed, GPIO.OUT)
GPIO.setup(MotorA_dir, GPIO.OUT)
GPIO.setup(Led_blue, GPIO.OUT)
GPIO.setup(Led_red, GPIO.OUT)
GPIO.setup(Buzzer, GPIO.OUT)
# Function to set all drives off
def MotorOff():
    # De-energise both motor channels: pulling the direction and speed
    # pins low stops the steering motor (A) and the drive motor (B).
    GPIO.output(MotorA_dir, GPIO.LOW)
    GPIO.output(MotorA_speed, GPIO.LOW)
    GPIO.output(MotorB_dir, GPIO.LOW)
    GPIO.output(MotorB_speed, GPIO.LOW)
# switch Driving Motor on backwards
def drive_backward():
    # MotorB is the drive motor: speed pin high, direction pin low.
    GPIO.output (MotorB_speed, 1)
    GPIO.output (MotorB_dir, 0)
# switch Driving Motor on forwards
def drive_forward():
    # NOTE(review): the obstacle check is commented out here; the UDP
    # handler calls check_front() itself before driving forward.
    #check_front()
    GPIO.output (MotorB_speed, 0)
    GPIO.output (MotorB_dir, 1)
# switch Driving Motor off
def drive_stop():
    # Both pins low de-energises the drive motor.
    GPIO.output (MotorB_speed, 0)
    GPIO.output (MotorB_dir, 0)
# switch Steering Motor left
def steering_left():
    # MotorA is the steering motor.
    GPIO.output (MotorA_speed, 1)
    GPIO.output (MotorA_dir, 0)
# switch Steering Motor right
def steering_right():
    GPIO.output (MotorA_speed, 0)
    GPIO.output (MotorA_dir, 1)
# switch Steering Motor off
def steering_stop():
    GPIO.output (MotorA_speed, 0)
    GPIO.output (MotorA_dir, 0)
#Function Pan and Tilt
def pan_left():
    # Pan servo is driven via servoblaster channel 7; +10/-10 nudge the
    # position by one step, 150 recentres it (see *_neutral below).
    cmd = 'echo 7=+10 > /dev/servoblaster'
    os.system(cmd)
def pan_right():
    cmd = 'echo 7=-10 > /dev/servoblaster'
    os.system(cmd)
def pan_neutral():
    cmd = 'echo 7=150 > /dev/servoblaster'
    os.system(cmd)
def tilt_up():
    # Tilt servo is servoblaster channel 6.
    cmd = 'echo 6=-10 > /dev/servoblaster'
    os.system(cmd)
def tilt_down():
    cmd = 'echo 6=+10 > /dev/servoblaster'
    os.system(cmd)
def tilt_neutral():
    cmd = 'echo 6=150 > /dev/servoblaster'
    os.system(cmd)
#Function LEDS and Buzzer
# Simple on/off wrappers for the two status LEDs and the buzzer.
def LedBlue_on():
    GPIO.output(Led_blue,GPIO.HIGH)
def LedBlue_off():
    GPIO.output(Led_blue,GPIO.LOW)
def LedRed_on():
    GPIO.output(Led_red,GPIO.HIGH)
def LedRed_off():
    GPIO.output(Led_red,GPIO.LOW)
def Buzzer_on():
    GPIO.output(Buzzer,GPIO.HIGH)
def Buzzer_off():
    GPIO.output(Buzzer,GPIO.LOW)
def Server_Ready():
    """Signal server start-up: two short blue-LED flashes with beeps."""
    for _ in range(2):
        LedBlue_on()
        Buzzer_on()
        time.sleep(0.5)
        LedBlue_off()
        Buzzer_off()
        time.sleep(0.5)
#Define sonar functions for avoiding objects
def sonar():
    # Single-pin ultrasonic measurement: the same GPIO line first emits
    # the trigger pulse and is then switched to input to read the echo.
    GPIO.setup(Sonar, GPIO.OUT)
    GPIO.output(Sonar, True)
    time.sleep(0.00001)  # ~10 us trigger pulse
    GPIO.output(Sonar, False)
    start = time.time()
    count = time.time()
    GPIO.setup(Sonar, GPIO.IN)
    # Wait (at most ~0.1 s) for the echo line to go high.
    while GPIO.input(Sonar)==0 and time.time()-count<0.1:
        start = time.time()
    stop=time.time()
    # Time how long the echo line stays high.
    while GPIO.input(Sonar)==1:
        stop = time.time()
    # Calculate pulse length
    elapsed = stop-start
    # Distance pulse travelled in that time is time
    # multiplied by the speed of sound (cm/s)
    distance = elapsed * 34029
    # That was the distance there and back so halve the value
    distance = distance / 2
    return distance
def sonarAlarm():
    """Obstacle warning: alternate blue LED + buzzer with the red LED
    twice (0.5 s per phase), then switch everything off."""
    for _ in range(2):
        LedBlue_on()
        Buzzer_on()
        LedRed_off()
        time.sleep(0.5)
        LedBlue_off()
        Buzzer_off()
        LedRed_on()
        time.sleep(0.5)
    LedBlue_off()
    Buzzer_off()
    LedRed_off()
def sonarAvoid():
    # Simple evasive manoeuvre: reverse while steering right for 2 s,
    # then stop both motors.
    drive_backward()
    steering_right()
    time.sleep(2)
    steering_stop()
    drive_stop()
def check_front():
    # Read the ultrasonic range; if an obstacle is within the safety
    # margin, try one evasive manoeuvre and, if still too close, raise
    # the audible/visual alarm.
    dist = sonar()
    safe_distance = 15 # Keep this distance in cm to objects
    if dist < safe_distance:
        sonarAvoid()
        dist = sonar()
        if dist < safe_distance:
            print('Too close, ',dist)
            sonarAlarm()
# Settings for the RemoteKeyBorg server
portListen = 9038 # What messages to listen for (LEDB on an LCD)
# Class used to handle UDP messages
class PicoBorgHandler(SocketServer.BaseRequestHandler):
    """Maps each received UDP joystick-status datagram onto motor,
    servo, LED and sonar actions."""
    # Function called when a new message has been received
    def handle(self):
        global isRunning
        # NOTE(review): isRunning is declared global but never assigned
        # in this handler; only the main loop below changes it. Confirm
        # this is intended.
        request, socket = self.request # Read who spoke to us and what they said
        request = request.upper() # Convert command to upper case
        driveCommands = request.split(',') # Separate the command into individual drives
        #Get joystick signals
        # One ON/OFF flag per stick direction, hat direction and button.
        moveForward = driveCommands[0]
        moveForwardLeft = driveCommands[1]
        moveForwardRight = driveCommands[2]
        moveBackward = driveCommands[3]
        moveBackwardRight = driveCommands[4]
        moveBackwardLeft = driveCommands[5]
        moveLeft = driveCommands[6]
        moveRight = driveCommands[7]
        hatUp = driveCommands[8]
        hatDown = driveCommands[9]
        hatLeft = driveCommands[10]
        hatRight = driveCommands[11]
        speedUp = driveCommands[12]
        speedDown = driveCommands[13]
        joyButton1 = driveCommands[14]
        joyButton2 = driveCommands[15]
        joyButton3 = driveCommands[16]
        joyButton4 = driveCommands[17]
        joyButton5 = driveCommands[18]
        joyButton6 = driveCommands[19]
        joyButton7 = driveCommands[20]
        joyButton8 = driveCommands[21]
        joyButton9 = driveCommands[22]
        joyButton10 = driveCommands[23]
        joyButton11 = driveCommands[24]
        joyButton12 = driveCommands[25]
        # NOTE(review): the diagonal, speed and button-7..12 flags are
        # parsed but never acted on below.
        #Assign Joystick input to functions
        # First matching flag wins; no flag set stops both motors.
        if moveForward == 'ON':
            check_front()
            drive_forward()
        elif moveBackward == 'ON':
            drive_backward()
        elif moveLeft == 'ON':
            steering_left()
        elif moveRight == 'ON':
            steering_right()
        elif hatUp == 'ON':
            tilt_up()
        elif hatDown == 'ON':
            tilt_down()
        elif hatRight == 'ON':
            pan_right()
        elif hatLeft == 'ON':
            pan_left()
        elif joyButton1 == 'ON':
            print "Knop 1"
            print (sonar())
        elif joyButton2 == 'ON':
            print "Knop 2"
        elif joyButton3 == 'ON':
            print "Knop 3"
            pan_neutral()
        elif joyButton4 == 'ON':
            print "Knop 4"
        elif joyButton5 == 'ON':
            print "Knop 5"
            tilt_neutral()
        elif joyButton6 == 'ON':
            print "Knop 6, Program stops"
            # NOTE(review): the actual exit is disabled, so button 6 only
            # prints the message.
            #sys.exit()
        else:
            MotorOff()
try:
    # NOTE(review): `global` at module level is a no-op; isRunning is a
    # plain module variable here.
    global isRunning
    # Start by turning all drives off
    MotorOff()
    #raw_input('You can now turn on the power, press ENTER to continue')
    Server_Ready()
    # Setup the UDP listener
    remoteKeyBorgServer = SocketServer.UDPServer(('', portListen), PicoBorgHandler)
    # Loop until terminated remotely
    isRunning = True
    while isRunning:
        remoteKeyBorgServer.handle_request()
    # Turn off the drives and release the GPIO pins
    print 'Finished'
    MotorOff()
    #raw_input('Turn the power off now, press ENTER to continue')
    GPIO.cleanup()
except KeyboardInterrupt:
    # CTRL+C exit, turn off the drives and release the GPIO pins
    print 'Terminated'
    MotorOff()
    raw_input('Turn the power off now, press ENTER to continue')
    GPIO.cleanup()
276d2be40ec2b5f8dea2c4b37b76b9e1be617beb | Python | kaangoksal/tkinter-weather-widget | /SatPredictorAPI.py | UTF-8 | 2,242 | 2.9375 | 3 | [] | no_license | import requests
from datetime import datetime
class SatPredictorAPI:
    """Client for the John Deere satellite-visibility prediction service."""

    def __init__(self):
        pass

    def get_sattalite_visibility_data_on_coordinates(self, latitude: float, longitude: float,
                                                     date: datetime = None, elevation: float = 10.0,
                                                     glonass: bool = True, gps: bool = True):
        """
        Return satellite-visibility data for the given position and date.

        :param latitude: latitude in decimal degrees
        :param longitude: longitude in decimal degrees
        :param date: moment of interest; defaults to the current time at
            call time. (Fixed: the old default ``datetime.now()`` in the
            signature was evaluated once at import, freezing the date
            for the whole process lifetime.)
        :param elevation: elevation mask in degrees
        :param glonass: include GLONASS satellites
        :param gps: include GPS satellites
        :return: parsed JSON response from the service
        """
        if date is None:
            date = datetime.now()
        url = "http://satpredictor2.deere.com/visibilitydata"
        # The service expects literal YES/NO strings for the two flags.
        gps_flag = "YES" if gps else "NO"
        glonass_flag = "YES" if glonass else "NO"
        month_array = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
        day = str(date.day)
        month_string = month_array[date.month - 1]  # e.g. "Jun"
        year = str(date.year)
        # Example: address=&latitude=33.78&longitude=-84.38&isGPS=YES&isGLONASS=YES
        #          &eleMask=10.0&tZStr=17&selDate=20+Jun+2018
        payload = ("address=&latitude=" + str(latitude) + "&longitude=" + str(longitude)
                   + "&isGPS=" + gps_flag + "&isGLONASS=" + glonass_flag
                   + "&eleMask=" + str(elevation)
                   + "&tZStr=17"
                   + "&selDate=" + day + "+" + month_string + "+" + year)
        headers = {
            'accept': "application/json, text/javascript, */*; q=0.01",
            'accept-encoding': "gzip, deflate",
            'Cache-Control': "no-cache",
            'connection': "keep-alive",
            'content-length': "147",
            'content-type': "application/x-www-form-urlencoded;charset=UTF-8",
            'Postman-Token': "1282bf48-5561f-48d9-9ab4-1f13667971e7"
        }
        response = requests.request("POST", url, data=payload, headers=headers)
        return response.json()
return response.json()
# ssp = SatPredictorAPI()
#
# ssp.get_sattalite_visibility_data_on_coordinates(1,2)
| true |
f57b5afb9fc245e38ad646c29b8dd2a75b6d0e0e | Python | easywaldo/python_basic | /list.py | UTF-8 | 245 | 3.78125 | 4 | [] | no_license | list = [1, 3, 4, 5, 6, 767, 100]
# NOTE(review): the sequence assigned just above is named `list`, which
# shadows the builtin; rename it (e.g. to `numbers`) when the assignment
# line can be edited as well.
print(list)
# sort() sorts in place and returns None.
list.sort()
print(list)
for n in list:
    print(n)
# A list of (id, name) tuples, extended element-wise and in bulk.
list_test = [(1, "waldo"), (2, "ted"), (3, "bob")]
list_test.append((4, "leon"))
list_test.extend([(5, "nix"), (6, "james")])
print(list_test)
| true |
5a44386648fcb98c5293d44d8dc88879b1c99c74 | Python | wsgan001/PyFPattern | /Data Set/bug-fixing-5/2f05cc8580ef1be09c88d32f5924b7762b619729-<test_consolidate_inplace>-bug.py | UTF-8 | 156 | 2.8125 | 3 | [] | no_license | def test_consolidate_inplace(self):
    frame = self.frame.copy()
    # NOTE(review): range(ord('A'), ord('Z')) excludes 'Z' because the
    # upper bound of range() is exclusive; ord('Z') + 1 would cover the
    # whole alphabet. (This file is the "-bug" variant of a bug-fixing
    # dataset, so the defect appears to be intentional here.)
    for letter in range(ord('A'), ord('Z')):
        self.frame[chr(letter)] = chr(letter)
cfdf7c30611fa7e568a64b227a92a58bafab670c | Python | Karina-I-F/ADPY-13-hw-Iterators.Generators.Yield | /generator_md5.py | UTF-8 | 355 | 3.0625 | 3 | [] | no_license | from hashlib import md5
def generator_md5(file):
    """Yield the MD5 hex digest of every non-empty line in *file*.

    Each line is stripped of surrounding whitespace before hashing;
    blank lines are simply skipped. (Fixed: the previous version called
    ``fo.readline()`` whenever it met a blank line, silently consuming
    -- and therefore skipping -- the line that followed it.)
    """
    with open(file, encoding='utf-8') as fo:
        for line in fo:
            cropped_line = line.strip()
            if cropped_line:
                yield md5(cropped_line.encode()).hexdigest()
for line in generator_md5('test.txt'):
print(line)
| true |
ef0884d901e175ecce1aad93d2e8ca38b858b163 | Python | DhairyaMeh/HacktoberFest21 | /6.swapNum.py | UTF-8 | 461 | 4.28125 | 4 | [] | no_license |
# using third variable
# a = int(input("Enter the value of a: "))
# b = int(input("Enter the value of b: "))
# c = a
# a = b
# b = c
# print("----------------------------")
# print("Value of a After swapping is ", a)
# print("Value of b After swapping is ", b)
# using without third variable
a = int(input("Enter the value of a: "))
b = int(input("Enter the value of b: "))
# Fixed: the entered values were previously overwritten with hard-coded
# constants (a = 5, b = 10), discarding the user's input entirely.
# Swap without a third variable using tuple unpacking.
a, b = b, a
print(a)
print(b)
| true |
6445e261cced2bae71d5c41aa810e007b6d6a5bd | Python | Arsener/DataminingHomework | /cluster/kmeans.py | UTF-8 | 5,377 | 2.828125 | 3 | [] | no_license | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
# Compare SSE and silhouette coefficients for different cluster counts,
# with and without dimensionality reduction. By default PCA is configured
# with n_components=0.9, i.e. 90% of the variance is retained.
def compare_cluster_results(data, max_clusters=10, n_components=0.9):
    """Plot SSE and silhouette score vs. cluster count, with/without PCA.

    Index 0 of each result list holds the raw-data run, index 1 the
    PCA-reduced run. Figures are saved under figs/.
    """
    # Result containers for the two runs (without / with PCA).
    x_label_silhouette_score = [[], []]
    y_label_silhouette_score = [[], []]
    x_label_SSE = [[], []]
    y_label_SSE = [[], []]
    for i in range(2):
        if i == 1:
            # n_components >= 1 is an explicit component count, below 1
            # it is an explained-variance ratio.
            if n_components >= 1:
                n_components = int(n_components)
            data = PCA(n_components=n_components).fit_transform(data)
        model = KMeans(n_clusters=1, init='random', n_init=10).fit(data)
        x_label_SSE[i].append(1)
        y_label_SSE[i].append(model.inertia_)
        for n_clusters in range(2, max_clusters + 1):
            model = KMeans(n_clusters=n_clusters, init='random', n_init=10).fit(data)
            # Compute the silhouette coefficient.
            silhouette_avg = silhouette_score(data, model.labels_, metric='euclidean')
            x_label_silhouette_score[i].append(n_clusters)
            y_label_silhouette_score[i].append(silhouette_avg)
            x_label_SSE[i].append(n_clusters)
            y_label_SSE[i].append(model.inertia_)
    plt.figure(figsize=(3.5, 2.5))
    plt.plot(x_label_SSE[0], y_label_SSE[0], marker=".", c='r', label='without PCA')
    plt.plot(x_label_SSE[1], y_label_SSE[1], marker=".", c='b', label='with PCA')
    plt.xlabel("The number of clusters")
    plt.ylabel("SSE")
    plt.legend(loc='best')
    plt.savefig(os.path.join('figs', 'kmeans_sse.png'))
    plt.show()
    plt.figure(figsize=(3.5, 2.5))
    plt.plot(x_label_silhouette_score[0], y_label_silhouette_score[0], marker=".", c='r', label='without PCA')
    plt.plot(x_label_silhouette_score[1], y_label_silhouette_score[1], marker=".", c='b', label='with PCA')
    plt.xlabel("The number of clusters")
    plt.ylabel("Silhouette coefficient")
    plt.legend(loc='best')
    plt.savefig(os.path.join('figs', 'kmeans_sil.png'))
    plt.show()
# Obtain the clustering model together with the data actually clustered
def get_cluster_result(data, n_clusters=2, pca=False, n_components=0.9):
    """Fit a k-means model, optionally on a PCA-reduced copy of the data.

    Returns the fitted KMeans model and the feature matrix that was
    actually clustered (reduced when ``pca`` is set).
    """
    if pca:
        # Values >= 1 are treated as an explicit component count rather
        # than an explained-variance ratio.
        reducer = PCA(n_components=int(n_components) if n_components >= 1 else n_components)
        data = reducer.fit_transform(data)
    fitted = KMeans(n_clusters=n_clusters, init='random', n_init=10).fit(data)
    return fitted, data
# Visualization
def visualized(data, labels, c):
    # Project the feature matrix to 2-D with t-SNE purely for plotting,
    # then draw each cluster's points in its own colour from `c`.
    tsne = TSNE(n_components=2, metric='euclidean', init='pca')
    tsne_2d = tsne.fit_transform(data)
    # plt.figure(figsize=(3.5, 2.5))
    for i in range(len(c)):
        cluster_i = tsne_2d[[l[0] for l in np.argwhere(labels == i)]]
        plt.scatter(cluster_i[:, 0], cluster_i[:, 1], c=c[i], marker='.')
    plt.savefig(os.path.join('figs', 'kmeans_visualization.png'))
    plt.show()
# Perform data analysis on the clustering result
def data_analysis(df, labels, data, x, colors):
    # Add a column combining teacher id and course id: "instr-class".
    def combine(a, b):
        return str(a) + '-' + str(b)
    df['instr-class'] = list(map(lambda a, b: combine(a, b), df['instr'], df['class']))
    # Plot the per-question average score within each of the 3 clusters.
    plt.figure(figsize=(13, 4))
    for i in range(3):
        plt.plot(x, np.mean(np.array(data[labels == i]), axis=0),
                 marker=".", c=colors[i], label='cluster{}'.format(i))
    plt.xlabel("Question")
    plt.ylabel("Average score")
    plt.legend(loc='best')
    plt.savefig(os.path.join('figs', 'average_score_in_different_clusters.png'))
    plt.show()
    # For every teacher (and teacher-course pair), compute the share of
    # its samples that falls into each cluster and dump the table to CSV.
    for name in ['instr', 'instr-class']:
        pd_instr = pd.DataFrame(
            [[key] + [dict(df[labels == i][name].value_counts())[key] / value for i in range(3)] for key, value
             in
             dict(df[name].value_counts()).items()])
        # NOTE(review): 'clsuter0' is a typo for 'cluster0' but is part of
        # the emitted CSV schema, so it is left unchanged here.
        pd_instr.columns = [name, 'clsuter0', 'cluster1', 'cluster2']
        pd_instr.to_csv(os.path.join('data', '{}_result.csv'.format(name)), index=False)
# Compute the SSE (within-cluster sum of squared errors)
def calc_sse(data, labels):
    """Sum of squared distances from each sample to its cluster centroid.

    :param data: array-like of shape (n_samples, n_features)
    :param labels: numpy array of per-sample cluster labels (must support
        boolean masking via ``labels == i``)
    :return: total within-cluster sum of squared errors as a float
    """
    data = np.array(data, dtype=np.float64)
    cluster_ids = set(labels)
    # Fixed: centroids were stored positionally and then indexed by label
    # value, which is wrong for non-consecutive label sets (e.g. {3, 7});
    # keying by label keeps results identical for the usual 0..k-1 case.
    centers = {i: np.mean(data[labels == i], axis=0) for i in cluster_ids}
    return sum(np.sum(np.square(np.linalg.norm(data[labels == i] - centers[i], axis=1)))
               for i in cluster_ids)
def main():
    # Load the course-evaluation data set.
    df = pd.read_csv(os.path.join('data', 'data.csv'))
    # Input columns: course difficulty plus the 28 questions Q1-Q28.
    x = ['difficulty'] + ['Q' + str(i) for i in range(1, 29)]
    data = df[x]
    compare_cluster_results(data)
    model, data_for_cluster = get_cluster_result(data, n_clusters=3, pca=True)
    colors = ['r', 'g', 'b']
    silhouette_avg = silhouette_score(data_for_cluster, model.labels_, metric='euclidean')
    sse = calc_sse(data_for_cluster, model.labels_)
    print('Silhouette Coefficient: {}\nSSE: {}'.format(silhouette_avg, sse))
    # visualized(data_for_cluster, model.labels_, c=colors)
    # data_analysis(df, model.labels_, data, x, colors)
if __name__ == '__main__':
    main()
| true |
3fa824c5ac0d733a8df77900cee6ef9242161e6f | Python | kis-balazs/SE_Project | /DataCreatorDb/WolframAlpha.py | UTF-8 | 994 | 3 | 3 | [] | no_license | import wolframalpha
class WolframAlpha:
    """Small wrapper around the Wolfram Alpha API for asking the
    temperature in one fixed city at a given date and time."""

    def __init__(self, app_id):
        # Every query issued by this instance targets the same city.
        self.city_name = "Cluj-Napoca"
        self.__app_id = app_id
        self.__client = self.connection()

    def connection(self):
        """Create the Wolfram Alpha client for this instance's app ID."""
        return wolframalpha.Client(app_id=self.__app_id)

    def createQuery(self, date, time):
        """Build the natural-language temperature query for a date/time."""
        return "temperature in " + self.city_name + " on " + date + " " + time

    def getResponse(self, date, time):
        """Query Wolfram Alpha and return the raw temperature description."""
        answer = self.__client.query(self.createQuery(date=date, time=time))
        # The '@alt' text of this pod carries the temperature together
        # with the requested time and date.
        return answer['pod'][1]['subpod']['img']['@alt']
| true |
127d57c763de03a1417e8078af55549840e628a4 | Python | wheellzArmine/SkyHead | /mMath/fpg_automation/advertiser_maker.py | UTF-8 | 1,446 | 2.84375 | 3 | [] | no_license | from concept_maker import Concept
import parameters
class Advertiser():
    """Inserts an advertiser row and looks up its generated id by name."""

    def __init__(self, cursor, agency_id):
        self.cursor = cursor
        self.agency_id = agency_id
        # Static attributes come from the shared parameters module.
        self.adv_name = parameters.ADVERTISER_NAME
        self.vertical_id = parameters.VERTICAL_ID
        self.ad_server_id = parameters.AD_SERVER_ID
        self.domain = parameters.ADV_DOMAIN

    def message_constructor(self):
        # Parameterised INSERT; the values are bound in advertiser_create().
        message = 'INSERT INTO advertisers (agency_id,name,vertical_id,ad_server_id,domain) VALUES (%s,%s,%s,%s,%s);'
        return message

    def advertiser_create(self):
        """Insert the advertiser row and cache its database id on self.id."""
        self.cursor.execute(self.message_constructor(),
                            (self.agency_id, parameters.ADVERTISER_NAME, parameters.VERTICAL_ID,
                             parameters.AD_SERVER_ID, parameters.ADV_DOMAIN))
        # TODO(review): confirm the insert actually succeeded before
        # reporting success here.
        print("Advertiser has been inserted...")
        self.get_adv_id_by_name()

    # read : get id of advertiser just created, by name
    def get_adv_id_by_name(self):
        # Fixed: the name was interpolated into the SQL string with %,
        # which is open to SQL injection; bind it as a parameter instead.
        self.cursor.execute("SELECT id FROM advertisers WHERE name=%s ;",
                            (parameters.ADVERTISER_NAME,))
        advertiser_id = self.cursor.fetchall()[0]
        # NOTE(review): fetchall()[0] is a whole row tuple, not a scalar
        # id; confirm whether callers expect the row or row[0].
        self.id = advertiser_id
| true |
b4c0a51f58259f7448324323d869cd98b9320f1f | Python | Janphr/neural_network_framework | /main.py | UTF-8 | 4,548 | 2.609375 | 3 | [] | no_license | from src import *
import matplotlib.pyplot as plt
import numpy as np
import time
# Mode switches: `train` selects fitting a new network below (vs loading
# a saved one), `save` selects writing the tested network to disk.
train = 1
save = 1
# epochs = 7
# batch_size = 100
# learning_rate = 0.001
# alpha = 0.001
# scaling = 0.99 / 255
#
# loader = MnistLoader(scaling)
# x_train, t_train = loader.get_train_data()
# x_test, t_test = loader.get_test_data()
# Hyper-parameters for the gesture classifier.
epochs = 100
batch_size = 10
learning_rate = 0.001
split_idx = 500
alpha = 0
# gamma = 0.1
# Load gesture samples built from the listed body key points.
loader = GestureLoader(1, ('Neck', 'Nose', 'RShoulder', 'RElbow', 'RWrist', 'LShoulder', 'LElbow', 'LWrist', 'RHip',
                           'LHip', 'MidHip'
                           , 'LEye', 'REye'
                           ))
x_train, t_train = loader.get_train_data()
# Hold out the first split_idx samples as the test set.
x_test = x_train[:split_idx, :]
x_train = x_train[split_idx:, :]
t_test = t_train[:split_idx]
t_train = t_train[split_idx:]
if train:
# Exercise small
# epochs = 1
# batch_size = 2
# learning_rate = 0.02
#
# x_train, t_train = np.array([[1, -2], [1, -2]]), np.array([[0], [0]])
#
# network = Network(
# FullyConnected(2, 2, w=np.array([[-1, 1], [0.67, -0.67]]), b=np.array([2, -3])),
# Sigmoid(),
# FullyConnected(2, 2, w=np.array([[1, -0.33], [1, 0.67]]), b=np.array([1, -4])),
# Sigmoid(),
# FullyConnected(2, 1, w=np.array([[.67], [-1.3]]), b=np.array([.5])),
# Sigmoid()
# )
# Exercise delivery_data
# import pandas as pd
#
# # load the data from the Logistic Regression module
# delivery_data = pd.read_csv("../ml_hci_ws20/03_logistic_regression/data/delivery_data.csv")
# epochs = 2000
# batch_size = 1
# learning_rate = 0.01
# alpha = 0
# split_idx = 10
# # data_size x input_size; data_size x class_size
# x_train, t_train = delivery_data[["motivation", "distance"]].values, delivery_data["delivery?"].values
# t_train = np.reshape(t_train, (-1, 1))
#
# x_test = x_train[:split_idx, :]
# x_train = x_train[split_idx:, :]
# t_test = t_train[:split_idx, :]
# t_train = t_train[split_idx:, :]
#
# network = Network(
# FullyConnected(2, 2),
# Sigmoid(),
# FullyConnected(2, 2),
# Sigmoid(),
# FullyConnected(2, 1),
# Sigmoid()
# )
# MNIST
# network = load_network('MNIST_accuracy_0.9427_confidence_0.8521_lr_0.001_a_0.001_epochs_7')
# network = Network(
# FullyConnected(784, 100),
# Sigmoid(),
# FullyConnected(100, 50),
# Sigmoid(),
# FullyConnected(50, 10)
# )
# Gestures
network = Network(
FullyConnected(676, 300),
Sigmoid(),
FullyConnected(300, 100),
Sigmoid(),
FullyConnected(100, 2)
)
print("Training for " + str(epochs) + " epochs with " + str(batch_size) + " batches and a learning rate of " + str(
learning_rate) + ":")
t = time.time()
loss = Trainer(
network,
cce_loss,
AdamOptimizer(learning_rate, alpha)
).train(x_train, t_train, epochs, batch_size)
print("Training done in " + str(time.time() - t) + " seconds with final Loss of " + str(loss[len(loss) - 1]))
plt.plot(loss)
plt.ylabel("loss")
plt.xlabel("epochs")
plt.title("Batch-size: " + str(batch_size) + " Learning-rate: " + str(learning_rate))
plt.show()
else:
network = load_network('MNIST_accuracy_0.9094_confidence_0.6992')
avg_tps, avg_confidence, correct_wrong_predictions = network.test(x_test, t_test)
if save:
save_network(network, 'G_accuracy_' + str(round(avg_tps, 4)) + '_confidence_' + str(round(avg_confidence, 4)) +
'_lr_' + str(learning_rate) + '_a_' + str(alpha) + '_epochs_' + str(epochs))
# for i in range(len(frames)):
# frame = frames[i]
# error = 0
# for j in range(-check_range, check_range):
# if i+j >= len(frames) or i+j < 0 or frames[i+j]-j == frame:
# error += 1
# error /= 2*check_range+1
# if error >= 0.6:
# frame_indices.append(frame/total_length)
#
# extract_frames("./data/demo_video.mp4", frame_indices)
print("Accuracy: " + str(avg_tps))
print("Confidence: " + str(avg_confidence))
np.set_printoptions(precision=4)
for i in range(len(correct_wrong_predictions[0])):
correct = len(correct_wrong_predictions[0][i])
wrong = len(correct_wrong_predictions[1][i])
print("Class " + str(i) + "\n" + str(correct) + " correct and " + str(wrong) + " wrong predictions -> " +
str(100 * wrong / (correct + wrong)) + " % wrong.")
| true |
32cea1f6ec41e8053b85864a7480d51da0e5ef7f | Python | raspibrick/install | /BrickGate.py | UTF-8 | 18,038 | 2.578125 | 3 | [] | no_license | # BrickGate.py
from raspibrick import *
from threading import Thread
import BrickGateProperties
import socket
import os, subprocess
def debug(msg):
    # Print a trace line only when debugging is enabled in the properties.
    if BrickGateProperties.DEBUG:
        print "BG debug-> " + msg
def showOled(text, lineNum, fontSize, indent, clear = False):
    # Write one line of text to the OLED, optionally clearing it first.
    # Silently does nothing when no OLED display is attached.
    if oled == None:
        return
    if clear:
        oled.clear()
    oled.setText(text, lineNum, fontSize, indent)
# Response
class Error():
    # Numeric error codes (as strings) shown on the 7-segment display
    # via showError().
    OK = "0"
    SEND_FAILED = "-1"
    ILLEGAL_METHOD = "-2"
    ILLEGAL_INSTANCE = "-3"
    CMD_ERROR = "-4"
    CREATION_FAILED = "-5"
CREATION_FAILED = "-5"
class Reply():
    # Text replies sent back to the TCP client after each command.
    OK = "OK"
    ILLEGAL_DEVICE = "ILLEGAL DEVICE"
    ILLEGAL_IDENTIFIER = "ILLEGAL IDENTIFIER"
    DEVICE_NOT_CREATED = "DEVICE NOT CREATED"
    ILLEGAL_COMMAND = "ILLEGAL COMMAND"
    METHOD_EVAL_FAILED = "METHOD EVAL FAILED"
    ILLEGAL_PARAMETER = "ILLEGAL PARAMETER"
    NO_SUCH_METHOD = "NO SUCH METHOD"
    CHAR_NOT_DISPLAYABLE = "CHAR NOT DISPLAYABLE"
    ILLEGAL_DECIMAL_POINT = "ILLEGAL DECIMAL POINT"
    ILLEGAL_DIGIT = "ILLEGAL DIGIT"
    IMAGE_CAPTURE_FAILED = "IMAGE CAPTURE FAILED"
# ---------------------- class JavaRunner ---------------------------
class JavaRunner(Thread):
    # Runs a user-supplied Java jar in a background thread via subprocess.
    def __init__(self, app):
        Thread.__init__(self)
        self.app = app
        self.start()
    def run(self):
        robot.isButtonHit() # dummy call to reset buttonHit flag
        print "Spawning user app " + self.app
        rc = subprocess.call(["sudo", "java", "-jar", self.app])
        # return value not used yet
        print "Returning from MyApp with exit code:", rc
# ---------------------- class SocketHandler ------------------------
class SocketHandler(Thread):
    """Serves one connected TCP client: reads newline-terminated
    'device.method.p1...p5' commands, dispatches them to the robot
    devices and sends a one-line reply per command."""
    def __init__(self, conn):
        Thread.__init__(self)
        self.conn = conn

    def run(self):
        debug("SocketHandler started")
        global flasher
        global isConnected
        isRunning = True
        while isRunning:
            cmd = ""
            try:
                debug("Calling blocking conn.recv()")
                cmd = self.conn.recv(1024)[:-1]
            except:
                debug("exception in conn.recv()") # happens when connection is reset from the peer (Java Console closed)
                break
            debug("Received cmd: " + cmd + " len: " + str(len(cmd)))
            if len(cmd) == 0:
                break
            rc = self.executeCommand(cmd)
            if rc:
                isRunning = False
        # NOTE(review): closes the module-level 'conn' (the socket from
        # the accept loop) rather than self.conn -- confirm intended.
        conn.close()
        robot.exit()
        delay(2000)
        print("Client disconnected. Waiting for next client...")
        showOled("Client disconnected", 0, 12, 0, True)
        showOled("Waiting for next", 2, 12, 0, False)
        showOled("connecting client...", 3, 12, 0, False)
        # Show the green "waiting" flash pattern and HOLd on the display.
        flasher = Flasher(led, [0, 100, 0])
        flasher.start()
        display.showText("HOLd")
        isConnected = False
        robot.setButtonEnabled(True)
        Tools.debug("SocketHandler terminated")

    def executeCommand(self, cmd):
        """Parse one command string and dispatch it.

        Returns True when the client asked the server to exit (and None
        for malformed commands, which is treated as "keep running")."""
        debug("Calling executeCommand() with cmd: " + cmd)
        # max command length: device, method, param1, param2, param3, param4, param5
        parts = cmd.split(".") # Split on period
        if len(parts) < 2 or len(parts) > 7:
            self.showError(Error.CMD_ERROR, cmd)
            self.sendReply(Reply.ILLEGAL_COMMAND)
            return
        # Pad missing parameters with the "n" (no value) placeholder so
        # every command carries exactly 7 fields (replaces the old
        # repetitive elif chain).
        while len(parts) < 7:
            parts.append("n")
        device = parts[0]
        method = parts[1]
        param1 = parts[2].replace("`", ".") # . is used as separator
        param2 = parts[3].replace("`", ".")
        param3 = parts[4].replace("`", ".")
        param4 = parts[5].replace("`", ".")
        param5 = parts[6].replace("`", ".")
        return self.dispatchCommand(device, method, param1, param2, param3, param4, param5)

    def dispatchCommand(self, device, method, param1, param2, param3, param4, param5):
        """Route one parsed command to the matching device/method and
        send the reply. Returns True when the server should exit."""
        debug("dispatchCommand: " + device + ", " + method + ", " + param1 + ", " + param2 + \
              ", " + param3 + ", " + param4 + ", " + param5)
        reply = Reply.OK
        isExiting = False
        # ------------------- device 'robot' ---------------
        if device == "robot":
            r = RobotInstance.getRobot()
            if method == "getVersion":
                reply = SharedConstants.VERSION
            elif method == "initSound":
                r.initSound(param1, int(param2))
            elif method == "playSound":
                r.playSound()
            elif method == "fadeoutSound":
                # NOTE(review): this calls playSound(int) -- confirm a
                # dedicated fadeout API was not intended here.
                r.playSound(int(param1))
            elif method == "stopSound":
                r.stopSound()
            elif method == "pauseSound":
                r.pauseSound()
            elif method == "resumeSound":
                r.resumeSound()
            elif method == "rewindSound":
                r.rewindSound()
            elif method == "isSoundPlaying":
                if r.isSoundPlaying():
                    reply = "1"
                else:
                    reply = "0"
            elif method == "getIPAddresses":
                reply = ", ".join(r.getIPAddresses())
            elif method == "exit":
                isExiting = True
            elif method == "getCurrentDevices": # show all current devices in devices dictionary
                if len(devices) == 0:
                    reply = "NO DEVICES"
                else:
                    reply = ", ".join(devices.keys())
            elif method == "isButtonHit":
                if robot.isButtonHit():
                    reply = "1"
                else:
                    reply = "0"
            else:
                reply = Reply.NO_SUCH_METHOD
        # ------------------- device 'gear' or 'uss' ---------------
        elif device == "gear" or device == "uss" or device == "cam" or device == "beeper" or device == "oled":
            if method == "create":
                # Create the device lazily on first use; re-creating an
                # existing OLED just resets its display state.
                if not device in devices:
                    if device == "gear":
                        devices[device] = Gear()
                    elif device == "uss":
                        devices[device] = UltrasonicSensor()
                    elif device == "cam":
                        devices[device] = Camera()
                    elif device == "beeper":
                        devices[device] = Beeper(int(param1))
                    elif device == "oled":
                        devices[device] = OLED1306()
                else:
                    if device == "oled":
                        devices[device].clear()
                        devices[device].setFontSize(10)
                        devices[device].setInverse(False)
            else:
                if not device in devices:
                    reply = Reply.DEVICE_NOT_CREATED
                else:
                    reply = evaluate(device, method, param1, param2, param3, param4, param5)
        elif device == "display":
            reply = dispatchDisplay(device, method, param1, param2, param3, param4, param5)
        # ------------------- static device 'led' -----------
        elif device == "led":
            if method == "setColorAll":
                if param1 == "n" or param2 == "n" or param3 == "n":
                    reply = Reply.ILLEGAL_PARAMETER
                else:
                    try:
                        Led.setColorAll(int(param1), int(param2), int(param3))
                    except ValueError:
                        reply = Reply.ILLEGAL_PARAMETER
            elif method == "clearAll":
                Led.clearAll()
            else:
                reply = Reply.NO_SUCH_METHOD
        # ------------------- devices with identifier -----------
        elif len(device) > 3:
            # Device names like mot0/irs1/...: 3-letter type + digit id.
            devName = device[0:3]
            if devName == "mot" or \
                devName == "irs" or \
                devName == "led" or \
                devName == "svo" or \
                devName == "lss":
                try:
                    id = int(device[3:4])
                except ValueError:
                    reply = Reply.ILLEGAL_IDENTIFIER
                else:
                    if method == "create":
                        if not device in devices:
                            if devName == "mot":
                                devices[device] = Motor(id)
                            elif devName == "irs":
                                devices[device] = InfraredSensor(id)
                            elif devName == "led":
                                devices[device] = Led(id)
                            elif devName == "lss":
                                devices[device] = LightSensor(id)
                            elif devName == "svo":
                                devices[device] = ServoMotor(id, int(param1), int(param2))
                        else:
                            pass # already created
                    else:
                        if not device in devices:
                            reply = Reply.DEVICE_NOT_CREATED
                        else:
                            reply = evaluate(device, method, param1, param2, param3, param4, param5)
        # ------------------- illegal device ----------------------------
        else:
            reply = Reply.ILLEGAL_DEVICE
        self.sendReply(reply)
        return isExiting

    def sendReply(self, reply):
        # Replies are newline-terminated text lines.
        Tools.debug("Reply: " + reply)
        self.conn.sendall(reply + "\n")

    def sendImage(self, img):
        # Raw image bytes are sent as-is (used for camera captures).
        Tools.debug("Send Image size " + str(len(img)))
        self.conn.sendall(img)

    def showError(self, msg1, msg2):
        # Log the error and show its code on the 7-segment display.
        print("Error #" + msg1 + " : " + msg2)
        display.showText("E" + msg1, [0, 1, 1])
def dispatchDisplay(device, method, param1, param2, param3, param4, param5):
    # Handle all commands addressed to the 7-segment display device.
    # SECURITY NOTE(review): showText/showBlinker build a statement from
    # network-supplied parameters and eval() it -- a malicious client can
    # execute arbitrary code. Consider parsing the parameters instead.
    reply = "OK"
    if method == "create":
        if not device in devices:
            devices[device] = Display()
        else:
            pass # already created
    else:
        if not device in devices:
            reply = Reply.DEVICE_NOT_CREATED
        else:
            display = devices[device]
            if method == "clear":
                display.clear()
            elif method == "showText":
                if param2 == "n":
                    display.showText(param1)
                else:
                    stm = "display.showText(" + "\"" + param1 + "\", " + param2 + ", [" + param3 + "])"
                    debug("eval statement: " + stm)
                    eval(stm)
            elif method == "scrollToLeft":
                return str(display.scrollToLeft())
            elif method == "scrollToRight":
                return str(display.scrollToRight())
            elif method == "setToStart":
                display.setToStart()
            elif method == "stopTicker":
                display.stopTicker()
            elif method == "isTickerAlive":
                rc = display.isTickerAlive()
                if rc:
                    return "1"
                return "0"
            elif method == "isBlinkerAlive":
                rc = display.isBlinkerAlive()
                if rc:
                    return "1"
                return "0"
            elif method == "showTicker":
                # param4 selects blocking (any value but "0") vs non-blocking.
                if param4 == "0":
                    display.showTicker(param1, int(param2), int(param3), False)
                else:
                    display.showTicker(param1, int(param2), int(param3), True)
            elif method == "showBlinker":
                if param5 == "0":
                    stm = "display.showBlinker(" + "\"" + param1 + "\"" + ", [" + param2 + "], " + str(param3) + ", " + str(param4) + ", False)"
                else:
                    stm = "display.showBlinker(" + "\"" + param1 + "\"" + ", [" + param2 + "], " + str(param3) + ", " + str(param4) + ", True)"
                debug("eval statement: " + stm)
                eval(stm)
            else:
                reply = Reply.NO_SUCH_METHOD
    return reply
def evaluate(device, method, param1, param2, param3, param4, param5):
    # Generic dispatcher: builds "dev.method(p1, ..)" from the request
    # fields ("n" marks a missing parameter) and eval()s it.
    # SECURITY NOTE(review): method and parameters come straight from the
    # network, so eval() here allows arbitrary code execution.
    dev = devices[device] # Get device reference
    rc = None
    if param1 == "n":
        stm = "dev." + method + "()"
    elif param2 == "n":
        stm = "dev." + method + "(" + param1 + ")"
    elif param3 == "n":
        stm = "dev." + method + "(" + param1 + ", " + param2 + ")"
    elif param4 == "n":
        if method == "captureAndSave":
            param3 = "'" + param3 + "'" # String parameter
        stm = "dev." + method + "(" + param1 + ", " + param2 + ", " + param3 + ")"
    elif param5 == "n":
        stm = "dev." + method + "(" + param1 + ", " + param2 + ", " + param3 + ", " + param4 + ")"
    else:
        if method == "startBlinker":
            param1 = "'" + param1 + "'" # String parameter
            param2 = "'" + param2 + "'" # String parameter
        stm = "dev." + method + "(" + param1 + ", " + param2 + ", " + param3 + ", " + param4 + ", " + param5 + ")"
    debug("Statement: " + stm)
    try:
        rc = eval(stm)
        debug("eval() returned: " + str(rc))
    except:
        debug("eval() failed")
        return Reply.METHOD_EVAL_FAILED
    if rc == None: # method with no return value
        return Reply.OK
    elif method == "captureJPEG" and rc == -1:
        return Reply.IMAGE_CAPTURE_FAILED
    else:
        return str(rc) # convert return value to string
# ---------------------- Button callback ----------------------------
def onButtonEvent(event):
    """Hardware push-button callback.

    Long press -> request server shutdown (via disconnect()); short
    press while no client is connected -> launch the local Java app if
    it exists on disk.  Ignored entirely until isButtonEnabled is set.
    """
    global isLongPressed
    if not isButtonEnabled:
        return
    if event == BUTTON_PRESSED:
        isLongPressed = False
    elif event == BUTTON_LONGPRESSED:
        isLongPressed = True
        disconnect()
    elif event == BUTTON_RELEASED:
        # A release after a long press already triggered shutdown above.
        if not isLongPressed:
            if not isConnected:
                javaApp = "/home/pi/programs/MyApp.jar"
                if os.path.isfile(javaApp):
                    javaRunner = JavaRunner(javaApp)
                else:
                    print "No Java app found to execute"
# ---------------------- class Flasher ------------------------------
class Flasher(Thread):
    """Background thread that flashes an LED until stop() is called."""

    def __init__(self, led, color):
        Thread.__init__(self)
        self.led = led
        self.color = color
        self.isFlashing = True

    def run(self):
        """Flash loop: 50 ms on, then up to 20 x 200 ms off (abortable)."""
        while self.isFlashing:
            self.led.setColor(self.color)
            Tools.delay(50)
            self.led.setColor(0, 0, 0)
            # Sleep the off-phase in short slices so stop() reacts quickly.
            for _ in range(20):
                if not self.isFlashing:
                    break
                Tools.delay(200)
        debug("FlasherThread terminated")

    def stop(self):
        """Ask the flashing loop to finish; the thread then exits."""
        self.isFlashing = False

    def setColor(self, color):
        """Change the color used from the next on-phase onward."""
        self.color = color
def blinkRed(led, nb):
    """Blink *led* red *nb* times (100 ms on, 200 ms off)."""
    red = (255, 0, 0)
    off = (0, 0, 0)
    for _ in range(nb):
        led.setColor(*red)
        Tools.delay(100)
        led.setColor(*off)
        Tools.delay(200)
def delay(interval):
    """Sleep for *interval* milliseconds."""
    seconds = interval / 1000.0
    time.sleep(seconds)
def toBin(n):
    """Return the three low-order bits of *n* as a list [b2, b1, b0].

    E.g. toBin(5) -> [1, 0, 1].  Only the bits selected by the original
    4/2/1 masks are inspected; higher bits of *n* are ignored.
    """
    return [(n >> shift) & 1 for shift in (2, 1, 0)]
def disconnect():
    """Request server shutdown.

    Sets the terminate flag, then opens a throw-away local connection so
    the blocking accept() in the main loop wakes up and sees the flag.
    """
    global terminateServer
    terminateServer = True
    debug("Dummy connection...")
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('localhost', BrickGateProperties.IP_PORT)) # dummy connection to get out of accept()
# ====================== Main ======================================
# Top-level bootstrap (Python 2): set up the hardware wrappers, bind the
# TCP server socket and serve one client connection at a time until a
# long button press (via disconnect()) requests termination.
print "Brickgate server V" + BrickGateProperties.BRICKGATE_VERSION + " starting"
isButtonEnabled = False
SharedConstants.BLINK_CONNECT_DISCONNECT = False
SharedConstants.PLAY_CONNECT_DISCONNECT = False
robot = Robot()
oled = robot.oled
Beeper().beep(2)
Led.clearAll()
led = Led(LED_LEFT)
flasher = Flasher(led, [0, 100, 0])  # green "waiting for client" flash
display = Display()
Tools.delay(3000)
isConnected = False
robot.addButtonListener(onButtonEvent)
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # close port when process exits
Tools.debug("Socket created")
# dictionary of devices: "device_tag":ref
devices = {}
HOSTNAME = "" # Symbolic name meaning all available interfaces
try:
    serverSocket.bind((HOSTNAME, BrickGateProperties.IP_PORT))
except socket.error as msg:
    print "Bind failed", msg[0], msg[1]
    sys.exit()
serverSocket.listen(10)
terminateServer = False
display.showText("HOLd")
flasher.start()
isButtonEnabled = True
print "Waiting for a connecting client..."
showOled("Waiting for", 0, 12, 0, True)
showOled("connecting client...", 1, 12, 0, False)
while True:
    # wait to accept a connection - blocking call
    Tools.debug("Calling blocking accept()...")
    conn, addr = serverSocket.accept()
    print "Connected with client at " + addr[0]
    # A dummy local connection made by disconnect() lands here with the
    # terminate flag already set.
    if terminateServer:
        showOled("Exiting", 2, 10, 0, True)
        showOled("BrickGate server", 4, 12, 0, False)
        break
    showOled("Connected to", 2, 12, 0, False)
    showOled(addr[0], 3, 12, 0, False)
    if isConnected: # Accept only one connection
        continue
    flasher.stop()
    display.showText("Conn")
    Tools.delay(1500)
    if oled != None:
        oled.clear()
    display.clear()
    isConnected = True
    devices.clear()
    socketHandler = SocketHandler(conn)
    socketHandler.setDaemon(True) # necessary to terminate it at program termination
    socketHandler.start()
    Beeper().beep(1)
# Shutdown path: clear the displays, flash red twice and close the port.
display.clear()
flasher.stop()
blinkRed(led, 2)
serverSocket.close()
print "BrickGate terminated"
| true |
519d38a87fdc21b4c27cad804eb20e8cc179a69b | Python | tobias-fyi/vela | /cs/lambda_cs/01_intro_python/Sprint-Challenge--Intro-Python/src/cityreader/test_stretch.py | UTF-8 | 3,016 | 3.0625 | 3 | [
"MIT"
] | permissive | import unittest
from cityreader import City, cityreader, cityreader_stretch
def check_city(inp, exp):
    """Return True when *inp* and *exp* agree on name, lat and lon."""
    return (
        inp.name == exp.name
        and inp.lat == exp.lat
        and inp.lon == exp.lon
    )
class CityreaderTests(unittest.TestCase):
    """Tests for the cityreader_stretch bounding-box filter."""

    def setUp(self):
        # Parse the full city data set once per test.
        self.cities = cityreader()

    def test_cityreader_stretch_correctness(self):
        """The box filter must return the expected cities, regardless of
        the order in which the two corner points are supplied."""
        expected = [
            City("Albuquerque", 35.1055,-106.6476),
            City("Riverside", 33.9382,-117.3949),
            City("San Diego", 32.8312,-117.1225),
            City("Los Angeles", 34.114,-118.4068),
            City("Las Vegas", 36.2288,-115.2603),
            City("Denver", 39.7621,-104.8759),
            City("Phoenix", 33.5722,-112.0891),
            City("Tucson", 32.1558,-110.8777),
            City("Salt Lake City", 40.7774,-111.9301)
        ]
        inp = cityreader_stretch(45, -100, 32, -120, self.cities)
        self.assertEqual(len(inp), len(expected))
        for i in range(len(inp)):
            self.assertTrue(check_city(inp[i], expected[i]))
        # Same box with the corners given in the opposite order.
        inp = cityreader_stretch(32, -120, 45, -100, self.cities)
        self.assertEqual(len(inp), len(expected))
        for i in range(len(inp)):
            self.assertTrue(check_city(inp[i], expected[i]))
        expected = [
            City("Richmond", 37.5294,-77.4755),
            City("Virginia Beach", 36.7335,-76.0435),
            City("Washington", 38.9047,-77.0163),
            City("Orlando", 28.4801,-81.3448),
            City("Miami", 25.784,-80.2102),
            City("Tampa", 27.9937,-82.4454),
            City("Jacksonville", 30.3322,-81.6749),
            City("Albuquerque", 35.1055,-106.6476),
            City("Fort Worth", 32.7813,-97.3466),
            City("McAllen", 26.2203,-98.2457),
            City("El Paso", 31.8478,-106.431),
            City("Dallas", 32.7938,-96.7659),
            City("Austin", 30.3038,-97.7545),
            City("Houston", 29.7871,-95.3936),
            City("San Antonio", 29.4722,-98.5247),
            City("New Orleans", 30.0687,-89.9288),
            City("Charlotte", 35.208,-80.8308),
            City("Raleigh", 35.8323,-78.6441),
            City("Memphis", 35.1047,-89.9773),
            City("Nashville", 36.1714,-86.7844),
            City("Riverside", 33.9382,-117.3949),
            City("San Diego", 32.8312,-117.1225),
            City("Los Angeles", 34.114,-118.4068),
            City("Las Vegas", 36.2288,-115.2603),
            City("Denver", 39.7621,-104.8759),
            City("Atlanta", 33.7627,-84.4231),
            City("Indianapolis", 39.7771,-86.1458),
            City("Oklahoma City", 35.4677,-97.5138),
            City("Phoenix", 33.5722,-112.0891),
            City("Tucson", 32.1558,-110.8777),
            City("Baltimore", 39.3051,-76.6144),
            City("Columbus", 39.9859,-82.9852),
            City("Cincinnati", 39.1412,-84.506),
            City("Saint Louis", 38.6358,-90.2451),
            City("Kansas City", 39.1239,-94.5541),
            City("Louisville", 38.1662,-85.6488)
        ]
        # NOTE(review): unlike the cases above, no length assertion here --
        # extra results beyond the expected list would go unnoticed.
        inp = cityreader_stretch(40, -50, 12, -120, self.cities)
        for i in range(len(inp)):
            self.assertTrue(check_city(inp[i], expected[i]))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
b9ebb4484e1587073a1453d92793be847b72b42f | Python | kafey/pythonthw | /ex15.py | UTF-8 | 520 | 3.828125 | 4 | [] | no_license | # using library sys function argv
# Exercise 15 (Learn Python the Hard Way, Python 2): open and print a
# file named on the command line, then ask for the name again and print
# the file a second time.
from sys import argv
# argv unpacks into the script name and the file to open.
script, filename = argv
# Open the file named on the command line.
txt = open(filename)
# Print the filename and then the file contents via .read().
print "Here's your file %r" % filename
print txt.read()
# Ask the user to type the filename again with raw_input.
print "I'll also ask you to type it again:"
file_again = raw_input("> ")
# Open the file the user named.
txt_again = open(file_again)
# Show the contents again.  NOTE(review): neither handle is closed --
# acceptable for a short-lived teaching script.
print txt_again.read()
| true |
5eae806b40a6b78e63075ab68b5ba9ed37c50577 | Python | cjiang94/INF3331-Python | /assignment4/assignment4/package/my_integrators/integrator.py | UTF-8 | 615 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python
def integrate(f, a, b, N):
    """Approximate the integral of *f* over [a, b] with a left Riemann
    sum on N equal subintervals.

    Bug fix: the original looped ``range(N + 1)``, summing an extra
    rectangle whose left endpoint is b itself, overshooting [a, b].

    Args:
        f: callable of one float argument.
        a, b: integration bounds.
        N: number of subintervals (positive int).

    Returns:
        The Riemann-sum approximation as a float.
    """
    dx = (b - a) / N
    result = 0
    x = a
    for _ in range(N):  # left endpoints a, a+dx, ..., b-dx -- exactly N of them
        result += f(x) * dx
        x += dx
    return result
def midpoint_integrate(f, a, b, N):
    """Approximate the integral of *f* over [a, b] with the midpoint
    rule on N equal subintervals.

    Bug fix: the original looped ``range(N + 1)``, evaluating an extra
    midpoint at b + dx/2, outside the integration interval.

    Args:
        f: callable of one float argument.
        a, b: integration bounds.
        N: number of subintervals (positive int).

    Returns:
        The midpoint-rule approximation as a float.
    """
    dx = (b - a) / N
    result = 0
    x = a + dx / 2  # midpoint of the first subinterval
    for _ in range(N):
        result += f(x) * dx
        x += dx
    return result
| true |
58dec7d15e204eb6e268fbc15f1ae01aaded4fb8 | Python | djrlj694/python-jillib | /jil_statement.py | UTF-8 | 3,882 | 3.046875 | 3 | [] | no_license | #!/opt/cloudera/parcels/Anaconda/bin/python
# -*- coding: utf-8 -*-
'''
FILE
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
jil_statement.py
DECLARATION
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
JILStatement(self, key, **kwargs)
DESCRIPTION
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
This class generates strings that conform to the syntax of JIL (Job
Information Language) script subcommand and arguments statements,
both of which can be expressed as colon-separated key/value pairs
(i.e., <KEY>:<VALUE>).
REFERENCES
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
1. https://docops.ca.com/ca-workload-automation-ae/11-4-2/en/scheduling/ae-scheduling/manage-jil/jil-syntax-rules
'''
### Class Declaration ###
class JILStatement(str):
    """Builds a single JIL "key: value" statement string.

    The value is looked up first under the keyword ``value`` and then
    under the key name itself; a missing (or falsy) value yields the
    empty string.  A non-string key raises TypeError; calling with no
    keyword arguments raises ValueError.
    """

    def __new__(self, key, **kwargs):
        if not isinstance(key, str):
            raise TypeError('Argument "key" must be of type "str".')
        if not kwargs:
            raise ValueError('At least one keyword argument must exist.')
        value = kwargs.get('value') or kwargs.get(key)
        if not value:
            return ''
        return '%s: %s' % (key, str(value))
if __name__ == '__main__':
    # Ad-hoc demonstration of JILStatement (not a real test harness).
    print('')
    print('UNIT TESTS: PASS')
    print('‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾')
    print('')
    insert_job = 'job1_name'
    pass_dict = {'test_key1': 'test_value1', 'test_key2': 'test_value2'}
    print(type(pass_dict))
    print('TEST P1:')
    pass1 = JILStatement('insert_job', value=insert_job)
    print(pass1)
    print('')
    print('TEST P2:')
    pass2 = JILStatement('test_key1', **pass_dict)
    print(pass2)
    print('')
    print('TEST P3:')
    pass3 = JILStatement('insert_job', key1=insert_job, key2='foo')
    print(pass3)
    print('')
    print('UNIT TESTS: FAIL')
    print('‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾')
    print('')
    print('TEST F1:')
    # NOTE(review): no keyword arguments -> ValueError is raised and is
    # NOT caught, so the script aborts here and TEST F2 never runs.
    fail1 = JILStatement('insert_job')
    print(fail1)
    print('TEST F2:')
    fail1 = JILStatement('insert_job', 'foo')
    print(fail1)
| true |
3b13eccd3824f2ca4ff40274f675fa1a5c93f3e0 | Python | VieetBubbles/holbertonschool-higher_level_programming | /0x0B-python-input_output/8-load_from_json_file.py | UTF-8 | 332 | 3.4375 | 3 | [] | no_license | #!/usr/bin/python3
"""
The 8-load_from_json_file module.
"""
def load_from_json_file(filename):
    """Create a Python object from a JSON file.

    Args:
        filename: path of the JSON file to read.

    Returns:
        The deserialized object (dict, list, str, ...).

    Raises:
        OSError: if the file cannot be opened.
        json.JSONDecodeError: if the contents are not valid JSON.
    """
    # json.load streams from the file object directly instead of
    # materializing the whole file with read() + loads().
    from json import load
    with open(filename, 'r') as f:
        return load(f)
| true |
b752f3ed44cd0bcebeb4ebe5e424820344b8b9fb | Python | junhao69535/pycookbook | /chapter1/del_and_keep_order.py | UTF-8 | 1,094 | 4.15625 | 4 | [] | no_license | #!coding=utf-8
"""
删除序列相同元素并保持顺序
"""
# 怎么再一个序列上面保持元素顺序同时消除重复的值
# 如果序列上的值都是hashable类型,直接利用集合和生成器即可
def dedupe(items):
    """Yield the elements of *items* with duplicates removed, preserving
    first-occurrence order.  Elements must be hashable."""
    emitted = set()
    for element in items:
        if element in emitted:
            continue
        emitted.add(element)
        yield element
a = [1, 5, 2, 1, 9, 1, 5, 10]
print list(dedupe(a))
# If the elements are not hashable, use the variant below instead:
def dedupe2(items, key=None):
    """Yield the elements of *items* with duplicates removed, preserving
    first-occurrence order.

    *key*, when given, maps each element to a hashable value used only
    for the duplicate test, so unhashable elements (e.g. dicts) can be
    handled.

    Bug fix: yield the original element rather than its key projection --
    the caller should get the items back, not the hashable surrogates
    (this matches the Python Cookbook recipe this file transcribes).
    """
    seen = set()
    for item in items:
        val = item if key is None else key(item)
        if val not in seen:
            yield item
            seen.add(val)
# The key argument supplies a function that converts each element into a
# hashable value for the duplicate test.
b = [{'x': 1, 'y': 2}, {'x': 1, 'y': 3}, {'x': 1, 'y': 2}, {'x': 2, 'y': 4}]
print list(dedupe2(b, lambda d: (d['x'], d['y'])))
# To merely remove duplicates without preserving order, use set() directly.
print set(a)
# The generator also works for removing duplicate lines from a file.
with open('somefile.txt') as f:
    for line in dedupe(f):
        print line,
fb2c324970bb851d60d43e92e0f9f2437070e5df | Python | yegorkowalew/findcolor | /togroup.py | UTF-8 | 1,219 | 2.65625 | 3 | [] | no_license | import os, time
import glob
import win32com.client
# Script configuration: collect every .xlsx below the "out" folder next
# to this script, for merging via Excel COM automation (Windows-only).
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
in_folder = 'in'
out_folder = 'out'
# Recursive pattern matching every .xlsx below the output folder.
searh_folder = '%s\\**\\*.xlsx' % os.path.join(BASE_DIR, out_folder)
files_list = glob.glob(searh_folder, recursive=True)
def fileUpdate(xl, files_list):
    """Copy a fixed cell range from each workbook in *files_list* into a
    freshly created workbook, then save it as 'Готовый.xlsx'.

    *xl* is a running Excel COM Application object.
    """
    new_xlsx_file_path = os.path.join(BASE_DIR, 'Готовый.xlsx')
    neew_wb = xl.Workbooks.Add()
    to_sheet = neew_wb.Worksheets('Лист1')
    for xlsFileName in files_list:
        wb = xl.Workbooks.Open(xlsFileName)
        xl.Visible = True
        xl.DisplayAlerts = False
        sheet = wb.Worksheets('Техоперации совмещенные')
        sheet.Visible = True
        sheet.Activate()
        # sheet.Range("A1:A10").copy
        # NOTE(review): every iteration copies onto the same target range
        # A7:K11, so only the last workbook's data survives -- confirm
        # whether the target was meant to advance per file.
        sheet.Range("A1:K5").Copy(to_sheet.Range("A7:K11"))
        time.sleep(10)
    neew_wb.SaveAs(new_xlsx_file_path)
def worker():
    """Start a dedicated Excel instance, merge the workbooks listed in
    the module-level files_list, and shut Excel down again."""
    xl = win32com.client.DispatchEx("Excel.Application")
    fileUpdate(xl, files_list)
    xl.Quit()
    del xl
# NOTE(review): these status messages print at import time, i.e. BEFORE
# worker() below has actually done any work.
print('Все файлы обработаны. Программа закроется автоматически через 3сек.')
print('Powered by Yegor Kowalew')
if __name__ == "__main__":
    worker()
b4800c3077c06c4f1c4a4b9e165ae02e0a735217 | Python | nerdia/python_play_CL | /stripWavs.py | UTF-8 | 4,992 | 3.1875 | 3 | [] | no_license | '''
The following script will remove silence on the edges of a sound file.
It assumes that there is only one utterance per file. Thus, if there are
multiple utterances in a file, it will isolate the longest one (and remove
everything else as silence).
The threshold value is quite arbitrary however, it worked for me for a corpus
of over 60 minutes of data divided across about 15 speakers, both male and
female.
Tim Mahrt
2010
timmahrt@gmail.com
'''
import os
import numpy
import math
from scipy.io import wavfile
# Sampling window size as a fraction of a second (1.0 = one full second).
rateModifier = 1.0 / 16.0
# RMS level separating silence from speech.  Empirically chosen; per the
# module docstring it worked across ~15 male and female speakers.
rmsThreshold = 350.0
def rootMeanSquare(lst):
    """Return the root mean square of the values in *lst*."""
    # numpy broadcasts the squaring over the whole vector at once,
    # e.g. numpy.array([1, 2, 3]) ** 2 -> [1, 4, 9].
    values = numpy.array(lst)
    mean_square = sum(values ** 2) / len(lst)
    return math.sqrt(mean_square)
def findSequences(lst):
    """Split *lst* (integers, assumed increasing) into runs of
    consecutive values, e.g. [1, 2, 3, 7, 8] -> [[1, 2, 3], [7, 8]].

    Bug fix: the original never updated prevValue when starting the very
    first sequence, so the first element was always split off into its
    own single-element run (e.g. [1, 2, 3] -> [[1], [2, 3]]).
    """
    sequenceList = []
    currentSequence = []
    prevValue = None  # no previous element yet
    for value in lst:
        if prevValue is not None and value == prevValue + 1:
            # Still inside the current run of consecutive values.
            currentSequence.append(value)
        else:
            # The previous run (if any) is finished; start a new one.
            if currentSequence:
                sequenceList.append(currentSequence)
            currentSequence = [value]
        prevValue = value
    if currentSequence != []:
        sequenceList.append(currentSequence)
    return sequenceList
def findLongestSublist(listOfLists):
    """Return the longest sublist of *listOfLists* (the first one wins
    on ties); returns [] for an empty input."""
    longest = []
    for candidate in listOfLists:
        if len(candidate) > len(longest):
            longest = candidate
    return longest
def getWavDuration(fn):
    """Return the duration of wav file *fn* in seconds
    (sample count divided by sampling rate)."""
    samplingRate, readData = wavfile.read(fn)
    return float(len(readData)) / samplingRate
def isolateAudioString(fn):
    """Trim edge silence from wav file *fn* and save the result.

    Chunks the signal, keeps the longest continuous run of chunks whose
    RMS exceeds rmsThreshold (assumed to be the single utterance), pads
    one silent chunk on each side when available, and writes the result
    to a "new" subdirectory under the input file's directory.
    Prints warnings when the utterance touches either file edge.
    """
    # Read in the data
    # (wavfiles are stored as 16 bit integers but for our rms calculations,
    # we're going to need 32 bit integers)
    samplingRate, readData = wavfile.read(fn)
    readData = list(numpy.int32(readData))
    path, name = os.path.split(fn)
    # Break the data into equal sized chunks
    chunkSize = int(math.ceil(samplingRate * rateModifier))
    numChunks = int(len(readData) / chunkSize)
    readDataList = []
    for i in xrange(numChunks):
        readDataList.append( readData[i*chunkSize:(i+1)*chunkSize] )
    # Gather the rms of each chunk
    rmsValueList = [rootMeanSquare(vector) for vector in readDataList]
    # Create a list of indices to non-silence segments
    indexList = []
    for i, value in enumerate(rmsValueList):
        if value > rmsThreshold:
            print value
            indexList.append(i)
    # Find the longest continuous segment of noise--assume it is the utterance
    utteranceIndexList = findLongestSublist( findSequences(indexList) )
    # Gather the signal for the utterance indicies
    outDataList = []
    try:
        startIndex = utteranceIndexList[0]
    except IndexError:
        # No chunk exceeded the threshold: nothing to isolate.
        print "No utterance detected for %s" % (os.path.split(path)[1] + '/' + name)
        return
    endIndex = utteranceIndexList[-1]
    # Output warning -- no silence on the left edge (likely to have been clipped)
    if startIndex != 0:
        startIndex -= 1
    else:
        print "%s on the left edge" % (os.path.split(path)[1] + '/' + name)
    # Output warning -- no silence on the right edge (likely to have been clipped)
    if endIndex + 1 != len(readDataList):
        endIndex += 1
    else:
        print "%s on the right edge" % (os.path.split(path)[1] + '/' + name)
    for index in range(startIndex, endIndex+1):
        outDataList.append(readDataList[index])
    # Output data: flatten the kept chunks and write them as 16-bit wav
    # into a "new" subdirectory, keeping the original file name.
    path = os.path.join(path, "new")
    if not os.path.exists(path):
        os.mkdir(path)
    outputList = []
    for lst in outDataList:
        outputList.extend(lst)
    wavfile.write(os.path.join(path, name), samplingRate, numpy.int16(numpy.array(outputList)))
if __name__ == "__main__":
    # getWavDuration("/home/tmahrt2/Desktop/prosody/A7/03_Newman_scares_Leann.wav")
    # Process every .wav directly inside rootPath.
    rootPath = '/home/tmahrt2/Desktop/prosody_stimuli/'
    # NOTE(review): leafList is sorted but never used afterwards -- the
    # loop below re-lists the directory instead.
    leafList = os.listdir(rootPath)
    leafList.sort()
    for fn in os.listdir(rootPath):
        # Skip non-wave files
        if ".wav" not in fn:
            continue
        fullFN = os.path.join(rootPath, fn)
        # Skip directories (this shouldn't be necessary...)
        if os.path.isdir(fullFN):
            continue
        try:
            isolateAudioString(fullFN)
        except Exception, e:
            # Report which file failed, then re-raise to abort the run.
            print e
            print "Exception caught for %s" % (rootPath + '/' + fn)
            raise
| true |
5c5b74f9a854f2af0f5e8aa686359193a408821e | Python | KaustubhLall/ECE-USC-Server | /slack/modules/meow.py | UTF-8 | 407 | 3.15625 | 3 | [] | no_license | from PyDictionary import PyDictionary
# Shared PyDictionary client used by process() for meaning/synonym lookups.
dictionary = PyDictionary()
def process(words):
    """Build, print and return a formatted report containing the meaning
    and synonyms of every word in *words* (looked up via the module-level
    PyDictionary client)."""
    rule = '-' * 80
    parts = []
    for word in words:
        parts.append(rule + '\n')
        parts.append('Word: %-15s' % word + '\n')
        parts.append(rule + '\n')
        parts.append('\n Meanings: ' + str(dictionary.meaning(word)))
        parts.append("\n\t Synonyms: " + str(dictionary.synonym(word)))
        parts.append('\n')
    report = ''.join(parts)
    print(report)
    return report
| true |
75f531edbb9044523c269ffb33d1ab6699e2a7e7 | Python | LIU-FAYANG/ROSBAG-DATA-EXTRACTION | /delay.py | UTF-8 | 710 | 3.03125 | 3 | [] | no_license | import os
# Interactive script: shift the numeric file names of a rosbag text dump
# backwards by a user-supplied delay, moving them into a new directory.
print("Delay ==> ")
delay = input()
delay = int(delay)
path = "2020-11-18-16-38-31/2020-11-18-16-38-31_txt"
filename = "2020-11-18-16-38-31/2020-11-18-16-38-31-txt"
try:
    os.mkdir(path)
except OSError:
    print ("Creation of the directory %s failed" % path)
else:
    print ("Successfully created the directory %s " % path)
tmp = os.listdir(filename)
No_of_files = len(os.listdir(filename))
for j in range(0,No_of_files):
    # NOTE(review): str.strip(".txt") strips any of the characters
    # '.', 't', 'x' from BOTH ends -- safe only while the stem is purely
    # numeric; confirm file names never end in those characters.
    index = int(tmp[j].strip(".txt"))
    oldindex = filename + "/"+ tmp[j]
    newindex = path + "/" + str(index-delay) + '.txt'
    os.rename(oldindex,newindex)
    print("Rename: "+tmp[j]+" ==> " +str(index-delay) + '.txt')
# The source directory is empty after all renames, so rmdir succeeds.
os.rmdir(filename)
print("Rename Over.")
| true |
52595f58cdadfecf58fd49c9d5323f4204005631 | Python | RajaSekar1311/Data-Science-and-Visualization-KITS | /Data Visualization/FillMissingValues.py | UTF-8 | 1,523 | 3.046875 | 3 | [] | no_license | '''
#Creating a data frame from CSV file
import pandas
import numpy
#reading the data from a csv file using read_csv() method
MyDataFrame = pandas.read_csv('KITS-MissingValuesDataSet.csv')
print(MyDataFrame)
print(UpdatedDataFrame)
'''
'''
#Creating a data frame from CSV file
import pandas
import numpy
#reading the data from a csv file using read_csv() method
MyDataFrame = pandas.read_csv('MissingValues_GCET_Dataset.csv')
print(MyDataFrame)
UpdatedDataFrame=MyDataFrame.interpolate(method ='linear', limit_direction ='forward')
print(UpdatedDataFrame)
'''
import pandas
import numpy
# Build a 5x3 frame of random values, then reindex to 8 rows so the new
# rows ('b', 'd', 'g') are filled with NaN -- a small demo dataset with
# missing values.
MyDataFrame = pandas.DataFrame(numpy.random.randn(5,3),
                               index = ['a','c','e','f','h'],
                               columns = ['One','Two','Three'])
MyDataFrame = MyDataFrame.reindex(['a','b','c','d','e','f','g','h'])
print('\nActual DataFrame:\n',MyDataFrame)
# Fill the NaN rows by linear interpolation between their neighbours.
UpdatedDataFrame=MyDataFrame.interpolate(method ='linear', limit_direction ='forward')
print(UpdatedDataFrame)
'''
#reading the data from a csv file using read_csv() method
#MyDataFrame = pandas.read_csv('StudentDataWithMissingValues.csv')
#UpdatedDataFrame = MyDataFrame.fillna(0)
#UpdatedDataFrame = MyDataFrame.fillna(method='pad')
#UpdatedDataFrame = MyDataFrame.fillna(method='bfill')
#UpdatedDataFrame = MyDataFrame['DEPARTMENT'].fillna('CSE',inplace = False)
#UpdatedDataFrame = MyDataFrame.replace(numpy.nan, value = -999)
#UpdatedDataFrame=MyDataFrame.interpolate(method ='linear', limit_direction ='forward')
''' | true |
31f0d46bc2ce1614747b51b7b69afcc771c2eccb | Python | kaoulis/clusteredDQN | /code/ClusteredMemory.py | UTF-8 | 6,635 | 2.921875 | 3 | [] | no_license | import math
import random
from collections import deque
import numpy as np
class CleverMemory():
    """DQN experience-replay buffer that partitions experiences into
    distance-based clusters (one deque per cluster) and offers uniform
    and distance-weighted batch sampling.

    An experience is the tuple (state, action, reward, state_prime,
    done); states are assumed wrapped so that experience[0][0] squeezes
    to a flat vector -- TODO confirm against the agent code.
    """
    def __init__(self, memory_size, batch_size, clusters):
        # buffer: list of deques, one per cluster; each holds at most
        # memory_size / clusters experiences.
        self.buffer = []
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.clusters = clusters
    # Dynamic clustering while adding the experience
    def add(self, experience):
        """Insert *experience* into the cluster whose seed (first stored
        experience) is nearest by Euclidean distance over the
        concatenated (s, a, r, s', done) vector."""
        # create clusters of size memory_size/ total_clusters until max number of clusters
        if len(self.buffer) < self.clusters:
            self.buffer.append(deque(maxlen=round(self.memory_size / self.clusters)))
            print('Cluster ', len(self.buffer), ' has been created!')
        # A freshly created (still empty) cluster is seeded with this
        # experience and we are done.
        for sub_buf in self.buffer:
            if not sub_buf: # if empty
                sub_buf.append(experience)
                return
        # Pass the current experience in a np.array
        s = (np.squeeze(experience[0][0])).tolist()
        a = [experience[1]]
        r = [experience[2]]
        sp = (np.squeeze(experience[3][0])).tolist()
        done = [experience[4]]
        list1 = np.array(s + a + r + sp + done)
        # Pass the experiences of the center of each cluster in a np.array and measure the distance with the current
        # exp.
        best_sub_buffer_dist = 99999
        best_sub_buffer = 0
        i = 0
        for sub_buf in self.buffer:
            # mem[0] is the cluster's seed experience.
            mem = np.array([sub_buf[i] for i in range(len(sub_buf))]).tolist()
            s = (np.squeeze(mem[0][0][0])).tolist()
            a = [mem[0][1]]
            r = [mem[0][2]]
            sp = (np.squeeze(mem[0][3][0])).tolist()
            done = [mem[0][4]]
            list2 = np.array(s + a + r + sp + done)
            dist = np.linalg.norm(list1 - list2)
            if dist < best_sub_buffer_dist:
                best_sub_buffer = i
                best_sub_buffer_dist = dist
            i += 1
        self.buffer[best_sub_buffer].append(experience)
    # Uniform sampling each cluster.
    def sample(self):
        """Draw batch_size / clusters experiences uniformly from every
        cluster and return them as (state, action, reward, state_prime,
        done) arrays."""
        batches = deque(maxlen=self.batch_size)
        for sub_buf in self.buffer:
            batches += random.sample(sub_buf, int(self.batch_size / self.clusters))
        # Transpose the list of experience tuples into per-field lists.
        batch = np.array([batches[i] for i in range(len(batches))]).T.tolist()
        state = np.array(np.squeeze(batch[0]), dtype=np.float32)
        action = np.array(batch[1], dtype=np.int8)
        reward = np.array(batch[2], dtype=np.float32)
        state_prime = np.array(np.squeeze(batch[3]), dtype=np.float32)
        done = batch[4]
        return state, action, reward, state_prime, done
    # Normalised reversed weighted average sampling
    def weighted_sample(self, state):
        """Sample more heavily from clusters whose seed is CLOSER to
        *state*: weights are (1 - dist/total_dist), renormalised to sum
        to one, then scaled to the batch size."""
        batches = deque(maxlen=self.batch_size)
        cur_state = np.squeeze(state)
        total_dist = 0
        total_weights = 0
        logs = []
        for sub_buf in self.buffer: # get total distance
            mem = np.array([sub_buf[i] for i in range(len(sub_buf))]).tolist()
            s = np.squeeze(mem[0][0][0])
            total_dist += np.linalg.norm(cur_state - s)
        for sub_buf in self.buffer: # get total weights
            mem = np.array([sub_buf[i] for i in range(len(sub_buf))]).tolist()
            s = np.squeeze(mem[0][0][0])
            total_weights += (1 - ((np.linalg.norm(cur_state - s)) / total_dist))
        for sub_buf in self.buffer: # sample with inverse weights
            mem = np.array([sub_buf[i] for i in range(len(sub_buf))]).tolist()
            s = np.squeeze(mem[0][0][0])
            weight = 1 - ((np.linalg.norm(cur_state - s)) / total_dist)
            inversed_weight = weight / total_weights
            logs.append([inversed_weight, int(inversed_weight * self.batch_size)])
            batches += random.sample(sub_buf, (int(inversed_weight * self.batch_size)))
        batch = np.array([batches[i] for i in range(len(batches))]).T.tolist()
        state = np.array(np.squeeze(batch[0]), dtype=np.float32)
        action = np.array(batch[1], dtype=np.int8)
        reward = np.array(batch[2], dtype=np.float32)
        state_prime = np.array(np.squeeze(batch[3]), dtype=np.float32)
        done = batch[4]
        # print(logs)
        return state, action, reward, state_prime, done
    # Reciprocal weighted average sampling
    def weighted_sampleV2(self, state):
        """Like weighted_sample, but weights are the normalised
        reciprocal distances 1/dist -- nearer clusters dominate more
        strongly."""
        batches = deque(maxlen=self.batch_size)
        cur_state = np.squeeze(state)
        total_dist = 0
        logs = []
        for sub_buf in self.buffer: # get total distance
            mem = np.array([sub_buf[i] for i in range(len(sub_buf))]).tolist()
            s = np.squeeze(mem[0][0][0])
            total_dist += (1 / np.linalg.norm(cur_state - s))
        for sub_buf in self.buffer: # sample with inverse weights
            mem = np.array([sub_buf[i] for i in range(len(sub_buf))]).tolist()
            s = np.squeeze(mem[0][0][0])
            weight = ((1 / np.linalg.norm(cur_state - s)) / total_dist)
            logs.append([weight, int(weight * self.batch_size)])
            batches += random.sample(sub_buf, (int(weight * self.batch_size)))
        batch = np.array([batches[i] for i in range(len(batches))]).T.tolist()
        state = np.array(np.squeeze(batch[0]), dtype=np.float32)
        action = np.array(batch[1], dtype=np.int8)
        reward = np.array(batch[2], dtype=np.float32)
        state_prime = np.array(np.squeeze(batch[3]), dtype=np.float32)
        done = batch[4]
        # print(logs)
        return state, action, reward, state_prime, done
    # Beta dynamic clustering only for the state of the experience.
    def addV2(self, experience):
        """Variant of add() that clusters on the state vector alone."""
        if len(self.buffer) < self.clusters:
            self.buffer.append(deque(maxlen=round(self.memory_size / self.clusters)))
            print('Cluster ', len(self.buffer), ' has been created!')
        for sub_buf in self.buffer:
            if not sub_buf: # if empty
                sub_buf.append(experience)
                return
        current_s = (np.squeeze(experience[0][0]))
        best_sub_buffer_dist = 99999
        best_sub_buffer = 0
        i = 0
        for sub_buf in self.buffer:
            mem = np.array([sub_buf[i] for i in range(len(sub_buf))]).tolist()
            cluster_s = (np.squeeze(mem[0][0][0]))
            dist = np.linalg.norm(current_s - cluster_s)
            if dist < best_sub_buffer_dist:
                best_sub_buffer = i
                best_sub_buffer_dist = dist
            i += 1
        self.buffer[best_sub_buffer].append(experience)
| true |
def sq_sum(*args):
    """Return the sum of the squares of the given arguments."""
    total = 0
    for value in args:
        total += value * value
    return total
| true |
bb08446cbb419c20d6b1e9d14bdbe16231b730e7 | Python | kszlak/API_testing_framework | /api_tests_framework/utils/unittest_wrappers/unittest_wrapper.py | UTF-8 | 1,602 | 2.765625 | 3 | [] | no_license | from unittest import TestCase
class ModelValidationTestCase(TestCase):
    """TestCase base with reusable assertions for API model payloads."""

    def assertModel(self, model, model_type):
        """Assert the generic fields every model payload carries."""
        for field in ("createdAt", "updatedAt", "id"):
            self.assertTrue(model[field])
        self.assertEqual(model["modelType"], model_type)

    def assertUserModel(self, model):
        """Assert a UserModel payload: base fields plus user identity."""
        self.assertModel(model=model, model_type="UserModel")
        for field in ("username", "firstname", "lastname"):
            self.assertTrue(model[field])

    def assertThreadModel(self, model):
        """Assert a ThreadModel payload: base fields plus thread data."""
        self.assertModel(model=model, model_type="ThreadModel")
        for field in ("name", "owner", "user"):
            self.assertTrue(model[field])
        # Boolean flags may legitimately be False, so only check presence.
        self.assertIsNotNone(model["private"])
        self.assertIsNotNone(model["deleted"])

    def assertUsernameValidationModel(self, model):
        """Assert a UsernameValidationModel payload."""
        self.assertModel(model=model, model_type="UsernameValidationModel")
        self.assertIsNotNone(model["errors"])
| true |
d15571a5c68f88e67daca7d8cc8609551be9dae2 | Python | maria-lazar/education | /year2/semester2/ai/ml_neural_network/my_neural_network.py | UTF-8 | 6,646 | 2.953125 | 3 | [] | no_license | import random
import matplotlib.pyplot as plt
import numpy as np
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^-x).  Works elementwise on arrays."""
    decay = np.exp(-x)
    return 1 / (1 + decay)
def der(x):
    """Sigmoid derivative expressed in terms of its OUTPUT *x*:
    sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z))."""
    complement = 1 - x
    return x * complement
def softmax(y):
    """Return the softmax of the vector *y* as a list.

    Improvement: subtracts max(y) before exponentiating.  This is a
    mathematical no-op (the shift cancels in the ratio) but prevents
    np.exp from overflowing to inf -- and the result to nan -- for
    large logits.
    """
    shift = max(y)
    exp = [np.exp(v - shift) for v in y]
    s = sum(exp)
    return [e / s for e in exp]
class MyNeuralNetwork:
def __init__(self, nr_input, nr_output, hidden_layers=(2,), learning_rate=0.1, iterations=100):
self.learning_rate = learning_rate
self.iterations = iterations
self.nr_input = nr_input
self.nr_output = nr_output
self.weights = []
self.weights.append(np.random.rand(hidden_layers[0], nr_input))
self.biases = []
self.biases.append(np.random.rand(hidden_layers[0], 1))
for i in range(1, len(hidden_layers)):
mat = np.random.rand(hidden_layers[i], hidden_layers[i - 1])
bias = np.random.rand(hidden_layers[i], 1)
self.weights.append(mat)
self.biases.append(bias)
self.weights.append(np.random.rand(nr_output, hidden_layers[len(hidden_layers) - 1]))
self.biases.append(np.random.rand(nr_output, 1))
def activate(self, func, mat):
vfunc = np.vectorize(func)
return vfunc(mat)
def train(self, inputs, outputs):
# feed forward
input_mat = np.array(inputs)
layer_results = []
layer_input = input_mat.reshape((input_mat.shape[0], 1))
input_mat = layer_input
for i in range(len(self.weights)):
mat = self.weights[i]
bias = self.biases[i]
result_mat = mat.dot(layer_input)
result_mat = result_mat + bias
if i == len(self.weights) - 1:
result_mat = np.array(softmax(result_mat.flatten()))
result_mat = result_mat.reshape((result_mat.shape[0], 1))
else:
result_mat = self.activate(sigmoid, result_mat)
layer_input = result_mat
layer_results.append(result_mat)
predicted = layer_input.flatten()
# error
out = np.array(outputs)
layer_error = predicted - out
layer_error = layer_error.reshape((layer_error.shape[0], 1))
# backpropagation
errors = self.compute_errors(layer_error)
self.update_weights(errors, layer_results, input_mat)
def compute_errors(self, layer_error):
errors = []
layer_nr = len(self.weights)
for i in range(layer_nr - 1, -1, -1):
error_index = layer_nr - 1 - i
# compute layer error
if i == layer_nr - 1:
errors.append(layer_error)
else:
tran = self.weights[i + 1].transpose()
errors.append(tran.dot(errors[error_index - 1]))
return errors
def update_weights(self, errors, layer_results, input_mat):
layer_nr = len(self.weights)
for i in range(layer_nr - 1, -1, -1):
error_index = layer_nr - 1 - i
# update layer weights
if i == layer_nr - 1:
gradients = errors[error_index]
else:
gradients = self.activate(der, layer_results[i])
gradients = gradients * errors[error_index]
gradients = gradients * self.learning_rate
self.biases[i] = self.biases[i] - gradients
if i != 0:
input_tran = layer_results[i - 1].transpose()
else:
input_tran = input_mat.transpose()
gradients = gradients.dot(input_tran)
self.weights[i] = self.weights[i] - gradients
def fit(self, inputs, outputs):
progress = []
prev_error = 1
not_improving = 0
indexes = [i for i in range(len(inputs))]
for i in range(self.iterations):
train_indexes = random.sample(indexes, len(indexes))
train_sample_inp = [inputs[i] for i in train_indexes]
train_sample_out = [outputs[i] for i in train_indexes]
for j in range(len(train_sample_inp)):
self.train(train_sample_inp[j], train_sample_out[j])
iteration_error = self.mean_square_error(train_sample_out, self.predict_probab(train_sample_inp))
# iteration_error = self.cross_entropy_loss(train_sample_out, self.predict_probab(train_sample_inp))
if prev_error - iteration_error < 0.00001:
not_improving += 1
else:
not_improving = 0
prev_error = iteration_error
print("Iteration {} error: {}".format(i, iteration_error))
progress.append(iteration_error)
plt.plot(progress)
plt.ylabel('Cost')
plt.xlabel('Iteration')
plt.show()
def square_error(self, real, computed):
return sum([(computed[i] - real[i]) ** 2 for i in range(len(computed))]) / len(computed)
def mean_square_error(self, real, computed):
rmse = []
for i in range(len(real[0])):
r = [real[j][i] for j in range(len(real))]
c = [computed[j][i] for j in range(len(computed))]
val = self.square_error(r, c)
rmse.append(val)
return sum([i for i in rmse]) / len(rmse)
def predict_probab(self, inputs):
result = []
for i in range(len(inputs)):
result.append(self.predict_sample(inputs[i]))
return result
    def predict_sample(self, inputs_sample):
        """Forward-propagate a single sample; returns the flat output vector.

        Hidden layers use the sigmoid activation; the final layer applies
        softmax ('softmax' and 'sigmoid' are module-level helpers).
        """
        input_mat = np.array(inputs_sample)
        # Work with column vectors throughout: (n,) -> (n, 1).
        layer_input = input_mat.reshape((input_mat.shape[0], 1))
        for i in range(len(self.weights)):
            mat = self.weights[i]
            bias = self.biases[i]
            result_mat = mat.dot(layer_input)
            result_mat = result_mat + bias
            if i == len(self.weights) - 1:
                # Output layer: softmax over the flattened logits, then back
                # to a column vector for consistency.
                result_mat = np.array(softmax(result_mat.flatten()))
                result_mat = result_mat.reshape((result_mat.shape[0], 1))
            else:
                result_mat = self.activate(sigmoid, result_mat)
            layer_input = result_mat
        return layer_input.flatten()
def predict(self, inputs):
probab = self.predict_probab(inputs)
results = []
for i in range(len(probab)):
index = np.where(probab[i] == (max(probab[i])))[0]
results.append(index.tolist()[0])
return results
@staticmethod
def cross_entropy_loss(real, computed):
sum = 0
for i in range(len(computed)):
for j in range(len(computed[i])):
sum += -real[i][j] * np.log(computed[i][j] + 1e-15)
return sum / len(computed)
| true |
362811eba5872c639db4f922d7461431680b5279 | Python | P79N6A/tools-2 | /display.py | UTF-8 | 609 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import string
from itertools import ifilter,imap
from collections import OrderedDict
#import ujson as json
import json
def lines(f):
    """Yield each non-empty, stripped line of *f* parsed as JSON.

    object_hook=OrderedDict preserves key order (this is Python 2-era code;
    plain dicts were unordered then — note ifilter/imap and print statements).
    """
    for line in ifilter(lambda x: len(x)>0, imap(string.strip, f)):
        data = json.loads(line, object_hook=OrderedDict)
        #data = json.loads(line, object_pairs_hook=OrderedDict)
        #data = json.loads(line)
        yield data
# Pretty-print every JSON record read from stdin, one indented blob per record.
for data in lines(sys.stdin):
    #print json.dumps(data)
    #print json.dumps(data, ensure_ascii=False).encode("utf-8")
    print json.dumps(data, ensure_ascii=False, indent=4)
| true |
aaa1ffc6bb188de088ae1d59fac935a0536db700 | Python | aliseforlgh/first | /first/CenterForm.py | UTF-8 | 808 | 2.84375 | 3 | [] | no_license | # QDesktopWidget
import sys
from PyQt5.QtWidgets import QApplication,QMainWindow,QDesktopWidget
# from PyQt5.QtGui import QIcon
class CenterForm(QMainWindow):
    """Main window that centers itself on the primary screen at startup."""

    def __init__(self):
        super(CenterForm, self).__init__()
        self.setWindowTitle('窗口居中')
        self.resize(800, 600)
        # Transient status-bar message, shown for 5000 ms.
        self.status = self.statusBar()
        self.status.showMessage('五秒钟的记忆', 5000)
        self.center()

    def center(self):
        """Move the window so it is centered on the desktop."""
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        # Use the window geometry consistently for both axes (the original
        # mixed size.width() with self.height()).
        new_left = (screen.width() - size.width()) / 2
        new_top = (screen.height() - size.height()) / 2
        self.move(new_left, new_top)
# Script entry point: create the Qt application and show the centered window.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    center = CenterForm()
    center.show()
    sys.exit(app.exec_())
| true |
fd2442f3e07b44c07bcce1c7770058f9d1a6e230 | Python | antiDigest/mlBucket | /Assignment5/CrossValidation.py | UTF-8 | 942 | 2.84375 | 3 | [
"MIT"
] | permissive | import pandas as pd
from sklearn.metrics import accuracy_score, precision_score, recall_score, classification_report, f1_score
from sklearn.model_selection import cross_val_score, StratifiedKFold
def crossValidation(clf, X, Y, cv=10):
    """Stratified k-fold cross validation of *clf* on (X, Y).

    Fits the classifier on each training split, scores the matching test
    split, and returns the fold-averaged accuracy, weighted F1 and weighted
    precision as a dict.
    """
    splitter = StratifiedKFold(n_splits=cv)
    splitter.get_n_splits(X, Y)
    acc_total = 0
    f1_total = 0
    prec_total = 0
    for train_idx, test_idx in splitter.split(X, Y):
        clf.fit(X[train_idx], Y[train_idx])
        predictions = clf.predict(X[test_idx])
        y_true = Y[test_idx]
        acc_total += accuracy_score(y_true, predictions)
        f1_total += f1_score(y_true, predictions, average='weighted')
        prec_total += precision_score(y_true, predictions, average='weighted')
    return {"accuracy": acc_total / cv,
            "f1_score": f1_total / cv,
            'precision': prec_total / cv}
| true |
faea08adf38ac1761cdab2c7607b2d11d197e218 | Python | Trustworthy-Software/Reproduction-of-Android-Malware-detection-approaches | /mamadroid/PackAbs.py | UTF-8 | 456 | 2.625 | 3 | [] | no_license |
def PackAbs(call, pos):
    """Abstract a dotted name *call* to its known package prefix.

    pos[i] holds the tokens that are valid at depth i.  Tokens are matched
    left to right; the first token not found at its depth stops the scan.

    Returns:
      * the matched prefix — with its trailing '.' kept when every token
        matched, stripped otherwise (preserves the original's behaviour);
      * or None when nothing matched, or the matched prefix is just 'com'.
    """
    package = ''
    for depth, token in enumerate(call.split('.')):
        if token in pos[depth]:
            package = package + token + '.'
            continue
        # First unknown token: finalise whatever prefix we collected so far.
        if package == "" or package == 'com':
            package = None
        else:
            # Replaces the original's manual one-dot-at-a-time while loop.
            package = package.rstrip('.')
            if package == 'com':
                package = None
        break
    return package
| true |
d8cdadfd812c0b98b796543207d55400c23040d1 | Python | kripamurugan/PLab | /18.PY | UTF-8 | 620 | 4.5 | 4 | [] | no_license | print("***********************SQUARE**********************")
n = int(input("Enter value of side :"))


def square_area(side):
    """Area of a square with side length *side*."""
    return side * side


print("Area of square is : ")
print(square_area(n))

print("*******************TRIANGLE**********************")
n1 = int(input("Enter value of height (h) :"))
n2 = int(input("Enter value of base (b) :"))


def triangle_area(height, base):
    """Area of a triangle from its height and base."""
    return (height * base) / 2


# BUG FIX: the original printed "Area of square is : " here.
print("Area of triangle is : ")
print(triangle_area(n1, n2))

print("*******************RETANGLE**********************")
n3 = int(input("Enter value of length (l) :"))
n4 = int(input("Enter value of width (w) :"))


def rectangle_area(length, width):
    """Area of a rectangle from its length and width."""
    return length * width


# BUG FIX: the original printed "Area of square is : " here.
print("Area of rectangle is : ")
print(rectangle_area(n3, n4))
| true |
9522793119ab0db74078e29a8df16b4f42594d94 | Python | sparks-baird/xtal2png | /tests/decoding_test.py | UTF-8 | 2,708 | 2.515625 | 3 | [
"MIT"
] | permissive | """Test decoding functionality using default kwargs and assert matches."""
from os import path
from PIL import Image
from xtal2png.core import XtalConverter
from xtal2png.utils.data import assert_structures_approximate_match, example_structures
def test_arrays_to_structures():
    """Round-trip structures -> arrays -> structures; results must match."""
    xc = XtalConverter()
    data, id_data, id_mapper = xc.structures_to_arrays(example_structures)
    structures = xc.arrays_to_structures(data, id_data, id_mapper)
    assert_structures_approximate_match(
        example_structures, structures, tol_multiplier=2.0
    )
    return structures
def test_arrays_to_structures_zero_one():
    """Round-trip with [0, 1]-scaled arrays (rgb_scaling=False on both legs)."""
    xc = XtalConverter()
    data, id_data, id_mapper = xc.structures_to_arrays(
        example_structures, rgb_scaling=False
    )
    structures = xc.arrays_to_structures(data, id_data, id_mapper, rgb_scaling=False)
    assert_structures_approximate_match(
        example_structures, structures, tol_multiplier=2.0
    )
    return structures
def test_arrays_to_structures_single():
    """Round-trip a single structure (one-element list) through arrays."""
    xc = XtalConverter()
    data, id_data, id_mapper = xc.structures_to_arrays([example_structures[0]])
    structures = xc.arrays_to_structures(data, id_data, id_mapper)
    assert_structures_approximate_match(
        [example_structures[0]], structures, tol_multiplier=2.0
    )
    return structures
def test_png2xtal():
    """Round-trip structures through PNG images (also exercises show/save)."""
    xc = XtalConverter()
    imgs = xc.xtal2png(example_structures, show=True, save=True)
    decoded_structures = xc.png2xtal(imgs)
    assert_structures_approximate_match(
        example_structures, decoded_structures, tol_multiplier=2.0
    )
def test_png2xtal_single():
    """Round-trip a single structure through PNG without re-saving on decode."""
    xc = XtalConverter()
    imgs = xc.xtal2png([example_structures[0]], show=True, save=True)
    decoded_structures = xc.png2xtal(imgs, save=False)
    assert_structures_approximate_match(
        [example_structures[0]], decoded_structures, tol_multiplier=2.0
    )
    return decoded_structures
def test_png2xtal_rgb_image():
    """Decoding must also accept RGB-mode images (not just grayscale)."""
    xc = XtalConverter()
    imgs = xc.xtal2png(example_structures, show=False, save=False)
    imgs = [img.convert("RGB") for img in imgs]
    decoded_structures = xc.png2xtal(imgs)
    assert_structures_approximate_match(
        example_structures, decoded_structures, tol_multiplier=2.0
    )
    return decoded_structures
def test_png2xtal_from_saved_images():
    """Decode from PNG files reopened from disk rather than in-memory images."""
    xc = XtalConverter()
    xc.xtal2png(example_structures, show=False, save=True)
    fpaths = [path.join(xc.save_dir, savename + ".png") for savename in xc.savenames]
    saved_imgs = [Image.open(fpath) for fpath in fpaths]
    decoded_structures = xc.png2xtal(saved_imgs)
    assert_structures_approximate_match(
        example_structures, decoded_structures, tol_multiplier=2.0
    )
| true |
a208c9aa7beb3e9b5c7fec436ce656c2f7bede13 | Python | joketeng/LeetCode | /36.py | UTF-8 | 918 | 2.703125 | 3 | [] | no_license | class Solution:
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
size = 9
map = {str(i) for i in range(1,10)}
rows = [set() for _ in range(9)]
cols = [set() for _ in range(9)]
boxs = [set() for _ in range(9)]
print(map)
print(cols)
for r in range(size):
for c in range(size):
dight = board[r][c]
if dight == '.':
continue
if dight not in map:
return False
box = (size//3) * (r//(size//3)) + c//(size//3)7
if dight in rows[r] or dight in cols[c] or dight in boxs[box]:
return False
rows[r].add(dight)
cols[c].add(dight)
boxs[box].add(dight)
return True | true |
ec6ac85e2760671dfb82ac20ea9db3a47bdf7db9 | Python | carlosraoni/CombinatorialOptimization | /liftandproject/maxclique.py | UTF-8 | 4,193 | 2.828125 | 3 | [] | no_license | import sys
import cplex
from cplex._internal._matrices import SparsePair
from liftandprojectcuts import generate_lift_and_project_cuts
import time
__MAX_ITER = 10 # Max number of iterations for the cutting plane algorithm
# method to read instance files
def read_instance(file_path):
    """Parse a DIMACS-style graph file.

    Skips leading 'c' comment lines, reads the problem line for the vertex
    and edge counts, then one 'e u v' line per edge.  Vertices are converted
    from the file's 1-based numbering to 0-based tuples.

    Returns (vertex_count, edge_count, edge_list).
    """
    with open(file_path) as fh:
        fields = fh.readline().split()
        while fields[0] == 'c':
            fields = fh.readline().split()
        vertex_count = int(fields[2])
        edge_count = int(fields[3])
        edge_list = []
        for _ in range(edge_count):
            fields = fh.readline().split()
            edge_list.append((int(fields[1]) - 1, int(fields[2]) - 1))
        return vertex_count, edge_count, edge_list
# Parse argument
# NOTE: this script is Python 2 (print statements throughout).
if len(sys.argv) < 2:
    print 'Usage: ', sys.argv[0],' instance'
    exit(1)
start_time = time.time()
n, m, edges = read_instance(sys.argv[1])
print 'n =', n
print 'm = ', m
print edges
edges_set = set(edges)
# Build the LP relaxation used as the master problem: one [0,1] variable per
# vertex with objective coefficient 1.0, plus a covering constraint
# x_i + x_j >= 1 for every NON-edge (i, j) of the input graph.
master_prob = cplex.Cplex()
x = []
# Create problem variables
for i in range(n):
    var_index = master_prob.variables.get_num()
    var_name = 'x_' + str(i)
    x.append(var_index)
    #master_prob.variables.add(obj = [1.0], lb = [0.0], ub = [1.0], types = ['B'], names = [var_name])
    master_prob.variables.add(obj = [1.0], lb = [0.0], ub = [1.0], names = [var_name])
# Create problem constraints
for i in range(n):
    for j in range(i+1, n):
        if (i,j) in edges_set or (j, i) in edges_set:
            continue
        vars = [x[i], x[j]]
        coefs = [1.0, 1.0]
        master_prob.linear_constraints.add(lin_expr = [cplex.SparsePair(vars, coefs)], senses = ['G'], rhs = [1.0], names = ['C_'+str(i)+'_'+str(j)])
# Cutting plane loop
# Repeatedly solve the LP, stop when the objective stalls, otherwise separate
# lift-and-project cuts and add them to the master problem.
iteration = 0
previous_obj = 0.0
while iteration < __MAX_ITER:
    print "-------------------------- Iteration", iteration, "------------------------------------------------"
    # Save current model to a file
    #master_prob.write('./output/problem_clq_'+str(iteration)+'.lp')
    # Optimize current model
    master_prob.solve()
    solution = master_prob.solution
    current_obj = solution.get_objective_value()
    print "Cpx Solution status: " , solution.status[solution.get_status()]
    print "Cpx Objective value: " , current_obj
    print "Max Clique Upper Bound: ", n - current_obj
    x_values = solution.get_values(x)
    print "Solution: ", x_values
    print
    print
    print 'Current Execution Time:', time.time() - start_time, 'seconds'
    # Converged: the objective barely moved since the previous iteration.
    if abs(previous_obj - current_obj) < 1e-9:
        break
    previous_obj = current_obj
    print 'Running lift and project separation'
    iteration_cuts = generate_lift_and_project_cuts(master_prob)
    if len(iteration_cuts) == 0:
        print "No cut found! Finishing Algorithm!"
        print "---------------------------------------------------------------------------------------"
        break
    print "Adding cuts to the master problem"
    # Track the most violated cut, but (per the commented line below) all
    # separated cuts are currently added, not just the deepest one.
    deepest_cut = iteration_cuts[0]
    for cut in iteration_cuts:
        if cut['violation'] > deepest_cut['violation']:
            deepest_cut = cut
        master_prob.linear_constraints.add(lin_expr = [cplex.SparsePair(cut['vars'], cut['coefs'])], senses = [cut['sense']], rhs = [cut['rhs']])
        #master_prob.linear_constraints.add(lin_expr = [cplex.SparsePair(deepest_cut['vars'], deepest_cut['coefs'])], senses = [deepest_cut['sense']], rhs = [deepest_cut['rhs']])
    print "---------------------------------------------------------------------------------------"
    iteration += 1
# Optimize last model
master_prob.write('./output/master_problem_clq.lp')
master_prob.solve()
#master_prob.variables.set_types([(var_name, 'B') for var_name in master_prob.variables.get_names()])
#master_prob.solve()
# NOTE(review): current_obj is read from the *previous* loop's `solution`
# object here; `solution` is only rebound to the fresh solve three lines
# below — looks like these statements are in the wrong order. Confirm.
current_obj = solution.get_objective_value()
print
print 'Final Solution:'
print
solution = master_prob.solution
print "\tMin Set Cover Lower Bound: " , current_obj
print "\tMax Clique Upper Bound: ", n - current_obj
x_values = solution.get_values(x)
print "\tSolution: ", x_values
print
print 'Total Execution Time:', time.time() - start_time, 'seconds'
ee86613a12a402e914b7e61a05fac1141385ab94 | Python | realsidg/NLP_ayodhyaVerdict | /EDA.py | UTF-8 | 3,200 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# Load the document-term matrix (terms become rows after the transpose).
data = pd.read_pickle('dtm.pkl')
data = data.transpose()
data.head()
# In[2]:
# Top 30 terms per channel as (term, count) pairs.
top_dict = {}
for c in data.columns:
    top = data[c].sort_values(ascending=False).head(30)
    top_dict[c]= list(zip(top.index, top.values))
top_dict
# In[3]:
for channel, top_words in top_dict.items():
    print(channel)
    print(', '.join([word for word, count in top_words[0:14]]))
    print('---')
# In[4]:
from collections import Counter
# Let's first pull out the top 30 words for each comedian
words = []
for comedian in data.columns:
    top = [word for (word, count) in top_dict[comedian]]
    for t in top:
        words.append(t)
words
# In[5]:
Counter(words).most_common()
# In[6]:
# Words that appear in most channels' top lists become extra stop words.
add_stop_words = [word for word, count in Counter(words).most_common() if count > 6]
add_stop_words
# In[16]:
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
# In[17]:
# NOTE(review): this overwrites the frequency-derived list from the earlier
# cell with NLTK's generic English stop words.
add_stop_words=stop_words
add_stop_words
# In[18]:
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
# Read in cleaned data
data_clean = pd.read_pickle('data_clean.pkl')
# Add new stop words
stop_words = text.ENGLISH_STOP_WORDS.union(add_stop_words)
# Recreate document-term matrix
cv = CountVectorizer(stop_words=stop_words)
data_cv = cv.fit_transform(data_clean.transcript)
data_stop = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_stop.index = data_clean.index
# Pickle it for later use
import pickle
pickle.dump(cv, open("cv_stop.pkl", "wb"))
data_stop.to_pickle("dtm_stop.pkl")
# In[19]:
# NOTE(review): this cell is an exact duplicate of the one above
# (notebook re-run leftover); it recomputes and re-pickles the same data.
# Let's update our document-term matrix with the new list of stop words
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
# Read in cleaned data
data_clean = pd.read_pickle('data_clean.pkl')
# Add new stop words
stop_words = text.ENGLISH_STOP_WORDS.union(add_stop_words)
# Recreate document-term matrix
cv = CountVectorizer(stop_words=stop_words)
data_cv = cv.fit_transform(data_clean.transcript)
data_stop = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_stop.index = data_clean.index
# Pickle it for later use
import pickle
pickle.dump(cv, open("cv_stop.pkl", "wb"))
data_stop.to_pickle("dtm_stop.pkl")
# In[20]:
# Let's make some word clouds!
# Terminal / Anaconda Prompt: conda install -c conda-forge wordcloud
from wordcloud import WordCloud
wc = WordCloud(stopwords=stop_words, background_color="white", colormap="Dark2",
               max_font_size=150, random_state=42)
# In[22]:
# Reset the output dimensions
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [16, 6]
full_names = ['ndtv', 'indiatoday', 'republic']
# Create subplots for each comedian
# One word cloud per channel column of the document-term matrix.
for index, comedian in enumerate(data.columns):
    wc.generate(data_clean.transcript[comedian])
    plt.subplot(3, 4, index+1)
    plt.imshow(wc, interpolation="bilinear")
    plt.axis("off")
    plt.title(full_names[index])
plt.show()
# There isnt much to conclude from here so lets jump to Sentiment analysis
| true |
b6bdea72820b2092d2aae2ceea100d5eef247720 | Python | Kzne/DDoS-discord-bot | /iplookup.py | UTF-8 | 929 | 2.640625 | 3 | [] | no_license | import sys
import json
import urllib.request as urllib2
import fileinput
import os
fo = open("iplookup.txt", "w")
ip = sys.argv[1]  # IP address to look up (first CLI argument)
url = 'http://ip-api.com/json/'  # NOTE(review): unused; the literal is repeated below
# Query ip-api.com; 'fields' is a numeric bitmask selecting which JSON
# fields the service should return.
response = urllib2.urlopen('http://ip-api.com/json/'+ip+"?fields=31194943")
data = response.read()
# str() on the bytes payload yields "b'{...}'" — hence the "'{" / "}'"
# cleanup replacements below.
data = str(data)
data = data.replace(',', '\n')
data = data.replace('"', ' ')
data = data.replace("'{", '')
data = data.replace("}'", '')
data = data.replace("Inc.", ' ')
data = data.replace("Inc", ' ')
data = data.replace(':', ':arrow_right:')  # emoji shortcode (e.g. for Discord output)
fo.write(data)
fo.close()
def remove_empty_lines(filename):
    """Rewrite *filename* in place, dropping blank / whitespace-only lines.

    Prints a message and returns without touching anything when the path
    does not name an existing file.
    """
    if not os.path.isfile(filename):
        print("{} does not exist ".format(filename))
        return
    with open(filename) as filehandle:
        kept = [line for line in filehandle.readlines() if line.strip()]
    with open(filename, 'w') as filehandle:
        filehandle.writelines(kept)
remove_empty_lines("iplookup.txt")  # tidy the report written above
| true |
e4b7b1aaac8822da0fbb3e92eb5fb2e13271d41f | Python | noahtigner/UO-ComputerScience-DataScience | /CIS 211 - CS II/Proj 5:6 - sudoku-master/sdk_io.py | UTF-8 | 1,121 | 3.3125 | 3 | [
"MIT"
] | permissive | """
Reading and writing Sudoku boards. We use the minimal
subset of the SadMan Sudoku ".sdk" format,
see http://www.sadmansoftware.com/sudoku/faq19.php
Author: M Young, January 2018
"""
import sdk_board
import typing
from typing import List, Union
import sys
from io import IOBase
class InputError(Exception):
    """Raised when an input board is malformed (wrong row length or count)."""
    pass
def read(f: Union[IOBase, str], board: sdk_board.Board=None) -> sdk_board.Board:
    """Read a Sudoku board from a file. Pass in a path
    or an already opened file. Optionally pass in a board to be
    filled.

    Raises InputError when any row is not exactly 9 characters or when the
    file does not hold exactly 9 rows.
    """
    if isinstance(f, str):
        # Open the path ourselves and make sure the handle is closed
        # (the original leaked the file object it opened here).
        with open(f, "r") as fh:
            return read(fh, board)
    if board is None:
        board = sdk_board.Board()
    values = []
    for row in f:
        row = row.strip()
        values.append(row)
        if len(row) != 9:
            raise InputError("Puzzle row wrong length: {}"
                             .format(row))
    if len(values) != 9:
        raise InputError("Wrong number of rows in {}"
                         .format(values))
    board.set_tiles(values)
    return board
def write(board: sdk_board.Board, f: IOBase=sys.stdout):
"""Print the board"""
| true |
ef32a1444b7921b32b8b884910d489800f94db44 | Python | chien-wei/LeetCode | /0056_Merge_Intervals.py | UTF-8 | 1,469 | 3.28125 | 3 | [] | no_license | # Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
    def merge(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: List[Interval]

        Merge overlapping intervals.  Unlike the original (which sorted the
        caller's list in place and pop(0)'d its head — O(n) and destructive),
        this sorts a copy; interval objects absorbed into a neighbour still
        have the survivor's .end extended, as before.
        """
        if not intervals:
            return []
        ordered = sorted(intervals, key=lambda iv: iv.start)
        merged = [ordered[0]]
        for iv in ordered[1:]:
            if merged[-1].end >= iv.start:
                # Overlapping (or touching): extend the last merged interval.
                merged[-1].end = max(merged[-1].end, iv.end)
            else:
                merged.append(iv)
        return merged
# 2019/03/06 update:
class Solution:
    def merge(self, intervals: List[Interval]) -> List[Interval]:
        """Merge overlapping intervals (2019 rewrite).

        NOTE: despite the annotation, this returns [start, end] *lists*,
        not Interval objects.  The enumerate index in the sort key breaks
        ties so Interval objects are never compared directly.
        """
        tuple_for_sort = [(interval.start, interval.end, i, interval) for i, interval in enumerate(intervals)]
        tuple_for_sort.sort()
        sorted_intervals = list(map(lambda tuple2: tuple2[3], tuple_for_sort))
        res = []
        start, end = None, None
        for interval in sorted_intervals:
            if start == None:
                # First interval opens the current merged run.
                start = interval.start
                end = interval.end
                continue
            if interval.start > end:
                # Gap: flush the finished run and start a new one.
                res.append([start, end])
                start = interval.start
                end = interval.end
            elif interval.start <= end:
                end = max(end, interval.end)
        if start != None:
            # Flush the trailing run (empty input leaves start as None).
            res.append([start, end])
        return res
eb044fed3d8a0afa2d98eb1165e52f59b96c23b4 | Python | davek44/utility | /isoforms_fpkm.py | UTF-8 | 2,019 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
from optparse import OptionParser
################################################################################
# isoforms_fpkm.py
#
# Print the FPKM values for all isoforms of the given gene.
################################################################################
################################################################################
# main
################################################################################
def main():
    # NOTE: Python 2 code (print statements, optparse).
    usage = 'usage: %prog [options] <gene_id> <iso_ft>'
    parser = OptionParser(usage)
    #parser.add_option()
    (options,args) = parser.parse_args()
    if len(args) != 2:
        parser.error('Must provide a gene_id and isoforms.fpkm_tracking file')
    else:
        gene_id = args[0]
        iso_ft = args[1]
    # get headers
    fpkm_in = open(iso_ft)
    headers = fpkm_in.readline().split()
    # determine sample table length
    # (longest sample name, used for column alignment below)
    sample_len = 0
    for i in range(len(headers)):
        if headers[i][-5:] == '_FPKM':
            sample = headers[i][:-5]
            if len(sample) > sample_len:
                sample_len = len(sample)
    # Print one aligned row per sample for every isoform of the gene.
    for line in fpkm_in:
        a = line.split('\t')
        a[-1] = a[-1].rstrip()
        tracking_id = a[0]
        line_gene_id = a[3]
        if line_gene_id == gene_id:
            # Samples start at column 9, four columns per sample
            # (FPKM, conf_lo, conf_hi, status).
            i = 9
            while i < len(a):
                sample = headers[i][:-5]
                if a[i+3] in ['FAIL','HIDATA']:
                    cols = (tracking_id, sample_len, sample, a[i+3])
                    print '%-18s %*s %11s' % cols
                else:
                    fpkm = float(a[i])
                    cols = (tracking_id, sample_len, sample, fpkm)
                    print '%-18s %*s %11.3f' % cols
                i += 4
    fpkm_in.close()
################################################################################
# __main__
################################################################################
# Script entry point.
if __name__ == '__main__':
    main()
| true |
4d583bf6f6df41d3e4ad3a6deb47650809cdd70a | Python | sputnikpetrinets/project_sputnik | /txt_convert.py | UTF-8 | 1,603 | 2.8125 | 3 | [] | no_license | import convert
class WConverterTxt(convert.WConvert):
    """
    Converter class takes a PetriNetData instance and
    write a file of given format, in this case: .txt
    """
    def save(self, outfile):
        """
        Save the current working SPN to a file of a given name
        in the current dir
        """
        import re
        import datetime
        # Produce and write txt file
        dt = re.compile(r"dtype")
        f = open(outfile, 'w')
        f.write('## ~ File produced by SPNlib on %s ~ ##\n\n'\
            % str(datetime.datetime.now().strftime("%d-%m-%Y at %H:%M")))
        # Scalar / vector attributes (self.vdict) — assumed set by the
        # WConvert base class; TODO confirm.
        for j in self.vdict.keys():
            # Only write attributes that exist
            if self.vdict[j] is not None:
                g = re.search(r"dtype", repr(self.vdict[j]))
                if g:
                    # Handle ndarray's habit of adding dtype
                    # (slice the repr between "array(" and the dtype suffix).
                    value = repr(self.vdict[j]).replace('\n','')
                    f.write(j + " = " + value[6:g.start()-9].replace(' ','')\
                        .replace('],',']') +'\n')
                else:
                    value = repr(self.vdict[j]).replace('\n','')
                    f.write(j + " = " + value[6:-1].replace(' ','') + '\n')
        # Matrix attributes (self.mdict), written one row per line.
        for i in self.mdict.keys():
            if self.mdict[i] is not None:
                value = repr(self.mdict[i]).replace('\n','')
                if len(i) < 5:
                    # NOTE(review): ' '*(5 % len(i)) only pads names of length
                    # 3-4 up to 5 chars; length 1-2 names stay misaligned —
                    # probably meant ' '*(5 - len(i)).
                    i = i + ' '*(5 % len(i))
                f.write(i + " = " + value[7:-1].replace(' ','')\
                    .replace('],[','],\n\t [') + '\n')
| true |
b85b4df3f2ea69537f5aa8f4dc95810e11a028b5 | Python | YashKarthik/wifi_Network_recon | /ip.py | UTF-8 | 531 | 2.875 | 3 | [] | no_license | import nmap
class Network(object):
    """Interactive LAN scanner: asks for an IP, then ping-sweeps its /24."""
    def __init__(self):
        # Prompt the user for the base IP address of the network to scan.
        ip = input("enter ip: ")
        self.ip = ip
    def networkscanner(self):
        network = self.ip + '/24'
        print("Scanning------->")
        nm = nmap.PortScanner()
        # -sn: ping scan only (host discovery, no port scan).
        nm.scan(hosts=network, arguments='-sn')
        host_list = [(x, nm[x]['status']['state']) for x in nm.all_hosts()]
        for host, status in host_list:
            # 'status' is collected but not printed.
            print("Host\t{}".format(host))
# Script entry point: prompt for an IP and run the scan.
if __name__ == '__main__':
    D = Network()
    D.networkscanner()
| true |
538546fe71032296c5824d3a615126031883b7dc | Python | RussoMarioDamiano/LatentDirichletAllocation_FromScratch | /train_lda.py | UTF-8 | 488 | 2.515625 | 3 | [] | no_license | import pandas as pd
from lda import LDA
df = pd.read_pickle("df.pkl")  # assumes a 'lemmas' column — TODO confirm schema
punctuation = set("""!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~""")
# Flatten each speech's lemmatized sentences into one token list,
# dropping pure-punctuation tokens.
instances = [[lemma for lemmatized_sentence in lemmatized_speech for lemma in lemmatized_sentence if lemma not in punctuation]
             for lemmatized_speech in df.lemmas]
# Hyperparameters (semantics defined by the project's lda.LDA class).
K = 50
beta = 0.01
epochs = 10000
lda = LDA(num_topics=K, corpus=instances, alpha=50/K, beta=beta, epochs=epochs, no_below=9, no_above=0.7)
pd.to_pickle(lda, "lda.pkl")
| true |
53ddeb0480a3c0f1bef41e3b9dd0b2e6ab3711ff | Python | nitiruen/RPA | /readexcel.py | UTF-8 | 1,670 | 2.640625 | 3 | [] | no_license | from openpyxl import load_workbook
excelfile = load_workbook('product.xlsx')
allshseets = excelfile.sheetnames
sheet = excelfile[allshseets[0]]  # first worksheet
result = []
count = len(sheet['A']) # pick any column to count how many rows there are
# Collect one [name, detail, price] triple per data row (row 1 is the header).
for i in range(2,count+1):
    a = sheet.cell(row=i, column=1).value
    b = sheet.cell(row=i, column=2).value
    c = sheet.cell(row=i, column=3).value
    data = [a,b,c]
    result.append(data)
############
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import wikipedia
import time
# Requires selenium with chromedriver placed next to the Python install;
# otherwise pass the chromedriver path to webdriver.Chrome().
driver = webdriver.Chrome()
url_login = 'http://uncle-machine.com/login/'
driver.get(url_login)
username = driver.find_element_by_id('username')
username.send_keys('uuu@gmail.com')  # NOTE(review): hard-coded credentials in source
password = driver.find_element_by_id('password')
password.send_keys('123')
button = driver.find_element_by_xpath('/html/body/div[2]/form/button')
button.click()
time.sleep(2)
#fill data in product page
url_addproduct = 'http://uncle-machine.com/addproduct/'
driver.get(url_addproduct)
# Submit the add-product form once per spreadsheet row.
for res in result:
    name = driver.find_element_by_id('name')
    name.send_keys(res[0])
    price = driver.find_element_by_id('price')
    price.send_keys(res[2])
    detail = driver.find_element_by_id('detail')
    detail.send_keys(res[1])
    button = driver.find_element_by_xpath('/html/body/div[2]/form/button')
    button.click()
    time.sleep(1)
| true |
1ed7a5bf545f593c64723aa96657121d8f4be565 | Python | shadyelia/Playing-with-python | /Problem_Solving/Prime_number.py | UTF-8 | 921 | 3.84375 | 4 | [] | no_license | import math
def is_prime_v1(n):
    """return 'True' if 'n' is a prime number , 'False' otherwise

    Naive trial division over every candidate in [2, n).
    """
    if n == 1:
        return False
    return all(n % candidate != 0 for candidate in range(2, n))
def is_prime_v2(n):
    """return 'True' if 'n' is a prime number , 'False' otherwise

    Trial division up to floor(sqrt(n)) — no divisor can exceed that.
    """
    if n == 1:
        return False
    limit = math.floor(math.sqrt(n))
    return all(n % candidate != 0 for candidate in range(2, limit + 1))
def is_prime_v3(n):
    """return 'True' if 'n' is a prime number , 'False' otherwise

    Handles 2 and the even numbers up front, then trial-divides by the odd
    candidates up to floor(sqrt(n)).
    """
    if n == 1:
        return False
    if n == 2:
        return True
    if n > 2 and n % 2 == 0:
        return False
    max_divisor = math.floor(math.sqrt(n))
    # BUG FIX: the original scanned range(2, 1 + max_divisor, 2), i.e. only
    # EVEN candidates (2, 4, 6, ...).  Since n is odd at this point, no
    # divisor was ever found and every odd composite (9, 15, 25, ...) was
    # reported prime.  The scan must cover the odd candidates 3, 5, 7, ...
    for d in range(3, 1 + max_divisor, 2):
        if n % d == 0:
            return False
    return True
# Demo: print the primality verdict (per is_prime_v3) for 1..999.
for n in range(1, 1000):
    print(n, is_prime_v3(n))
| true |
e00f240bd308b4326f01ccc5bc35d8fc9443295f | Python | alainkhz/python-gui | /random-x-y.py | UTF-8 | 1,805 | 3.25 | 3 | [] | no_license | from tkinter import*
from tkinter import font
import random # 隨機資料
import pyperclip # 剪貼簿
#////////////////////////////////////////////////////////////////#
# main window
win = Tk()
win.title("X Y Generator")
win.geometry("600x370+600+200")
win.config(bg="#272727")
win.iconbitmap("C:\game\icon.ico")
# title label
title_lable = Label(text="X Y Generator")
title_lable.config(fg="skyblue", bg="#272727", font="微軟正黑體 30")
title_lable.pack()
# min label
min_range = Label(text="Min range")
min_range.config(fg="white", bg="#272727", font="微軟正黑體 20")
min_range.pack()
# min entry
min_entry = Entry()
min_entry.pack()
# spacer label
space_lable= Label(bg="#272727")
space_lable.pack()
# max label
max_range = Label(text="Max range")
max_range.config(fg="white", bg="#272727", font="微軟正黑體 20")
max_range.pack()
# max entry
max_entry = Entry()
max_entry.pack()
# labels that display the generated X and Y values
x_show = Label(text="X:",fg="white", bg="#272727", font="微軟正黑體 15")
x_show.pack()
y_show = Label(text="Y:",fg="white", bg="#272727", font="微軟正黑體 15")
y_show.pack()
# Button logic: generate a random coordinate pair from the entered bounds.
def generator_xy ():
    """Read the min/max bounds from the entry boxes and display a random (X, Y)."""
    # Renamed from min/max, which shadowed the builtins.
    lo = int(min_entry.get())
    hi = int(max_entry.get())
    x = str(random.randint(lo, hi))
    y = str(random.randint(lo, hi))
    x_show.config(text="X:"+x)
    y_show.config(text='Y:'+y)
# copy-button callback: put "X:.. Y:.." on the system clipboard
def copy():
    xy = x_show.cget("text")+" "+ y_show.cget("text")
    pyperclip.copy(xy)
# generator button
button_generator = Button(text="generator", command= generator_xy)
button_generator.config(width=10, height=2)
button_generator.pack()
# spacer label
space_lable2= Label(bg="#272727")
space_lable2.pack()
# copy button
button_copy = Button(text="copy", command= copy)
button_copy.config(width=10, height=2)
button_copy.pack()
win.mainloop()
| true |
8fdb6a6aede82db0f2a90991d225c62d7a8502b9 | Python | anirudhcodes/telegram_bots | /covid_bot.py | UTF-8 | 4,462 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
from telegram import Update
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext
import time
import logging
from telegram_token_key import m_token
from covid.lib.errors import CountryNotFound
import covid_stats_plotter
# Enable logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)  # module-level logger (not used below yet)
# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def help(update: Update, _: CallbackContext) -> None:
    """Reply with the command overview.  NOTE: shadows the builtin help()
    but is registered under /start and /help by reference in main()."""
    update.message.reply_text('Help Menu:\n/repeat <seconds> to set a recurrence.\n/unset to cancel the recurrence\n/now to get the date at this moment\n/help to print this menu')
def echo(update, context):
    """Fallback text handler: show the help menu, then stay silent for the
    next 5 messages (counter stored as a function attribute)."""
    if not hasattr(echo,"count"):
        echo.count=0
    if echo.count <= 0:
        # context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)
        help(update, context)
        echo.count = 5
    else:
        echo.count = echo.count - 1
def alarm(context: CallbackContext): #, country = "canada", state = "ontario") -> None:
    """Send the alarm message."""
    # Despite the name: plots and sends two COVID charts (hard-coded to
    # Ontario and Canada) to the chat id stored in job.context.
    job = context.job
    state = "ontario"
    country = "canada"
    covid_stats_plotter.plotStateCases(state)
    graph = open(covid_stats_plotter.outputImage, "rb")
    context.bot.send_photo(job.context, graph)
    graph.close()
    covid_stats_plotter.plotCountryCases(country)
    graph = open(covid_stats_plotter.outputImage, "rb")
    context.bot.send_photo(job.context, graph)
    graph.close()
def remove_job_if_exists(name: str, context: CallbackContext) -> bool:
    """Cancel every scheduled job named *name*; True if any was found."""
    matching = context.job_queue.get_jobs_by_name(name)
    if not matching:
        return False
    for scheduled in matching:
        scheduled.schedule_removal()
    return True
def get_once(update: Update, context: CallbackContext) -> None:
    """Schedule an immediate one-shot run of alarm() for this chat.

    On bad input replies with a usage hint.  BUG FIX: the original error
    branch called update.message.repeat_timer(...), a nonexistent Message
    method — it would raise AttributeError instead of replying.
    """
    try:
        chat_id = update.message.chat_id
        context.job_queue.run_once(alarm, 0, context=chat_id, name=str(chat_id))
        # alarm(context)# , "canada", "ontario")
    except (IndexError, ValueError):
        update.message.reply_text('Usage: /now <country> <state>')
def repeat_timer(update: Update, context: CallbackContext) -> None:
    """Add a job to the queue."""
    chat_id = update.message.chat_id
    try:
        # args[0] should contain the time for the timer in seconds
        due = int(context.args[0])
        if due < 0:
            update.message.reply_text('Sorry we can not go back to future!')
            return
        # Replace any previous recurrence for this chat before scheduling.
        job_removed = remove_job_if_exists(str(chat_id), context)
        context.job_queue.run_repeating(alarm, due, context=chat_id, name=str(chat_id))
        text = 'Timer successfully set!'
        if job_removed:
            text += ' Old one was removed.'
        update.message.reply_text(text)
    except (IndexError, ValueError):
        # No argument given, or it was not an integer.
        update.message.reply_text('Usage: /repeat <seconds>')
def unset(update: Update, context: CallbackContext) -> None:
    """Remove the job if the user changed their mind."""
    chat_id = update.message.chat_id
    # Jobs are keyed by the chat id, so this clears this chat's recurrence.
    job_removed = remove_job_if_exists(str(chat_id), context)
    text = 'Timer successfully cancelled!' if job_removed else 'You have no active timer.'
    update.message.reply_text(text)
def main() -> None:
    """Run bot."""
    # Create the Updater and pass it your bot's token.
    updater = Updater(m_token)
    # Get the dispatcher to register handlers
    dispatcher = updater.dispatcher
    # on different commands - answer in Telegram
    dispatcher.add_handler(CommandHandler("start", help))
    dispatcher.add_handler(CommandHandler("help", help))
    dispatcher.add_handler(CommandHandler("now", get_once))
    dispatcher.add_handler(CommandHandler("repeat", repeat_timer))
    dispatcher.add_handler(CommandHandler("unset", unset))
    # Any non-command text falls through to echo().
    dispatcher.add_handler(MessageHandler(Filters.text & (~Filters.command), echo))
    # Start the Bot
    updater.start_polling()
    # Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or
    # SIGABRT. This should be used most of the time, since start_polling() is
    # non-blocking and will stop the bot gracefully.
    updater.idle()
# Script entry point.
if __name__ == '__main__':
    main()
| true |
4b9164c886929a201a9455cbe760264df7560251 | Python | bumirbayev/python | /lsit4while.py | UTF-8 | 606 | 4.53125 | 5 | [] | no_license | # Problem 2
# Write a function that prints each element in the list on a separate line
"""
def print_list(lst):
i = 0
while i < len(lst):
print(lst[i])
"""
# if you will print_list[3, 4, 5], it will display all 3-s
"""
def list_numbers(n):
i = 0
new_list = []
while i <= n:
new_list.append(i)
i += 1
return 5
"""
def list_numbers(n):
i = 0
new_list = []
while i <= n:
if i % 2 == 0:
new_list.append(i)
i += 1
return new_list
# list_numbers(10) will print [0, 2, 4, 6, 8, 10]
| true |
9712b90a884b1133a1339a4ccb5d3fc2494ad0e7 | Python | icecreamlite/speech_command | /manual_command.py | UTF-8 | 1,411 | 2.609375 | 3 | [] | no_license | #!/home/b/projects/speech_command/venv/bin/python3
from tkinter import *
from subprocess import Popen
import sc
# Command buffer; every command is prefixed with the wake word 'acer '.
comm = 'acer '
# Root directory of the speech_command project (shell scripts live below it).
scDir = '/home/b/projects/speech_command/'
def getComm(): #Popup Entry widget, save typewritten and exit when <Return>
    # Build a small centered Tk window with a single text entry.
    root = Tk()
    root.title('Acer SC')
    root.resizable(0,0) #remove maximize button
    ws = root.winfo_screenwidth() #get device screen width
    hs = root.winfo_screenheight() #get device screen height
    root.geometry(f'+{ws//2-100}+{hs//2-50}') #initialize window position
    def saveComm(event):
        # Append the lower-cased text to the global command buffer and close.
        global comm
        comm += event.widget.get().lower()
        root.destroy()
    ent = Entry(root, bd=3, bg='white', fg='black')
    ent.pack()
    ent.focus_set() #focus the entry to type
    root.bind('<Return>', saveComm) #call saveComm after pressing enter
    root.mainloop()
    # After the window closes, dispatch the collected command.
    global comm
    cSplit = comm.split()
    cLen = len(cSplit)
    if cLen == 2:
        # Exactly one word after 'acer': built-in control commands.
        if cSplit[1] == 'terminate':
            Popen([scDir + 'bash_scripts/terminate.sh'])
        elif cSplit[1] == 'restart':
            Popen([scDir + 'bash_scripts/restart.sh', scDir])
        else:
            Popen([scDir + 'bash_scripts/notify_tts.sh', f'Failed: "{" ".join(cSplit[1:]).title()}" command does not exist', 'Failed'])
    elif cLen > 2:
        # Multi-word commands are delegated to the voice-command dispatcher.
        sc.executeVoiceCommand(cSplit)
    # Reset the buffer so a later call starts from the wake word again.
    comm = 'acer '
if __name__ == '__main__':
    getComm()
c82dd0347c9c7a260ec16ac4d8cd4d97b57a1870 | Python | QWQ-pixel/practice_python_2 | /num_14/14.1.py | UTF-8 | 299 | 3.015625 | 3 | [] | no_license | def recep():
num, string = int(input()), ''
for i in range(num):
wrd = input()
if 'лук' not in wrd:
string += wrd
if i < num - 1 and i != 0:
string += ', '
else:
string += '.'
print(string)
recep()
| true |
b4bc26af719710c4aa00c86d7ab217f5b4720c5e | Python | luiz158/Dica_Python_Linkedin | /25_Faker/faker_u.py | UTF-8 | 519 | 3.25 | 3 | [] | no_license | # instalar biblioteca(pip)
# pip install Faker
from faker import Faker
# Instanciando classe utilizada e configurando para pt-br
fake = Faker('pt_br')
print(fake.name(), '\n')
# Ana Lívia Pereira
print(fake.address())
# Ladeira Gonçalves, 90
# Cenaculo
# 81515-427 Castro da Mata / DF
print(fake.email())
# abarros@hotmail.com
print(fake.text())
# Culpa dicta veniam ipsum vero atque. Voluptatem animi natus et
# reprehenderit iusto perspiciatis. Eum enim consequuntur placeat beatae.
print(fake.phone_number())
# +55 (084) 3092-1293
| true |
87dc94e50fb0eeb65037f378ebb104510503211b | Python | ycAlex/algorithm020 | /Week1/trappingWaters.py | UTF-8 | 1,020 | 3.359375 | 3 | [] | no_license | #就是寻找V,这里也可能是W-可以理解为N个V叠加
#https://leetcode-cn.com/problems/trapping-rain-water/solution/jie-yong-zhan-lai-ji-lu-yi-ge-di-jian-de-shu-zu-by/
def trap(self, height: List[int]) -> int:
if len(height)<3:
return 0
#初始化结果,和一个记录当前的位置的指针
res,inx = 0,0
#一个栈来保存V的左侧部分
st = []
while(inx<len(height)):
#如果有可能的左侧部分,则看下一个进来的左侧部分的延续还是右侧部分(再次思考V)
while st and height[st[-1]]<height[inx]:
bottom = st.pop(-1)
if len(st)==0:
break
#如果能形成一个小V,计算这个小V的面积
newHeight = min(height[st[-1]],height[inx]) - height[bottom]
area = newHeight*(inx-st[-1]-1)
res+=area
st.append(inx)
inx+=1
return res
| true |
5fa14d1bdc010e47f628b8a972cbffcc13807899 | Python | spicecoaster/pycaribbean2017 | /Workshop/sample_blinky_blue.py | UTF-8 | 169 | 3.140625 | 3 | [] | no_license | import machine
import time
blue_led=machine.Pin(2, machine.Pin.OUT)
for i in range(10):
blue_led.low()
time.sleep(0.5)
blue_led.high()
time.sleep(0.5)
| true |
e53a812f48aab622fa587081f6493b470126effa | Python | timothyyu/python-practice | /python_for_everybody/py2_p4i_old/2.3 compute gross pay.py | UTF-8 | 683 | 4.1875 | 4 | [
"BSD-3-Clause"
] | permissive | #2.3 Write a program to prompt the user for hours and rate per hour
#using raw_input to compute gross pay. Use 35 hours and a rate of 2.75
#per hour to test the program (the pay should be 96.25). You should use
#raw_input to read a string and float() to convert the string to a number.
#Do not worry about error checking or bad user data.
# Python 2 script: raw_input() reads a line as a string.
hrs = raw_input("Enter Hours:")
rate = raw_input("Enter Rate:")
# Gross pay = hours worked * hourly rate (no error checking, per the exercise).
pay = float(hrs) * float(rate)
print pay
#use quotes for file location in cmd line to run (because there are spaces)
#i.e. /Google Drive/python for everybody/learn/"2.3 compute gross pay.py"
#had to correct multiple times, used int() when i shouldnt and had rate predefined | true |
25108f0fb2b273eb8d50003889adef10b9596963 | Python | RobertoPrevato/Base64 | /source/core/folders/traversers.py | UTF-8 | 827 | 2.765625 | 3 | [
"MIT"
] | permissive | import os
import re
from os import listdir
from os.path import isfile, join, isdir
class Traverser:
    '''
    Recursively collect files under a directory whose names end with a
    given extension, matched case-insensitively.
    '''
    def __init__(self, ext):
        # Keep the raw extension attribute for backward compatibility.
        self.ext = ext
        # Compile the match pattern once instead of on every include() call;
        # the raw string also avoids the invalid "\." escape warning the
        # original non-raw pattern produced.
        self._pattern = re.compile(r"\.{}$".format(ext), re.IGNORECASE)
    def include(self, f):
        '''Return True when file name *f* ends with the configured extension.'''
        return self._pattern.search(f) is not None
    def get_files(self, path):
        '''Return every matching file under *path*, descending into subdirectories.'''
        found = [path + os.sep + f for f in listdir(path)
                 if (isfile(join(path, f)) and self.include(f))]
        subdirs = [f for f in listdir(path) if isdir(join(path, f))]
        for sub in subdirs:
            found = found + self.get_files(path + os.sep + sub)
        return found
class PicsTraverser(Traverser):
    '''Traverser specialised to picture files (jpg/jpeg/jpe/png/gif/svg).'''
    def __init__(self):
        # Precompiled, case-insensitive pattern for the supported extensions.
        # BUGFIX: the original pattern had "\.gif" without a trailing '$',
        # so names like "photo.gifoo" were wrongly accepted.
        self.ext = re.compile(r"\.jpg$|\.jpeg$|\.jpe$|\.png$|\.gif$|\.svg$",
                              re.IGNORECASE)
    def include(self, f):
        '''Return True when file name *f* has a picture extension.'''
        return self.ext.search(f) is not None
| true |
2e0f689f0ffc50fa438b3a29673104f3204f7a98 | Python | genryu/git-genryu | /astrophysics/EarthMarsOppostion.py | UTF-8 | 2,533 | 3.328125 | 3 | [] | no_license | #Demonstration of Mars-Earth oppositions in Mars orbit
#
# Hopelessly out of proper scale, of course, it's meant to illustrate the motion
#
#There's an opposition every 780 days, but the orbits are eccentric, so 'favourable'
#oppositions, where mars is near perihelion, occur about every 15 years or so
#
#Michael Jones 2012
from visual import *
import math
import time
# Window/scene setup for the VPython animation.
win=700
scene = display(title="Mars Orbit", width=win, height=win, range=2)
# The Sun sits at the origin (one focus of both elliptical orbits).
sun = sphere()
sun.pos = vector(0,0,0)
sun.radius = 0.2
sun.color = color.yellow
earth = sphere()
earth.radius = 0.07
earth.color = color.blue
earth.a = 1.0 #Semi-major axis
earth.e = 0.017 #eccentricity of orbit (0=circle)
# Start both planets at perihelion, distance a*(1-e), on the +x axis.
earth.pos = (earth.a*(1-earth.e),0,0)
earthstart = (earth.a*(1-earth.e),0,0)
earth.sidperiod = 365.246 #Sidereal Period (planet's year, in Earth days)
#earth.rotperiod = 1.000 #Length of the planets sidereal day, in Earth days
#eartharrow = arrow(pos=earth.pos, axis=(-0.1,0,0))
mars = sphere()
mars.radius = 0.035
mars.color = color.red
mars.a = 1.5 #semi-major axis
mars.e = 0.093 #eccentricity of orbit (0=circle)
mars.pos = (mars.a*(1-mars.e),0,0)
marsstart = (mars.a*(1-mars.e),0,0)
mars.sidperiod = 686.98 #Sidereal Period (planet's year, in Earth days)
#mars.rotperiod = 1.025949 #Length of the planets sidereal day, in Earth days
#marsarrow = arrow(pos=mars.pos, axis=(-0.1,0,0))
# On-screen label updated each frame with the elapsed simulation time.
tlabel=label(pos=(0,1.6,0), text='', xoffset=0, yoffset=0, box=0)
autoscale=0
autocenter=0
# Orbital (s) and rotational (r) angles for Earth (e) and Mars (m), radians.
estheta=0.0
ertheta=0.0
mstheta=0.0
mrtheta=0.0
# Time step: one Earth day per animation frame.
dt=1.0
t=0.0
time.sleep(1) #Pause for a bit before starting
while 1: #Do the animation
    rate(100)
    t=t+dt
    # Rotate each planet's start vector by its current angle, then scale the
    # distance by the ellipse polar equation r = a(1-e^2)/(1+e*cos(theta)).
    earth.pos=rotate(earthstart, angle=estheta)
    earth.pos.mag=earth.a*(1-earth.e*earth.e)/(1+earth.e*math.cos(estheta))
#    eartharrow.pos=earth.pos
#    eartharrow.axis=rotate((-0.1,0,0), angle=ertheta)
    mars.pos=rotate(marsstart, angle=mstheta)
    mars.pos.mag=mars.a*(1-mars.e*mars.e)/(1+mars.e*math.cos(mstheta))
#    marsarrow.pos=mars.pos
#    marsarrow.axis=rotate((-0.1,0,0), angle=mrtheta)
    tlabel.text="Time %06.2f (days) = %6.4f (years) " % (t, t/365.246)
    # Advance each orbital angle by the fraction of its year covered by dt,
    # wrapping back into [0, 2*pi).
    estheta=estheta+(dt/earth.sidperiod)*2*math.pi
    if estheta>2*math.pi:
        estheta=estheta-2*math.pi
#    ertheta=ertheta+(dt/earth.rotperiod)*2*math.pi
#    if ertheta>2*math.pi:
#        ertheta=ertheta-2*math.pi
    mstheta=mstheta+(dt/mars.sidperiod)*2*math.pi
    if mstheta>2*math.pi:
        mstheta=mstheta-2*math.pi
#    mrtheta=mrtheta+(dt/mars.rotperiod)*2*math.pi
#    if mrtheta>2*math.pi:
#        mrtheta=mrtheta-2*math.pi
bf35e03301cd6df4e6f90b77b94a799ad6e7854a | Python | ridhohafidz/Learn-Python | /learn-python/TI07-Lab-9-mridhohafidz/test_main.py | UTF-8 | 1,108 | 3.421875 | 3 | [] | no_license | # Bagian ini adalah unit test.
# Unit test digunakan untuk melakukan pengujian
# otomatis terhadap program yang dibangun.
# Jangan lakukan perubahan apapun pada bagian ini.
# Unit test ini akan digunakan oleh dosen/asisten
# sebagai acuan dalam mengoreksi
# hasil pekerjaan mahasiswa.
# Jika ingin mencoba menjalankan unit test ini,
# tekan Ctrl+Shift+S untuk membuka terminal shell
# di bagian kanan bawah layar.
# Ketik perintah pytest, lalu enter.
# Jika masih ada AssertionError, berarti fungsi yang
# dibuat belum benar.
# Jika tidak ada AssertionError dan hasil pengujian
# menunjukkan 100% sukses, berarti fungsi sudah benar.
import main
# Each test feeds main.sort_desc one list and checks the exact descending result.
def test_sort_desc_1():
    # Mixed, unsorted values.
    assert main.sort_desc([3, 6, 2, 7, 1]) == [7, 6, 3, 2, 1]
def test_sort_desc_2():
    # All-equal elements must be preserved.
    assert main.sort_desc([3, 3, 3, 3, 3]) == [3, 3, 3, 3, 3]
def test_sort_desc_3():
    # Single-element list.
    assert main.sort_desc([5]) == [5]
def test_sort_desc_4():
    # Empty list edge case.
    assert main.sort_desc([]) == []
def test_sort_desc_5():
    # Ascending input must be fully reversed.
    assert main.sort_desc([1, 2, 3, 4, 5, 6]) == [6, 5, 4, 3, 2, 1]
def test_sort_desc_6():
    # Already-descending input is returned unchanged.
    assert main.sort_desc([9, 6, 3, 1]) == [9, 6, 3, 1]
| true |
feeb4e53b96e385dd99848a01ab179b4413b26d9 | Python | igortereshchenko/amis_python | /km72/Maystrenko_Anna/4/task4.py | UTF-8 | 166 | 3.578125 | 4 | [] | no_license | year = int(input("Введіть ваше число >> "))
if (year%4 == 0) and (year%100 != 0) or (year%400 == 0):
print('LEAP')
else:
print('COMMON') | true |
dead7cbd34a68122ce84108d03f3427071a6848a | Python | pingting420/LeetCode_Algorithms | /Two Points/LC680. Valid Palindrome II.py | UTF-8 | 515 | 3.875 | 4 | [] | no_license | def validPalindrome(s):
def checkPalindrome(low, high):#First to check if it palindrome
i ,j = low, high
while i < j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True
low, high = 0, len(s) - 1
while low < high:
if s[low] == s[high]:
low +=1
high -=1
else:
return checkPalindrome(low+1, high) or checkPalindrome(low, high -1)
return True
#Time complexity: O(N) | true |
c3316a4383899c00906b07956a6ddc943259e530 | Python | fzEro555/Intro-to-Data-Analytics-Projects | /basic_analysis/main.py | UTF-8 | 4,064 | 2.75 | 3 | [] | no_license |
from reddit import reddit_api, clean_reddit_data
from reddit.process_reddit_data import process_reddit_data
from nytimes.search_api import search_articles as nytimes_data
from guardian.guardian_api import retrieve_articles as guardian_data
from fpds.process_fpds import process_fpds as fpds_data
from basic_analysis.count import count as count_mentions_in_news_and_reddit_data
from basic_analysis.combine_lean_data import combine as combine_counts_from_news_and_reddit_data
from basic_analysis.attribute_stats import main as statistical_analysis
from basic_analysis.LOF import main as lof
from basic_analysis.binning import main as binning
from basic_analysis.histograms import main as histogram_and_correlations
from basic_analysis.clustering import main as clustering
from basic_analysis.association_rules import main as association_rules
from hypothesis_testing.decision_tree import main as decision_tree_and_random_forest
from hypothesis_testing.anova import main as anova
from hypothesis_testing.knn import main as knn
from hypothesis_testing.svm import main as svm
from hypothesis_testing.freq_spend import main as prepare_data_for_naive_bayes_and_linear_regression
from hypothesis_testing.naive_bayes import main as naive_bayes
from hypothesis_testing.linear_regression import main as linear_regression
# get and process data from all four sources, reddit, nytimes, guardian, fpds
def get_and_process_data():
    """Fetch and clean raw data from all four sources (side effects only)."""
    # reddit data: extract, validate ratios, clean, then post-process.
    reddit_api.extractdata()
    clean_reddit_data.nullratio()
    clean_reddit_data.invalidratio()
    clean_reddit_data.cleandata()
    process_reddit_data()
    # nytimes data
    nytimes_data()
    # guardian data
    guardian_data()
    # FPDS data
    fpds_data()
    return
def further_processing():
    """Count mentions in the news/reddit data, then combine the counts."""
    count_mentions_in_news_and_reddit_data()
    combine_counts_from_news_and_reddit_data()
    return
# include all the hypothesis testing part
def hypothesis_testing():
    """Run the three hypothesis tests, printing a banner before each."""
    print("\n\n===================={}================================\n".format("H1: Decision Tree, Random Forest"))
    # hypothesis 1: predict level of hurricane
    # methods used: decision tree, random forest
    decision_tree_and_random_forest()
    print("\n\n===================={}================================\n".format("H2: ANOVA, KNN, SVM"))
    # hypothesis 2: storm hits
    # methods used: anova, knn, svm
    anova()
    knn()
    svm()
    print("\n\n===================={}================================\n".format("H3: Naive Bayes, Linear Regression"))
    # hypothesis 3: government spending
    # methods used: naive bayes, linear regression
    # prepare_data_for_naive_bayes_and_linear_regression()
    naive_bayes()
    linear_regression()
    return
if __name__ == "__main__":
    # Data acquisition steps are disabled by default (expensive / one-off).
    # # getting and process data
    # get_and_process_data()
    # # extract counts and other information useful for tasks for project 2
    # # including counting, combining and reshaping the data, etc
    # further_processing()
    # statistical analysis
    print("\n\n===================={}================================\n".format("Statistical Analysis"))
    statistical_analysis()
    # identify outliers (LOF) and remove them
    print("\n\n===================={}================================\n".format("LOF"))
    lof()
    # binning
    print("\n\n===================={}================================\n".format("Binning"))
    binning()
    # histogram and correlations
    print("\n\n===================={}================================\n".format("Histogram and Correlations"))
    histogram_and_correlations()
    # clustering analysis
    print("\n\n===================={}================================\n".format("Clustering Analysis"))
    clustering()
    # association rules
    print("\n\n===================={}================================\n".format("Association Rules"))
    association_rules()
    # predictive analysis, hypothesis testing
    hypothesis_testing()
    # input("any")
| true |
a90fbf6cc914748925ba452953f992fe28938432 | Python | rvernica/gv-sitemap | /sitemap.py | UTF-8 | 8,849 | 2.90625 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | '''
Create a sitemap image using Graphviz
'''
import argparse, ast, json, logging, os, requests, sys, urlparse
from bs4 import BeautifulSoup
from graphviz import Digraph
# Configure logging only once: if LOG already exists (e.g. the module was
# reloaded in the same interpreter) keep the existing logger untouched.
try:
    LOG  # presence check only; raises NameError on first import
except NameError:
    # Log to "<this script's name>.log" next to this file, at DEBUG level.
    logging.basicConfig(
        filename='%s/%s.log' %
        (os.path.dirname(os.path.realpath(__file__)),
         os.path.basename(__file__[:-3])),
        level=logging.DEBUG)
    LOG = logging.getLogger('sitemap')
from screenshot import Screenshot
from sitemapurl import SitemapUrl
class Sitemap(object):
    '''
    Crawl the given website, recording for every reachable page its outgoing
    links (and optionally a screenshot), and output a Graphviz DOT structure.
    '''

    ## Directory into which page screenshots are written.
    screenshot_dir = 'screenshots'

    def __init__(self, baseurl, authurl=None, authpayload=None,
                 skipself=False, skipbase=False, skipbaseback=False,
                 skipauth=None, skipdownload=None, skipscreenshot=None,
                 getscreenshots=False):
        ## Normalize the base URL so it always ends with a slash.
        if baseurl.endswith('/'):
            self.baseurl = SitemapUrl(baseurl)
        else:
            self.baseurl = SitemapUrl(baseurl + '/')
        ## Maps SitemapUrl -> {'outgoing': [SitemapUrl, ...], 'image': path or None}
        self.sitemap = {}
        self.cookies = None
        ## Auth if necessary: POST the credentials once and keep the cookies
        ## for every subsequent request.
        if authurl and authpayload:
            response = requests.post(
                authurl, data=authpayload, allow_redirects=False)
            self.cookies = response.cookies
        self.skipself = skipself
        self.skipbase = skipbase
        self.skipbaseback = skipbaseback
        self.skipauth = skipauth
        self.skipdownload = skipdownload
        self.skipscreenshot = skipscreenshot
        self.getscreenshots = getscreenshots

    def get_urls_from_response(self, url, response):
        '''
        Extract URLs from response and record the page (plus a possible
        redirect edge) in self.sitemap.
        '''
        soup = BeautifulSoup(response.text)
        urls = [link.get('href') for link in soup.find_all('a')]
        urls = self.clean_urls(urls)
        urlresp = SitemapUrl(response.url)
        if url != urlresp:
            ## The request was redirected: record the redirect edge.
            ## BUGFIX: include the 'image' key so gen_dot() does not raise
            ## KeyError on redirect-source entries.
            self.sitemap[url] = {'outgoing': [urlresp], 'image': None}
        urlsout = [u for u in set(urls) \
                   if (not self.skipself or u != url)
                   and (not self.skipbaseback or u != self.baseurl)
                   and ('auth' not in u)]
        self.sitemap[urlresp] = {'outgoing': urlsout, 'image': None}
        return urls

    def clean_urls(self, urls):
        '''
        1. Add BASE_URL to relative URLs
        2. Remove URLs from other domains
        3. Remove GET parameters from URL
        '''
        urls_new = []
        for url in urls:
            if not url:
                continue
            if url.startswith('/'):
                url = self.baseurl + url[1:]
            if not url.startswith(self.baseurl):
                continue
            urls_new.append(SitemapUrl(url))
        return urls_new

    def filter_ulrs(self, urls_new, urls):
        '''
        Drop URLs already visited, already queued, or already selected.
        (Name kept as-is -- "ulrs" typo -- to preserve the public interface.)
        '''
        urls_filtered = []
        for url in urls_new:
            ## Membership test directly on the dict (O(1)) instead of keys().
            if url in self.sitemap \
               or url in urls \
               or url in urls_filtered:
                continue
            urls_filtered.append(url)
        return urls_filtered

    def crawl(self):
        '''
        Breadth-first crawl starting from the base URL, recording every
        reachable page, then clean the resulting sitemap.
        '''
        urls = [self.baseurl]
        while len(urls) != 0:
            url = urls.pop(0)
            if self.skipdownload and self.skipdownload in url:
                LOG.debug('Skip URL: %s', url)
                continue
            LOG.debug('Request URL: %s', url)
            LOG.debug('Remaining URLs: %s', urls)
            LOG.debug('Visited URLs: %s', self.sitemap.keys())
            response = requests.get(url, cookies=self.cookies)
            LOG.debug('Response: %s', response.__str__())
            LOG.debug('Response URL: %s', response.url)
            urls_new = self.get_urls_from_response(url, response)
            LOG.debug('Complete URLs: %s', urls_new.__str__())
            urls_new = self.filter_ulrs(urls_new, urls)
            LOG.info('Selected URLs: %s', urls_new.__str__())
            urls.extend(urls_new)
        LOG.info('Sitemap: %s', self.sitemap.__str__())
        self.clean()

    def clean(self):
        '''
        1. Remove base URL
        2. Remove authentication URLs
        '''
        if self.skipbase:
            del self.sitemap[self.baseurl]
        if self.skipauth:
            ## Snapshot the keys so we can delete while iterating.
            for key in list(self.sitemap.keys()):
                if self.skipauth in key:
                    del self.sitemap[key]
        if self.skipbase or self.skipauth:
            for (key, value) in self.sitemap.items():
                ## BUGFIX: keep a URL only if it passes EVERY active filter.
                ## The original or-combined the keep conditions, so with both
                ## flags set a link was removed only if it failed both tests.
                self.sitemap[key]['outgoing'] = \
                    [u for u in value['outgoing']
                     if (not self.skipbase or u != self.baseurl)
                     and (not self.skipauth or self.skipauth not in u)]

    def screenshots(self):
        '''
        Take screenshots of all the pages in sitemap.
        '''
        if not os.path.exists(Sitemap.screenshot_dir):
            os.makedirs(Sitemap.screenshot_dir)
        cookies_list = []
        ## BUGFIX: self.cookies is None when no authentication was used;
        ## iterating it unguarded raised AttributeError.
        if self.cookies:
            for (key, value) in self.cookies.items():
                cookies_list.append(key + '=' + value)
        urls = []
        for url in self.sitemap.keys():
            if not self.skipscreenshot \
               or self.skipscreenshot not in url:
                urls.append(url)
                self.sitemap[url]['image'] = os.path.join(
                    Sitemap.screenshot_dir, url.pretty() + '.png')
        screenshot = Screenshot(path=Sitemap.screenshot_dir)
        screenshot.screenshot(urls, cookies_list)

    def gen_dot(self):
        '''
        Generate the DOT source for Graphviz: one node per page (optionally
        with its screenshot as the node image) and one edge per link.
        '''
        dot = Digraph(comment='Sitemap',
                      graph_attr={'splines':'ortho',
                                  'concentrate':'true'},
                      node_attr={'fontsize':'60',
                                 'labelloc':'b',
                                 'shape':'box'},
                      edge_attr={'penwidth':'10'})
        if self.getscreenshots:
            ## BUGFIX: the attribute is node_attr; the original 'node_atts'
            ## raised AttributeError whenever screenshots were requested.
            dot.node_attr['penwidth'] = '0'
        for (key, value) in self.sitemap.items():
            name = key.pretty()
            dot.node(name,
                     label=name,
                     image=value.get('image'))
        for (key, value) in self.sitemap.items():
            for key2 in value['outgoing']:
                dot.edge(key.pretty(), key2.pretty())
        return dot.source
if __name__ == '__main__':
    ## Argument Parser: one positional base URL plus optional crawl filters.
    parser = argparse.ArgumentParser(
        description='Crawl website and output GraphViz input file '
        + 'containing sitemap.')
    parser.add_argument(
        'baseurl',
        help='Base URL of the website.')
    parser.add_argument(
        '--authurl',
        help='URL for POST authentication.')
    parser.add_argument(
        '--authpayload',
        help='Payload for the POST authentication. e.g., '
        + '\'{"username": "foo", "password": "bar"}\'',
        type=json.loads)
    parser.add_argument(
        '--ignoreid',
        help='Ignore URLs where the difference is just an integer. '
        + 'e.g., if http://foo/1/bar and http://foo/2/bar '
        + 'are both present only one of them is visited.',
        action='store_true')
    parser.add_argument(
        '--skipself',
        help='Skip edges pointing to the same pages they are originating from.',
        action='store_true')
    parser.add_argument(
        '--skipbase',
        help='Repress base URL from the sitemap',
        action='store_true')
    parser.add_argument(
        '--skipbaseback',
        help='Repress links back to base URL from the sitemap',
        action='store_true')
    parser.add_argument(
        '--skipauth',
        help='Repress authentication URLs containing the given string '
        + 'from the sitemap')
    parser.add_argument(
        '--skipdownload',
        help='Skip downloading the URLs containing the given string.')
    parser.add_argument(
        '--getscreenshots',
        help='Take screenshots of each page for use as node image.',
        action='store_true')
    parser.add_argument(
        '--skipscreenshot',
        help='Repress screenshots for URLs containing the given string.')
    ## --authurl and --authpayload must be supplied together.
    if ('--authurl' in sys.argv) and ('--authpayload' not in sys.argv):
        parser.error('--authpayload needs to be set if --authurl is used')
    args = parser.parse_args()
    SitemapUrl.enabled = args.ignoreid
    ## Crawl, optionally capture screenshots, then emit the DOT source.
    sitemap = Sitemap(args.baseurl, args.authurl, args.authpayload,
                      args.skipself, args.skipbase, args.skipbaseback,
                      args.skipauth, args.skipdownload, args.skipscreenshot,
                      args.getscreenshots)
    sitemap.crawl()
    if args.getscreenshots:
        sitemap.screenshots()
    print sitemap.gen_dot()
| true |
0800c4958192676344e14c19f7c23ed299ac503e | Python | cvr/foam_scripts | /myfoam_write_boundaryData_regularWave.py | UTF-8 | 6,264 | 2.578125 | 3 | [] | no_license | #!/sw/epd-7.3-2/bin/python
import numpy as np
import sys,os
from scipy import optimize
from math import pi
import matplotlib.pyplot as plt
from mylib_DictWriter import write_boundaryData_scalar,write_boundaryData_vector
################################################################################
#user input start here
#general parameters
# Simulation / grid parameters (SI units throughout).
para={}
para['startTime']=0#start time
para['endTime']=50#end time
para['deltaT']=0.02#time step in ./constant/boundaryData/inlet_patch_name
para['x_inlet']=0#x coordinates of inlet plane
para['ymin_inlet']=-0.5#min y coordinates of inlet plane
para['ymax_inlet']=0.5#max y coordinates of inlet plane
para['zmin_inlet']=0#min z coordinates of inlet plane
para['zmax_inlet']=2#max z coordinates of inlet plane
para['nz']=200#number of cells in z direction. must be integer
para['g']=9.81#gravity acceleration
para['log_path']='./'#path to save the log file when execute this script
para['inlet_patch_name'] = 'inlet'#./constant/boundaryData/inlet_patch_name
# Regular-wave parameters (linear / Airy wave theory).
#parameters for regular wave in airy wave theory
wave={}
wave['depth']=1#depth of water, h
wave['waveheight']=0.1#wave height, H
wave['omega']=pi#wave circular frequency, omega
wave['rho']=998.8#density of water
#wave['k']=0.5 #wave number is not independent of omega thus it should not be defined here
#user input end here
################################################################################
def generate_grid(para):
    """Generate the inlet-plane point cloud.

    Builds two vertical columns of para['nz'] points at x = x_inlet, one at
    y = ymin_inlet and one at y = ymax_inlet, each spanning z in
    [zmin_inlet, zmax_inlet].  Returns a (2*nz, 3) numpy array whose rows
    are the xyz coordinates of the inlet points.

    Exits the process if para['nz'] is not an integer.
    """
    # isinstance is the idiomatic type test; print() as a function keeps the
    # script valid under both Python 2 and 3 (the original used a Python-2
    # print statement, which is a syntax error in Python 3).
    if not isinstance(para['nz'], int):
        print('nz must be integer!')
        sys.exit()
    nz = para['nz']
    x = np.zeros(nz * 2) + para['x_inlet']
    y = np.append(np.zeros(nz) + para['ymin_inlet'],
                  np.zeros(nz) + para['ymax_inlet'])
    z1 = np.linspace(para['zmin_inlet'], para['zmax_inlet'], nz)
    z = np.append(z1, z1)
    # Stack the coordinate vectors and transpose to one point per row.
    return np.transpose(np.vstack((x, y, z)))
def get_k(para, wave):
    """Solve the linear dispersion relation for the wave number k.

    Finds the root of g*k*tanh(k*depth) - omega**2 = 0 with Brent's method,
    bracketing the root in (0, 1000).
    """
    from math import tanh

    depth = wave['depth']
    gravity = para['g']
    omega = wave['omega']

    def residual(k):
        # Linear (Airy) dispersion relation rearranged to f(k) = 0.
        return gravity * k * tanh(k * depth) - omega ** 2

    return optimize.brentq(residual, 0, 1000)
################################################################################
#main
# Initial check: warn (but do not abort) when old boundaryData already exists.
if ('boundaryData' in os.listdir('./constant')):
    # print() works identically for a single argument under Python 2 and 3;
    # the original Python-2 print statements broke Python-3 compatibility.
    print("Warning!!\n Old boundaryData exist in ./constant.")
    print("You may need to clean it before creating new data.")
log_file=open(para['log_path']+'myfoam_write_boundaryData_regularWave.log','w')
wave['k']=get_k(para,wave)
log_file.write('Compute wave number, k, from frequency, omega. k='+str(wave['k'])+'\n')
points=generate_grid(para) #generate points on inlet plane
log_file.write( 'points on inlet patch: \n'+str(points)+'\n')
write_boundaryData_vector(points,'','points',para,foam_class='vectorField',foam_object='points') #write points file to ./constant/boundaryData
t_list=np.arange(para['startTime'],para['endTime'],para['deltaT'])
t_list=np.append(t_list,para['endTime'])#append endTime to the end of array
log_file.write( 'time files to be created: \n'+str(t_list)+'\n')
from math import pi
phase_shifted=-pi/2
eta_list=0.5*wave['waveheight']*np.cos(-wave['omega']*t_list+phase_shifted)#add -pi/2 so that eta=0 at t=0
log_file.write( 'wave height: \n'+str(eta_list)+'\n')
depth_list=eta_list+wave['depth']#depth of water at inlet, varying with time
log_file.write( 'water depth at inlet: \n'+str(depth_list)+'\n')
log_file.write('\n\n')
# BUGFIX: the separator line was written as '#'*80+'n' (a literal letter n)
# instead of ending with a newline.
log_file.write('#'*80+'\n')
log_file.write('Create velocity and pressure for each time step:\n')
for i,t in enumerate(t_list):
    #if z coordinates in a row of points is smaller than eta, alpha_water should be 1 there and 0 otherwise.
    log_file.write( '\nt= '+str(t)+'\n')
    alpha_water=0.5*(np.sign(depth_list[i]-points[:,2])+1)#a list containing value of alpha.water in each cell on inlet patch
    n1=0
    for n in range(alpha_water.size): #compute how many cells are with value alpha.water=1
        if alpha_water[n] > 0:
            n1=n1+1
            continue
        else:
            break
    log_file.write( 'alpha.water in each cell: \n'+str(alpha_water)+'\n')
    write_boundaryData_scalar(alpha_water,t,'alpha.water',para)
    # Horizontal (u) and vertical (w) orbital velocities from linear theory.
    u=0.5*wave['waveheight']*wave['omega']*np.cosh(wave['k']*(points[:,2]))/np.sinh(wave['k']*wave['depth'])*np.cos(-wave['omega']*t+phase_shifted)
    u=u*alpha_water#u in all cells above free surface are set to zero
    u[n1:]=u[n1-1]#u in all cells above free surface are set to equal to velocity just below the free surface
    w=0.5*wave['waveheight']*wave['omega']*np.sinh(wave['k']*(points[:,2]))/np.sinh(wave['k']*wave['depth'])*np.sin(-wave['omega']*t+phase_shifted)
    w=w*alpha_water#w in all cells above free surface are set to zero
    w[n1:]=w[n1-1]#w in all cells above free surface are set to equal to velocity just below the free surface
    v=points[:,2]*0#v is all zero
    log_file.write( 'u: \n'+str(u)+'\n')
    log_file.write( 'v: \n'+str(v)+'\n')
    log_file.write( 'w: \n'+str(w)+'\n')
    velocity=np.vstack((u,v,w))
    velocity=np.transpose(velocity)
    log_file.write( 'velocity:\n'+str(velocity)+'\n')
    write_boundaryData_vector(velocity,t,'U',para)
    #pressure
    #actually there is no need to prescribe pressure
    p=wave['rho']*para['g']*(-(points[:,2]-wave['depth'])+wave['waveheight']*0.5*np.cosh(wave['k']*(points[:,2]))/np.cosh(wave['k']*wave['depth'])*np.cos(-wave['omega']*t+phase_shifted))
    p=p*alpha_water
    p_rgh=wave['rho']*para['g']*(wave['waveheight']*0.5*np.cosh(wave['k']*(points[:,2]))/np.cosh(wave['k']*wave['depth'])*np.cos(-wave['omega']*t+phase_shifted)) + wave['rho']*para['g']*points[:,2]#this is actually p+rgh instead of p-rgh
    p_rgh=p_rgh*alpha_water#p in all cells above free surface are set to zero
    #fixed negative p_rgh
    #for n in range(p_rgh.size):
    #    if p_rgh[n] < 0:
    #        p_rgh[n] = 0
    #p=p*alpha_water
    #log_file.write( 'p:\n'+str(p)+'\n')
    #log_file.write( 'p_rgh:\n'+str(p_rgh)+'\n')
    #write_boundaryData_scalar(p_rgh,t,'p_rgh',para)
    #write_boundaryData_scalar(p,t,'p',para)
| true |