text stringlengths 38 1.54M |
|---|
class MyDecorator:
    """Class-based decorator that logs after the wrapped callable runs.

    ``@MyDecorator`` replaces the function with an instance whose
    ``__call__`` forwards all arguments, prints a confirmation, and —
    bug fix — returns the wrapped function's result instead of
    silently discarding it.
    """

    def __init__(self, func):
        # Keep a reference to the callable being decorated.
        self.func = func

    def __call__(self, *args, **kwargs):
        # Forward every positional and keyword argument unchanged.
        result = self.func(*args, **kwargs)
        print("Function Executed")
        print("\n")
        return result  # previously dropped, making decorated functions return None


@MyDecorator
def function():
    print("SHREYANSH KUMAR")


if __name__ == "__main__":
    function()
from PySide import QtCore, QtGui
import sys
class MyCounter(QtCore.QObject):
    """Counter model whose value is broadcast through two old-style Qt
    signals — ``valueChanged(int)`` for numeric widgets and
    ``valueChanged(QString)`` for text widgets — so all bound widgets
    stay in sync.

    Fixes over the original: the four duplicated emit pairs are
    collapsed into one private helper, and the unused bound exception
    variable is dropped.
    """

    def __init__(self):
        QtCore.QObject.__init__(self)
        self.__data = 0  # current counter value

    def __notify(self):
        # Emit the value once per signature so both int- and text-based
        # widgets receive the update.
        self.emit(QtCore.SIGNAL("valueChanged(int)"), self.__data)
        self.emit(QtCore.SIGNAL("valueChanged(QString)"), str(self.__data))

    def setValue(self, value = 1):
        """Set the counter; silently ignore non-integers and values
        outside [0, 50] (no signals are emitted in those cases, matching
        the original behavior)."""
        try:
            value = int(value)
        except ValueError:
            return  # e.g. non-numeric text typed into the QLineEdit
        if value < 0 or value > 50:
            return
        self.__data = value
        self.__notify()

    def Incr(self):
        # NOTE(review): Incr/Decr do not clamp to [0, 50]; the bound
        # spinbox/slider clamp on display — confirm this is intended.
        self.__data += 1
        self.__notify()

    def Decr(self):
        self.__data -= 1
        self.__notify()

    def Clean(self):
        """Reset the counter to zero and notify all listeners."""
        self.__data = 0
        self.__notify()
# --- Application bootstrap --------------------------------------------------
app = QtGui.QApplication(sys.argv)
Window = QtGui.QMainWindow()
Window.resize(500, 300)
# SpinBox, Slider and LineEdit are three views onto the same counter value,
# all capped at 50 to match MyCounter's accepted range.
SpinBox = QtGui.QSpinBox(Window)
SpinBox.setMaximum(50)
SpinBox.setWindowTitle("Spinbox")
SpinBox.setGeometry(10, 10, 100, 30)
Slider = QtGui.QSlider(QtCore.Qt.Horizontal, Window)
Slider.setMaximum(50)
Slider.setGeometry(10, 50, 100, 50)
LineEdit = QtGui.QLineEdit(Window)
LineEdit.setGeometry(10, 110, 100, 30)
# Buttons mutate the counter directly.
Incr = QtGui.QPushButton("+1", Window)
Incr.setGeometry(120, 10, 50, 50)
Decr = QtGui.QPushButton("-1", Window)
Decr.setGeometry(180, 10, 50, 50)
Counter = MyCounter()
Clean = QtGui.QPushButton("Clean", Window)
Clean.setGeometry(140, 60, 70, 40)
# --- Old-style (PySide 1) signal/slot wiring --------------------------------
# Widgets drive the counter...
QtCore.QObject.connect(Incr, QtCore.SIGNAL("clicked()"),
                       Counter.Incr)
QtCore.QObject.connect(Decr, QtCore.SIGNAL("clicked()"),
                       Counter.Decr)
QtCore.QObject.connect(Clean, QtCore.SIGNAL("clicked()"),
                       Counter.Clean)
QtCore.QObject.connect(Slider, QtCore.SIGNAL("valueChanged(int)"),
                       Counter.setValue)
QtCore.QObject.connect(SpinBox, QtCore.SIGNAL("valueChanged(int)"),
                       Counter.setValue)
QtCore.QObject.connect(LineEdit, QtCore.SIGNAL("textChanged(QString)"),
                       Counter.setValue)
# ...and the counter pushes every change back to all three widgets.
QtCore.QObject.connect(Counter, QtCore.SIGNAL("valueChanged(int)"),
                       SpinBox.setValue)
QtCore.QObject.connect(Counter, QtCore.SIGNAL("valueChanged(int)"),
                       Slider.setValue)
QtCore.QObject.connect(Counter, QtCore.SIGNAL("valueChanged(QString)"),
                       LineEdit.setText)
Window.show()
app.exec_()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 19:44:26 2019
@author: Martina Cerina
Workdirectory: /Users/ludovicaflocco/Desktop/Machine_Learning
"""
# Loading Libraries
import os

import matplotlib.pyplot as plt
import numpy as np  # needed for np.arange / np.mean below
import pandas as pd
import seaborn as sns
import statsmodels.formula.api as smf  # regression modeling

### For KNN model
import sklearn.metrics  # more metrics for model performance evaluation
from sklearn.model_selection import train_test_split  # train/test split
from sklearn.model_selection import cross_val_score  # k-folds cross validation
from sklearn.neighbors import KNeighborsRegressor  # KNN for Regression
# NOTE(review): hard-coded, machine-specific working directory — will fail
# on any other machine; consider a relative path.
os.chdir('E:/OneDrive/Hult/Machine Learning/Assignments/Group Assignment/Data2')
file = 'birthweight_feature_set.xlsx'
birthweight = pd.read_excel(file)
########################
# Fundamental Dataset Exploration
########################
# Column names
birthweight.columns
# Data dictionary for the 18 raw columns:
#mage = mother's age
#meduc = mother's education
#monpre = month prenatal care began
#npvis = number of prenatal visits
#fage = father's age
#feduc = father's education
#omaps = one minute apgar score
#fmaps = five minutes apgar score
#cigs = average cigarettes per day
#drink = average drinks per day
#male = 1 if baby male
#mwhte = 1 if mother white
#mblck = 1 if mother black
#moth = 1 if mother is other
#fwhte = 1 if father white
#fblck = 1 if father black
#foth = 1 if father other
#bwght = birthweight in grams
# Dimensions of the DataFrame
birthweight.shape #18 variables
# Information about each variable (dtypes, non-null counts)
birthweight.info()
# Descriptive statistics (IS IT RELEVANT NOW?)
birthweight.describe().round(2)
#NEED TO CHECK IT : DON'T THINK WE NEED IT
birthweight.sort_values('bwght', ascending = False)
###############################################################################
# Imputing Missing Values
###############################################################################
# Count of missing values per column
print(
    birthweight
    .isnull()
    .sum()
)
for col in birthweight:  # This is something we should always be trying to do
    """ Create columns that are 0s if a value was not missing and 1 if
    a value is missing. """
    if birthweight[col].isnull().any():
        # m_<col> = 1 where the original value was missing, 0 otherwise,
        # so the missingness information survives imputation.
        birthweight['m_'+col] = birthweight[col].isnull().astype(int)
        #if there is a null value in any column i want to flag it
# Missing-value counts observed in the raw data:
#meduc 30
#monpre 5
#npvis 68
#fage 6
#feduc 47
#omaps 3
#fmaps 3
#cigs 110
#drink 115
# Complete-case copy used only for the distribution checks below.
df_dropped = birthweight.dropna()
#Creating histograms to check if normally distributed or not
sns.distplot(df_dropped['meduc']) #NOT NORMALLY DISTRIBUTED UNLESS DIVIDED INTO DIFF STEPS
sns.distplot(df_dropped['monpre']) #NOT NORMALLY DISTRIBUTED UNLESS DIVIDED INTO DIFF STEPS
sns.distplot(df_dropped['npvis']) #KINDA NORMALLY --> MEAN
sns.distplot(df_dropped['fage']) #YES NORMALLY --> MEAN
sns.distplot(df_dropped['feduc']) #NOT NORMALLY DISTRIBUTED
sns.distplot(df_dropped['omaps']) #NOT NORMALLY DISTRIBUTED
sns.distplot(df_dropped['fmaps']) #TAKE THE MEAN
sns.distplot(df_dropped['cigs']) #NOT
sns.distplot(df_dropped['drink']) #zero inflated
# # drink is zero inflated. Imputing with zero.
# fill = 0
#MEDIAN IMPUTATION FOR NPVIS, MEDUC AND FEDUC VARIABLES
#(comment previously said "mean" but the code uses .median())
fill = birthweight['npvis'].median()
birthweight['npvis'] = birthweight['npvis'].fillna(fill)
fill = birthweight['meduc'].median()
birthweight['meduc'] = birthweight['meduc'].fillna(fill)
# fill = birthweight['monpre'].median()
# birthweight['monpre'] = birthweight['monpre'].fillna(fill)
fill = birthweight['feduc'].median()
birthweight['feduc'] = birthweight['feduc'].fillna(fill)
# Checking the overall dataset to verify that there are no missing values remaining
print(
    birthweight
    .isnull()
    .any()
    .any()
)
###############################################################################
# Outlier Analysis
###############################################################################
# Quintile cut points for every column, used to pick outlier thresholds.
birthweight_quantiles = birthweight.loc[:, :].quantile([0.20,
                                                        0.40,
                                                        0.60,
                                                        0.80,
                                                        1.00])
print(birthweight_quantiles)
for col in birthweight:
    print(col)
########################
# Visual EDA (Histograms)
########################
# First 2x2 panel: mother-side variables and prenatal care.
plt.subplot(2, 2, 1)
sns.distplot(birthweight['mage'],
             color = 'g')
plt.xlabel('mage')
sns.boxplot(x =birthweight['mage'])
########################
plt.subplot(2, 2, 2)
sns.distplot(birthweight['meduc'],
             color = 'y')
plt.xlabel('meduc')
########################
plt.subplot(2, 2, 3)
sns.distplot(birthweight['monpre'],
             kde = False,
             rug = True,
             color = 'orange')
plt.xlabel('monpre')
########################
plt.subplot(2, 2, 4)
sns.distplot(birthweight['npvis'],
             kde = False,
             rug = True,
             color = 'r')
plt.xlabel('npvis')
plt.tight_layout()
# plt.savefig('Birthweight Histograms 1.png')
########################
########################
# Second panel: father-side variables and apgar scores.
plt.subplot(2, 2, 1)
sns.distplot(birthweight['fage'],
             color = 'g')
plt.xlabel('fage')
########################
plt.subplot(2, 2, 2)
sns.distplot(birthweight['feduc'],
             kde = False,
             rug = True,
             color = 'orange')
plt.xlabel('feduc')
########################
plt.subplot(2, 2, 3)
sns.distplot(birthweight['omaps'],
             kde = False,
             rug = True,
             color = 'r')
plt.xlabel('omaps')
########################
plt.subplot(2, 2, 4)
sns.distplot(birthweight['fmaps'],
             color = 'y')
plt.xlabel('fmaps')
plt.tight_layout()
# plt.savefig('Birthweight Data Histograms 2.png')
plt.show()
########################
########################
# Third panel: consumption variables and the target.
plt.subplot(2, 2, 1)
sns.distplot(birthweight['cigs'],
             kde = False,
             rug = True,
             color = 'orange')
plt.xlabel('cigs')
########################
plt.subplot(2, 2, 2)
sns.distplot(birthweight['drink'],
             kde = False,
             rug = True,
             color = 'r')
plt.xlabel('drink')
########################
plt.subplot(2, 2, 3)
sns.distplot(birthweight['bwght'],
             color = 'g')
plt.xlabel('bwght')
plt.tight_layout()
# plt.savefig('Birthweight Data Histograms 3.png')
########################
# Tuning and Flagging Outliers
########################
# Tighter tails (5% / 95%) to guide the hand-picked cutoffs below.
birthweight_quantiles = birthweight.loc[:, :].quantile([0.05,
                                                        0.40,
                                                        0.60,
                                                        0.80,
                                                        0.95])
print(birthweight_quantiles['mage'])
"""
Assumed Continuous/Interval Variables -
Mother's age
Mother's education
Month Prenatal Care began
Number of Prenatal Visits
Father's Age
Father's Education
One Minute apgar score
Five Minute's apgar score
Average cigaretts per day
Average drinks per day
Birthweigh grams
Binary Classifiers -
Baby male
Mother white
Mothe black
Mother is other
Father white
Father black
Father others
"""
# Outlier flags
mage_low = 20
mage_high = 55
overall_low_meduc = 10
monpre_low = 0
monpre_high = 7
npvis_low = 5
npvis_high = 18
fage_low = 20
fage_high = 62
overall_low_feduc = 7
overall_low_omaps = 4
overall_low_fmaps = 6
overall_cigs = 19
bwght_low = 2500
bwght_high = 4500
overall_drink = 11
########################
# Create a new column for Race and cEdu
# 'race' encodes the six race indicators as a 6-character bit string in the
# order (mwhte, mblck, moth, fwhte, fblck, foth); 'cEdu' is the combined
# years of parental education. (Removed the unused `abc` variable.)
birthweight['race'] = 0
birthweight['cEdu'] = 0
for val in enumerate(birthweight.loc[ : , 'fwhte']):
    birthweight.loc[val[0], 'race'] = str(birthweight.loc[val[0], 'mwhte']) + \
                                      str(birthweight.loc[val[0], 'mblck']) + \
                                      str(birthweight.loc[val[0], 'moth']) + \
                                      str(birthweight.loc[val[0], 'fwhte']) + \
                                      str(birthweight.loc[val[0], 'fblck']) + \
                                      str(birthweight.loc[val[0], 'foth'])
    birthweight.loc[val[0], 'cEdu'] = birthweight.loc[val[0], 'meduc'] + \
                                      birthweight.loc[val[0], 'feduc']
########################
# Creating Outlier Flags
########################
# Building loops for outlier imputation
# Convention for every out_* column: 1 = at/above the high cutoff,
# -1 = at/below the low cutoff, 0 = within range.
########################
# Mage
birthweight['out_mage'] = 0
for val in enumerate(birthweight.loc[ : , 'mage']):
    if val[1] >= mage_high:
        birthweight.loc[val[0], 'out_mage'] = 1
    if val[1] <= mage_low:
        birthweight.loc[val[0], 'out_mage'] = -1
########################
# Meduc (low-side cutoff only)
birthweight['out_meduc'] = 0
for val in enumerate(birthweight.loc[ : , 'meduc']):
    if val[1] <= overall_low_meduc:
        birthweight.loc[val[0], 'out_meduc'] = -1
########################
# Monpre
birthweight['out_monpre'] = 0
for val in enumerate(birthweight.loc[ : , 'monpre']):
    if val[1] >= monpre_high:
        birthweight.loc[val[0], 'out_monpre'] = 1
    if val[1] <= monpre_low:
        birthweight.loc[val[0], 'out_monpre'] = -1
########################
# Npvis
birthweight['out_npvis'] = 0
for val in enumerate(birthweight.loc[ : , 'npvis']):
    if val[1] >= npvis_high:
        birthweight.loc[val[0], 'out_npvis'] = 1
    if val[1] <= npvis_low:
        birthweight.loc[val[0], 'out_npvis'] = -1
########################
# Fage
birthweight['out_fage'] = 0
for val in enumerate(birthweight.loc[ : , 'fage']):
    if val[1] >= fage_high:
        birthweight.loc[val[0], 'out_fage'] = 1
    if val[1] <= fage_low:
        birthweight.loc[val[0], 'out_fage'] = -1
########################
# Feduc (low-side cutoff only)
birthweight['out_feduc'] = 0
for val in enumerate(birthweight.loc[ : , 'feduc']):
    if val[1] <= overall_low_feduc:
        birthweight.loc[val[0], 'out_feduc'] = -1
########################
# Omaps (low-side cutoff only)
birthweight['out_omaps'] = 0
for val in enumerate(birthweight.loc[ : , 'omaps']):
    if val[1] <= overall_low_omaps:
        birthweight.loc[val[0], 'out_omaps'] = -1
########################
# Fmaps (low-side cutoff only)
birthweight['out_fmaps'] = 0
for val in enumerate(birthweight.loc[ : , 'fmaps']):
    if val[1] <= overall_low_fmaps:
        birthweight.loc[val[0], 'out_fmaps'] = -1
########################
# Cigs (high-side cutoff only)
birthweight['out_cigs'] = 0
for val in enumerate(birthweight.loc[ : , 'cigs']):
    if val[1] >= overall_cigs:
        birthweight.loc[val[0], 'out_cigs'] = 1
########################
# Bwght
birthweight['out_bwght'] = 0
for val in enumerate(birthweight.loc[ : , 'bwght']):
    if val[1] >= bwght_high:
        birthweight.loc[val[0], 'out_bwght'] = 1
    if val[1] <= bwght_low:
        birthweight.loc[val[0], 'out_bwght'] = -1
########################
# Drink (high-side cutoff only)
birthweight['out_drink'] = 0
for val in enumerate(birthweight.loc[ : , 'drink']):
    if val[1] >= overall_drink:
        birthweight.loc[val[0], 'out_drink'] = 1
###############################################################################
# Correlation Analysis
###############################################################################
# NOTE(review): the .head() and .sort_values() results below are not assigned
# or printed, so they are discarded outside an interactive session.
birthweight.head()
df_corr = birthweight.corr().round(2)
print(df_corr)
df_corr.loc['bwght'].sort_values(ascending = False)
########################
# Correlation Heatmap
########################
# Using palplot to view a color scheme
sns.palplot(sns.color_palette('coolwarm', 12))
fig, ax = plt.subplots(figsize=(15,15))
# Drop the first row/column and anything past the 18 raw variables.
df_corr2 = df_corr.iloc[1:19, 1:19]
sns.heatmap(df_corr2,
            cmap = 'coolwarm',
            square = True,
            annot = True,
            linecolor = 'black',
            linewidths = 0.5)
# plt.savefig('Variable Correlation Heatmap.png')
plt.show()
# birthweight.to_excel('Birthweight_explored.xlsx')
#help(corr)
#help(pd.corr)
#help(pd.DataFrame.corr)
#We are often dealing with pearson correlation
#with unsupervised we will experience Spearman correlation
#print(birthweight.head())
########################
# SUMMARY BEFORE RUNNING THE MODEL
########################
# One-hot encode each discrete variable (first level dropped to avoid the
# dummy trap). pd.get_dummies is fed a plain list, so the dummy frames get a
# fresh 0..n-1 index matching birthweight's default index.
fmaps_dummies = pd.get_dummies(list(birthweight['fmaps']), prefix = 'fmaps', drop_first = True)
omaps_dummies = pd.get_dummies(list(birthweight['omaps']), prefix = 'omaps', drop_first = True)
drink_dummies = pd.get_dummies(list(birthweight['drink']), prefix = 'drink', drop_first = True)
meduc_dummies = pd.get_dummies(list(birthweight['meduc']), prefix = 'meduc', drop_first = True)
feduc_dummies = pd.get_dummies(list(birthweight['feduc']), prefix = 'feduc', drop_first = True)
race_dummies = pd.get_dummies(list(birthweight['race']), prefix = 'race', drop_first = True)
npvis_dummies = pd.get_dummies(list(birthweight['npvis']), prefix = 'npvis', drop_first = True)
cigs_dummies = pd.get_dummies(list(birthweight['cigs']), prefix = 'cigs', drop_first = True)
# male_dummies = pd.get_dummies(list(birthweight['male']), prefix = 'male', drop_first = True)
# mwhte_dummies = pd.get_dummies(list(birthweight['mwhte']), prefix = 'mwhte', drop_first = True)
# mblck_dummies = pd.get_dummies(list(birthweight['mblck']), prefix = 'mblck', drop_first = True)
# moth_dummies = pd.get_dummies(list(birthweight['moth']), prefix = 'moth', drop_first = True)
# fwhte_dummies = pd.get_dummies(list(birthweight['fwhte']), prefix = 'fwhte', drop_first = True)
# fblck_dummies = pd.get_dummies(list(birthweight['fblck']), prefix = 'fblck', drop_first = True)
# foth_dummies = pd.get_dummies(list(birthweight['foth']), prefix = 'foth', drop_first = True)
# Assemble the modeling frame: raw columns + all dummy sets, side by side.
birthweight_2 = pd.concat(
        [birthweight.loc[:,:],
         fmaps_dummies, drink_dummies,
         meduc_dummies, race_dummies,
         omaps_dummies, npvis_dummies,
         cigs_dummies, feduc_dummies],
        axis = 1)
# Interaction terms (baby sex x parental race, consumption, education).
# Removed the exact-duplicate recomputation of C_bM present in the original.
birthweight_2['C_wM'] = birthweight_2['male'] * birthweight_2['mwhte']
birthweight_2['C_bM'] = birthweight_2['male'] * birthweight_2['mblck']
birthweight_2['C_oM'] = birthweight_2['male'] * birthweight_2['moth']
birthweight_2['C_bF'] = birthweight_2['male'] * birthweight_2['fblck']
birthweight_2['cigolic'] = birthweight_2['cigs'] * birthweight_2['drink']
birthweight_2['edu'] = birthweight_2['feduc'] * birthweight_2['meduc']
birthweight_2['oth'] = birthweight_2['foth'] * birthweight_2['moth']
# birthweight_2['blck'] = birthweight_2['fblck'] * birthweight_2['mblck'] Does not give result
# birthweight_2['whte'] = birthweight_2['fwhte'] * birthweight_2['mwhte']
#birthweight_2.to_excel('abcd.xlsx')
lm_full = smf.ols(formula = """bwght ~ mage +
monpre +
fage +
birthweight_2['feduc_7.0'] +
birthweight_2['feduc_8.0'] +
birthweight_2['feduc_10.0'] +
birthweight_2['feduc_11.0'] +
birthweight_2['feduc_12.0'] +
birthweight_2['feduc_13.0'] +
birthweight_2['feduc_14.0'] +
birthweight_2['feduc_15.0'] +
birthweight_2['feduc_16.0'] +
birthweight_2['feduc_17.0'] +
birthweight_2['cigs_1'] +
birthweight_2['cigs_2'] +
birthweight_2['cigs_3'] +
birthweight_2['cigs_4'] +
birthweight_2['cigs_5'] +
birthweight_2['cigs_6'] +
birthweight_2['cigs_7'] +
birthweight_2['cigs_8'] +
birthweight_2['cigs_9'] +
birthweight_2['cigs_10'] +
birthweight_2['cigs_11'] +
birthweight_2['cigs_12'] +
birthweight_2['cigs_13'] +
birthweight_2['cigs_14'] +
birthweight_2['cigs_15'] +
birthweight_2['cigs_16'] +
birthweight_2['cigs_17'] +
birthweight_2['cigs_18'] +
birthweight_2['cigs_19'] +
birthweight_2['cigs_20'] +
birthweight_2['cigs_21'] +
birthweight_2['cigs_22'] +
birthweight_2['cigs_23'] +
birthweight_2['cigs_24'] +
birthweight_2['cigs_25'] +
male +
mwhte +
mblck +
moth +
fwhte +
fblck +
foth +
m_meduc +
m_npvis +
m_feduc +
cEdu +
out_mage +
out_meduc +
out_monpre +
out_npvis +
out_fage +
out_feduc +
out_omaps +
out_fmaps +
out_cigs +
out_bwght +
out_drink +
birthweight_2['omaps_3'] +
birthweight_2['omaps_4'] +
birthweight_2['omaps_5'] +
birthweight_2['omaps_6'] +
birthweight_2['omaps_7'] +
birthweight_2['omaps_8'] +
birthweight_2['omaps_9'] +
birthweight_2['omaps_10'] +
birthweight_2['fmaps_6'] +
birthweight_2['fmaps_7'] +
birthweight_2['fmaps_8'] +
birthweight_2['fmaps_9'] +
birthweight_2['fmaps_10'] +
birthweight_2['drink_1'] +
birthweight_2['drink_2'] +
birthweight_2['drink_3'] +
birthweight_2['drink_4'] +
birthweight_2['drink_5'] +
birthweight_2['drink_6'] +
birthweight_2['drink_7'] +
birthweight_2['drink_8'] +
birthweight_2['drink_9'] +
birthweight_2['drink_10'] +
birthweight_2['drink_11'] +
birthweight_2['drink_12'] +
birthweight_2['drink_13'] +
birthweight_2['drink_14'] +
birthweight_2['meduc_10.0'] +
birthweight_2['meduc_11.0'] +
birthweight_2['meduc_12.0'] +
birthweight_2['meduc_13.0'] +
birthweight_2['meduc_14.0'] +
birthweight_2['meduc_15.0'] +
birthweight_2['meduc_16.0'] +
birthweight_2['meduc_17.0'] +
birthweight_2['race_001010'] +
birthweight_2['race_001100'] +
birthweight_2['race_010001'] +
birthweight_2['race_010010'] +
birthweight_2['race_010100'] +
birthweight_2['race_100100'] +
birthweight_2['npvis_3.0'] +
birthweight_2['npvis_5.0'] +
birthweight_2['npvis_6.0'] +
birthweight_2['npvis_7.0'] +
birthweight_2['npvis_8.0'] +
birthweight_2['npvis_9.0'] +
birthweight_2['npvis_10.0'] +
birthweight_2['npvis_11.0'] +
birthweight_2['npvis_12.0'] +
birthweight_2['npvis_13.0'] +
birthweight_2['npvis_14.0'] +
birthweight_2['npvis_15.0'] +
birthweight_2['npvis_16.0'] +
birthweight_2['npvis_17.0'] +
birthweight_2['npvis_18.0'] +
birthweight_2['npvis_19.0'] +
birthweight_2['npvis_20.0'] +
birthweight_2['npvis_25.0'] +
birthweight_2['npvis_30.0'] +
birthweight_2['npvis_31.0'] +
birthweight_2['npvis_35.0']
""",
data = birthweight_2)
# Fitting Results
results = lm_full.fit()
# Printing Summary Statistics
results.pvalues
print(results.summary())
rsq_lm_full = results.rsquared.round(3)
print(f"""
Summary Statistics:
R-Squared: {results.rsquared.round(3)}
Adjusted R-Squared: {results.rsquared_adj.round(3)}
""")
########################
# Significant Model
########################
lm_significant = smf.ols(formula = """bwght ~ mage +
monpre +
fage +
feduc +
cigs +
drink +
male +
m_meduc +
m_npvis +
m_feduc +
race +
cEdu +
out_mage +
out_meduc +
out_monpre +
out_npvis +
out_fage +
out_feduc +
out_omaps +
out_fmaps +
out_cigs +
out_bwght +
out_drink +
birthweight_2['omaps_3'] +
birthweight_2['omaps_4'] +
birthweight_2['omaps_5'] +
birthweight_2['omaps_6'] +
birthweight_2['omaps_7'] +
birthweight_2['omaps_8'] +
birthweight_2['omaps_9'] +
birthweight_2['omaps_10'] +
birthweight_2['fmaps_6'] +
birthweight_2['fmaps_7'] +
birthweight_2['fmaps_8'] +
birthweight_2['fmaps_9'] +
birthweight_2['fmaps_10'] +
birthweight_2['meduc_10.0'] +
birthweight_2['meduc_11.0'] +
birthweight_2['meduc_12.0'] +
birthweight_2['meduc_13.0'] +
birthweight_2['meduc_14.0'] +
birthweight_2['meduc_15.0'] +
birthweight_2['meduc_16.0'] +
birthweight_2['meduc_17.0'] +
birthweight_2['race_001010'] +
birthweight_2['race_001100'] +
birthweight_2['race_010001'] +
birthweight_2['race_010010'] +
birthweight_2['race_010100'] +
birthweight_2['race_100100'] +
birthweight_2['npvis_3.0'] +
birthweight_2['npvis_5.0'] +
birthweight_2['npvis_6.0'] +
birthweight_2['npvis_7.0'] +
birthweight_2['npvis_8.0'] +
birthweight_2['npvis_9.0'] +
birthweight_2['npvis_10.0'] +
birthweight_2['npvis_11.0'] +
birthweight_2['npvis_12.0'] +
birthweight_2['npvis_13.0'] +
birthweight_2['npvis_14.0'] +
birthweight_2['npvis_15.0'] +
birthweight_2['npvis_16.0'] +
birthweight_2['npvis_17.0'] +
birthweight_2['npvis_18.0'] +
birthweight_2['npvis_19.0'] +
birthweight_2['npvis_20.0'] +
birthweight_2['npvis_25.0'] +
birthweight_2['npvis_30.0'] +
birthweight_2['npvis_31.0'] +
birthweight_2['npvis_35.0']
""",
data = birthweight_2)
# Fitting Results
results = lm_significant.fit()
results.rsquared_adj.round(3)
# Printing Summary Statistics
print(results.summary())
rsq_lm_significant = results.rsquared.round(3)
print(f"""
Summary Statistics:
R-Squared: {results.rsquared.round(3)}
Adjusted R-Squared: {results.rsquared_adj.round(3)}
""")
#########################################
######### KNN Full Model ################
#########################################
# Feature/target split: everything except bwght predicts bwght.
birthweight_2_data = birthweight_2.drop(['bwght'], axis = 1)
birthweight_2_target = birthweight_2.loc[:, 'bwght']
# Fixed random_state so the split is reproducible across the whole script.
X_train, X_test, y_train, y_test = train_test_split(
        birthweight_2_data,
        birthweight_2_target,
        test_size = 0.1,
        random_state = 508)
# Training set
print(X_train.shape)
print(y_train.shape)
# Testing set
print(X_test.shape)
print(y_test.shape)
# We need to merge our X_train and y_train sets so that they can be
# used in statsmodels
birthweight_2_train = pd.concat([X_train, y_train], axis = 1)
birthweight_2_test = pd.concat([X_test, y_test], axis = 1)
# ########################
# # Step 1: Create a model object
# ########################
# # Creating a regressor object
# knn_reg = KNeighborsRegressor(algorithm = 'auto',
#                               n_neighbors = 1)
# # Checking the type of this new object
# type(knn_reg)
# # Teaching (fitting) the algorithm based on the training data
# knn_reg.fit(X_train, y_train)
# # Predicting on the X_data that the model has never seen before
# y_pred = knn_reg.predict(X_test)
# # Printing out prediction values for each test observation
# print(f"""
# Test set predictions:
# {y_pred}
# """)
# # Calling the score method, which compares the predicted values to the actual
# # values
# y_score = knn_reg.score(X_test, y_test)
# # The score is directly comparable to R-Square
# print(y_score)
#########################################
########### Using Stats Method ##########
#########################################
# Same reduced specification, but fit on the TRAINING split only so the
# statsmodels summary is comparable to the scikit-learn models below.
lm_significant_stats = smf.ols(formula = """bwght ~ mage +
                                                    meduc +
                                                    monpre +
                                                    fage +
                                                    feduc +
                                                    cigs +
                                                    drink +
                                                    male +
                                                    m_meduc +
                                                    m_npvis +
                                                    m_feduc +
                                                    race +
                                                    cEdu +
                                                    out_mage +
                                                    out_meduc +
                                                    out_monpre +
                                                    out_npvis +
                                                    out_fage +
                                                    out_feduc +
                                                    out_omaps +
                                                    out_fmaps +
                                                    out_cigs +
                                                    out_bwght +
                                                    out_drink +
                                                    birthweight_2_train['omaps_3'] +
                                                    birthweight_2_train['omaps_4'] +
                                                    birthweight_2_train['omaps_5'] +
                                                    birthweight_2_train['omaps_6'] +
                                                    birthweight_2_train['omaps_7'] +
                                                    birthweight_2_train['omaps_8'] +
                                                    birthweight_2_train['omaps_9'] +
                                                    birthweight_2_train['omaps_10'] +
                                                    birthweight_2_train['fmaps_6'] +
                                                    birthweight_2_train['fmaps_7'] +
                                                    birthweight_2_train['fmaps_8'] +
                                                    birthweight_2_train['fmaps_9'] +
                                                    birthweight_2_train['fmaps_10'] +
                                                    birthweight_2_train['meduc_10.0'] +
                                                    birthweight_2_train['meduc_11.0'] +
                                                    birthweight_2_train['meduc_12.0'] +
                                                    birthweight_2_train['meduc_13.0'] +
                                                    birthweight_2_train['meduc_14.0'] +
                                                    birthweight_2_train['meduc_15.0'] +
                                                    birthweight_2_train['meduc_16.0'] +
                                                    birthweight_2_train['meduc_17.0'] +
                                                    birthweight_2_train['race_001010'] +
                                                    birthweight_2_train['race_001100'] +
                                                    birthweight_2_train['race_010001'] +
                                                    birthweight_2_train['race_010010'] +
                                                    birthweight_2_train['race_010100'] +
                                                    birthweight_2_train['race_100100'] +
                                                    birthweight_2_train['npvis_3.0'] +
                                                    birthweight_2_train['npvis_5.0'] +
                                                    birthweight_2_train['npvis_6.0'] +
                                                    birthweight_2_train['npvis_7.0'] +
                                                    birthweight_2_train['npvis_8.0'] +
                                                    birthweight_2_train['npvis_9.0'] +
                                                    birthweight_2_train['npvis_10.0'] +
                                                    birthweight_2_train['npvis_11.0'] +
                                                    birthweight_2_train['npvis_12.0'] +
                                                    birthweight_2_train['npvis_13.0'] +
                                                    birthweight_2_train['npvis_14.0'] +
                                                    birthweight_2_train['npvis_15.0'] +
                                                    birthweight_2_train['npvis_16.0'] +
                                                    birthweight_2_train['npvis_17.0'] +
                                                    birthweight_2_train['npvis_18.0'] +
                                                    birthweight_2_train['npvis_19.0'] +
                                                    birthweight_2_train['npvis_20.0'] +
                                                    birthweight_2_train['npvis_25.0'] +
                                                    birthweight_2_train['npvis_30.0'] +
                                                    birthweight_2_train['npvis_31.0'] +
                                                    birthweight_2_train['npvis_35.0']
                                                    """,
                               data = birthweight_2_train)
# Fitting Results
# BUG FIX: the original fitted lm_significant (the full-data model) here,
# so lm_significant_stats was built but never used and the R-squared below
# came from the wrong model.
results = lm_significant_stats.fit()
# Printing Summary Statistics
print(results.summary())
rsq_lm_stat_significant = results.rsquared.round(3)
#rsq_lm_stat_significant_test = lm_significant_stats.predict(X_test)
###############################################################################
# Applying the Optimal Model in scikit-learn
###############################################################################
# Hand-selected feature subset for the final scikit-learn models.
birthweight_data = birthweight_2.loc[:,['mage',
                                        'cigs',
                                        'male',
                                        'mwhte',
                                        'mblck',
                                        'moth',
                                        'fwhte',
                                        'fblck',
                                        'foth',
                                        'fmaps',
                                        'omaps',
                                        'out_omaps',
                                        'out_fmaps',
                                        'm_meduc',
                                        'm_npvis',
                                        'm_feduc',
                                        'cEdu',
                                        'C_wM',
                                        'C_bM',
                                        'C_bF',
                                        'oth',
                                        'cigolic',
                                        'edu',
                                        'out_mage',
                                        'out_meduc',
                                        'out_monpre',
                                        'out_npvis',
                                        'out_fage',
                                        'out_feduc',
                                        'out_cigs',
                                        'out_bwght',
                                        'out_drink',
                                        'drink_4',
                                        'drink_5',
                                        'drink_6',
                                        'drink_7',
                                        'drink_8',
                                        'drink_9',
                                        'drink_10',
                                        'drink_11',
                                        'drink_12',
                                        'drink_13',
                                        'drink_14',
                                        'npvis'
                                        ]]
# Preparing the target variable
birthweight_target = birthweight_2.loc[:, 'bwght']
# Same code as before (identical split parameters and random_state, so the
# rows match the earlier full-model split).
X_train, X_test, y_train, y_test = train_test_split(
        birthweight_data,
        birthweight_target,
        test_size = 0.1,
        random_state = 508)
########################
# Using KNN on the optimal model (same code as before)
########################
# Sweep k = 1..50 and record train/test R-squared for each.
training_accuracy = []
test_accuracy = []
neighbors_settings = range(1, 51)
for n_neighbors in neighbors_settings:
    # build the model
    clf = KNeighborsRegressor(n_neighbors = n_neighbors)
    clf.fit(X_train, y_train)
    # record training set accuracy
    training_accuracy.append(clf.score(X_train, y_train))
    # record generalization accuracy
    test_accuracy.append(clf.score(X_test, y_test))
plt.plot(neighbors_settings, training_accuracy, label = "training accuracy")
plt.plot(neighbors_settings, test_accuracy, label = "test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
plt.show()
print(max(test_accuracy))
# BUG FIX: test_accuracy[i] corresponds to n_neighbors = i + 1 because
# neighbors_settings starts at 1. The original reported and refit with the
# raw list index, i.e. one neighbor too few (and 0 if the best k was 1).
best_k = test_accuracy.index(max(test_accuracy)) + 1
print("Best test accuracy at N = ", best_k)
########################
# Rebuilding the model with the best k found above
########################
knn_reg = KNeighborsRegressor(algorithm = 'auto',
                              n_neighbors = best_k)
# Fitting the model based on the training data
knn_reg_fit = knn_reg.fit(X_train, y_train)
# Scoring the model
y_score_knn_optimal = knn_reg.score(X_test, y_test)
# The score is directly comparable to R-Square
print(y_score_knn_optimal)
# Generating Predictions based on the optimal KNN model
knn_reg_optimal_pred = knn_reg_fit.predict(X_test)
# Predictions
y_pred = knn_reg.predict(X_test)
print(f"""
Test set predictions:
{y_pred.round(2)}
""")
########################
## Does OLS predict better than KNN?
########################
from sklearn.linear_model import LinearRegression
# Re-split with the same parameters/seed as above for a fair comparison.
X_train, X_test, y_train, y_test = train_test_split(
        birthweight_data,
        birthweight_target,
        test_size = 0.1,
        random_state = 508)
# Prepping the Model
lr = LinearRegression(fit_intercept = False)
# Fitting the model
lr_fit = lr.fit(X_train, y_train)
# Predictions
lr_pred = lr_fit.predict(X_test)
print(f"""
Test set predictions:
{lr_pred.round(2)}
""")
# Scoring the model
y_score_ols_optimal = lr_fit.score(X_test, y_test)
# The score is directly comparable to R-Square
print("Fit score of scikit LR model: ", y_score_ols_optimal)
# Let's compare the testing score to the training score.
print('Training Score', lr.score(X_train, y_train).round(4))
print('Testing Score:', lr.score(X_test, y_test).round(4))
# 3-fold cross-validation on the full feature/target data.
cv_lr_3 = cross_val_score(lr,
                          birthweight_data,
                          birthweight_target,
                          cv = 3)
# BUG FIX: pd.np was deprecated and removed in pandas 2.0 — use numpy directly.
print("Cross validation score of LR: ", (np.mean(cv_lr_3)))
"""
Prof. Chase:
These values are much lower than what we saw before when we didn't create
a train/test split. However, these results are realistic given we have
a better understanding as to how well our model will predict on new data.
"""
# Printing model results
print(f"""
Optimal model KNN score: {y_score_knn_optimal.round(3)}
Optimal model OLS score: {y_score_ols_optimal.round(3)}
CrossValidation (CV 3) score: {pd.np.mean(cv_lr_3).round(3)}
R-Square LM Full: {rsq_lm_full.round(3)}
R-Square LM Signf: {rsq_lm_significant.round(3)}
R-Square LM Signf (stat): {rsq_lm_stat_significant.round(3)}
""")
# Group mothers' ages into quartiles and plot mean birthweight per group.
quant = birthweight['mage'].quantile([0.25, 0.50, 0.75, 1])
birthweight.loc[2, 'quant'] = 0  # pre-creates the 'quant' column; the loop below fills every row
for val in enumerate(birthweight.loc[ : , 'mage']):
    print(val[1])
    if val[1] <= quant.iloc[0]:
        birthweight.loc[val[0], 'quant'] = 25
    elif val[1] <= quant.iloc[1]:
        birthweight.loc[val[0], 'quant'] = 50
    elif val[1] <= quant.iloc[2]:
        birthweight.loc[val[0], 'quant'] = 75
    else:
        birthweight.loc[val[0], 'quant'] = 100
x = birthweight.groupby('quant')['bwght'].mean()
# BUG FIX: the last bar is the fourth quartile — it was mislabeled 'Q1'.
objects = ('Q1 ' + str(quant.iloc[0]), 'Q2 ' + str(quant.iloc[1]),
           'Q3 ' + str(quant.iloc[2]), 'Q4 ' + str(quant.iloc[3]))
y_pos = np.arange(len(objects))  # requires numpy (missing import in the original)
performance = [x.iloc[0], x.iloc[1], x.iloc[2], x.iloc[3]]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Birthweight')
plt.title('MOTHER AGE vs Birthweight')
plt.savefig('mage_Bwt.png')
plt.show()
# Explanation of conditionals (teaching demo; user-facing strings kept in Spanish)
# Function to read data from the console:
# input()
# int()
# Logical operators:
# ==, <, >, <=, >= // or, and
'''
# =========================================
camisa = 1
if camisa == 1: # Expresion Logica
# Sentencia o acciona ejecutar si se cumple la expresion logica
print("Tiene la camisa negra")
print("\nFin del programa")
# =========================================
camisa = 0
if camisa == 1: # Expresion Logica
# Sentencia o acciona ejecutar si se cumple la expresion logica
print("Tiene la camisa negra")
else:
print("Tiene la camisa de otro color")
print("\nFin del programa")
'''
# =========================================
# Ages 45-55: check whether the person can be vaccinated based on age.
edad = int(input("Por favor ingrese su edad: "))
# Alternative: convert separately, e.g. edad = int(edad)
# Validate the age — chained comparison is the idiomatic form of
# `edad >= 45 and edad <= 55`.
if 45 <= edad <= 55:
    print("La persona se puede vacunar")
else:
    print("La persona NO se puede vacunar aun")
# Generated by Django 2.2 on 2019-04-26 10:45
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Drop project.creation_date and add created_at/updated_at
    timestamps to the document, label and project models."""

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='project',
            name='creation_date',
        ),
        # Each created_at uses a one-off default (timezone.now) so existing
        # rows get a value; preserve_default=False removes that default from
        # the model state after the migration runs.
        migrations.AddField(
            model_name='document',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='document',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='label',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='label',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='project',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='project',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
# Volatility
# Copyright (C) 2012-13 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
# Notwithstanding any rights to use the Software granted by the foregoing,
# if entities or individuals have received a Cease & Desist letter from
# the Volatility Project, the Volatility Foundation, or its copyright holders
# for violating the terms of the GPL version 2, those entities (their employees,
# subcontractors, independent contractors, and affiliates) and / or persons
# are granted no such rights and any use by any one or more of them is
# expressly prohibited, in accordance with Section 4 of the GPL version 2.
# Any rights granted to such entities and / or persons by earlier license
# agreements have been previously terminated as to them.
#pylint: disable-msg=C0111
import os
import re
import math
import volatility.obj as obj
import volatility.utils as utils
import volatility.debug as debug
import volatility.win32.tasks as tasks_mod
import volatility.win32.modules as modules
import volatility.plugins.common as common
import volatility.plugins.taskmods as taskmods
import json
#--------------------------------------------------------------------------------
# Constants
#--------------------------------------------------------------------------------
# Hardware page size (4 KB) and the mask for the offset within one page.
PAGE_SIZE = 0x1000
PAGE_MASK = PAGE_SIZE - 1
# Output file extensions: "img" for ImageSectionObject dumps,
# "dat" for DataSectionObject / SharedCacheMap dumps.
IMAGE_EXT = "img"
DATA_EXT = "dat"
# Clears the low 16 bits of a VACB Overlay.FileOffset; those bits hold the
# view's reference count rather than file-offset data (see extract_vacb).
FILEOFFSET_MASK = 0xFFFFFFFFFFFF0000
# A Virtual Address Control Block (VACB) maps one 256 KB view of a file.
VACB_BLOCK = 0x40000
# Each VACB index array holds 128 pointer entries.
VACB_ARRAY = 0x80
# log2(VACB_BLOCK) and log2(VACB_ARRAY); used to size the multilevel index.
VACB_OFFSET_SHIFT = 18
VACB_LEVEL_SHIFT = 7
# Largest file size (32 MB) coverable by a single-level VACB index array.
VACB_SIZE_OF_FIRST_LEVEL = 1 << (VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT)
class _CONTROL_AREA(obj.CType):
    """Overlay for _CONTROL_AREA objects.

    A control area is the memory manager's per-file-stream bookkeeping
    object. This overlay adds a method that walks the control area's
    subsections and their prototype PTEs to collect the memory-resident
    pages of the mapped file.
    """

    def extract_ca_file(self, unsafe = False):
        """ Extracts a file from this _CONTROL_AREA

        Attempts to extract the memory resident pages pertaining to this
        particular _CONTROL_AREA object.

        Args:
            unsafe: Relax safety constraints for more data

        Returns:
            mdata: List of pages, (physoffset, fileoffset, size) tuples, that are memory resident
            zpad: List of pages, (offset, size) tuples, that are not memory resident

        Raises:
        """
        zpad = []
        mdata = []

        # Depending on the particular address space being used we need to
        # determine if the MMPTE will be either 4 or 8 bytes. The x64
        # and IA32_PAE both use 8 byte PTEs. Whereas, IA32 uses 4 byte
        # PTE entries.
        memory_model = self.obj_vm.profile.metadata.get('memory_model', '32bit')
        pae = self.obj_vm.pae

        if pae:
            # _MMPTEPA is the 8-byte PAE PTE type installed by
            # DumpFilesVTypesx86 below.
            mmpte_size = self.obj_vm.profile.get_obj_size("_MMPTEPA")
        else:
            mmpte_size = self.obj_vm.profile.get_obj_size("_MMPTE")

        # Calculate the size of the _CONTROL_AREA object. It is used to find
        # the correct offset for the SUBSECTION object and the size of the
        # CONTROL_AREA can differ between versions of Windows.
        control_area_size = self.size()

        # The segment is used to describe the physical view of the
        # file. We also use this as a semantic check to see if
        # the processing should continue. If the Segment address
        # is invalid, then we return.
        Segment = self.Segment
        if not Segment.is_valid():
            return mdata, zpad

        # The next semantic check validates that the _SEGMENT object
        # points back to the appropriate _CONTROL_AREA object. If the
        # check is invalid, then we return.
        if (self.obj_offset != Segment.ControlArea):
            return mdata, zpad

        # This is a semantic check added to make sure the Segment.SizeOfSegment value
        # is consistent with the Segment.TotalNumberOfPtes. This occurs frequently
        # when traversing through CONTROL_AREA Objects (~5%), often leading to
        # impossible values. Thus, to be conservative we do not proceed if the
        # Segment does not seem sound.
        if Segment.SizeOfSegment != (Segment.TotalNumberOfPtes * PAGE_SIZE):
            return mdata, zpad

        # The _SUBSECTION object is typically found immediately following
        # the CONTROL_AREA object. For Image Section Objects, the SUBSECTIONS
        # typically correspond with the sections found in the PE. On the otherhand,
        # for Data Section Objects, there is typically only a single valid SUBSECTION.
        subsection_offset = self.obj_offset + control_area_size
        #subsection = obj.Object("_SUBSECTION", subsection_offset, self.kaddr_space)
        subsection = obj.Object("_SUBSECTION", subsection_offset, self.obj_vm)

        # This was another check which was inspired by Ruud's code. It
        # verifies that the first SubsectionBase (Mmst) never starts
        # at the beginning of a page. The UNSAFE option allows us to
        # ignore this constraint. This was necessary for dumping file data
        # for file objects found with filescan (ie $Mft)
        SubsectionBase = subsection.SubsectionBase
        if (SubsectionBase & PAGE_MASK == 0x0) and not unsafe:
            return mdata, zpad

        # We obtain the Subsections associated with this file
        # by traversing the singly linked list. Ideally, this
        # list should be null (0) terminated. Upon occasion we
        # we have seen instances where the link pointers are
        # undefined (XXX). If we hit an invalid pointer, the we
        # we exit the traversal.
        while subsection.is_valid() and subsection.v() != 0x0:
            if not subsection:
                break

            # This constraint makes sure that the _SUBSECTION object
            # points back to the associated CONTROL_AREA object. Otherwise,
            # we exit the traversal.
            if (self.obj_offset != subsection.ControlArea):
                break

            # Extract subsection meta-data into local variables
            # this helps with performance and not having to do
            # repetitive lookups.
            PtesInSubsection = subsection.PtesInSubsection
            SubsectionBase = subsection.SubsectionBase
            NextSubsection = subsection.NextSubsection

            # The offset into the file is stored implicitly
            # based on the PTE's location within the Subsection.
            # StartingSector is in 512-byte sector units.
            StartingSector = subsection.StartingSector
            SubsectionOffset = StartingSector * 0x200

            # This was another check based on something Ruud
            # had done. We also saw instances where DataSectionObjects
            # would hit a SubsectionBase that was paged aligned
            # and hit strange data. In those instances, the
            # MMPTE SubsectionAddress would not point to the associated
            # Subsection. (XXX)
            if (SubsectionBase & PAGE_MASK == 0x0) and not unsafe:
                break

            ptecount = 0
            while (ptecount < PtesInSubsection):
                pteoffset = SubsectionBase + (mmpte_size * ptecount)
                FileOffset = SubsectionOffset + ptecount * 0x1000

                # The size of MMPTE changes depending on if it is IA32 (4 bytes)
                # or IA32_PAE/AMD64 (8 bytes).
                objname = "_MMPTE"
                if pae:
                    objname = "_MMPTEPA"

                mmpte = obj.Object(objname, offset = pteoffset, vm = \
                    subsection.obj_vm)

                if not mmpte:
                    ptecount += 1
                    continue

                # First we check if the entry is valid. If the entry is valid
                # then we get the physical offset. The valid entries are actually
                # handled by the hardware.
                if mmpte.u.Hard.Valid == 0x1:
                    # There are some valid Page Table entries where bit 63
                    # is used to specify if the page is executable. This is
                    # maintained by the processor. If it is not executable,
                    # then the bit is set. Within the Intel documentation,
                    # this is known as the Execute-disable (XD) flag. Regardless,
                    # we will use the get_phys_addr method from the address space
                    # to obtain the physical address.

                    ### Should we check the size of the PAGE? Haven't seen
                    # a hit for LargePage.
                    #if mmpte.u.Hard.LargePage == 0x1:
                    #    print "LargePage"

                    physoffset = mmpte.u.Hard.PageFrameNumber << 12
                    mdata.append([physoffset, FileOffset, PAGE_SIZE])
                    ptecount += 1
                    continue
                elif mmpte.u.Soft.Prototype == 0x1:
                    # If the entry is not a valid physical address then
                    # we check if it contains a pointer back to the SUBSECTION
                    # object. If so, the page is in the backing file and we will
                    # need to pad to maintain spatial integrity of the file. This
                    # check needs to be performed before looking for the transition flag.
                    # The prototype PTEs are initialized as MMPTE_SUBSECTION with the
                    # SubsectionAddress.

                    # On x86 systems that use 4 byte MMPTE, the MMPTE_SUBSECTION
                    # stores an "encoded" version of the SUBSECTION object address.
                    # The data is relative to global variable (MmSubsectionBase or
                    # MmNonPagedPoolEnd) depending on the WhichPool member of
                    # _SUBSECTION. This applies to x86 systems running ntoskrnl.exe.
                    # If bit 10 is set then it is prototype/subsection
                    if (memory_model == "32bit") and not pae:
                        SubsectionOffset = \
                            ((mmpte.u.Subsect.SubsectionAddressHigh << 7) |
                            (mmpte.u.Subsect.SubsectionAddressLow << 3))
                        #WhichPool = mmpte.u.Subsect.WhichPool
                        #print "mmpte 0x%x ptecount 0x%x sub-32 0x%x pteoffset 0x%x which 0x%x subdelta 0x%x"%(mmpte.u.Long,ptecount,subsection_offset,pteoffset,WhichPool,SubsectionOffset)
                        # Page lives in the backing file, not memory: pad.
                        zpad.append([FileOffset, PAGE_SIZE])
                        ptecount += 1
                        continue

                    if memory_model == "64bit" or pae:
                        SubsectionAddress = mmpte.u.Subsect.SubsectionAddress
                    else:
                        SubsectionAddress = mmpte.u.Long

                    if SubsectionAddress == subsection.obj_offset:
                        # sub proto/prot 4c0 420
                        #print "mmpte 0x%x ptecount 0x%x sub 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset)
                        zpad.append([FileOffset, PAGE_SIZE])
                        ptecount += 1
                        continue
                    elif (SubsectionAddress == (subsection.obj_offset + 4)):
                        # This was a special case seen on IA32_PAE systems where
                        # the SubsectionAddress pointed to subsection.obj_offset+4
                        # (0x420, 0x460, 0x4a0)
                        #print "mmpte 0x%x ptecount 0x%x sub+4 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset)
                        zpad.append([FileOffset, PAGE_SIZE])
                        ptecount += 1
                        continue
                    else:
                        # Unrecognized subsection pointer; still padded so the
                        # reconstructed file keeps its offsets.
                        #print "mmpte 0x%x ptecount 0x%x sub_unk 0x%x offset 0x%x suboffset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset,subsection.obj_offset)
                        zpad.append([FileOffset, PAGE_SIZE])
                        ptecount += 1
                        continue

                # Check if the entry is a DemandZero entry.
                elif (mmpte.u.Soft.Transition == 0x0):
                    if ((mmpte.u.Soft.PageFileLow == 0x0) and
                            (mmpte.u.Soft.PageFileHigh == 0x0)):
                        # Example entries include: a0,e0
                        #print "mmpte 0x%x ptecount 0x%x zero offset 0x%x subsec 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset)
                        zpad.append([FileOffset, PAGE_SIZE])
                        ptecount += 1
                    else:
                        # Page is in the pagefile; we cannot recover it here.
                        #print "mmpte 0x%x ptecount 0x%x paged offset 0x%x subsec 0x%x file 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset,mmpte.u.Soft.PageFileLow,mmpte.u.Soft.PageFileHigh)
                        zpad.append([FileOffset, PAGE_SIZE])
                        ptecount += 1

                # If the entry is not a valid physical address then
                # we also check to see if it is in transition.
                elif mmpte.u.Trans.Transition == 0x1:
                    physoffset = mmpte.u.Trans.PageFrameNumber << 12
                    #print "mmpte 0x%x ptecount 0x%x transition 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,physoffset,pteoffset)
                    mdata.append([physoffset, FileOffset, PAGE_SIZE])
                    ptecount += 1
                    continue
                else:
                    # This is a catch all for all the other entry types.
                    # sub proto/pro 420,4e0,460,4a0 (x64 +0x28)(x32 +4)
                    # other a0,e0,0, (20,60)
                    # 0x80000000
                    #print "mmpte 0x%x ptecount 0x%x other offset 0x%x subsec 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset)
                    zpad.append([FileOffset, PAGE_SIZE])
                    ptecount += 1

            # Traverse the singly linked list to its next member.
            subsection = NextSubsection

        return (mdata, zpad)
class _SHARED_CACHE_MAP(obj.CType):
    """Overlay for _SHARED_CACHE_MAP objects.

    Adds helpers that walk the cache manager's VACB index arrays
    (initial, single-level, and sparse multilevel) to collect meta
    information about the 256 KB views of a cached file.
    """

    def is_valid(self):
        """Sanity-check the object beyond the base CType validity test."""
        if not obj.CType.is_valid(self):
            return False

        # Added a semantic check to make sure the data is in a sound state. It's better
        # to catch it early.
        FileSize = self.FileSize.QuadPart
        ValidDataLength = self.ValidDataLength.QuadPart
        SectionSize = self.SectionSize.QuadPart

        #print "SectionSize 0x%x < 0 or FileSize < 0x%x ValidDataLength 0x%x"%(SectionSize,FileSize,ValidDataLength)
        #if SectionSize < 0 or (FileSize < ValidDataLength):
        # 0x7fffffffffffffff is tolerated as ValidDataLength: it marks a
        # stream whose valid length is effectively "unbounded".
        if SectionSize < 0 or ((FileSize < ValidDataLength) and (ValidDataLength != 0x7fffffffffffffff)):
            return False

        return True

    def process_index_array(self, array_pointer, level, limit, vacbary = None):
        """ Recursively process the sparse multilevel VACB index array

        Args:
            array_pointer: The address of a possible index array
            level: The current level
            limit: The level where we abandon all hope. Ideally this is 7
            vacbary: An array of collected VACBs

        Returns:
            vacbary: Collected VACBs
        """
        if vacbary is None:
            vacbary = []

        if level > limit:
            # NOTE(review): returning [] here discards any VACBs already
            # accumulated in vacbary; returning vacbary may be the intent --
            # confirm before changing.
            return []

        # Create an array of VACB entries
        VacbArray = obj.Object("Array", offset = array_pointer, \
            vm = self.obj_vm, count = VACB_ARRAY, \
            targetType = "address", parent = self)

        # Iterate through the entries
        for _i in range(0, VACB_ARRAY):
            # Check if the VACB entry is in use
            if VacbArray[_i] == 0x0:
                continue

            Vacbs = obj.Object("_VACB", offset = int(VacbArray[_i]), vm = self.obj_vm)

            # Check if this is a valid VACB entry by verifying
            # the SharedCacheMap member.
            if Vacbs.SharedCacheMap == self.obj_offset:
                # This is a VACB associated with this cache map
                vacbinfo = self.extract_vacb(Vacbs, VACB_BLOCK)
                if vacbinfo:
                    vacbary.append(vacbinfo)
            else:
                #Process the next level of the multi-level array
                vacbary = self.process_index_array(VacbArray[_i], level + 1, limit, vacbary)
                #vacbary = vacbary + _vacbary
        return vacbary

    def extract_vacb(self, vacbs, size):
        """ Extracts data from a specified VACB

        Attempts to extract the memory resident data from a specified
        VACB.

        Args:
            vacbs: The VACB object
            size: How much data should be read from the VACB

        Returns:
            vacbinfo: Extracted VACB meta-information (empty dict on failure)
        """
        # This is used to collect summary information. We will eventually leverage this
        # when creating the externally exposed APIs.
        vacbinfo = {}

        # Check if the Overlay member of _VACB is resident.
        # The Overlay member stores information about the FileOffset
        # and the ActiveCount. This is just another proactive check
        # to make sure the objects are seemingly sound.
        if not vacbs.Overlay:
            return vacbinfo

        # We should add another check to make sure that
        # the SharedCacheMap member of the VACB points back
        # to the corresponding SHARED_CACHE_MAP
        if vacbs.SharedCacheMap != self.v():
            return vacbinfo

        # The FileOffset member of VACB is used to denote the
        # offset within the file where the view begins. Since all
        # views are 256 KB in size, the bottom 16 bits are used to
        # store the number of references to the view.
        FileOffset = vacbs.Overlay.FileOffset.QuadPart

        if not FileOffset:
            return vacbinfo

        ActiveCount = vacbs.Overlay.ActiveCount
        # Strip the low 16 reference-count bits, leaving the true offset.
        FileOffset = FileOffset & FILEOFFSET_MASK
        BaseAddress = vacbs.BaseAddress.v()

        vacbinfo['foffset'] = int(FileOffset)
        vacbinfo['acount'] = int(ActiveCount)
        vacbinfo['voffset'] = int(vacbs.obj_offset)
        vacbinfo['baseaddr'] = int(BaseAddress)
        vacbinfo['size'] = int(size)

        return vacbinfo

    def extract_scm_file(self):
        """ Extracts a file from this _SHARED_CACHE_MAP

        Attempts to extract the memory resident pages pertaining to this
        particular _SHARED_CACHE_MAP object.

        Returns:
            vacbary: List of collected VACB meta information.
                NOTE(review): returns None (bare return) when obj_offset
                is 0 -- callers should guard; confirm intended.

        Raises:
        """
        vacbary = []

        if self.obj_offset == 0x0:
            return

        # Added a semantic check to make sure the data is in a sound state.
        #FileSize = shared_cache_map.FileSize.QuadPart
        #ValidDataLength = shared_cache_map.ValidDataLength.QuadPart
        SectionSize = self.SectionSize.QuadPart

        # Let's begin by determining the number of Virtual Address Control
        # Blocks (VACB) that are stored within the cache (nonpaged). A VACB
        # represents one 256-KB view in the system cache. There are a couple
        # options to use for the data size: ValidDataLength, FileSize,
        # and SectionSize.
        # Integer division/modulo under Python 2 semantics.
        full_blocks = SectionSize / VACB_BLOCK
        left_over = SectionSize % VACB_BLOCK

        # As an optimization, the shared cache map object contains a VACB index
        # array of four entries. The VACB index arrays are arrays of pointers
        # to VACBs, that track which views of a given file are mapped in the cache.
        # For example, the first entry in the VACB index array refers to the first
        # 256 KB of the file. The InitialVacbs can describe a file up to 1 MB (4xVACB).
        iterval = 0
        while (iterval < full_blocks) and (full_blocks <= 4):
            Vacbs = self.InitialVacbs[iterval]
            vacbinfo = self.extract_vacb(Vacbs, VACB_BLOCK)
            if vacbinfo: vacbary.append(vacbinfo)
            iterval += 1

        # We also have to account for the spill over data
        # that is not found in the full blocks. The first case to
        # consider is when the spill over is still in InitialVacbs.
        if (left_over > 0) and (full_blocks < 4):
            Vacbs = self.InitialVacbs[iterval]
            vacbinfo = self.extract_vacb(Vacbs, left_over)
            if vacbinfo: vacbary.append(vacbinfo)

        # If the file is larger than 1 MB, a separate VACB index array
        # needs to be allocated. This is based on how many 256 KB blocks
        # would be required for the size of the file. This newly allocated
        # VACB index array is found through the Vacbs member of
        # SHARED_CACHE_MAP.
        Vacbs = self.Vacbs

        if not Vacbs or (Vacbs.v() == 0):
            return vacbary

        # There are a number of instances where the initial value in
        # InitialVacb will also be the first entry in Vacbs. Thus we
        # ignore, since it was already processed. It is possible to just
        # process again as the file offset is specified for each VACB.
        if self.InitialVacbs[0].obj_offset == Vacbs.v():
            return vacbary

        # If the file is less than 32 MB then it can be found in
        # a single level VACB index array.
        size_of_pointer = self.obj_vm.profile.get_obj_size("address")

        if not SectionSize > VACB_SIZE_OF_FIRST_LEVEL:

            ArrayHead = Vacbs.v()
            _i = 0
            for _i in range(0, full_blocks):
                vacb_addr = ArrayHead + (_i * size_of_pointer)
                vacb_entry = obj.Object("address", offset = vacb_addr, vm = Vacbs.obj_vm)

                # If we find a zero entry, then we proceed to the next one.
                # If the entry is zero, then the view is not mapped and we
                # skip. We do not pad because we use the FileOffset to seek
                # to the correct offset in the file.
                if not vacb_entry or (vacb_entry.v() == 0x0):
                    continue

                Vacb = obj.Object("_VACB", offset = vacb_entry.v(), vm = self.obj_vm)
                vacbinfo = self.extract_vacb(Vacb, VACB_BLOCK)
                if vacbinfo:
                    vacbary.append(vacbinfo)

            if left_over > 0:
                # The partial trailing view sits one slot past the last
                # full block processed above.
                vacb_addr = ArrayHead + ((_i + 1) * size_of_pointer)
                vacb_entry = obj.Object("address", offset = vacb_addr, vm = Vacbs.obj_vm)

                if not vacb_entry or (vacb_entry.v() == 0x0):
                    return vacbary

                Vacb = obj.Object("_VACB", offset = vacb_entry.v(), vm = self.obj_vm)
                vacbinfo = self.extract_vacb(Vacb, left_over)
                if vacbinfo:
                    vacbary.append(vacbinfo)

            # The file is less than 32 MB, so we can
            # stop processing.
            return vacbary

        # If we get to this point, then we know that the SectionSize is greater than
        # VACB_SIZE_OF_FIRST_LEVEL (32 MB). Then we have a "sparse multilevel index
        # array" where each VACB index array is made up of 128 entries. We no
        # longer assume the data is sequential. (Log2 (32 MB) - 18)/7
        #tree_depth = math.ceil((math.ceil(math.log(file_size, 2)) - 18)/7)
        level_depth = math.ceil(math.log(SectionSize, 2))
        level_depth = (level_depth - VACB_OFFSET_SHIFT) / VACB_LEVEL_SHIFT
        level_depth = math.ceil(level_depth)
        limit_depth = level_depth

        if SectionSize > VACB_SIZE_OF_FIRST_LEVEL:

            # Create an array of 128 entries for the VACB index array.
            VacbArray = obj.Object("Array", offset = Vacbs.v(), \
                vm = self.obj_vm, count = VACB_ARRAY, \
                targetType = "address", parent = self)

            # We use a bit of a brute force method. We walk the
            # array and if any entry points to the shared cache map
            # object then we extract it. Otherwise, if it is non-zero
            # we attempt to traverse to the next level.
            for _i in range(0, VACB_ARRAY):
                if VacbArray[_i] == 0x0:
                    continue
                Vacb = obj.Object("_VACB", offset = int(VacbArray[_i]), vm = self.obj_vm)
                if Vacb.SharedCacheMap == self.obj_offset:
                    vacbinfo = self.extract_vacb(Vacb, VACB_BLOCK)
                    if vacbinfo:
                        vacbary.append(vacbinfo)
                else:
                    # The Index is a pointer.
                    # Process the next level of the multi-level array.
                    # We set the limit_depth to be the depth of the tree
                    # as determined from the size and we initialize the
                    # current level to 2.
                    vacbary = self.process_index_array(VacbArray[_i], 2, limit_depth, vacbary)
                    #vacbary = vacbary + _vacbary

        return vacbary
class ControlAreaModification(obj.ProfileModification):
    """Install the extraction overlays on Windows profiles."""

    conditions = {'os': lambda profile_os: profile_os == 'windows'}

    def modification(self, profile):
        # Register the overlay classes so instantiated _CONTROL_AREA and
        # _SHARED_CACHE_MAP objects gain the file-extraction helpers.
        overlay_classes = {
            '_CONTROL_AREA': _CONTROL_AREA,
            '_SHARED_CACHE_MAP': _SHARED_CACHE_MAP,
        }
        profile.object_classes.update(overlay_classes)
#--------------------------------------------------------------------------------
# VTypes
#--------------------------------------------------------------------------------
# Windows x86 symbols for ntkrnlpa
# Hand-built vtype definitions for 8-byte (PAE) page table entries on x86
# kernels. "_MMPTEPA" mirrors the profile's "_MMPTE" but is 8 bytes wide;
# extract_ca_file selects it whenever the address space reports PAE.
ntkrnlpa_types_x86 = {
    # Union of the possible interpretations of one 8-byte PTE.
    '__ntkrnlpa' : [ 0x8, {
    'Long' : [ 0x0, ['unsigned long long']],
    'VolatileLong' : [ 0x0, ['unsigned long long']],
    'Hard' : [ 0x0, ['_MMPTE_HARDWARE_64']],
    'Flush' : [ 0x0, ['_HARDWARE_PTE']],
    'Proto' : [ 0x0, ['_MMPTE_PROTOTYPE']],
    'Soft' : [ 0x0, ['_MMPTE_SOFTWARE_64']],
    'TimeStamp' : [ 0x0, ['_MMPTE_TIMESTAMP']],
    'Trans' : [ 0x0, ['_MMPTE_TRANSITION_64']],
    'Subsect' : [ 0x0, ['_MMPTE_SUBSECTION_64']],
    'List' : [ 0x0, ['_MMPTE_LIST']],
    } ],
    # 8-byte PTE wrapper used on PAE systems in place of _MMPTE.
    '_MMPTEPA' : [ 0x8, {
    'u' : [ 0x0, ['__ntkrnlpa']],
    } ],
    # Prototype PTE pointing back at a _SUBSECTION (bit 10 set, bit 0 clear).
    '_MMPTE_SUBSECTION_64' : [ 0x8, {
    'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]],
    'Unused0' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 5, native_type = 'unsigned long long')]],
    'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]],
    'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]],
    'Unused1' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 32, native_type = 'unsigned long long')]],
    'SubsectionAddress' : [ 0x0, ['BitField', dict(start_bit = 32, end_bit = 64, native_type = 'long long')]],
    } ],
    # Transition PTE: page still resident but removed from the working set.
    '_MMPTE_TRANSITION_64' : [ 0x8, {
    'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]],
    'Write' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 2, native_type = 'unsigned long long')]],
    'Owner' : [ 0x0, ['BitField', dict(start_bit = 2, end_bit = 3, native_type = 'unsigned long long')]],
    'WriteThrough' : [ 0x0, ['BitField', dict(start_bit = 3, end_bit = 4, native_type = 'unsigned long long')]],
    'CacheDisable' : [ 0x0, ['BitField', dict(start_bit = 4, end_bit = 5, native_type = 'unsigned long long')]],
    'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]],
    'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]],
    'Transition' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]],
    'PageFrameNumber' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 48, native_type = 'unsigned long long')]],
    'Unused' : [ 0x0, ['BitField', dict(start_bit = 48, end_bit = 64, native_type = 'unsigned long long')]],
    }],
    # Hardware-valid PTE (bit 0 set); PageFrameNumber gives the physical page.
    '_MMPTE_HARDWARE_64' : [ 0x8, {
    'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]],
    'Dirty1' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 2, native_type = 'unsigned long long')]],
    'Owner' : [ 0x0, ['BitField', dict(start_bit = 2, end_bit = 3, native_type = 'unsigned long long')]],
    'WriteThrough' : [ 0x0, ['BitField', dict(start_bit = 3, end_bit = 4, native_type = 'unsigned long long')]],
    'CacheDisable' : [ 0x0, ['BitField', dict(start_bit = 4, end_bit = 5, native_type = 'unsigned long long')]],
    'Accessed' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 6, native_type = 'unsigned long long')]],
    'Dirty' : [ 0x0, ['BitField', dict(start_bit = 6, end_bit = 7, native_type = 'unsigned long long')]],
    'LargePage' : [ 0x0, ['BitField', dict(start_bit = 7, end_bit = 8, native_type = 'unsigned long long')]],
    'Global' : [ 0x0, ['BitField', dict(start_bit = 8, end_bit = 9, native_type = 'unsigned long long')]],
    'CopyOnWrite' : [ 0x0, ['BitField', dict(start_bit = 9, end_bit = 10, native_type = 'unsigned long long')]],
    'Unused' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]],
    'Write' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]],
    'PageFrameNumber' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 48, native_type = 'unsigned long long')]],
    'reserved1' : [ 0x0, ['BitField', dict(start_bit = 48, end_bit = 52, native_type = 'unsigned long long')]],
    'SoftwareWsIndex' : [ 0x0, ['BitField', dict(start_bit = 52, end_bit = 63, native_type = 'unsigned long long')]],
    'NoExecute' : [ 0x0, ['BitField', dict(start_bit = 63, end_bit = 64, native_type = 'unsigned long long')]],
    } ],
    # Software PTE: pagefile-backed or demand-zero entries.
    '_MMPTE_SOFTWARE_64' : [ 0x8, {
    'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]],
    'PageFileLow' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 5, native_type = 'unsigned long long')]],
    'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]],
    'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]],
    'Transition' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]],
    'UsedPageTableEntries' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 22, native_type = 'unsigned long long')]],
    'InStore' : [ 0x0, ['BitField', dict(start_bit = 22, end_bit = 23, native_type = 'unsigned long long')]],
    'Reserved' : [ 0x0, ['BitField', dict(start_bit = 23, end_bit = 32, native_type = 'unsigned long long')]],
    'PageFileHigh' : [ 0x0, ['BitField', dict(start_bit = 32, end_bit = 64, native_type = 'unsigned long long')]],
    } ],
}
class DumpFilesVTypesx86(obj.ProfileModification):
    """This modification applies the vtypes for all
    versions of 32bit Windows."""

    before = ['WindowsObjectClasses']
    conditions = {'os': lambda os_name: os_name == 'windows',
                  'memory_model': lambda model: model == '32bit'}

    def modification(self, profile):
        # Merge the PAE-aware 8-byte PTE definitions into the profile.
        profile.vtypes.update(ntkrnlpa_types_x86)
class DumpFiles(common.AbstractWindowsCommand):
"""Extract memory mapped and cached files"""
    def __init__(self, config, *args, **kwargs):
        """Register the command-line options used by this plugin.

        Args:
            config: The Volatility configuration object the options are
                registered against.
        """
        common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
        # Kernel address space; populated in calculate().
        self.kaddr_space = None
        # Parsed FILTER values; populated in calculate().
        self.filters = []
        config.add_option('REGEX', short_option = 'r',
                      help = 'Dump files matching REGEX',
                      action = 'store', type = 'string')
        config.add_option('IGNORE-CASE', short_option = 'i',
                      help = 'Ignore case in pattern match',
                      action = 'store_true', default = False)
        config.add_option('OFFSET', short_option = 'o', default = None,
                      help = 'Dump files for Process with physical address OFFSET',
                      action = 'store', type = 'int')
        config.add_option('PHYSOFFSET', short_option = 'Q', default = None,
                      help = 'Dump File Object at physical address PHYSOFFSET',
                      action = 'store', type = 'int')
        config.add_option('DUMP-DIR', short_option = 'D', default = None,
                      cache_invalidator = False,
                      help = 'Directory in which to dump extracted files')
        config.add_option('SUMMARY-FILE', short_option = 'S', default = None,
                      cache_invalidator = False,
                      help = 'File where to store summary information')
        config.add_option('PID', short_option = 'p', default = None,
                      help = 'Operate on these Process IDs (comma-separated)',
                      action = 'store', type = 'str')
        config.add_option('NAME', short_option = 'n',
                      help = 'Include extracted filename in output file path',
                      action = 'store_true', default = False)
        config.add_option('UNSAFE', short_option = 'u',
                      help = 'Relax safety constraints for more data',
                      action = 'store_true', default = False)
        # Possible filters include:
        # SharedCacheMap,DataSectionObject,ImageSectionObject,HandleTable,VAD
        config.add_option("FILTER", short_option = 'F', default = None,
                      help = 'Filters to apply (comma-separated)')
def filter_tasks(self, tasks):
""" Reduce the tasks based on the user selectable PIDS parameter.
Returns a reduced list or the full list if config.PIDS not specified.
"""
if self._config.PID is None:
return tasks
try:
pidlist = [int(p) for p in self._config.PID.split(',')]
except ValueError:
debug.error("Invalid PID {0}".format(self._config.PID))
return [t for t in tasks if t.UniqueProcessId in pidlist]
def audited_read_bytes(self, vm, vaddr, length, pad):
""" This function provides an audited zread capability
It performs a similar function to zread, in that it will
pad "invalid" pages. The main difference is that it allows
us to collect auditing information about which pages were actually
present and which ones were padded.
Args:
vm: The address space to read the data from.
vaddr: The virtual address to start reading the data from.
length: How many bytes to read
pad: This argument controls if the unavailable bytes are padded.
Returns:
ret: Data that was read
mdata: List of pages that are memory resident
zpad: List of pages that not memory resident
Raises:
"""
zpad = []
mdata = []
vaddr, length = int(vaddr), int(length)
ret = ''
while length > 0:
chunk_len = min(length, PAGE_SIZE - (vaddr % PAGE_SIZE))
buf = vm.read(vaddr, chunk_len)
if vm.vtop(vaddr) is None:
zpad.append([vaddr, chunk_len])
if pad:
buf = '\x00' * chunk_len
else:
buf = ''
else:
mdata.append([vaddr, chunk_len])
ret += buf
vaddr += chunk_len
length -= chunk_len
return ret, mdata, zpad
def calculate(self):
""" Finds all the requested FILE_OBJECTS
Traverses the VAD and HandleTable to find all requested
FILE_OBJECTS
"""
# Initialize containers for collecting artifacts.
control_area_list = []
shared_maps = []
procfiles = []
# These lists are used for object collecting files from
# both the VAD and handle tables
vadfiles = []
handlefiles = []
# Determine which filters the user wants to see
self.filters = []
if self._config.FILTER:
self.filters = self._config.FILTER.split(',')
# Instantiate the kernel address space
self.kaddr_space = utils.load_as(self._config)
# Check to see if the physical address offset was passed for a
# particular process. Otherwise, use the whole task list.
if self._config.OFFSET != None:
tasks_list = [taskmods.DllList.virtual_process_from_physical_offset(
self.kaddr_space, self._config.OFFSET)]
else:
# Filter for the specified processes
tasks_list = self.filter_tasks(tasks_mod.pslist(self.kaddr_space))
# If a regex is specified, build it.
if self._config.REGEX:
try:
if self._config.IGNORE_CASE:
file_re = re.compile(self._config.REGEX, re.I)
else:
file_re = re.compile(self._config.REGEX)
except re.error, e:
debug.error('Error parsing regular expression: {0:s}'.format(e))
# Check to see if a specific physical address was specified for a
# FILE_OBJECT. In particular, this is useful for FILE_OBJECTS that
# are found with filescan that are not associated with a process
# For example, $Mft.
if self._config.PHYSOFFSET:
file_obj = obj.Object("_FILE_OBJECT", self._config.PHYSOFFSET, self.kaddr_space.base, native_vm = self.kaddr_space)
procfiles.append((None, [file_obj]))
#return
# Iterate through the process list and collect all references to
# FILE_OBJECTS from both the VAD and HandleTable. Each open handle to a file
# has a corresponding FILE_OBJECT.
if not self._config.PHYSOFFSET:
for task in tasks_list:
pid = task.UniqueProcessId
# Extract FILE_OBJECTS from the VAD
if not self.filters or "VAD" in self.filters:
for vad in task.VadRoot.traverse():
if vad != None:
try:
control_area = vad.ControlArea
if not control_area:
continue
file_object = vad.FileObject
if file_object:
vadfiles.append(file_object)
except AttributeError:
pass
if not self.filters or "HandleTable" in self.filters:
# Extract the FILE_OBJECTS from the handle table
if task.ObjectTable.HandleTableList:
for handle in task.ObjectTable.handles():
otype = handle.get_object_type()
if otype == "File":
file_obj = handle.dereference_as("_FILE_OBJECT")
handlefiles.append(file_obj)
# Append the lists of file objects
#allfiles = handlefiles + vadfiles
procfiles.append((pid, handlefiles + vadfiles))
for pid, allfiles in procfiles:
for file_obj in allfiles:
if not self._config.PHYSOFFSET:
offset = file_obj.obj_offset
else:
offset = self._config.PHYSOFFSET
name = None
if file_obj.FileName:
name = str(file_obj.file_name_with_device())
# Filter for specific FILE_OBJECTS based on user defined
# regular expression.
if self._config.REGEX:
if not name:
continue
if not file_re.search(name):
continue
# The SECTION_OBJECT_POINTERS structure is used by the memory
# manager and cache manager to store file-mapping and cache information
# for a particular file stream. We will use it to determine what type
# of FILE_OBJECT we have and how it should be parsed.
if file_obj.SectionObjectPointer:
DataSectionObject = \
file_obj.SectionObjectPointer.DataSectionObject
SharedCacheMap = \
file_obj.SectionObjectPointer.SharedCacheMap
ImageSectionObject = \
file_obj.SectionObjectPointer.ImageSectionObject
# The ImageSectionObject is used to track state information for
# an executable file stream. We will use it to extract memory
# mapped binaries.
if not self.filters or "ImageSectionObject" in self.filters:
if ImageSectionObject and ImageSectionObject != 0:
summaryinfo = {}
# It points to a image section object( CONTROL_AREA )
control_area = \
ImageSectionObject.dereference_as('_CONTROL_AREA')
if not control_area in control_area_list:
control_area_list.append(control_area)
# The format of the filenames: file.<pid>.<control_area>.[img|dat]
ca_offset_string = "0x{0:x}".format(control_area.obj_offset)
if self._config.NAME and name != None:
fname = name.split("\\")
ca_offset_string += "." + fname[-1]
file_string = ".".join(["file", str(pid), ca_offset_string, IMAGE_EXT])
of_path = os.path.join(self._config.DUMP_DIR, file_string)
(mdata, zpad) = control_area.extract_ca_file(self._config.UNSAFE)
summaryinfo['name'] = name
summaryinfo['type'] = "ImageSectionObject"
if pid:
summaryinfo['pid'] = int(pid)
else:
summaryinfo['pid'] = None
summaryinfo['present'] = mdata
summaryinfo['pad'] = zpad
summaryinfo['fobj'] = int(offset)
summaryinfo['ofpath'] = of_path
yield summaryinfo
# The DataSectionObject is used to track state information for
# a data file stream. We will use it to extract artifacts of
# memory mapped data files.
if not self.filters or "DataSectionObject" in self.filters:
if DataSectionObject and DataSectionObject != 0:
summaryinfo = {}
# It points to a data section object (CONTROL_AREA)
control_area = DataSectionObject.dereference_as('_CONTROL_AREA')
if not control_area in control_area_list:
control_area_list.append(control_area)
# The format of the filenames: file.<pid>.<control_area>.[img|dat]
ca_offset_string = "0x{0:x}".format(control_area.obj_offset)
if self._config.NAME and name != None:
fname = name.split("\\")
ca_offset_string += "." + fname[-1]
file_string = ".".join(["file", str(pid), ca_offset_string, DATA_EXT])
of_path = os.path.join(self._config.DUMP_DIR, file_string)
(mdata, zpad) = control_area.extract_ca_file(self._config.UNSAFE)
summaryinfo['name'] = name
summaryinfo['type'] = "DataSectionObject"
if pid:
summaryinfo['pid'] = int(pid)
else:
summaryinfo['pid'] = None
summaryinfo['present'] = mdata
summaryinfo['pad'] = zpad
summaryinfo['fobj'] = int(offset)
summaryinfo['ofpath'] = of_path
yield summaryinfo
# The SharedCacheMap is used to track views that are mapped to the
# data file stream. Each cached file has a single SHARED_CACHE_MAP object,
# which has pointers to slots in the system cache which contain views of the file.
# The shared cache map is used to describe the state of the cached file.
if self.filters and "SharedCacheMap" not in self.filters:
continue
if SharedCacheMap:
vacbary = []
summaryinfo = {}
#The SharedCacheMap member points to a SHARED_CACHE_MAP object.
shared_cache_map = SharedCacheMap.dereference_as('_SHARED_CACHE_MAP')
if shared_cache_map.obj_offset == 0x0:
continue
# Added a semantic check to make sure the data is in a sound state. It's better
# to catch it early.
if not shared_cache_map.is_valid():
continue
if not shared_cache_map.obj_offset in shared_maps:
shared_maps.append(shared_cache_map.obj_offset)
else:
continue
shared_cache_map_string = ".0x{0:x}".format(shared_cache_map.obj_offset)
if self._config.NAME and name != None:
fname = name.split("\\")
shared_cache_map_string = shared_cache_map_string + "." + fname[-1]
of_path = os.path.join(self._config.DUMP_DIR, "file." + str(pid) + shared_cache_map_string + ".vacb")
vacbary = shared_cache_map.extract_scm_file()
summaryinfo['name'] = name
summaryinfo['type'] = "SharedCacheMap"
if pid:
summaryinfo['pid'] = int(pid)
else:
summaryinfo['pid'] = None
summaryinfo['fobj'] = int(offset)
summaryinfo['ofpath'] = of_path
summaryinfo['vacbary'] = vacbary
yield summaryinfo
    def render_text(self, outfd, data):
        """Renders output for the dumpfiles plugin.

        This includes extracting the file artifacts from memory
        to the specified dump directory.

        Args:
            outfd: The file descriptor to write the text to.
            data: (summaryinfo) iterable of per-artifact summary dicts
                produced by calculate().
        """
        # Summary file object; one JSON record is appended per extracted artifact.
        summaryfo = None
        summaryinfo = data
        # The dump directory is mandatory: every recovered artifact is written to disk.
        if self._config.DUMP_DIR == None:
            debug.error("Please specify a dump directory (--dump-dir)")
        if not os.path.isdir(self._config.DUMP_DIR):
            debug.error(self._config.DUMP_DIR + " is not a directory")
        if self._config.SUMMARY_FILE:
            summaryfo = open(self._config.SUMMARY_FILE, 'wb')
        for summaryinfo in data:
            if summaryinfo['type'] == "DataSectionObject":
                outfd.write("DataSectionObject {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))
                # Nothing memory-resident to extract for this file.
                if len(summaryinfo['present']) == 0:
                    continue
                of = open(summaryinfo['ofpath'], 'wb')
                for mdata in summaryinfo['present']:
                    rdata = None
                    # mdata holds (physical address, file offset, size); skip
                    # entries without a backing physical page.
                    if not mdata[0]:
                        continue
                    try:
                        rdata = self.kaddr_space.base.read(mdata[0], mdata[2])
                    except (IOError, OverflowError):
                        debug.debug("IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2]))
                    if not rdata:
                        continue
                    # Place the recovered data at its offset within the output file.
                    of.seek(mdata[1])
                    of.write(rdata)
                    continue
                # XXX Verify FileOffsets
                #for zpad in summaryinfo['pad']:
                #    of.seek(zpad[0])
                #    of.write("\0" * zpad[1])
                if self._config.SUMMARY_FILE:
                    json.dump(summaryinfo, summaryfo)
                of.close()
            elif summaryinfo['type'] == "ImageSectionObject":
                outfd.write("ImageSectionObject {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))
                if len(summaryinfo['present']) == 0:
                    continue
                of = open(summaryinfo['ofpath'], 'wb')
                for mdata in summaryinfo['present']:
                    rdata = None
                    if not mdata[0]:
                        continue
                    try:
                        rdata = self.kaddr_space.base.read(mdata[0], mdata[2])
                    except (IOError, OverflowError):
                        debug.debug("IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2]))
                    if not rdata:
                        continue
                    of.seek(mdata[1])
                    of.write(rdata)
                    continue
                # XXX Verify FileOffsets
                #for zpad in summaryinfo['pad']:
                #    print "ZPAD 0x%x"%(zpad[0])
                #    of.seek(zpad[0])
                #    of.write("\0" * zpad[1])
                if self._config.SUMMARY_FILE:
                    json.dump(summaryinfo, summaryfo)
                of.close()
            elif summaryinfo['type'] == "SharedCacheMap":
                outfd.write("SharedCacheMap {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))
                of = open(summaryinfo['ofpath'], 'wb')
                # Each VACB describes one cached view of the file stream.
                for vacb in summaryinfo['vacbary']:
                    if not vacb:
                        continue
                    (rdata, mdata, zpad) = self.audited_read_bytes(self.kaddr_space, vacb['baseaddr'], vacb['size'], True)
                    ### We need to update the mdata,zpad
                    if rdata:
                        try:
                            of.seek(vacb['foffset'])
                            of.write(rdata)
                        except IOError:
                            # TODO: Handle things like write errors (not enough disk space, etc)
                            continue
                    vacb['present'] = mdata
                    vacb['pad'] = zpad
                if self._config.SUMMARY_FILE:
                    json.dump(summaryinfo, summaryfo)
                of.close()
            else:
                # Unknown artifact type: stop rendering entirely.
                return
        if self._config.SUMMARY_FILE:
            summaryfo.close()
|
import shutil
import subprocess
import tempfile
from urllib.parse import urlparse, urlunparse
from CommonServerPython import *
''' GLOBALS '''
# Remote connection settings; populated once per run by initialize_instance().
HOSTNAME = ''
USERNAME = ''
PORT = ''
SSH_EXTRA_PARAMS = ''
SCP_EXTRA_PARAMS = ''
DOCUMENT_ROOT = ''
# On-disk scratch file holding the SSH private key for the lifetime of the
# execution; delete=False keeps it until cleanup is attempted in main().
CERTIFICATE_FILE = tempfile.NamedTemporaryFile(delete=False, mode='w')
INTEGRATION_COMMAND_NAME = 'pan-os-edl'
def initialize_instance(params: Dict[str, str]) -> None:
    """Load connection settings from the instance parameters into module globals.

    Also materializes the SSH key material on disk via create_certificate_file().
    """
    global HOSTNAME, USERNAME, PORT, SSH_EXTRA_PARAMS, SCP_EXTRA_PARAMS, DOCUMENT_ROOT, CERTIFICATE_FILE
    authentication = params.get('Authentication', {})  # type: ignore
    HOSTNAME = str(params.get('hostname', ''))  # type: ignore
    USERNAME = str(authentication.get('identifier', ''))  # type: ignore
    raw_port = params.get('port', '')
    if raw_port and len(raw_port) > 0:  # type: ignore
        PORT = str(raw_port)
    else:
        PORT = ''
    raw_ssh_extra = params.get('ssh_extra_params')
    SSH_EXTRA_PARAMS = raw_ssh_extra.split() if raw_ssh_extra else None  # type: ignore
    raw_scp_extra = params.get('scp_extra_params')
    SCP_EXTRA_PARAMS = raw_scp_extra.split() if raw_scp_extra else None  # type: ignore
    raw_document_root = params.get('document_root')
    DOCUMENT_ROOT = f'/{raw_document_root}' if raw_document_root else ''
    create_certificate_file(authentication)
def _write_key_material(material: str) -> None:
    # Persist the key material into the temp file and lock its permissions down.
    CERTIFICATE_FILE.write(material)
    CERTIFICATE_FILE.flush()
    os.chmod(CERTIFICATE_FILE.name, 0o400)


def create_certificate_file(authentication) -> None:
    """Write the SSH private key from the instance credentials to disk.

    Accepts either a 'credentials.sshkey' entry or a certificate pasted into
    the password field; raises DemistoException when neither is usable.
    """
    password = authentication.get('password', None)
    certificate = None
    if 'credentials' in authentication and 'sshkey' in authentication['credentials'] and len(
            authentication['credentials']['sshkey']) > 0:
        certificate = authentication.get('credentials', None).get('sshkey')
    if certificate:
        _write_key_material(certificate)
    elif password:
        # check that password field holds a certificate and not a password
        if password.find('-----') == -1:
            raise DemistoException('Password parameter must contain a certificate.')
        # split certificate by dashes
        password_list = password.split('-----')
        # replace spaces with newline characters in the key body only
        password_fixed = '-----'.join(password_list[:2] + [password_list[2].replace(' ', '\n')] + password_list[3:])
        _write_key_material(password_fixed)
    else:
        raise DemistoException('To connect to the remote server, provide a certificate.')
''' UTILS '''
def ssh_execute(command: str):
    """Run *command* on the remote host over SSH and return its stdout.

    Builds the ssh argv once (optional port, optional extra params) instead of
    duplicating the subprocess call across four branches; argv ordering is
    unchanged: ssh -o ... -i KEY [-p PORT] [EXTRA...] user@host command.

    Raises:
        DemistoException: on permission failures or any other non-zero exit
            (except the benign "Permanently added" host-key warning).
    Side effects:
        For a grep command that matched nothing (exit status 1), posts a
        result entry and terminates via sys.exit(0).
    """
    param_list = ['ssh', '-o', 'StrictHostKeyChecking=no', '-i', CERTIFICATE_FILE.name]
    if PORT:
        param_list += ['-p', PORT]
    if SSH_EXTRA_PARAMS:
        param_list += SSH_EXTRA_PARAMS  # type: ignore
    param_list += [USERNAME + '@' + HOSTNAME, command]
    result = subprocess.run(param_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if result.returncode != 0:
        if result.stderr:
            if result.stderr.find("Warning: Permanently added") != -1:
                return result.stdout  # ignore addition of new hosts warnings
            elif result.stderr.find("Permission denied") != -1:
                raise DemistoException(
                    'Permission denied, check your username and certificate.\n' + 'Got error: ' + result.stderr)
            else:
                raise DemistoException(result.stderr)
        elif command.find('grep') != -1 and result.returncode == 1:
            # a search command that did not find any value
            demisto.results({
                'Type': 11,
                'Contents': 'Search string was not found in the external file path given.',
                'ContentsFormat': formats['text']
            })
            sys.exit(0)
        else:
            raise DemistoException(f'Command failed with exit status:{str(result.returncode)}')
    return result.stdout
def scp_execute(file_name: str, file_path: str):
    """Copy local *file_name* to *file_path* on the remote host via SCP.

    Builds the scp argv once (optional extra params) instead of duplicating
    the subprocess call in two branches.

    Returns:
        True on success (or on the benign "Permanently added" warning).
    Raises:
        DemistoException: on any other failure.
    """
    param_list = ['scp', '-o', 'StrictHostKeyChecking=no', '-i', CERTIFICATE_FILE.name]
    if SCP_EXTRA_PARAMS:
        param_list += SCP_EXTRA_PARAMS  # type: ignore
    param_list += [file_name, f'{USERNAME}@{HOSTNAME}:\'{file_path}\'']
    result = subprocess.run(param_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if result.returncode != 0:
        if result.stderr:
            if result.stderr.find("Warning: Permanently added") != -1:
                return True  # ignore addition of new hosts warnings
            else:
                raise DemistoException(result.stderr)
        else:
            raise DemistoException(f'Command failed with exit status:{str(result.returncode)}')
    else:
        return True
def parse_url(item: str) -> str:
    """ Parse url if in url form to valid EDL form - without http / https

    Args:
        item(str): Item to parse.

    Returns:
        str: parsed item, if URL returned without http / https

    Examples:
        >>> parse_url('http://google.com')
        'google.com'
        >>> parse_url('https://google.com')
        'google.com'
        >>> parse_url('https://google.com/hello_world')
        'google.com/hello_world'
        >>> parse_url('not url')
        'not url'
    """
    try:
        url_obj = urlparse(item)._replace(scheme='')
        stripped = urlunparse(url_obj)
        # Only strip the leading '//' netloc separator left by urlunparse;
        # the previous replace('//', '') also collapsed double slashes that
        # legitimately appear inside the path.
        return stripped[2:] if stripped.startswith('//') else stripped
    except ValueError:
        return item
def parse_items(items: str) -> List[str]:
    """ Parse list of item to update, parsing steps:
        1. Remove http and https from each item.

    Args:
        items(str): items for update

    Returns:
        list: list of parsed items.
    """
    # Same transform as a comprehension, expressed via map().
    return list(map(parse_url, argToList(items)))
''' COMMANDS '''
def edl_get_external_file(file_path: str, retries: int = 1) -> str:
    """Fetch the remote file contents over SSH, retrying when the payload
    looks corrupted (intermittent responses arrive with a newline as nearly
    every other character)."""
    cat_command = f'cat \'{file_path}\''
    attempts_left = retries
    while attempts_left > 0:
        contents = ssh_execute(cat_command)
        # counting newlines as in some edge cases the external web server returns the file content intermittently
        # with newline as every other char
        newline_count = float(contents.count('\n'))
        if newline_count <= len(contents) / 3:
            return contents
        demisto.info(f'The number of newlines chars in the file is too big. Try number {attempts_left} before failure.')
        attempts_left -= 1
    # if we get here, we failed as the file contains too many newlines to be valid
    raise DemistoException('The file contains too many newlines to be valid. '
                           'Please check the file contents on the external web server manually.')
def edl_get_external_file_command(args: dict):
    """
    Get external file from web-server and prints to the war room
    """
    retries = int(args.get('retries', '1'))
    remote_path = str(args.get('file_path', ''))
    if DOCUMENT_ROOT:
        remote_path = os.path.join(DOCUMENT_ROOT, remote_path)
    file_contents = edl_get_external_file(remote_path, retries)
    entry_md = tableToMarkdown('File Content:', sorted(file_contents.split('\n')), headers=['List'])
    demisto.results({
        'ContentsFormat': formats['markdown'],
        'Type': entryTypes['note'],
        'Contents': entry_md
    })
def edl_search_external_file(file_path: str, search_string: str):
    """Return the grep output for *search_string* inside the remote file."""
    grep_command = f"grep '{search_string}' '{file_path}'"
    return ssh_execute(grep_command)
def edl_search_external_file_command(args: dict):
    """
    Search the external file and return all matching entries to Warroom
    """
    search_string: str = str(args.get('search_string', ''))
    remote_path: str = str(args.get('file_path', ''))
    if DOCUMENT_ROOT:
        remote_path = os.path.join(DOCUMENT_ROOT, remote_path)
    matches = edl_search_external_file(remote_path, search_string)
    results_md = tableToMarkdown(f'Search Results for {search_string}:', sorted(matches.split('\n')),
                                 headers=['Result'])
    demisto.results({
        'ContentsFormat': formats['markdown'],
        'Type': entryTypes['note'],
        'Contents': results_md
    })
def edl_update_external_file(file_path: str, list_name: str, verbose: bool):
    """Write the named context list to *file_path* on the remote host via SCP.

    Args:
        file_path: remote destination path.
        list_name: key of the list inside the integration context.
        verbose: when True, read the remote file back and show its contents.

    Raises:
        DemistoException: if the list is missing from the context or the
            upload fails.
    """
    dict_of_lists = demisto.getIntegrationContext()
    list_data = dict_of_lists.get(list_name)
    if list_data is None:
        # sorted(None) used to raise a bare TypeError here; fail with a clear message.
        raise DemistoException(f'List {list_name} was not found in the instance context.')
    list_data = sorted(list_data)
    file_name = file_path.rsplit('/', 1)[-1]
    if not file_name.endswith('.txt'):
        file_name += '.txt'
    try:
        with open(file_name, 'w') as file:
            file.write("\n".join(list_data))
        success = scp_execute(file_name, file_path)
    finally:
        # shutil.rmtree() only removes directories and, with ignore_errors=True,
        # silently leaked this temporary file; delete the file itself.
        if os.path.isfile(file_name):
            os.remove(file_name)
    if not success:
        raise DemistoException('External file was not updated successfully.')
    else:
        if verbose:
            external_file_items = ssh_execute(f'cat \'{file_path}\'')
            if external_file_items:
                md = tableToMarkdown('Updated File Data:', external_file_items, headers=['Data'])
            else:
                md = 'External file has no items.'
        else:
            md = 'External file updated successfully.'
        demisto.results({
            'Type': entryTypes['note'],
            'Contents': md,
            'ContentsFormat': formats['markdown']
        })
def edl_update_external_file_command(args: dict):
    """
    Overrides external file path with internal list
    """
    verbose = args.get('verbose') == 'true'
    list_name: str = str(args.get('list_name', ''))
    remote_path: str = str(args.get('file_path', ''))
    if DOCUMENT_ROOT:
        remote_path = os.path.join(DOCUMENT_ROOT, remote_path)
    edl_update_external_file(remote_path, list_name, verbose)
def edl_update_internal_list(list_name: str, list_items: list, add: bool, verbose: bool):
    """Add or remove *list_items* from the named list in the integration context.

    Args:
        list_name: key of the list inside the integration context.
        list_items: items to add or remove.
        add: True to add the items, False to remove them.
        verbose: when True, echo the resulting list to the war room.
    """
    dict_of_lists = demisto.getIntegrationContext()
    if not dict_of_lists:
        # First list ever stored: create the context from scratch.
        demisto.debug('PAN-OS EDL Management integration context is empty.')
        dict_of_lists = {list_name: list_items}
        if verbose:
            md = tableToMarkdown('List items:', list_items, headers=[list_name])
        else:
            md = 'Instance context updated successfully.'
    else:
        if not dict_of_lists.get(list_name, None) and not add:
            raise Exception(f'Cannot remove items from an empty list: {list_name}.')
        if dict_of_lists.get(list_name, None):
            if add:
                chosen_list = dict_of_lists.get(list_name)
                # A single stored value may not be wrapped in a list yet; normalize first.
                if not isinstance(chosen_list, list):
                    chosen_list = [chosen_list]
                # set() union also de-duplicates against what is already stored.
                list_items = list(set(chosen_list + list_items))
            else:  # remove
                list_items = [item for item in dict_of_lists.get(list_name) if item not in list_items]
        if not add and len(list_items) == 0:
            # delete list from instance context, can happen only upon remove of objects
            demisto.debug(f'PAN-OS EDL Management deleting {list_name} from the integration context.')
            dict_of_lists.pop(list_name, None)
            md = 'List is empty, deleted from instance context.'
        else:
            # update list in instance context, can happen upon removal or addition of objects
            sorted_list = sorted(list_items)
            dict_of_lists.update({list_name: sorted_list})
            if verbose:
                md = tableToMarkdown('List items:', sorted_list, headers=[list_name])
            else:
                md = 'Instance context updated successfully.'
    if not dict_of_lists:  # to be removed, debugging purposes only
        demisto.debug('PAN-OS EDL Management updating an empty object to the integration context.')
    demisto.debug(f'PAN-OS EDL Management updating {list_name} with {len(list_items)} in the integration context.')
    demisto.setIntegrationContext(dict_of_lists)
    demisto.results({
        'ContentsFormat': formats['markdown'],
        'Type': entryTypes['note'],
        'Contents': md
    })
def edl_update_internal_list_command(args: dict):
    """
    Updates the instance context with the list name and items given.

    Args:
        args: command arguments ('list_name', 'list_items',
            'add_or_remove', 'verbose').
    """
    list_name: str = str(args.get('list_name', ''))
    list_items: list = argToList(str(args.get('list_items', '')))
    if args.get('add_or_remove') not in ['add', 'remove']:
        # Raise DemistoException for consistency with edl_update() and the
        # rest of the integration's error handling (was a bare Exception).
        raise DemistoException('add_or_remove argument is not \'add\' neither \'remove\'.')
    add = args.get('add_or_remove') == 'add'
    verbose = args.get('verbose') == 'true'
    edl_update_internal_list(list_name, list_items, add, verbose)
def edl_update(args: dict):
    """
    Updates the instance context with the list name and items given
    Overrides external file path with internal list
    """
    target_path: str = str(args.get('file_path', ''))
    if DOCUMENT_ROOT:
        target_path = os.path.join(DOCUMENT_ROOT, target_path)
    # Parse list items
    name_of_list: str = str(args.get('list_name', ''))
    parsed_items = parse_items(items=str(args.get('list_items', '')))
    operation = args.get('add_or_remove')
    if operation not in ('add', 'remove'):
        raise DemistoException('add_or_remove argument is neither \'add\' nor \'remove\'.')
    should_add = operation == 'add'
    be_verbose = args.get('verbose') == 'true'
    # First refresh the internal list, then push it to the external file.
    edl_update_internal_list(name_of_list, parsed_items, should_add, be_verbose)
    edl_update_external_file(target_path, name_of_list, be_verbose)
def edl_update_from_external_file(list_name: str, file_path: str, type_: str, retries: int):
    """Refresh the named context list from the remote file and return the
    stored (sorted) result. type_ 'merge' unions with the existing list,
    anything else overrides it."""
    dict_of_lists = demisto.getIntegrationContext()
    stored_items = dict_of_lists.get(list_name, None)
    remote_contents = edl_get_external_file(file_path, retries)
    remote_items_sorted = sorted(remote_contents.split('\n'))
    if not stored_items:
        # No existing list: store the remote contents verbatim.
        dict_of_lists.update({list_name: remote_items_sorted})
        demisto.setIntegrationContext(dict_of_lists)
        return remote_items_sorted
    remote_set = set(remote_items_sorted)
    remote_set.discard('')
    if type_ == 'merge':
        combined = set(stored_items).union(remote_set)
    else:  # type_ == 'override'
        combined = remote_set
    refreshed_sorted = sorted(combined)
    dict_of_lists.update({list_name: refreshed_sorted})
    demisto.setIntegrationContext(dict_of_lists)
    return refreshed_sorted
def edl_update_from_external_file_command(args: dict):
    """
    Updates internal list data with external file contents
    """
    retries: int = int(args.get('retries', '1'))
    merge_type: str = args.get('type', 'false')
    verbose: bool = args.get('verbose') == 'true'
    list_name: str = str(args.get('list_name', ''))
    remote_path: str = str(args.get('file_path', ''))
    if DOCUMENT_ROOT:
        remote_path = os.path.join(DOCUMENT_ROOT, remote_path)
    refreshed_list = edl_update_from_external_file(list_name, remote_path, merge_type, retries)
    md = (tableToMarkdown('List items:', refreshed_list, headers=[list_name])
          if verbose else 'Instance context updated successfully')
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': md,
        'ContentsFormat': formats['markdown']
    })
def edl_delete_external_file(file_path: str) -> str:
    """Remove *file_path* on the remote host (rm -f: no error if absent)."""
    removal_command = f"rm -f '{file_path}'"
    ssh_execute(removal_command)
    return 'File deleted successfully'
def edl_delete_external_file_command(args: dict):
    """
    Delete external file
    """
    remote_path = str(args.get('file_path', ''))
    if DOCUMENT_ROOT:
        remote_path = os.path.join(DOCUMENT_ROOT, remote_path)
    deletion_message = edl_delete_external_file(remote_path)
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': deletion_message,
        'ContentsFormat': formats['text']
    })
def edl_list_internal_lists_command():
    """
    List all instance context lists to the war room.
    """
    dict_of_lists = demisto.getIntegrationContext()
    # sorted() iterates the dict's keys directly; the previous
    # sorted(list(d.keys())) wrappers were redundant.
    list_names = sorted(dict_of_lists)
    md = tableToMarkdown('Instance context Lists:', list_names, headers=['List names'])
    demisto.results({
        'ContentsFormat': formats['markdown'],
        'Type': entryTypes['note'],
        'Contents': md
    })
def edl_search_internal_list_command(args: dict):
    """
    Search a string on internal list
    """
    list_name = args.get('list_name')
    search_string = args.get('search_string')
    stored_lists = demisto.getIntegrationContext()
    target_list = stored_lists.get(list_name, None)
    # Decide entry type and message first, then emit a single result entry.
    if not target_list:
        entry_type = 11
        message = f'List {list_name} was not found in the instance context.'
    elif search_string in target_list:
        entry_type = entryTypes['note']
        message = f'Search string {search_string} is in the internal list {list_name}.'
    else:
        entry_type = 11
        message = f'Search string {search_string} was not found in the instance context list {list_name}.'
    demisto.results({
        'Type': entry_type,
        'Contents': message,
        'ContentsFormat': formats['text']
    })
def edl_print_internal_list_command(args: dict):
    """
    Print to the war room instance context list.
    """
    list_name = str(args.get('list_name', ''))
    dict_of_lists = demisto.getIntegrationContext()
    # Default to an empty list: the previous default of None made
    # sorted(None) raise TypeError before the "not found" branch could run.
    list_data = sorted(dict_of_lists.get(list_name, []))
    if not list_data:
        demisto.results({
            'Type': 11,
            'Contents': 'List was not found in instance context.',
            'ContentsFormat': formats['text']
        })
    else:
        md = tableToMarkdown('List items:', list_data, headers=[list_name])
        demisto.results({
            'Type': entryTypes['note'],
            'Contents': md,
            'ContentsFormat': formats['markdown']
        })
def edl_dump_internal_list_command(args: dict):
    """
    Dumps an instance context list to either a file or incident context.

    Args:
        args: command arguments; 'destination' is 'file' for a file entry,
            anything else updates the incident context. 'list_name' selects
            the list.
    """
    destination = args.get('destination')
    list_name = str(args.get('list_name', ''))
    dict_of_lists = demisto.getIntegrationContext()
    list_data = sorted(dict_of_lists.get(list_name, []))
    if not list_data:
        demisto.results({
            'Type': 11,
            'Contents': 'List was not found in instance context or has no data.',
            'ContentsFormat': formats['text']
        })
        sys.exit(0)
    if destination == 'file':  # dump list as file
        internal_file_path = demisto.uniqueFile()
        try:
            with open(internal_file_path, 'w') as f:
                f.write("\n".join(list_data))
            file_type = entryTypes['entryInfoFile']
            with open(internal_file_path, 'rb') as file:
                file_entry = fileResult(internal_file_path, file.read(), file_type)
            demisto.results(file_entry)
        finally:
            # shutil.rmtree() only removes directories and, with
            # ignore_errors=True, silently leaked this temp file; remove the
            # file itself.
            if os.path.isfile(internal_file_path):
                os.remove(internal_file_path)
    else:  # update incident context
        md = tableToMarkdown('List items:', list_data, headers=[list_name])
        ec = {
            'ListName': list_name,
            'ListItems': list_data
        }
        demisto.results({
            'Type': entryTypes['note'],
            'Contents': md,
            'ContentsFormat': formats['markdown'],
            'EntryContext': {
                "PANOSEDL(val.ListName == obj.ListName)": ec
            }
        })
def edl_compare_command(args: dict):
    """Diff the internal context list against the external file and report drift."""
    list_name = str(args.get('list_name', ''))
    file_path = str(args.get('file_path', ''))
    retries = int(args.get('retries', '1'))
    if DOCUMENT_ROOT:
        file_path = os.path.join(DOCUMENT_ROOT, file_path)
    dict_of_lists = demisto.getIntegrationContext()
    list_data = dict_of_lists.get(list_name, None)
    if not list_data:
        demisto.results({
            'Type': 11,
            'Contents': 'List was not found in instance context.',
            'ContentsFormat': formats['text']
        })
        # Nothing to compare against; stop the script entirely.
        sys.exit(0)
    file_data = edl_get_external_file(file_path, retries)
    if not file_data:
        demisto.results({
            'Type': 11,
            'Contents': 'file was not found in external web-server.',
            'ContentsFormat': formats['text']
        })
        sys.exit(0)
    # Compare as sets; the empty string comes from trailing newlines in the file.
    set_internal = set(list_data)
    set_external = set(file_data.split('\n'))
    set_external.discard('')
    unique_internal = set_internal - set_external
    unique_external = set_external - set_internal
    md = ''
    if unique_external:
        md += '### Warning: External file contains values that are not in the internal Demisto list.\n'
        md += '#### If these changes are unexpected, check who has permission to write to the external file.\n'
        md += tableToMarkdown('', list(unique_external),
                              headers=[file_path.rsplit('/')[-1]])
    if unique_internal:
        md += '### Warning: Internal list contains values that are not in the external file.\n'
        md += '#### If these changes are unexpected, check who has permission to write to the external file.\n'
        md += tableToMarkdown('', list(unique_internal), headers=[list_name])
    if len(md) == 0:
        md = 'Internal list and external file have the same values.'
    demisto.results({
        # NOTE(review): entry type 11 appears to be used as a warning entry
        # throughout this file — confirm against the server's entry-type constants.
        'Type': 11 if unique_external or unique_internal else entryTypes['note'],
        'Contents': md,
        'ContentsFormat': formats['markdown'],
    })
def edl_get_external_file_metadata_command(args: dict):
    """Fetch stat metadata (name, size, change time, line count) for the
    remote file and post it to the war room / entry context."""
    file_path = str(args.get('file_path', ''))
    if DOCUMENT_ROOT:
        file_path = os.path.join(DOCUMENT_ROOT, file_path)
    result = ssh_execute(f'stat \'{file_path}\'')
    file_size = int(result.split("Size: ", 1)[1].split(" ", 1)[0])
    file_name = file_path.split("/")[-1]
    # A trailing slash leaves an empty basename; fall back to the full path.
    # (Was `len(file_name) < 0`, which can never be true for a length.)
    if len(file_name) == 0:
        file_name = file_path
    last_modified_parts = result.split("Change: ", 1)[1].split(" ", 2)[0:2]
    last_modified = ' '.join(last_modified_parts)
    # wc -l counts newline characters, so add one for the final line.
    number_of_lines = int(ssh_execute(f'wc -l < \'{file_path}\'')) + 1
    metadata_outputs = {
        'FileName': file_name,
        'Size': file_size,
        'LastModified': last_modified,
        'NumberOfLines': number_of_lines
    }
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': result,
        'ContentsFormat': formats['text'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('File metadata:', metadata_outputs,
                                         ['FileName', 'Size', 'NumberOfLines', 'LastModified'], removeNull=True),
        'EntryContext': {"PANOSEDL(val.FileName == obj.FileName)": metadata_outputs}
    })
''' EXECUTION '''
def main() -> None:
    """Dispatch the incoming command and guarantee private-key cleanup."""
    command = demisto.command()
    args = demisto.args()
    params = demisto.params()
    LOG(f'command is {command}')
    commands = {
        f'{INTEGRATION_COMMAND_NAME}-get-external-file': edl_get_external_file_command,
        f'{INTEGRATION_COMMAND_NAME}-search-external-file': edl_search_external_file_command,
        f'{INTEGRATION_COMMAND_NAME}-update-internal-list': edl_update_internal_list_command,
        f'{INTEGRATION_COMMAND_NAME}-update-external-file': edl_update_external_file_command,
        f'{INTEGRATION_COMMAND_NAME}-update': edl_update,
        f'{INTEGRATION_COMMAND_NAME}-update-from-external-file': edl_update_from_external_file_command,
        f'{INTEGRATION_COMMAND_NAME}-delete-external-file': edl_delete_external_file_command,
        f'{INTEGRATION_COMMAND_NAME}-search-internal-list': edl_search_internal_list_command,
        f'{INTEGRATION_COMMAND_NAME}-print-internal-list': edl_print_internal_list_command,
        f'{INTEGRATION_COMMAND_NAME}-dump-internal-list': edl_dump_internal_list_command,
        f'{INTEGRATION_COMMAND_NAME}-compare': edl_compare_command,
        f'{INTEGRATION_COMMAND_NAME}-get-external-file-metadata': edl_get_external_file_metadata_command,
    }
    try:
        initialize_instance(params=params)
        if command == 'test-module':
            # Connectivity check: any successful remote command will do.
            ssh_execute('echo 1')
            return_results('ok')
        elif command == 'pan-os-edl-list-internal-lists':
            edl_list_internal_lists_command()
        elif command in commands:
            commands[command](args)
        else:
            raise NotImplementedError(f'Command "{command}" is not implemented in {INTEGRATION_COMMAND_NAME}.')
    except Exception as err:
        if str(err).find('warning') != -1:
            LOG(str(err))
        else:
            demisto.error(traceback.format_exc())  # print the traceback
            return_error(str(err), err)
    finally:
        # Always delete the on-disk private key. shutil.rmtree() only removes
        # directories and, with ignore_errors=True, silently left the key file
        # on disk; remove the file itself instead.
        try:
            os.remove(CERTIFICATE_FILE.name)
        except OSError:
            pass
        LOG.print_log()


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import random
import time
import os
import numpy as np
from optparse import OptionParser
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import LambdaLR as LR_Policy
import torch.nn as nn
import cat_and_dog_model as mnist_model
from dataloader import img_Dataset as mnist_Dataset
from tools.config_tools import Config
from tools import utils
import matplotlib as mpl
import pickle
from eval import test
mpl.use('Agg')
from matplotlib import pyplot as plt
# Parse the --config option and load the training configuration.
parser = OptionParser()
parser.add_option('--config',
                  type=str,
                  help="training configuration",
                  default="./configs/test_config.yaml")
(opts, args) = parser.parse_args()
assert isinstance(opts, object)
opt = Config(opts.config)
#print(opt)

if opt.checkpoint_folder is None:
    opt.checkpoint_folder = 'checkpoints'

# Create the checkpoint directory portably. os.system('mkdir ...') depended on
# a POSIX shell and failed silently on paths with spaces/metacharacters.
if not os.path.exists(opt.checkpoint_folder):
    os.makedirs(opt.checkpoint_folder)
def main():
    """Load the fold-1 dataset and evaluate the cat-vs-dog model using the
    module-level config ``opt`` (populated from the YAML file at import)."""
    global opt
    # NOTE(review): named "train_dataset" but only passed to a DataLoader that
    # is never used below; evaluation happens inside test() — confirm intent.
    train_dataset = mnist_Dataset(num_of_cross=0,cross=1)
    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with \"cuda: True\"")
        torch.manual_seed(opt.manualSeed)
    else:
        # NOTE(review): torch.manual_seed() is only called in the branch above;
        # on this path only the CUDA RNG is seeded — confirm this is intended.
        if int(opt.ngpu) == 1:
            print('so we use 1 gpu to training')
            print('setting gpu on gpuid {0}'.format(opt.gpu_id))
        if opt.cuda:
            # Restrict visible devices before any CUDA context is created.
            os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id
            torch.cuda.manual_seed(opt.manualSeed)
    cudnn.benchmark = True
    #loss_rec = np.load('acc_train.npy')
    #acc_rec = np.load('acc.npy')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batchSize,
                                               shuffle=True, num_workers=int(opt.workers))
    # create model
    model = mnist_model.cat_and_dog_resnet()
    if opt.init_model != '':
        print('loading pretrained model from {0}'.format(opt.init_model))
        model.load_state_dict(torch.load(opt.init_model))
    if opt.cuda:
        print('shift model and criterion to GPU .. ')
        model = model.cuda()
        # criterion = criterion.cuda()
    # Evaluate on cross-validation fold 1; result is the returned accuracy.
    acc = test(model,opt,0,Training =False,cross=1)


if __name__ == '__main__':
    main()
|
# -*- coding=utf-8 -*-
# Python 2-only script: reload(sys) and sys.setdefaultencoding() do not exist
# in Python 3 (setdefaultencoding is removed after interpreter start-up).
import sys
reload(sys)
sys.setdefaultencoding('utf8')


def ConvertCN(s):
    # Encode the string to GB18030 bytes (Chinese-capable superset of GBK).
    return s.encode('gb18030')


print (ConvertCN("fdÄã"))
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import conv_params, linear_params, bnparams, bnstats, \
flatten_params, flatten_stats
import numpy as np
class WideResNet(nn.Module):
    """Wide ResNet whose weights live in flat ``nn.ParameterDict``s.

    Parameters and batch-norm running statistics are produced by the
    functional helpers from ``utils`` and stored under flattened keys
    (e.g. ``group0_block1_conv0``); ``forward`` then applies them with
    ``torch.nn.functional`` calls instead of child modules.

    Args:
        depth: number of residual blocks per group.
        width: channel multiplier over the base widths [16, 32, 64].
        ninputs: number of input channels (3 for RGB).
        num_groups: number of extra down-sampling groups used in forward.
        num_classes: if not None, a final linear classifier head is added.
        dropout: dropout probability between the two convs of each block.
    """

    def __init__(self, depth, width, ninputs = 3,
                 num_groups = 3, num_classes = None, dropout = 0.):
        super(WideResNet, self).__init__()
        self.depth = depth
        self.width = width
        self.num_groups = num_groups
        self.num_classes = num_classes
        self.dropout = dropout
        self.mode = True  # Training flag; fed to batch_norm's `training=`
        #widths = torch.Tensor([16, 32, 64]).mul(width).int()
        # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the documented drop-in replacement.
        widths = np.array([16, 32, 64]).astype(int)*width

        def gen_block_params(ni, no):
            # One residual block: two 3x3 convs, plus a 1x1 projection
            # shortcut only when the channel count changes.
            return {
                'conv0': conv_params(ni, no, 3),
                'conv1': conv_params(no, no, 3),
                'bn0': bnparams(ni),
                'bn1': bnparams(no),
                'convdim': conv_params(ni, no, 1) if ni != no else None,
            }

        def gen_group_params(ni, no, count):
            return {'block%d' % i: gen_block_params(ni if i == 0 else no, no)
                    for i in range(count)}

        def gen_group_stats(ni, no, count):
            return {'block%d' % i: {'bn0': bnstats(ni if i == 0 else no), 'bn1': bnstats(no)}
                    for i in range(count)}

        params = {'conv0': conv_params(ni=ninputs, no=widths[0], k=3)}
        stats = {}
        # NOTE(review): with the default num_groups=3 this loop reads
        # widths[3], which is out of range for the 3-entry widths array;
        # callers appear to use num_groups=2 — confirm before relying on
        # the default.
        for i in range(num_groups+1):
            if i == 0:
                params.update({'group'+str(i): gen_group_params(widths[i], widths[i], depth)})
                stats.update({'group'+str(i): gen_group_stats(widths[i], widths[i], depth)})
            else:
                params.update({'group'+str(i): gen_group_params(widths[i-1], widths[i], depth)})
                stats.update({'group'+str(i): gen_group_stats(widths[i-1], widths[i], depth)})
        if num_classes is not None:
            params.update({'fc': linear_params(widths[i], num_classes)})
        params.update({'bn': bnparams(widths[i])})
        stats.update({'bn': bnstats(widths[i])})
        params = flatten_params(params)
        stats = flatten_stats(stats)
        # Register everything so .cuda()/.state_dict() see the tensors.
        # Running stats are stored as Parameters with requires_grad=False
        # (rather than buffers) — updated in place by F.batch_norm.
        self.params = nn.ParameterDict({})
        self.stats = nn.ParameterDict({})
        for key in params.keys():
            self.params.update({key:nn.Parameter(params[key],requires_grad=True)})
        for key in stats.keys():
            self.stats.update({key:nn.Parameter(stats[key], requires_grad=False)})

    ''' TODO:CHECK
    def train(self, mode=True):
        self.mode = mode
        for key in self.params.keys():
            self.params[key].requires_grad = self.mode
        return super(WideResNet, self).train(mode=mode)
    def eval(self):
        self.mode = False
        for key in self.params.keys():
            self.params[key].requires_grad = self.mode
        return super(WideResNet, self).eval()
    '''

    def forward(self, input):
        """Run the network; returns logits if num_classes was given, else the
        post-activation feature map."""
        def activation(x, params, stats, base, mode):
            # BN (using the flat running stats) followed by in-place ReLU.
            return F.relu(F.batch_norm(x, weight=params[base + '_weight'],
                                       bias=params[base + '_bias'],
                                       running_mean=stats[base + '_running_mean'],
                                       running_var=stats[base + '_running_var'],
                                       training=mode, momentum=0.1, eps=1e-5), inplace=True)

        def block(x, params, stats, base, mode, stride):
            # Pre-activation residual block.
            o1 = activation(x, params, stats, base + '_bn0', mode)
            y = F.conv2d(o1, params[base + '_conv0'], stride=stride, padding=1)
            o2 = activation(y, params, stats, base + '_bn1', mode)
            o2 = torch.nn.Dropout(p=self.dropout)(o2)  # Dropout from the code of ARC. dropout = 0.3
            z = F.conv2d(o2, params[base + '_conv1'], stride=1, padding=1)
            if base + '_convdim' in params:
                # projection shortcut when channels/stride change
                return z + F.conv2d(o1, params[base + '_convdim'], stride=stride)
            else:
                return z + x

        def group(o, params, stats, base, mode, stride):
            # `self.depth` blocks; only the first one applies the stride.
            for i in range(self.depth):
                o = block(o, params, stats, '%s_block%d' % (base, i), mode, stride if i == 0 else 1)
            return o

        assert input.is_cuda == self.params['conv0'].is_cuda
        if input.is_cuda:
            assert input.get_device() == self.params['conv0'].get_device()
        x = F.conv2d(input.float(), self.params['conv0'], padding=1)
        o = group(x, self.params, self.stats, 'group0', self.mode, stride=1)
        if self.num_groups >= 1:
            o = group(o, self.params, self.stats, 'group1', self.mode, stride=2)
        if self.num_groups >= 2:
            o = group(o, self.params, self.stats, 'group2', self.mode, stride=2)
        o = activation(o, self.params, self.stats, 'bn', self.mode)
        if self.num_classes is not None:
            # Global average pool over the spatial dims, then the linear head.
            o = F.avg_pool2d(o, o.shape[2], 1, 0)
            o = o.view(o.size(0), -1)
            o = F.linear(o, self.params['fc_weight'], self.params['fc_bias'])
        return o
def resnet(depth, width, num_classes, is_full_wrn = True, is_fully_convolutional = False):
    """Build a purely functional Wide-ResNet.

    Returns a tuple ``(forward_fn, flat_params, flat_stats)`` where
    ``forward_fn(input, params, stats, mode)`` runs the network. Four
    variants are selected by the flags:

    - is_full_wrn: use all three groups (else only group0/group1 — the
      "omniglot" configuration, see the override block below).
    - is_fully_convolutional: skip pooling/flatten/linear and return the
      post-activation feature map instead of logits.
    """
    #assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
    #n = (depth - 4) // 6
    #wrn = WideResNet(depth, width, ninputs=3,useCuda=True, num_groups=3, num_classes=num_classes)
    n = depth  # blocks per group
    widths = torch.Tensor([16, 32, 64]).mul(width).int()

    def gen_block_params(ni, no):
        # One pre-activation block; 1x1 projection only when channels change.
        return {
            'conv0': conv_params(ni, no, 3),
            'conv1': conv_params(no, no, 3),
            'bn0': bnparams(ni),
            'bn1': bnparams(no),
            'convdim': conv_params(ni, no, 1) if ni != no else None,
        }

    def gen_group_params(ni, no, count):
        return {'block%d' % i: gen_block_params(ni if i == 0 else no, no)
                for i in range(count)}

    def gen_group_stats(ni, no, count):
        return {'block%d' % i: {'bn0': bnstats(ni if i == 0 else no), 'bn1': bnstats(no)}
                for i in range(count)}

    params = {
        'conv0': conv_params(3,16,3),
        'group0': gen_group_params(16, widths[0], n),
        'group1': gen_group_params(widths[0], widths[1], n),
        'group2': gen_group_params(widths[1], widths[2], n),
        'bn': bnparams(widths[2]),
        'fc': linear_params(widths[2], num_classes),
    }
    stats = {
        'group0': gen_group_stats(16, widths[0], n),
        'group1': gen_group_stats(widths[0], widths[1], n),
        'group2': gen_group_stats(widths[1], widths[2], n),
        'bn': bnstats(widths[2]),
    }
    if not is_full_wrn:
        ''' omniglot '''
        # Two-group variant: final BN and classifier sit on group1's width.
        params['bn'] = bnparams(widths[1])
        #params['fc'] = linear_params(widths[1]*16*16, num_classes)
        params['fc'] = linear_params(widths[1], num_classes)
        stats['bn'] = bnstats(widths[1])
        '''
        # banknote
        params['bn'] = bnparams(widths[2])
        #params['fc'] = linear_params(widths[2]*16*16, num_classes)
        params['fc'] = linear_params(widths[2], num_classes)
        stats['bn'] = bnstats(widths[2])
        '''
    flat_params = flatten_params(params)
    flat_stats = flatten_stats(stats)

    def activation(x, params, stats, base, mode):
        # BN + in-place ReLU; flat keys here use '.' separators (vs '_'
        # inside the WideResNet class above).
        return F.relu(F.batch_norm(x, weight=params[base + '.weight'],
                                   bias=params[base + '.bias'],
                                   running_mean=stats[base + '.running_mean'],
                                   running_var=stats[base + '.running_var'],
                                   training=mode, momentum=0.1, eps=1e-5), inplace=True)

    def block(x, params, stats, base, mode, stride):
        o1 = activation(x, params, stats, base + '.bn0', mode)
        y = F.conv2d(o1, params[base + '.conv0'], stride=stride, padding=1)
        o2 = activation(y, params, stats, base + '.bn1', mode)
        z = F.conv2d(o2, params[base + '.conv1'], stride=1, padding=1)
        if base + '.convdim' in params:
            return z + F.conv2d(o1, params[base + '.convdim'], stride=stride)
        else:
            return z + x

    def group(o, params, stats, base, mode, stride):
        # n blocks; only the first applies the stride.
        for i in range(n):
            o = block(o, params, stats, '%s.block%d' % (base,i), mode, stride if i == 0 else 1)
        return o

    def full_wrn(input, params, stats, mode):
        # Three groups -> BN/ReLU -> global average pool -> linear logits.
        assert input.get_device() == params['conv0'].get_device()
        x = F.conv2d(input, params['conv0'], padding=1)
        g0 = group(x, params, stats, 'group0', mode, 1)
        g1 = group(g0, params, stats, 'group1', mode, 2)
        g2 = group(g1, params, stats, 'group2', mode, 2)
        o = activation(g2, params, stats, 'bn', mode)
        o = F.avg_pool2d(o, o.shape[2], 1, 0)
        o = o.view(o.size(0), -1)
        o = F.linear(o, params['fc.weight'], params['fc.bias'])
        return o

    def not_full_wrn(input, params, stats, mode):
        # Two groups only (omniglot variant); group2 is left unused.
        assert input.get_device() == params['conv0'].get_device()
        x = F.conv2d(input, params['conv0'], padding=1)
        g0 = group(x, params, stats, 'group0', mode, 1)
        g1 = group(g0, params, stats, 'group1', mode, 2)
        # omniglot
        o = activation(g1, params, stats, 'bn', mode)
        o = F.avg_pool2d(o, o.shape[2], 1, 0)
        # banknote
        '''
        g2 = group(g1, params, stats, 'group2', mode, 2)
        o = activation(g2, params, stats, 'bn', mode)
        o = F.avg_pool2d(o, 16, 1, 0)
        '''
        o = o.view(o.size(0), -1)
        o = F.linear(o, params['fc.weight'], params['fc.bias'])
        return o

    def fcn_full_wrn(input, params, stats, mode):
        # Fully-convolutional: stop after the final activation.
        assert input.get_device() == params['conv0'].get_device()
        x = F.conv2d(input, params['conv0'], padding=1)
        g0 = group(x, params, stats, 'group0', mode, 1)
        g1 = group(g0, params, stats, 'group1', mode, 2)
        g2 = group(g1, params, stats, 'group2', mode, 2)
        o = activation(g2, params, stats, 'bn', mode)
        return o

    def fcn_not_full_wrn(input, params, stats, mode):
        # Fully-convolutional two-group variant.
        assert input.get_device() == params['conv0'].get_device()
        x = F.conv2d(input, params['conv0'], padding=1)
        g0 = group(x, params, stats, 'group0', mode, 1)
        g1 = group(g0, params, stats, 'group1', mode, 2)
        o = activation(g1, params, stats, 'bn', mode)
        return o

    if is_fully_convolutional:
        if is_full_wrn:
            return fcn_full_wrn, flat_params, flat_stats
        else:
            return fcn_not_full_wrn, flat_params, flat_stats
    else:
        if is_full_wrn:
            return full_wrn, flat_params, flat_stats
        else:
            return not_full_wrn, flat_params, flat_stats
|
# -*- coding: utf-8 -*-
from yacc import yacc, willow_list
import lis as lis
def main():
    """Parse testfile.c with the yacc grammar and evaluate the resulting AST.

    Python 2 code (uses the `print` statement).
    """
    with open('testfile.c', 'r') as content_file:
        content = content_file.read()
    AST = yacc.parse(content)
    print AST
    # yacc.parse returns None on a parse failure; only evaluate on success.
    if AST != None:
        lis.eval(AST)

main()
from .base import *

# Production overrides: debugging off, real SMTP e-mail delivery with
# credentials pulled via get_secret() (presumably re-exported by base
# settings — confirm).
DEBUG = False
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST =get_secret("EMAIL_HOST")
EMAIL_PORT = get_secret("EMAIL_PORT")
EMAIL_HOST_USER = get_secret("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = get_secret("EMAIL_HOST_PASSWORD")
EMAIL_USE_TLS = True  # use STARTTLS on the SMTP connection
def main(inp_str):
    """Return the lowercase vowels, in a-e-i-o-u order, that occur in *inp_str*."""
    return ''.join(ch for ch in 'aeiou' if ch in inp_str)
if __name__ == '__main__':
    # Demo: print which vowels appear in each sample sentence.
    text1 = "Moon flowers bloom only at night, closing during the day,"
    text2 = "1 Yard (yd) = 3 feet (ft)."
    res1 = main(text1)
    print(res1)
    res2 = main(text2)
    print(res2)
|
import utils
import numpy as np
import tensorflow as tf
# must match what was saved
batch_size = 128        # decode batch width baked into the checkpointed graph
num_hidden_units = 200  # LSTM hidden size
num_layers = 3          # stacked LSTM layers
num_tweets = 50         # how many tweets to sample below
max_tweet_len = 20      # hard cap on generated tokens per tweet
top_n = 20              # sample only from the top-N most probable tokens
X_train, Y_train, index_to_word, word_to_index, vocab_size, unknown_lookup = utils.load_dataset()
vocab_size += 1 # due to 0 being used as padding...
ending_punc = ['.', ',', '?', '!', '"', "'", ":", '...']
starting_punc = ['"', '“']
never_cap = ['http']

def postprocess(sentence_lst):
    """Join generated tokens into a tweet string with simple casing and
    spacing rules: capitalize sentence-initial lowercase words (never URLs),
    attach closing punctuation without a leading space, and attach words
    directly after opening quotes."""
    out = ""
    prev = ""
    for tok in sentence_lst:
        # Capitalize a sentence-initial lowercase word, but never URLs.
        at_sentence_start = prev in ('.', '?', '!', '')
        if at_sentence_start and 'a' <= tok[0] <= 'z' and 'http' not in tok:
            tok = tok.capitalize()
        # Spacing: first token bare; glue after opening quote; closing
        # punctuation glued unless it follows a quote; otherwise one space.
        if not out:
            out = tok
        elif prev in starting_punc and (len(out) <= 1 or out[-1] != " "):
            out = out + tok
        elif tok not in ending_punc or prev in ('"', "'"):
            out = out + " " + tok
        else:
            out = out + tok
        prev = tok
    return out
# Restore the trained LSTM and sample `num_tweets` tweets token by token.
with tf.Session() as sess:
    loader = tf.train.import_meta_graph('checkpoints/trump_lstm-740.meta')
    loader.restore(sess, 'checkpoints/trump_lstm-740')
    while(num_tweets):
        sentence = []
        counter = 0
        next_token = np.ones((batch_size, 1))  # token id 1 == start token
        next_LSTM_state = np.zeros((num_layers, 2, batch_size, num_hidden_units))
        # while an end token (id 2) hasn't been generated
        while(next_token[0] != 2):
            gen_word = index_to_word[next_token[0][0]]
            if gen_word == utils.unknown_token:
                # replace <unk> with a random word from the unknown pool
                gen_word = unknown_lookup[np.random.randint(len(unknown_lookup))]
            sentence.append(gen_word)
            preds, next_LSTM_state = sess.run(['logits_final:0', 'H:0'],
                                              feed_dict={'Placeholder:0': next_token,
                                                         'Placeholder_2:0': next_LSTM_state})
            # sample from the top_n probabilities
            p = np.squeeze(preds[0])  # first row; batch entries are identical
            # FIX: the original `p[np.argsort(p)][:-top_n] = 0` assigned into a
            # fancy-indexed *copy*, leaving p unchanged (top-n filtering was a
            # no-op); index p directly with the low-probability positions.
            p[np.argsort(p)[:-top_n]] = 0
            p = p/np.sum(p)
            index = np.random.choice(vocab_size, 1, p=p)[0]
            next_token = np.ones((batch_size, 1))*index
            counter += 1
            if counter > max_tweet_len:  # tweets can't be > max_tweet_len words
                break
        # Only keep tweets that terminated naturally (hit the end token).
        if counter < max_tweet_len:
            num_tweets -= 1
            sentence = sentence[1:]  # drop the <s> start token
            print(postprocess(sentence))
from bs4 import BeautifulSoup

# Parse a local HTML sample and print its title and all <span> elements.
filePath = r"/home/huizi/文档/test.html"
# FIX: the original opened the file without ever closing it; a context
# manager guarantees the handle is released even if parsing raises.
with open(filePath, 'r') as file:
    html = file.read()
bs = BeautifulSoup(html, 'html.parser')
print(bs.title)
# print(bs.prettify())  # pretty-print the HTML structure
print(bs.find_all('span'))
|
import pyglet
import robocute.sprite
from robocute.base import *
LAYER_ANY = -1
LAYER_DEFAULT = 0

class Layer(Base):
    """A node in a layer tree: knows its parent, draw order, and children."""

    def __init__(self, parent, name = None, order = LAYER_ANY):
        super().__init__()
        self.parent = parent
        self.name = name
        self.order = order
        # The root is the nearest parentless ancestor (or this layer itself).
        self.root = parent.root if parent else self
        self.layers = []
        self.orderIncrement = 1
        self.orderCount = self.order + self.orderIncrement

    def create_layer(self, name = None, order = LAYER_ANY):
        """Create and attach a child Layer; LAYER_ANY assigns the next order."""
        if order == LAYER_ANY:
            self.orderCount += self.orderIncrement
            order = self.orderCount
        child = Layer(self, name, order)
        self.layers.append(child)
        return child

    def draw(self, graphics):
        """Base layers render nothing themselves."""
        pass
class NodeLayer(Layer):
    """A layer that owns drawable nodes and renders each via its `vu` view."""

    def __init__(self, parent, name, order):
        super().__init__(parent, name, order)
        self.nodes = []

    def add_node(self, node):
        self.nodes.append(node)

    def remove_node(self, node):
        self.nodes.remove(node)

    def draw(self, graphics):
        # Work on a copy of the graphics state; note the translations
        # accumulate across nodes on that copy.
        g = graphics.copy()
        for node in self.nodes:
            vu = node.vu
            if vu != None:
                transform = node.get_transform()
                g.translate(transform.x, transform.y)
                vu.draw(g)
class AbstractGroupLayer(Layer):
    """Layer base whose children are GroupLayers (pyglet ordered groups)."""

    def __init__(self, parent = None, name = None, order = LAYER_DEFAULT):
        super().__init__(parent, name, order)
        self.group = None

    def create_layer(self, name = None, order = LAYER_ANY):
        """Create and attach a child GroupLayer; LAYER_ANY assigns the next order."""
        if order == LAYER_ANY:
            self.orderCount += self.orderIncrement
            order = self.orderCount
        child = GroupLayer(self, name, order)
        self.layers.append(child)
        return child
class GroupLayer(AbstractGroupLayer):
    """Layer backed by a pyglet OrderedGroup nested under the root's group."""

    def __init__(self, parent, name, order):
        super().__init__(parent, name, order)
        self.group = pyglet.graphics.OrderedGroup(order, self.root.group)
        # Canonical group instances keyed by themselves; relies on the
        # group type's __eq__/__hash__ to deduplicate equal groups.
        self.groups = {}

    def register_group(self, group):
        """Return the canonical instance equal to *group*, registering it if new.

        Two groups that compare equal share one stored instance, so pyglet
        can batch their draw state together.
        """
        if group in self.groups:
            group = self.groups[group]
        else:
            self.groups[group] = group
        return group
class BatchLayer(AbstractGroupLayer):
    """Top-level layer that accumulates geometry in a pyglet Batch."""

    def __init__(self, name):
        super().__init__(None, name)
        self.batch = None
        self.group = pyglet.graphics.Group()
        self.reset()

    def reset(self):
        # Start a fresh Batch, discarding all previously added geometry.
        self.batch = pyglet.graphics.Batch()

    def draw(self, graphics):
        # The Batch manages its own GL state; `graphics` is unused here.
        self.batch.draw()
class RootLayer(Layer):
    """Parentless layer that anchors a layer tree (it is its own root)."""

    def __init__(self, name):
        super().__init__(None, name)
|
import ssl
import socket
from pprint import pprint
HOSTNAME = "www.google.com"

# FIX: the original built ssl.create_default_context() and then immediately
# overwrote it with a bare SSLContext(PROTOCOL_SSLv23), discarding the secure
# defaults it had just configured. create_default_context() already sets
# CERT_REQUIRED, enables hostname checking, and loads the system CA store.
context = ssl.create_default_context()
#context.load_verify_locations("/etc/ssl/certs/ca-bundle.crt")
conn = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=HOSTNAME)
try:
    conn.connect((HOSTNAME, 443))
    # NOTE(review): get_channel_binding() returns TLS channel-binding data,
    # not the certificate the variable name suggests — if the peer
    # certificate was intended, use conn.getpeercert() instead.
    cert = conn.get_channel_binding()
    pprint(cert)
finally:
    # Release the socket even if connect/handshake fails.
    conn.close()
|
"""Play the actual game."""
from modules.board import Board
from modules.baseboard import BeyondBoardError, OccupiedCellError, \
print_color, input_color
# RGB color used for general UI messages.
UI_C = (120, 255, 200)

def play_game():
    """Activate the game logic.

    Runs a console tic-tac-toe-style match between the human and the AI:
    asks who starts, then alternates turns until the board reports a
    winner (HUMAN/AI) or a draw (EMPTY).
    """
    board = Board()
    print_color("Game starts!", fg=UI_C)
    # select order
    while True:
        first = input_color("Who starts first? (ai/me)\n", fg=UI_C)
        if first == "ai" or first == "me":
            break
        print_color("Try again!", fg=UI_C)
    if first == "me":
        turn_flag = True
    else:
        turn_flag = False
    # start main loop
    print(f"{'=' * 5}\n{board}\n{'=' * 5}")
    while True:
        if turn_flag:
            # start player input loop — repeat until a legal move is made
            while True:
                try:
                    turn = input_color("Your turn (two comma-separated "
                                       f"integers in range(0, "
                                       f"{board.DIM_HINT})): ", fg=UI_C)
                    turn = tuple(map(int, turn.split(",")))
                    assert len(turn) == 2, "Please provide only two ints!"
                    board.make_move(*turn, symbol=board.HUMAN)
                except ValueError:
                    # non-integer input from int() conversion
                    print_color("Input should be of form: 2,1", fg=UI_C)
                except (AssertionError, BeyondBoardError,
                        OccupiedCellError) as e:
                    print_color(e.args[0], fg=UI_C)
                else:
                    print_color("Successful turn!",
                                fg=board.COLOR_MAP[board.HUMAN])
                    break
            print(f"{'='*5}\n{board}\n{'='*5}")
        else:
            # make ai move
            board.make_move(*board.get_ai_move(verbose=True), symbol=board.AI)
            print_color("this one!", fg=board.COLOR_MAP[board.AI])
            print(f"{'='*5}\n{board}\n{'='*5}")
        # check game state; exit if over
        state = board.game_state()
        if state == board.HUMAN:
            print_color("\nYou win!", fg=board.COLOR_MAP[board.HUMAN])
            break
        elif state == board.AI:
            print_color("\nAI wins!", fg=board.COLOR_MAP[board.AI])
            break
        elif state == board.EMPTY:
            print_color("\nIt's a draw!", fg=board.COLOR_MAP[board.EMPTY])
            break
        else:
            # game still in progress: hand the turn to the other player
            turn_flag = not turn_flag
    print_color("Thank you for the game!", fg=UI_C)

if __name__ == '__main__':
    play_game()
|
import os
from flask_sqlalchemy import SQLAlchemy
# Database URL is read once at import time; KeyError here means the
# DATABASE_URL environment variable is not set.
db_path = os.environ['DATABASE_URL']
# comment out the line above
# and uncomment the line below to prepare for local development and/or testing
# db_path=UNCOMMENT THIS LINE AND INSERT THE PATH TO YOUR DATABASE HERE
db = SQLAlchemy()

def setup_db(app, database_path=db_path):
    """Bind the shared SQLAlchemy instance to *app* and create all tables.

    database_path defaults to the DATABASE_URL value captured at import.
    """
    app.config["SQLALCHEMY_DATABASE_URI"] = database_path
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    db.app = app
    db.init_app(app)
    db.create_all()
# Many-to-many: which members own which games.
game_ownership = db.Table(
    'member_game', db.Column(
        'member_id', db.Integer, db.ForeignKey('member.id')), db.Column(
        'game_id', db.Integer, db.ForeignKey('game.id')))
# Many-to-many: which games are planned for which events.
games_at_event = db.Table(
    'game_event', db.Column(
        'event_id', db.Integer, db.ForeignKey('event.id')), db.Column(
        'game_id', db.Integer, db.ForeignKey('game.id')))
# Many-to-many: which members attend which events.
players_at_event = db.Table(
    'member_event', db.Column(
        'member_id', db.Integer, db.ForeignKey('member.id')), db.Column(
        'event_id', db.Integer, db.ForeignKey('event.id')))
class Club(db.Model):
    """Club presentation data shown on the landing page."""
    __tablename__ = 'club'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), nullable=False)
    img_link = db.Column(db.String(200), nullable=True)  # logo/banner URL
    h1 = db.Column(db.String(120), nullable=True)  # page headline
    welcoming_text = db.Column(db.String(1000), nullable=True)
class Game(db.Model):
    """A board game; linked to the events where it will be played."""
    __tablename__ = 'game'
    id = db.Column(db.Integer, primary_key=True)
    link = db.Column(db.String(200), nullable=False)  # external info URL
    title = db.Column(db.String(120), nullable=False, unique=True)
    # Events at which this game is scheduled; adds Event.games backref.
    games_to_be_played = db.relationship(
        'Event',
        secondary=games_at_event,
        backref=db.backref('games'))
class Member(db.Model):
    """Club member; identified externally by an Auth0 user id."""
    __tablename__ = 'member'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), nullable=False)
    img_link = db.Column(db.String(200), nullable=True)  # avatar URL
    admin = db.Column(db.Boolean, default=False)   # role flag
    member = db.Column(db.Boolean, default=False)  # approved-member flag
    description = db.Column(db.String(1000), nullable=True)
    auth0_user_id = db.Column(db.String(64), unique=True, nullable=False)
    first_name = db.Column(db.String(64), nullable=True)
    last_name = db.Column(db.String(64), nullable=True)
    phone = db.Column(db.String(30), nullable=True)
    email = db.Column(db.String(64), nullable=True)
    # FK to Location; the reverse side is Location.habitat ('home_address').
    address = db.Column(
        db.Integer,
        db.ForeignKey('location.id'),
        nullable=True)
    # Events this member hosts; adds Event.host backref.
    event_creation = db.relationship('Event', backref=db.backref('host'))
    # Games this member owns; adds Game.owners backref.
    ownership = db.relationship(
        'Game',
        secondary=game_ownership,
        backref=db.backref('owners'))
    # Events this member attends; adds Event.players backref.
    participation = db.relationship(
        'Event',
        secondary=players_at_event,
        backref=db.backref('players'))
class Event(db.Model):
    """A scheduled game night hosted by a member at a location."""
    __tablename__ = 'event'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), nullable=False)
    time = db.Column(db.DateTime, nullable=False)
    max_players = db.Column(db.Integer, nullable=False)
    description = db.Column(db.String(1000), nullable=True)
    # Host member (nullable so events survive host deletion).
    host_id = db.Column(db.Integer, db.ForeignKey('member.id'), nullable=True)
    location_id = db.Column(
        db.Integer,
        db.ForeignKey('location.id'),
        nullable=False)
class Location(db.Model):
    """A postal address used both as member home and as event venue."""
    __tablename__ = 'location'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), unique=True, nullable=False)
    country = db.Column(db.String(64), nullable=False)
    city = db.Column(db.String(32), nullable=False)
    street = db.Column(db.String(64), nullable=False)
    house_num = db.Column(db.String(10), nullable=False)
    appartment_num = db.Column(db.Integer, nullable=True)
    # Members living here; adds Member.home_address backref.
    habitat = db.relationship('Member', backref=db.backref('home_address'))
    # Events held here; adds Event.location backref.
    place = db.relationship('Event', backref=db.backref('location'))
|
from django.urls import path
from . import views
# Site routes: public pages plus custom auth (signup/login/signout) and
# blog creation views.
urlpatterns = [
    path('', views.index, name='index'),
    path('about', views.about, name='about'),
    path('gethelp', views.gethelp, name='gethelp'),
    path('volunteer', views.volunteer, name='volunteer'),
    path('blogs', views.blogs, name='blogs'),
    path('contact', views.contact, name='contact'),
    path('profile', views.profile, name='profile'),
    path('signout',views.signoutView, name='signout'),
    path('customSignUp',views.customSignUp, name='customSignUp'),
    path('newBlog',views.newBlog, name='newBlog'),
    path('customLogin',views.customLogin, name='customLogin'),
    # path('editTarget',views.editTarget, name='editTarget'),
]
# <li><a href="/gethelp">Get Help</a></li>
# <li><a href="/volunteer">Volunteer</a></li>
# <li><a href="/blogs">Blogs</a></li>
# <li><a href="/contact">Contact</a></li>
|
'''
https://docs.python.org/3/library/stat.html
https://www.tutorialspoint.com/python/os_stat.htm
https://www.geeksforgeeks.org/python-os-stat-method/
https://docs.python.org/3/library/stat.html?highlight=filemode
https://kb.iu.edu/d/abdb
Permission Number
Read (r) 4
Write (w) 2
Execute (x) 1
'''
# !/usr/bin/python
import os, sys
# showing stat information of file "a2.py"
fileName = 'OsStat_Example.py'
statinfo = os.stat(fileName)
print (statinfo)
print(statinfo[0])  # index 0 is st_mode: file type + permission bits
print(oct(statinfo[0]))  # octal view of st_mode, e.g. 0o100644
#fMode = stat.ST_MODE(fileName)
#print(stat.filemode(statinfo[0]))
# FIX: removed the bare `os.stat` expression that evaluated the function
# object and discarded it — a no-op statement.
os.system("ls -l OsStat_Example.py")
'''
os.stat_result(st_mode=33188, st_ino=18058077, st_dev=16777228, st_nlink=1, st_uid=501, st_gid=20, st_size=725, st_atime=1621911217, st_mtime=1621911215, st_ctime=1621911215)
33188
0o100644
-rw-r--r-- 1 rduvalwa2 staff 725 May 24 19:53 OsStat_Example.py
''' |
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, ListView, UpdateView
import urllib.parse
import html
import json
import base64
import numpy as np
from ..decorators import labeler_required
from ..models import User, Datasets
import sys
sys.path.append('../')
from mongoDbUtils import *
from solveSpeakerDuplicates import *
def listAudios(request):
    """List up to 10 diarized-but-unlabeled audios for the labeler.

    Iterates the datasets matching the labeler's language and stops at the
    first dataset (Mongo collection) that yields any pending documents.
    Non-labeler or anonymous users are redirected to the login page.
    """
    if request.user.is_authenticated and request.user.is_labeler:
        db = getDB()
        audios = []
        lang = request.user.language.name
        datasetsCursor = Datasets.objects.filter(language__name = lang)
        counter = 0
        for dataset in datasetsCursor:
            # Only documents that are downloaded, diarized, and not yet
            # confirmed by a labeler.
            audiosCursor = db[dataset.name].find({'speakersDiarized':True, 'downloaded':True, 'labelsConfirmed' : False}).limit(10)
            for i in audiosCursor:
                audios.append({
                    'videoId': i['videoId'],
                    'channelTitles': i['channelTitles'],
                    # titles are stored URL-quoted and HTML-escaped
                    'titles': html.unescape(urllib.parse.unquote(i['titles']) ),
                    'dbName': dataset.name,
                    'dataIndex': counter
                })
                counter += 1
            if audios:
                # stop after the first dataset with pending work
                break
        return render(request, 'web_interface/labeler/audio_list.html',
                      {'audios': audios })
    else:
        return redirect('login')
def labelAudio(request):
    """Labeling page for one audio document.

    GET: load the pre-computed speaker slices for (dbName, videoId) and
    render one row per (pre-label, clip) pair.
    POST: persist the confirmed label list, bump the dataset's labeled
    counter, and either continue to duplicate-speaker resolution or clean
    up and return to the list. Non-labeler users are redirected to login.
    """
    if request.user.is_authenticated and request.user.is_labeler:
        db = getDB()
        if request.method == 'GET':
            dbName = request.GET.get('dbName')
            videoId = request.GET.get('videoId')
            audios = []
            jsonSpeakerSlice = db[dbName].find_one({'videoId': videoId})['jsonSpeakerSlice']
            speakerSlice= json.loads(jsonSpeakerSlice)
            for label, names in speakerSlice.items():
                for name in names:
                    audios.append({
                        'videoId': videoId,
                        'dbName': dbName.replace(" ", "_"),
                        'preLabel': label,
                        'name': name,
                    })
            return render(request, 'web_interface/labeler/label_audio.html',
                          {'audios': audios,
                           "speakerCount": [i+1 for i in list(range(len(speakerSlice)))],
                           'videoId': videoId,
                           'dbName': dbName
                           })
        elif request.method == 'POST':
            # print(request.POST)
            labelList = request.POST.get('labelList')
            videoId = request.POST.get('videoId')
            dbName = request.POST.get('dbName')
            collection = db[dbName]
            collection.update_one({'videoId' : videoId},
                                  {"$set" : {'labelsConfirmed' : True,
                                             'labelList' : labelList }})
            # FIX: Collection.update() was deprecated and removed in
            # PyMongo 4; update_one() is the replacement for a
            # single-document update.
            db.web_interface_datasets.update_one(
                {'name': dbName},
                {"$inc": { 'countOfLabeleds': 1} }
            )
            similarSpeakers = speakerDuplicatesExists(db, videoId, dbName)
            # print(similarSpeakers)
            if similarSpeakers:
                print('find possible similar speaker')
                request.session['similarSpeakers'] = similarSpeakers
                return HttpResponseRedirect(reverse('labeler:solveSpeakerDuplicate'))
            else:
                print('no similiar speaker')
                deleteFiles(dbName, videoId)
                return redirect('labeler:listAudios')
    else:
        return redirect('login')
def solveSpeakerDuplicate(request):
    """Ask the labeler whether a newly labeled speaker matches a known one.

    GET: enrich each pending candidate in session['similarSpeakers'] with up
    to 10 sample/compare clips and render the comparison page (only the
    first candidate is shown; the whole list is round-tripped through the
    form as base64-encoded JSON).
    POST: either register the speaker as brand new, or merge its embedding
    into the matched speaker as a weighted average, then recurse on the
    remaining candidates or return to the audio list.
    """
    if request.user.is_authenticated and request.user.is_labeler:
        db = getDB()
        if request.method == 'GET':
            similarSpeakers = request.session.get('similarSpeakers')
            for similarSpeaker in similarSpeakers:
                datasetName = similarSpeaker['datasetName']
                sampleVideoId = similarSpeaker['sampleVideoId']
                speakerIdOnSampleVideo = similarSpeaker['speakerIdOnSampleVideo']
                sampleAudios = []
                sampleLabelListJson = db[datasetName].find_one({'videoId': sampleVideoId})['labelList']
                sampleLabelList = json.loads(sampleLabelListJson)
                # up to 10 clips of the already-known speaker
                for fileId, speakerId in sampleLabelList.items():
                    if speakerId == speakerIdOnSampleVideo and len(sampleAudios) < 10:
                        sampleAudios.append({
                            'videoId': sampleVideoId,
                            'dbName': datasetName.replace(" ", "_"),
                            'name': fileId,
                        })
                similarSpeaker['sampleAudios'] = sampleAudios
                compareVideoId = similarSpeaker['compareVideoId']
                compareSpeakerId = similarSpeaker['compareSpeakerId']
                compareAudios = []
                compareLabelListJson = db[datasetName].find_one({'videoId': compareVideoId})['labelList']
                compareLabelList = json.loads(compareLabelListJson)
                # up to 10 clips of the candidate speaker
                for fileId, speakerId in compareLabelList.items():
                    if speakerId == compareSpeakerId and len(compareAudios) < 10:
                        compareAudios.append({
                            'videoId': compareVideoId,
                            'dbName': datasetName.replace(" ", "_"),
                            'name': fileId,
                        })
                similarSpeaker['compareAudios'] = compareAudios
            # similarSpeakers = [similarSpeakers[0], similarSpeakers[0]]
            # print(similarSpeakers, len(similarSpeakers))
            encodedBytes = base64.b64encode(json.dumps(similarSpeakers).encode("utf-8"))
            postData = str(encodedBytes, "utf-8")
            return render(request, 'web_interface/labeler/solve_speaker_duplicate.html',
                          { 'similarSpeakers': similarSpeakers[:1],
                            'postData': postData })
        if request.method == 'POST':
            postData = request.POST.get('postData')
            isSameSpeaker = request.POST.get('isSameSpeaker')
            # NOTE(review): json.dumps() wraps the base64 string in quotes
            # before b64decode; this only works because b64decode discards
            # non-alphabet characters by default — confirm, and consider
            # base64.b64decode(postData) directly.
            encodedBytes = base64.b64decode(json.dumps(postData).encode("utf-8"))
            postData = str(encodedBytes, "utf-8")
            postData = json.loads(postData)
            # print(postData, isSameSpeaker)
            speaker = postData[0]
            similarSpeakers = postData[1:]  # candidates left for next rounds
            datasetName = speaker['datasetName']
            collection = db[datasetName]
            speakers = db[datasetName+"_speakers"]
            video = collection.find_one({'videoId' : speaker['compareVideoId']})
            if isSameSpeaker == 'false':
                # Register a brand-new speaker document.
                speakers = db[datasetName+"_speakers"]
                # NOTE(review): using the collection size as the new _id races
                # with concurrent labelers and collides if documents are ever
                # deleted — confirm this is acceptable.
                totalSpeakerCount = speakers.count_documents({})
                # print(video, totalSpeakerCount)
                ID = str(totalSpeakerCount)
                print('added new speaker with ID {} !'.format(ID))
                doc = {"_id": ID,
                       # "encoding": json.dumps(np.array(speaker['encoding'][0]).reshape(1, -1).tolist()),
                       "encoding": speaker['encoding'][0],
                       'totalEmbeds': speaker['totalEmbeds'],
                       'channelId': speaker['channelId'],
                       'channelTitles': video['channelTitles'],
                       'sampleVideoId': speaker['compareVideoId'],
                       'speakerIdOnSampleVideo': speaker['compareSpeakerId'],
                       }
                speakers.insert_one(doc)
            else:
                # Merge: weighted average of stored and new embeddings,
                # weighted by how many embeddings each side aggregates.
                similarSpeakerId = speaker['similarSpeakerId']
                similarSpeaker = speakers.find_one({'_id': str(similarSpeakerId)})
                w1 = similarSpeaker['totalEmbeds']
                a1 = np.array(json.loads(similarSpeaker['encoding'])).reshape(1, -1)
                w2 = speaker['totalEmbeds']
                a2 = np.array(json.loads(speaker['encoding'][0])).reshape(1, -1)
                finalTotalEmbed = w1 + w2
                avarageEncoding = (a1*w1 + a2*w2)/finalTotalEmbed
                ID = str(similarSpeakerId)
                speakers.update_one({'_id': ID},
                                    {"$set": {'encoding': json.dumps(avarageEncoding.tolist()), 'totalEmbeds': finalTotalEmbed } })
            # Move the clip files under the assigned speaker ID and mark the
            # video as processed.
            labelList = json.loads(video['labelList'])
            moveFiles(labelList , datasetName, speaker['compareVideoId'], ID, speaker['compareSpeakerId'])
            collection.update_one({'videoId' : speaker['compareVideoId']},
                                  {"$set" : {'speakersAdded' : True }})
            if similarSpeakers:
                request.session['similarSpeakers'] = similarSpeakers
                return HttpResponseRedirect(reverse('labeler:solveSpeakerDuplicate'))
            else:
                deleteFiles(datasetName, speaker['compareVideoId'])
                return redirect('labeler:listAudios')
    else:
        return redirect('login')
|
# Read n, k, p from the first line, the n integer array values from the
# second line, and a pattern string from the third.
n,k,p=map(int,input().split(" "))
a=list(map(int,input().split(" ")))
s=input()
def count(a):
for i in range(len(s)):
if(s[i]=='?'):
count(a,k)
else:
c=a[n-1]
for j in range(n-1):
|
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from dashboard.forms import NewGroupForm, NewStudentForm, NewTeacherForm,\
AddHomeworkForm, NewPasswordForm
from django.core.urlresolvers import reverse
from common.models import Group, Teacher, GroupMembers, Student, \
AssignHomework, Exercise, Quiz, Course
from django.contrib.auth.models import User
from django.http import HttpResponse
# Dashboard home page
def home(request):
    """Render the dashboard landing page for the logged-in teacher."""
    voyelle = 'aeiouyàäâéèëêîïíìôöõòûüùúAEIOUY' # So the template can choose between "De" and "D'"
    user = Teacher.objects.get(user = request.user)
    firstLetter = request.user.username[0]# Same purpose (template display)
    return render(request, 'dashboard/templates/dashboard/index.html', locals())
# Exercises, quizzes and courses
def exercises(request):
    """Render the activities page (exercises/quizzes/courses) for the teacher."""
    user = Teacher.objects.get(user = request.user)
    return render(request, 'dashboard/templates/dashboard/exercises.html', locals())
# Group creation
def newgroup(request):
    """Create a new Group from NewGroupForm and attach the current teacher."""
    success = False
    user = Teacher.objects.get(user = request.user)
    if request.method == "POST":
        form = NewGroupForm(request.POST)
        if form.is_valid():
            group_name = form.cleaned_data["group_name"]
            newGroup = Group.objects.create(name = group_name)
            newGroup.save()
            # Link the Teacher and the group through the intermediate table
            teacherToGroup = GroupMembers(teacher = user, group = newGroup)
            teacherToGroup.save()
            success = True # Tells the template to show the confirmation message
    else:
        form = NewGroupForm()
    return render(request, "dashboard/templates/dashboard/newclass.html", locals())
# Password change
def profil(request):
    """Let the teacher change their password (with a confirmation field).

    `success` drives the template: '' = nothing yet, False = mismatch
    error, True = password updated.
    """
    user = Teacher.objects.get(user = request.user)
    success = ''
    if request.method == "POST":
        userProfile = request.user
        form = NewPasswordForm(request.POST)
        if form.is_valid():
            password = form.cleaned_data["password"]
            passwordConfirm = form.cleaned_data["passwordConfirm"]
            if password != passwordConfirm:
                success = False # Error message
            else:
                success = True # Confirmation message
                u = request.user
                u.set_password(password)
                u.save()
    else:
        form = NewPasswordForm()
    return render(request, 'dashboard/templates/dashboard/profile.html', locals())
def group(request, group_id):
    """Group management page.

    Handles, via named submit buttons in POST: adding a teacher or a
    student to the group, assigning homework (exercise/quiz/course), and a
    two-step group deletion.
    """
    user = Teacher.objects.get(user = request.user)
    group = Group.objects.get(id = group_id)
    studentList = group.student.all()
    teacherList = group.teacher.all()
    homeworkExList = group.homeworkExercise.all()#
    homeworkQuList = group.homeworkQuiz.all() # Homework lists split
    homeworkCoList = group.homeworkCourse.all() # by activity kind
    deleteConfirmation = False # Flag of the two-step class deletion
    if request.method == "POST":
        # Add a teacher to the group
        if 'addTeacher' in request.POST:
            erreurTeacher = False
            formTeacher = NewTeacherForm(request.POST)
            if formTeacher.is_valid():
                newTeacher = formTeacher.cleaned_data["nickname"]
                try:
                    try:
                        teacherUser = User.objects.get(username = newTeacher)
                        teacher = Teacher.objects.get(user = teacherUser)
                        newTeacherToGroup = GroupMembers(teacher = teacher, group = group)
                        newTeacherToGroup.save()
                    except User.DoesNotExist:
                        erreurTeacher = True # Error message
                except Teacher.DoesNotExist:
                    erreurTeacher = True # Same
        # Add a student to the group
        elif 'addStudent' in request.POST:
            formStudent = NewStudentForm(request.POST)
            erreurStudent = False
            if formStudent.is_valid():
                try:
                    try:
                        newStudent = formStudent.cleaned_data["nickname"]
                        studentUser = User.objects.get(username = newStudent)
                        student = Student.objects.get(user = studentUser)
                        newStudentToGroup = GroupMembers(student = student, group = group)
                        newStudentToGroup.save()
                    except User.DoesNotExist:
                        erreurStudent = True # Error message
                except Student.DoesNotExist:
                    erreurStudent = True # Same
        # Assign a homework
        elif 'assignHomework' in request.POST:
            formHomework = AddHomeworkForm(request.POST)
            erreur = False
            if formHomework.is_valid():
                homeworkid = formHomework.cleaned_data["homeworkid"]
                genre = formHomework.cleaned_data["genre"]
                # Look up the activity according to the chosen kind
                if genre == "exercise":
                    try:
                        exercise = Exercise.objects.get(id = homeworkid)
                        newHomework = AssignHomework(exercise = exercise, group = group)
                        newHomework.save()
                    except Exercise.DoesNotExist:
                        erreur = True # Error message
                if genre == "quiz":
                    try:
                        quiz = Quiz.objects.get(id = homeworkid)
                        newHomework = AssignHomework(quiz = quiz, group = group)
                        newHomework.save()
                    except Quiz.DoesNotExist:
                        erreur = True # Same
                if genre == "course":
                    try:
                        cours = Course.objects.get(id = homeworkid)
                        newHomework = AssignHomework(course = cours, group = group)
                        newHomework.save()
                    except Course.DoesNotExist:
                        erreur = True # Same
        elif 'deleteClass' in request.POST:
            deleteConfirmation = True # Reveals the second confirmation button
        # Delete the class
        elif 'deleteClassConfirm' in request.POST:
            group = Group.objects.get(id = group_id)
            group.delete()
            return redirect('home')
        formStudent = NewStudentForm()
        formTeacher = NewTeacherForm()
        formHomework = AddHomeworkForm()
    else:
        formStudent = NewStudentForm()
        formTeacher = NewTeacherForm()
        formHomework = AddHomeworkForm()
    return render(request, 'dashboard/templates/dashboard/classe.html', locals())
#Remove a member (student or teacher) from a group
def deleteFromGroup(request, member_id, group_id):
    """Detach the member identified by ``member_id`` from group ``group_id``.

    The submit button name in the POST data decides whether the member is a
    student or a teacher; the user is then sent back to the group page.
    """
    if request.method == "POST":
        if 'deleteStudent' in request.POST:
            GroupMembers.objects.get(
                student = Student.objects.get(id = member_id),
                group = Group.objects.get(id = group_id),
            ).delete()
        elif 'deleteTeacher' in request.POST:
            GroupMembers.objects.get(
                teacher = Teacher.objects.get(id = member_id),
                group = Group.objects.get(id = group_id),
            ).delete()
    return redirect('group_view', group_id = group_id)
#Delete an activity (exercise, quiz or course)
def deleteActivity(request, activity_id):
    """Delete the activity ``activity_id``; the POST button name picks the model."""
    if request.method == "POST":
        # Submit-button name -> model the activity is deleted from.
        model_for_key = {
            'deleteExercise': Exercise,
            'deleteQuiz': Quiz,
            'deleteCourse': Course,
        }
        for key, model in model_for_key.items():
            if key in request.POST:
                model.objects.get(id = activity_id).delete()
    return redirect('exercises')
#Remove an assigned homework from a group
def deleteHomework(request, group_id, homework_id):
    """Unassign a homework (exercise, quiz or course) from a group.

    The POST button name selects the activity kind. The original repeated the
    same six lines for each kind; the shared lookup/delete now lives in
    ``_deleteAssignedHomework``. Redirects back to the group page.
    """
    if request.method == "POST":
        if 'deleteHomeworkEx' in request.POST:
            _deleteAssignedHomework(group_id, exercise = Exercise.objects.filter(id = homework_id)[0])
        if 'deleteHomeworkQu' in request.POST:
            _deleteAssignedHomework(group_id, quiz = Quiz.objects.filter(id = homework_id)[0])
        if 'deleteHomeworkCo' in request.POST:
            _deleteAssignedHomework(group_id, course = Course.objects.filter(id = homework_id)[0])
    return redirect('group_view', group_id = group_id)

def _deleteAssignedHomework(group_id, **activity):
    """Delete the first AssignHomework row linking the group to ``activity``.

    Keeps the original ``filter(...)[0]`` semantics (IndexError when no
    assignment matches).
    """
    group = Group.objects.get(id = group_id)
    AssignHomework.objects.filter(group = group, **activity)[0].delete()
import json
from pprint import pprint
import requests
from praw import Reddit
from dotenv import load_dotenv
import os
import shutil
from config import dotenv_path, submission_download_dir
from log import get_logger
logger = get_logger(__name__)

# Load credentials from the .env file so they are available via os.environ.
load_dotenv(dotenv_path)

# Authenticated reddit API client used by every function below.
reddit = Reddit(client_id=os.environ.get('REDDIT_CLIENT_ID'),
                client_secret=os.environ.get('REDDIT_CLIENT_SECRET'),
                user_agent='glue-gun-bot',
                username=os.environ.get('REDDIT_USERNAME'),
                password=os.environ.get('REDDIT_PASSWORD'))
def get_video_data(post_id):
    """Return metadata for the reddit-hosted video on submission *post_id*."""
    video = reddit.submission(id=post_id).media['reddit_video']
    return {
        'id': post_id,
        'url': video['fallback_url'],
        'duration': video['duration'],
        'height': video['height'],
        'width': video['width'],
    }
def download_video(submission_id, video_url):
    """Download a submission's video to <submission_download_dir>/<id>/video.mp4.

    Returns the local path of the video file. BUG FIX: the original returned
    the path only on a fresh download and fell through to ``None`` when the
    submission directory already existed; now the existing path is returned
    in that case too.
    """
    dir_path = os.path.join(submission_download_dir, submission_id)
    video_fp = os.path.join(dir_path, 'video.mp4')
    if os.path.exists(dir_path):
        logger.info(f'Submission "{submission_id}" already downloaded')
        return video_fp
    os.makedirs(dir_path)
    # Stream the response body straight to disk to avoid buffering the
    # whole video in memory.
    with requests.get(video_url, stream=True) as r:
        with open(video_fp, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    return video_fp
def get_new_submissions(subreddit):
    """Return hot submissions from *subreddit* that are SFW videos."""
    hot = reddit.subreddit(subreddit).hot(limit=None)
    return [sub for sub in hot if sub.is_video and not sub.over_18]
def main():
    """Print the first hot, SFW video submission from r/diwhy."""
    submissions = list(get_new_submissions('diwhy'))
    print(submissions[0])


if __name__ == '__main__':
    main()
|
import pygame
import sys
# Playfield layout: 21 rows x 12 columns, 1 = wall cell, 0 = empty.
# Built programmatically instead of 21 hand-written literal rows:
# 20 rows with a wall on each side, plus a solid floor row at the bottom.
data = [[1] + [0] * 10 + [1] for _ in range(20)] + [[1] * 12]
class Square:
    """A single 25x25-pixel cell drawn on a 30-pixel grid."""

    def __init__(self, screen, x_pos, y_pos, color):
        """Store draw parameters and precompute the pixel rectangle."""
        self.screen = screen
        self.length = 25
        self.x = x_pos
        self.y = y_pos
        self.color = color
        step = self.length + 5  # 5-pixel gap between neighbouring cells
        self.rect = pygame.Rect(self.x * step, self.y * step,
                                self.length, self.length)

    def draw(self):
        """Draw this square at its current position."""
        pygame.draw.rect(self.screen, self.color, self.rect)
class Block:
    """Tetris block (tetromino). Placeholder: no behaviour implemented yet."""
    def __init__(self):
        """Initialise (no state yet)."""
        pass
def draw_wall(screen):
    """Draw every wall cell (value 1 in ``data``) as a white square.

    FIX: removed the unused local ``walls`` list the original created and
    never used.
    """
    for y in range(21):
        for x in range(12):
            if data[y][x] == 1:
                Square(screen, x, y, "white").draw()
def main():
    """Initialise pygame, draw the playfield walls and run the event loop."""
    pygame.init()
    screen = pygame.display.set_mode((600, 700))
    pygame.display.set_caption("Tetris")
    draw_wall(screen)  # walls are static, drawn once before the loop
    while True:
        # Event handling
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # FIX: release pygame resources before terminating the process.
                pygame.quit()
                sys.exit()
        pygame.display.flip()  # present the frame


# FIX: guard the entry point so importing this module does not open a window.
if __name__ == "__main__":
    main()
|
"""cowork URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
"""
# Django imports
from django.conf.urls import include, url
from django.urls import include, path
from django.contrib import admin
from django.contrib.auth import views as auth_views
from apps.user.views import FacebookLogin, GoogleLogin, AppleLogin
from apps.user.admin import event_admin_site
urlpatterns = [
    # Examples:
    # url(r'^blog/', include('blog.urls', namespace='blog')),

    # provide the most basic login/logout functionality
    url(r'^login/$', auth_views.LoginView.as_view(template_name='core/login.html'),
        name='core_login'),
    url(r'^logout/$', auth_views.LogoutView.as_view(), name='core_logout'),
    url(r'^grappelli/', include('grappelli.urls')),  # grappelli URLS
    url(r'^admin/', admin.site.urls),  # admin site
    path(r'merchant/portal/', event_admin_site.urls),
    url(r'api/auth/registration/', include('rest_auth.registration.urls')),
    path(r'api/auth/', include('rest_auth.urls')),
    path(r'api/auth/facebook/', FacebookLogin.as_view(), name='fb_login'),
    path(r'api/auth/google/', GoogleLogin.as_view(), name='google_login'),
    # BUG FIX: this route was also named 'google_login' (copy/paste), making
    # reverse('google_login') ambiguous and 'apple_login' unresolvable.
    path(r'api/auth/apple/', AppleLogin.as_view(), name='apple_login'),
]
|
"""
lab 3
"""
# 3.1
str_list = ['a','d','e','b','c']
print(str_list)
str_list.sort()
print(str_list)
# 3.2
str_list.append('f')
print(str_list)
# 3.3
str_list.remove('d')
print(str_list)
# 3.4
print(str_list[2])
# 3.5
my_list = ['a','123',123,'b','B','False',False,123,None,'None']
print(len(set(my_list)))
# 3.6
print(len('This is my third python lab'.split()))
# 3.7
num_list = [12,32,43,35]
num_list.sort()
print(num_list)
print(num_list[0]) #min
print(num_list[-1]) #max
# 3.8
game_board = [
[0,0,0],
[0,0,0],
[0,0,0]
]
print(game_board)
game_board[1][1]=1
print(game_board)
|
#!/usr/bin/python3
# OOP
def fishing():
    """Print a fishing status message."""
    print('Fishing')
class GoldenFish:
    """A golden fish with a fixed weight."""

    def __init__(self):
        # BUG FIX: the original assigned ``weight`` to a local variable,
        # so instances never received a ``weight`` attribute.
        self.weight = 0.13

    def swim(self):
        """Print that the fish is swimming."""
        print('Swimming')

    def eat(self):
        """Print that the fish is eating."""
        print('Eating')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename @ spark_conf.py
# Author @ gouhao
# Create date @ 2017-07-28
"""
The LAST_TIME_PATH is the middle file used to record the last completed application finished time.
First collection should run a long time due to default last time is null
which make the collector to fetch the whole application records
"""
import os

# Names of the configuration properties exported by this module.
Property = ['SPARK_HOST','SPARK_RUNNING_PORT','SPARK_HISTORY_SERVER_HOST',
            'SPARK_HISTORY_SERVER_PORT','LAST_TIME_PATH']

# Host/port of the live Spark UI (running applications).
SPARK_HOST = 'localhost'
SPARK_RUNNING_PORT = 4040
# Host/port of the Spark history server (completed applications).
SPARK_HISTORY_SERVER_HOST = 'localhost'
SPARK_HISTORY_SERVER_PORT = 18080
# Timestamp format recorded in the data file: 2017-08-04T03:20:50.804GMT
LAST_TIME_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/data/spark.data'
# COMPLETED_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +'/data/COMPLETED.log'
# RUNNING_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +'/data/RUNNING.log'
# spark_config_path = '/root/spark-2.2.0-bin-hadoop2.7/conf/'
|
#!/usr/bin/env python3
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import sys, path
import synth_common
# Build a synthetic trace of GPU log events covering each severity level
# (1-5) and two distinct tags, used to exercise the GPU-log importer.
trace = synth_common.create_trace()

trace.add_gpu_log(ts=1, severity=1, tag="tag0", message="message0")
trace.add_gpu_log(ts=2, severity=2, tag="tag0", message="message1")
trace.add_gpu_log(ts=3, severity=3, tag="tag0", message="message2")
trace.add_gpu_log(ts=4, severity=4, tag="tag0", message="message3")
trace.add_gpu_log(ts=4, severity=5, tag="tag0", message="message4")
trace.add_gpu_log(ts=5, severity=1, tag="tag1", message="message5")

# Emit the serialized protobuf to stdout for the test harness to consume.
sys.stdout.buffer.write(trace.trace.SerializeToString())
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 18:15:45 2020
@author: IKM1YH
"""
# Imports
import sys
import numpy as np
import scipy
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
import seaborn as sns
sns.set_style('darkgrid')
np.random.seed(42)
#
# Define the exponentiated quadratic
def exponentiated_quadratic(xa, xb):
    """Exponentiated quadratic (RBF) kernel with σ=1.

    *xa* and *xb* are 2-D arrays of shape (n_a, d) and (n_b, d); returns the
    (n_a, n_b) kernel matrix exp(-0.5 * ||xa_i - xb_j||²).
    """
    # FIX: import the submodule explicitly — the file's bare ``import scipy``
    # does not guarantee that ``scipy.spatial`` is loaded.
    from scipy.spatial.distance import cdist

    # -0.5 times the squared L2 (Euclidean) distance between all pairs
    sq_norm = -0.5 * cdist(xa, xb, 'sqeuclidean')
    return np.exp(sq_norm)
# Sample from the Gaussian process prior distribution
nb_of_samples = 41  # Number of points in each function
number_of_functions = 5  # Number of functions to sample
# Independent variable samples, as a (41, 1) column vector
X = np.expand_dims(np.linspace(-4, 4, nb_of_samples), 1)
Σ = exponentiated_quadratic(X, X)  # Kernel (covariance) of the data points

# Draw samples from the prior at our data points.
# Assume a mean of 0 for simplicity
ys = np.random.multivariate_normal(
    mean=np.zeros(nb_of_samples), cov=Σ,
    size=number_of_functions)

# Plot the sampled functions, one line per prior draw
plt.figure(figsize=(6, 4), dpi=100)
for i in range(number_of_functions):
    plt.plot(X, ys[i], linestyle='-', marker='o', markersize=3)
plt.xlabel('$x$', fontsize=13)
plt.ylabel('$y = f(x)$', fontsize=13)
plt.title((
    '5 different function realizations at 41 points\n'
    'sampled from a Gaussian process with exponentiated quadratic kernel'))
plt.xlim([-4, 4])
plt.show()
#
# Gaussian process posterior
def GP(X1, y1, X2, kernel_func):
    """
    Calculate the posterior mean and covariance matrix for y2
    based on the corresponding input X2, the observations (y1, X1),
    and the prior kernel function.

    Returns (μ2, Σ2): posterior mean vector and covariance matrix at X2.
    """
    # FIX: import the submodule explicitly — the file's bare ``import scipy``
    # does not guarantee that ``scipy.linalg`` is loaded.
    from scipy.linalg import solve

    # Kernel of the observations
    Σ11 = kernel_func(X1, X1)
    # Kernel of observations vs to-predict
    Σ12 = kernel_func(X1, X2)
    # (Σ11⁻¹ Σ12)ᵀ; assume_a='pos' exploits Σ11 being positive definite
    solved = solve(Σ11, Σ12, assume_a='pos').T
    # Posterior mean: Σ21 Σ11⁻¹ y1
    μ2 = solved @ y1
    # Posterior covariance: Σ22 − Σ21 Σ11⁻¹ Σ12
    Σ22 = kernel_func(X2, X2)
    Σ2 = Σ22 - (solved @ Σ12)
    return μ2, Σ2  # mean, covariance
# Compute the posterior mean and covariance
# Define the true function that we want to regress on
f_sin = lambda x: (np.sin(x)).flatten()

n1 = 8  # Number of points to condition on (training points)
n2 = 75  # Number of points in posterior (test points)
ny = 5  # Number of functions that will be sampled from the posterior
domain = (-6, 6)

# Sample observations (X1, y1) on the function; drawn 2 units inside the
# domain edges so the posterior is unconstrained near the boundaries
X1 = np.random.uniform(domain[0]+2, domain[1]-2, size=(n1,1))
y1 = f_sin(X1)
# Predict points at uniform spacing to capture function
X2 = np.linspace(domain[0], domain[1], n2).reshape(-1,1)
# Compute posterior mean and covariance
μ2, Σ2 = GP(X1, y1, X2, exponentiated_quadratic)
# Compute the standard deviation at the test points to be plotted
σ2 = np.sqrt(np.diag(Σ2))

# Draw some samples of the posterior
y2 = np.random.multivariate_normal(mean=μ2, cov=Σ2, size=ny)

# Plot the postior distribution and some samples
fig, (ax1, ax2) = plt.subplots(
    nrows=2, ncols=1, figsize=(6, 6), dpi=100)
# Plot the distribution of the function (mean, covariance)
ax1.plot(X2, f_sin(X2), 'b--', label='$sin(x)$')
# Shade the ±2σ credible band around the posterior mean
ax1.fill_between(X2.flat, μ2-2*σ2, μ2+2*σ2, color='red',
                 alpha=0.15, label='$2 \sigma_{2|1}$')
ax1.plot(X2, μ2, 'r-', lw=2, label='$\mu_{2|1}$')
ax1.plot(X1, y1, 'ko', linewidth=2, label='$(x_1, y_1)$')
ax1.set_xlabel('$x$', fontsize=13)
ax1.set_ylabel('$y$', fontsize=13)
ax1.set_title('Distribution of posterior and prior data.')
ax1.axis([domain[0], domain[1], -3, 3])
ax1.legend()
# Plot some samples from this function
ax2.plot(X2, y2.T, '-')
ax2.set_xlabel('$x$', fontsize=13)
ax2.set_ylabel('$y$', fontsize=13)
ax2.set_title('5 different function realizations from posterior')
# NOTE(review): repeats the ax1.axis call above — probably meant ax2.axis
ax1.axis([domain[0], domain[1], -3, 3])
ax2.set_xlim([-6, 6])
plt.tight_layout()
plt.show()
#
#####################################################################################
# Gaussian process posterior with noisy obeservations
def GP_noise(X1, y1, X2, kernel_func, σ_noise):
    """
    Calculate the posterior mean and covariance matrix for y2
    based on the corresponding input X2, the noisy observations
    (y1, X1), and the prior kernel function.

    Returns (μ2, Σ2): posterior mean vector and covariance matrix at X2.
    """
    # FIX: import the submodule explicitly — the file's bare ``import scipy``
    # does not guarantee that ``scipy.linalg`` is loaded.
    from scipy.linalg import solve

    # Kernel of the noisy observations: σ²I added on the diagonal.
    # FIX: size the identity from X1 itself rather than the module-level
    # global ``n1`` the original depended on.
    Σ11 = kernel_func(X1, X1) + ((σ_noise ** 2) * np.eye(X1.shape[0]))
    # Kernel of observations vs to-predict
    Σ12 = kernel_func(X1, X2)
    # (Σ11⁻¹ Σ12)ᵀ; assume_a='pos' exploits Σ11 being positive definite
    solved = solve(Σ11, Σ12, assume_a='pos').T
    # Posterior mean: Σ21 Σ11⁻¹ y1
    μ2 = solved @ y1
    # Posterior covariance: Σ22 − Σ21 Σ11⁻¹ Σ12
    Σ22 = kernel_func(X2, X2)
    Σ2 = Σ22 - (solved @ Σ12)
    return μ2, Σ2  # mean, covariance
# Compute the posterior mean and covariance with observation noise
σ_noise = 1.  # The standard deviation of the noise
# Add noise to the samples we sampled previously
# NOTE(review): this scales the noise by σ² (σ_noise ** 2) while GP_noise
# treats σ_noise as a standard deviation — confirm which was intended.
y1 = y1 + ((σ_noise ** 2) * np.random.randn(n1))
# Compute posterior mean and covariance
μ2, Σ2 = GP_noise(X1, y1, X2, exponentiated_quadratic, σ_noise)
# Compute the standard deviation at the test points to be plotted
σ2 = np.sqrt(np.diag(Σ2))

# Draw some samples of the posterior
y2 = np.random.multivariate_normal(mean=μ2, cov=Σ2, size=ny)

# Plot the postior distribution and some samples
fig, (ax1, ax2) = plt.subplots(
    nrows=2, ncols=1, figsize=(6,6), dpi=100)
# Plot the distribution of the function (mean, covariance)
ax1.plot(X2, f_sin(X2), 'b--', label='$sin(x)$')
# Shade the ±2σ credible band around the posterior mean
ax1.fill_between(X2.flat, μ2-2*σ2, μ2+2*σ2, color='red',
                 alpha=0.15, label='$2\sigma_{2|1}$')
ax1.plot(X2, μ2, 'r-', lw=2, label='$\mu_{2|1}$')
ax1.plot(X1, y1, 'ko', linewidth=2, label='$(x_1, y_1)$')
ax1.set_xlabel('$x$', fontsize=13)
ax1.set_ylabel('$y$', fontsize=13)
ax1.set_title('Distribution of posterior and prior data')
ax1.axis([domain[0], domain[1], -3, 3])
ax1.legend()
# Plot some samples from this function
ax2.plot(X2, y2.T, '-')
ax2.set_xlabel('$x$', fontsize=13)
ax2.set_ylabel('$y$', fontsize=13)
ax2.set_title('5 different function realizations from posterior')
# NOTE(review): repeats the ax1.axis call above — probably meant ax2.axis
ax1.axis([domain[0], domain[1], -3, 3])
ax2.set_xlim([-6, 6])
plt.tight_layout()
plt.show()
from ..types import register, PlumModule, HP, P, props
from .activation_function import ActivationFunction
import torch
from .functional import linear, dropout
@register("layers.fully_connected")
class FullyConnected(PlumModule):
in_feats = HP(type=props.POSITIVE)
out_feats = HP(type=props.POSITIVE)
has_bias = HP(default=True)
dropout = HP(default=0, type=props.NON_NEGATIVE, tags=["dropout"])
in_dim = HP(default=None, required=False)
activation = HP(default=ActivationFunction(name="tanh"))
weight = P("out_feats", "in_feats",
tags=["weight", "fully_connected"])
bias = P("out_feats", conditional="has_bias",
tags=["bias", "fully_connected"])
def forward(self, inputs):
preact = linear(inputs, self.weight, self.bias, self.in_dim)
act = self.activation(preact)
output = dropout(act, p=self.dropout, training=self.training)
return output
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
#Item A
def f(x):
    """Quarter unit circle in the first quadrant: y = sqrt(1 - x²)."""
    return np.sqrt(1 - x ** 2)
def monte_carlo(seed_x, seed_y, n_points=100):
    """Estimate π by Monte Carlo integration of the quarter unit circle.

    Pseudo-random points in [0, 1)² are generated with two independent
    Park–Miller LCGs (a=16807, m=2³¹−1) seeded by *seed_x* and *seed_y*.
    Returns 4 × (points under the curve) / (total points).

    ``n_points`` generalizes the previously hard-coded 100 samples and
    defaults to the original value. Also simplified: the total counter was
    incremented identically in both branches, so it is just ``n_points``.
    """
    a = 16807        # Park–Miller multiplier
    m = 2147483647   # modulus 2**31 - 1
    inside = 0       # points falling under the quarter circle
    for _ in range(n_points):
        seed_x = (a * seed_x) % m  # advance the x LCG
        seed_y = (a * seed_y) % m  # advance the y LCG
        rand_x = seed_x / m
        rand_y = seed_y / m
        if rand_y <= f(rand_x):  # point lies inside the circle
            inside += 1
    return 4 * (inside / n_points)
#Item B -- single estimate with the base seeds
seed_x = 10300636  # seed for the x values
seed_y = 10030065  # seed for the y values
print(monte_carlo(seed_x, seed_y))

#Item C -- repeat the estimate many times, tracking mean and spread
dados = []    # rows of the results table
n = 2         # iteration counter (a row is recorded at powers of two)
I_m = 0       # running mean of the integral estimates
soma_I = 0    # accumulator for the sum of I values
dif_I = 0     # squared difference between I_m and I
soma_dif = 0  # accumulator for the squared differences
while n<=131072:
    seed_x += 1  # perturb the x seed for a fresh run
    seed_y += 1  # perturb the y seed
    I = monte_carlo(seed_x, seed_y)  # one Monte Carlo estimate of the integral
    soma_I += I
    I_m = soma_I/n  # running mean of the estimates
    dif_I = (I_m - I)**2  # squared deviation, feeding the σ estimate
    soma_dif += dif_I
    σ = np.sqrt(soma_dif/(n-1))  # sample standard deviation
    σ_m = σ/np.sqrt(n)  # standard error of the mean
    if (n & (n-1) == 0) and n != 0:  # n is a power of two
        dados.append([n, I_m, σ, σ_m])  # record a table row
    n += 1
# Build and display the results table
col = ('N_tent','I_m','σ','σ_m')
table = plt.table(cellText=dados,colLabels=col,loc='top')
plt.axis('off')
table.scale(2.0,1.5)
table.auto_set_font_size(False)
table.set_fontsize(11)
# BUG FIX: ``plt.show`` was referenced without parentheses, so the figure
# was never actually displayed.
plt.show()
# Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from facette.utils import *
import json
SOURCE_NAME = "name"
SOURCE_ORIGINS = "origins"
SOURCE_UPDATED = "updated"
class Source:
def __init__(self, js):
self.source = {}
self.name = facette_to_json(SOURCE_NAME, js, self.source)
self.origins = facette_to_json(SOURCE_ORIGINS, js, self.source)
self.updated = facette_to_json(SOURCE_UPDATED, js, self.source)
def __str__(self):
return json.dumps(self.source)
|
# -*- coding:utf-8 -*-
from pykafka import KafkaClient
import codecs
import logging
logging.basicConfig(level=logging.INFO)
class kafka(object):
    """Thin convenience wrapper around a pykafka client bound to one topic.

    NOTE: this module uses Python 2 ``print`` statements.
    """
    def __init__(self, ip, port, topic):
        """
        :param ip: kafka broker IP
        :param port: kafka broker port
        :param topic: kafka topic name
        """
        self.client = KafkaClient(hosts="%s:%s" % (ip, port))
        self.topic = self.client.topics[topic]

    def produce_kafka_data(self, message=""):
        """
        Produce a single message synchronously.
        :param message: message payload to produce
        :return:
        """
        with self.topic.get_sync_producer() as producer:
            producer.produce(message)

    def produce_kafka_file(self, filename):
        """
        Produce every non-empty line of a file as one message each.
        :param filename: path of the input file
        :return:
        """
        with self.topic.get_producer() as producer:
            with codecs.open(filename, "r") as rf:
                for line in rf.readlines():
                    line = line.strip()
                    if not line:
                        continue
                    producer.produce(str(line))

    def consume_simple_kafka(self, timeout):
        # Print offset and value of each message until ``timeout`` ms pass
        # with no new data.
        consumer = self.topic.get_simple_consumer(consumer_timeout_ms = timeout)
        for message in consumer:
            if message is not None:
                print message.offset, message.value
# def consume_kafka(self, zkhost):
# balanced_consumer = self.topic.get_balanced_consumer(
# consumer_group="testgroup",
# auto_commit_enable=False,
# zookeeper_connect=zkhost,
# #zookeeper=zkhost,
# zookeeper_connection_timeout_ms=6000,
# consumer_timeout_ms=10000,
# )
# for message in balanced_consumer:
# if message is not None:
# print message.offset, message.value |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, response
from django.urls import reverse
from .models import Quizapplication
# Create your views here.
def home(request):
    """Quiz page: GET renders the questions, POST grades the submitted answers.

    POST keys that are numeric strings are treated as question ids mapping to
    the chosen answer; non-numeric keys (e.g. the CSRF token) are skipped.
    """
    app = Quizapplication.objects.all()
    if request.method == 'POST':
        answer = request.POST
        print(answer)
        # One POST key is not a question (presumably the CSRF token), hence -1
        # — TODO confirm against the form template.
        Total_ques = len(answer)-1
        print(Total_ques)
        score = 0
        for i in answer:
            if i.isnumeric():
                # Look up the stored correct answer for question id ``i``.
                result = Quizapplication.objects.filter(id = i)[0].correct_ans
                if result == answer[i]:
                    score += 1
                else:
                    print(result)
        return render(request, 'index.html', {'score':score, 'ques_answered':Total_ques, 'Total_ques':len(app), "app":app})
    return render(request, "index.html", {
        "app": app
    })
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Program: hola_test.py
# Description: Este programa prueba el modulo hola usando UnitTest
# Author: Diego Fernando Marin
# Standard Test imports
from __future__ import unicode_literals
import unittest
import sys
# Add the module location to the search path
sys.path.insert(0, '..') # the parent directory
# Module to be tested
import hola
class HolaTests(unittest.TestCase):
    """Unit tests for ``hola.saludo``."""

    def test_hola_noarg(self):
        # No argument -> default greeting.
        self.assertEqual( 'Hola, Mundo!', hola.saludo() )

    def test_hola_none(self):
        # Explicit None behaves like no argument.
        self.assertEqual( 'Hola, Mundo!', hola.saludo(None) )

    def test_hola_empty(self):
        # Empty string also falls back to the default greeting.
        self.assertEqual( 'Hola, Mundo!', hola.saludo('') )

    def test_hola_01(self):
        self.assertEqual( 'Hola, Juana!', hola.saludo('Juana') )

    def test_hola_02(self):
        self.assertEqual( 'Hola, Diego!', hola.saludo('Diego') )

if __name__ == "__main__":
    unittest.main()
|
#suponiendo que me acuerdo bien del ejercicio
from validadorDatos import validarDatoNumerico
def validarDatoBool(mensaje):
    """Prompt with *mensaje* until the user answers S/N (case-insensitive).

    Returns the raw string the user typed ('s', 'S', 'n' or 'N').
    """
    while True:
        dato = input(mensaje)
        # BUG FIX: the original only upper-cased the "S" comparison
        # (``dato.upper() == "S" or dato == "N"``), so a lowercase 'n' was
        # rejected forever. Also drops the redundant str() wrapper and the
        # raise/except used as control flow.
        if dato.upper() in ("S", "N"):
            return dato
        print("solo se permite S/N")
def validaringrediente(mensaje):
    """Prompt with *mensaje* until a valid ingredient index (0-6) is entered.

    Returns the validated integer.
    """
    while True:
        try:
            datoNumerico = int(input(mensaje))
            # Valid ingredient indices are 0..6 (seven ingredients).
            if 0 <= datoNumerico <= 6:
                return datoNumerico
        except ValueError:
            # FIX: narrowed from a bare ``except`` (which also swallowed
            # KeyboardInterrupt/SystemExit) to non-numeric input only.
            pass
        print("Debe ingresar un dato NUMERICO y que este en la lista de ingredientes")
def validarDatoString(mensaje):
    """Prompt with *mensaje* until the input is not made purely of digits.

    Returns the accepted string. Note: mixed input like 'abc123' is accepted,
    matching the original behaviour.
    """
    while True:
        dato = input(mensaje)
        # FIX: replaced raise/except used as control flow with a plain test.
        if not dato.isdigit():
            return dato
        print("solo se permite texto")
ing =[]  # ingredients chosen by the user
# Display names; the number in parentheses is the index the user types.
ingredientes = ["Peperoni (0)", "Queso (1)", "Choclo (2)", "Tomate(3)", "Champiñones (4)", "Tocino (5)", "Salchicha(6)"]
print("-------------------------------- * ------------------------")
print("Bienvenido, arme su pizza, primero escoja la cantidad de ingredientes")
print("1 ingrediente = 3000")
print("2 ingredientes = 4000")
print("3 ingredientes = 5000")
# Build the pizza
# NOTE(review): the prompt says each extra ingredient costs $1500, but the
# price formulas below charge $1000 per extra — confirm which is correct.
CantIngredientes = validarDatoNumerico("escoja la cantidad de ingredientes que quiere en su pizza \ncada ingrediente adicional tiene un valor de $1500: ")
print("los ingredientes disponibles son:")
if CantIngredientes == 0:
    exit()  # zero ingredients: nothing to order
for z in ingredientes:
    print(z)
# Ask for each ingredient by its numeric index
for x in range(1, CantIngredientes+1):
    ing.append(ingredientes[validaringrediente(f"ingrese el ingrdiente N°{x}: ")])
print("su pizza contiene:")
for i in ing:
    print(i)
# Optionally add more ingredients, one at a time
while True:
    pregunta = validarDatoBool("desea agregar otro ingrediente (S/N): ")
    if pregunta.upper() == "S":
        CantIngredientes += 1
        # single-iteration range keeps the prompt numbering consistent
        for x in range(CantIngredientes, CantIngredientes+1):
            ing.append(ingredientes[validaringrediente(f"ingrese el ingrdiente N°{x}: ")])
        print("su pizza contiene ahora:")
        for i in ing:
            print(i)
    else:
        break
# Price: base 2000 + 1000 per ingredient up to 3 ingredients;
# beyond 3, start from the 5000 three-ingredient price plus 1000 per extra.
if CantIngredientes < 4:
    precio = 2000 + (1000*CantIngredientes)
    print("el total de su pizza es", precio)
else:
    precio = 5000 + ((CantIngredientes-3)*1000)
    print("el total de su pizza es", precio)
exit()
from keras.models import Sequential
import keras
from keras.utils import to_categorical
from keras.layers import Dense, Conv1D, Conv2D, Flatten, Reshape, MaxPooling1D, MaxPooling2D, GlobalAveragePooling1D, Dropout, BatchNormalization
import pandas as pd
from sklearn.model_selection import train_test_split
from joblib import dump, load
import numpy as np
# Normalizing X considering the absolute values of the coordinates
# Bounding box of the GPS data (degrees) and the time range (seconds in a
# day) used for the min-max normalisation functions below.
highest_latitude = 22.836006
lowest_latitude = 22.801396
highest_longitude = 47.095658
lowest_longitude = 47.046078
# NOTE: keeps the original 'hightest' typo — the functions below reference it.
hightest_time = 86400
lowest_time = 0
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from math import sin, cos, sqrt, atan2, radians
from joblib import dump, load
import numpy as np
import sys
import math
def adjustTimeColumn(time):
    """Min-max normalise a time-of-day value; 0 (padding) stays 0."""
    if time == 0:
        return 0
    span = hightest_time - lowest_time
    return (time - lowest_time) / span
def adjustLatitudeColumn(latitude):
    """Min-max normalise a latitude; 0 (padding) stays 0."""
    if latitude == 0:
        return 0
    span = highest_latitude - lowest_latitude
    return (latitude - lowest_latitude) / span
def adjustLongitudeColumn(longitude):
    """Min-max normalise a longitude; 0 (padding) stays 0."""
    if longitude == 0:
        return 0
    span = highest_longitude - lowest_longitude
    return (longitude - lowest_longitude) / span
def adjustTrainDf(df):
    """Normalise every column of *df*; columns cycle time/lat/lon (mod 3)."""
    # Column index modulo 3 selects which normaliser applies.
    normaliser_by_offset = {
        0: adjustTimeColumn,
        1: adjustLatitudeColumn,
        2: adjustLongitudeColumn,
    }
    for column in df:
        df[column] = df[column].apply(normaliser_by_offset[column % 3])
    return df
def convertTo3D(df):
    """Split each row's interleaved columns into time/lat/lon series and
    return an array of shape (n_samples, 60, 3).

    NOTE(review): the stacked array has shape (3, n_samples, 60); the final
    ``reshape`` to (n_samples, 60, 3) reinterprets the flat buffer rather
    than transposing axes, so the time/lat/lon values end up interleaved in
    a non-obvious order along the last axis — confirm this matches how the
    model was trained before changing it.
    """
    time_train_df = []
    lat_train_df = []
    lon_train_df = []
    for line in range(len(df.index)):
        # Column index mod 3 selects the series: 0=time, 1=lat, 2=lon.
        time_train_df.append(df.iloc[line][df.iloc[line].index % 3 == 0].to_numpy())
        lat_train_df.append(df.iloc[line][df.iloc[line].index % 3 == 1].to_numpy())
        lon_train_df.append(df.iloc[line][df.iloc[line].index % 3 == 2].to_numpy())
    new_df = np.array([np.array(time_train_df), np.array(lat_train_df), np.array(lon_train_df)])
    return new_df.reshape((new_df.shape[1], 60, 3))
# Constants and global variables
best_nn_location = '../data/best_rio_nn_cross_rio_bus_data.h5'  # where the trained model is saved
train_df = []

print("lendo dados...")
# Each row: 60 (time, lat, lon) triplets followed by the route label column.
paths_df = pd.read_csv('../data/rio_train_df_60_labeled.csv', header=None)
y_df = paths_df[60*3]                 # label column (column index 180)
X_df = paths_df.drop(columns=[60*3])  # the 180 feature columns
X_train, X_test, y_train, y_test = train_test_split(X_df, y_df, random_state=1, test_size=0.2, stratify=y_df)
print("dados lidos!")
#X_test = adjustTrainDf(X_test.abs())
# Labels are 1-based; shift to 0-based before one-hot encoding.
y_train = to_categorical(y_train.apply(lambda x: x-1))
y_test = to_categorical(y_test.apply(lambda x: x-1))
X_train = convertTo3D(X_train)
X_test = convertTo3D(X_test)

print("treinando...")
# 1-D CNN over the 60-step (time, lat, lon) sequences.
model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(60,3)))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(128, 3, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(2000, activation='sigmoid'))
model.add(Dense(2000, activation='sigmoid'))
model.add(Dense(19, activation='sigmoid'))
# NOTE(review): 19 mutually exclusive classes with per-unit sigmoid +
# binary_crossentropy — softmax + categorical_crossentropy would be the
# conventional choice; confirm before changing, since saved models and
# reported accuracies depend on this setup.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=500, epochs=50, validation_split=0.2, verbose=1)
acc = model.evaluate(X_test, y_test)[1]
print('acc: ' + str(acc))
model.save(best_nn_location)
"""
A collection of EOTasks for feature manipulation
"""
from .bands_extraction import EuclideanNormTask, NormalizedDifferenceIndexTask
from .blob import BlobTask, DoGBlobTask, DoHBlobTask, LoGBlobTask
from .clustering import ClusteringTask
from .doubly_logistic_approximation import DoublyLogisticApproximationTask
from .feature_manipulation import FilterTimeSeriesTask, LinearFunctionTask, SimpleFilterTask, ValueFilloutTask
from .haralick import HaralickTask
from .hog import HOGTask
from .interpolation import (
AkimaInterpolationTask,
BSplineInterpolationTask,
CubicInterpolationTask,
CubicResamplingTask,
InterpolationTask,
KrigingInterpolationTask,
LinearInterpolationTask,
LinearResamplingTask,
NearestResamplingTask,
ResamplingTask,
SplineInterpolationTask,
)
from .local_binary_pattern import LocalBinaryPatternTask
from .radiometric_normalization import (
BlueCompositingTask,
HistogramMatchingTask,
HOTCompositingTask,
MaxNDVICompositingTask,
MaxNDWICompositingTask,
MaxRatioCompositingTask,
ReferenceScenesTask,
)
from .temporal_features import (
AddMaxMinNDVISlopeIndicesTask,
AddMaxMinTemporalIndicesTask,
AddSpatioTemporalFeaturesTask,
)
__version__ = "1.4.2"
|
str = "Manohar Singh"
#str = "Manohar Singh"
print(str)
#str[0] = "M"
print(str[0])
#str[0:5] = "Manoh"
print(str[0:5])
#str[0:] = "Manohar Singh"
print(str[0:])
#str[:5:2] = ""
print(str[::-1]) |
import tweepy
import csv
import pandas as pd
# Map characters outside the Basic Multilingual Plane (e.g. emojis) to
# U+FFFD so printing tweets to a console with a narrow encoding doesn't fail.
import sys
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
def collect(csvFileName, startTime, endTime):
    """Append #TRX tweets (excluding retweets) to a CSV file.

    Searches English tweets since *startTime* and keeps only those whose
    creation time is after *endTime*.

    NOTE(security): the API credentials below are hard-coded in source; they
    should be moved to environment variables and the exposed keys revoked.
    """
    ####input your credentials here
    #For account @TeamBot123 , TRXBot
    consumer_key = 'aD0VdJEhOmG27pALUanzBwvuv'
    consumer_secret = 'ovVI2CXSwJ4FayLI7BrusCxQHZje01PEfrbKRmrJ6GYMwh2FZx'
    access_token = '1057247540154900480-604iTbnSXZ5mCK8up01VXZ4vtrj1Rr'
    access_token_secret = 'xpDen6yBtswbAmEhNRqDuCBMXzECmksrbsZxfhPVnmFyR'
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth, wait_on_rate_limit=True)

    #####trx since inception
    # FIX: open the CSV with a context manager so the handle is flushed and
    # closed even if the search loop raises (the original never closed it).
    with open(csvFileName, 'a') as csvFile:
        csvWriter = csv.writer(csvFile)
        for tweet in tweepy.Cursor(api.search, q="#TRX", count=100,
                                   lang="en",
                                   since=startTime
                                   ).items():
            # excludes retweets and keeps only tweets newer than endTime
            if (not tweet.retweeted) and ('RT @' not in tweet.text) and (tweet.created_at > endTime):
                print(tweet.created_at, tweet.text.translate(non_bmp_map))
                csvWriter.writerow([tweet.text.encode('utf-8')])
|
from django.shortcuts import render
from django.http import HttpResponse
import mysql.connector
from datetime import datetime
# Create your views here.
def index(request):
    """Render the meeting index page; on POST, insert a new meeting row.

    Reads the meeting fields from the POST body and inserts them into the
    toplantı_toplantı table.  The DB connection is always closed, even
    when the insert itself raises.
    """
    if request.method == 'POST':
        konu = request.POST['konu']
        katılımcılar = request.POST['katılımcılar']
        bsaat = request.POST['bsaat']
        ssaat = request.POST['ssaat']
        tarih = request.POST['tarih']
        kal = 1  # isPublished flag: new meetings are published by default
        # NOTE(review): credentials belong in settings/environment; Django's
        # ORM would normally replace this raw mysql.connector usage.
        connection = mysql.connector.connect(host="localhost", user="root", password="24541048", database="toplantım")
        mycursor = connection.cursor()
        sql = "INSERT INTO toplantı_toplantı(name,description,tarih,isPublished,bsal,ssal) VALUES (%s,%s,%s,%s,%s,%s)"
        value = (konu, katılımcılar, tarih, kal, bsaat, ssaat)
        try:
            # execute() moved inside the try: the original ran it before the
            # try block, so a failing statement skipped the finally-close and
            # leaked the connection.
            mycursor.execute(sql, value)
            connection.commit()
            print(f'{mycursor.rowcount} tane kayıt eklendi.')
        except mysql.connector.Error as err:
            print('hata:', err)
        finally:
            connection.close()
    return render(request, 'pages/index.html')
def about(request):
    """Render the meeting edit page; on POST, update or delete a meeting.

    POST with 'düzenle' updates the row identified by 'id'; POST with
    'sil' deletes it.  The DB connection is always closed, even when the
    statement itself raises.
    """
    if request.method == 'POST':
        if 'düzenle' in request.POST:
            id = request.POST['id']
            konu = request.POST['konu']
            katılımcılar = request.POST['katılımcılar']
            bsaat = request.POST['bsaat']
            ssaat = request.POST['ssaat']
            tarih = request.POST['tarih']
            kal = 1  # isPublished flag
            connection = mysql.connector.connect(host="localhost", user="root", password="24541048", database="toplantım")
            mycursor = connection.cursor()
            sql = "update toplantı_toplantı set name=%s,description=%s,tarih=%s,isPublished=%s,bsal=%s,ssal=%s where id=%s"
            value = (konu, katılımcılar, tarih, kal, bsaat, ssaat, id)
            try:
                # execute() inside the try: the original ran it before the
                # try, leaking the connection when the statement failed.
                mycursor.execute(sql, value)
                connection.commit()
                print(f'{mycursor.rowcount} tane kayıt eklendi.')
            except mysql.connector.Error as err:
                print('hata:', err)
            finally:
                connection.close()
        elif 'sil' in request.POST:
            id = request.POST['id']
            connection = mysql.connector.connect(host="localhost", user="root", password="24541048", database="toplantım")
            mycursor = connection.cursor()
            sql = "delete from toplantı_toplantı where id=%s"
            value = (id,)
            try:
                mycursor.execute(sql, value)
                connection.commit()
                print(f'{mycursor.rowcount} tane kayıt silindi')
            except mysql.connector.Error as err:
                print('hata:', err)
            finally:
                connection.close()
    return render(request, 'pages/about.html')
#
# @SI_COPYRIGHT@
# @SI_COPYRIGHT@
#
import os
import stack.commands
from stack.exception import *
import struct
import socket
from itertools import groupby
from operator import itemgetter
class Command(stack.commands.Command,
	stack.commands.HostArgumentProcessor):
	"""
	Output the PXE file for a host
	<arg name="host" type="string" repeat="1">
	One or more hostnames
	</arg>
	<param name="action" type="string" optional="0">
	Generate PXE file for a specified action
	</param>
	"""
	# NOTE: the class docstring above is parsed by the stacki command
	# framework for CLI help/argument metadata, so it is left untouched.
	def getHostHexIP(self, host):
		"""
		Return list of IP's (in hex format) for each interface where pxe=true for `host`
		"""
		hex_ip_list = []
		for iface in self.host_interfaces[host]:
			if iface['ip'] and iface['pxe']:
				# Compute the HEX IP filename for the host
				# inet_aton('a.b.c.d') -> binary (bytes) repr of (long) int
				# struct unpacks that into a python Long, which we then cast to a hex value
				hex_ip_list.append("%08X" % struct.unpack('!L', socket.inet_aton(iface['ip']))[0])
		return hex_ip_list
	def getBootParams(self, host, action):
		# Pick the boot action name for the host: install vs run action.
		# NOTE(review): the loop overwrites `bootaction` on every row, so
		# only the last 'list.host' row wins; if the call yields no rows,
		# `bootaction` is unbound and the loop below raises NameError —
		# presumably 'list.host' returns exactly one row per hostname; confirm.
		for row in self.call('list.host', [ host ]):
			if action == 'install':
				bootaction = row['installaction']
			else:
				bootaction = row['runaction']
		kernel = ramdisk = args = None
		# Resolve the named boot action to its kernel / ramdisk / kernel args.
		for row in self.call('list.bootaction'):
			if row['action'] == bootaction:
				kernel = row['kernel']
				ramdisk = row['ramdisk']
				args = row['args']
		return (kernel, ramdisk, args)
	def run(self, params, args):
		# Get a list of hosts
		hosts = self.getHostnames(args, managed_only=True)
		(action, ) = self.fillParams([
			('action',None)])
		# since we'll look up iface data for every host in 'hosts' anyway, get it all at once
		# stored as a class-level variable in a dict[hostname] -> [list of iface dicts]
		# NOTE(review): itertools.groupby only groups adjacent rows — this
		# assumes 'list.host.interface' output is already ordered by host.
		self.host_interfaces = dict(
			(k,list(v)) for k,v in groupby(
				self.call('list.host.interface', hosts + ['expanded=True']),
				itemgetter('host')
			))
		self.beginOutput()
		self.runPlugins([hosts, action])
		self.endOutput(padChar='', trimOwner=(len(hosts) == 1))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/8/8
@Author : AnNing
"""
import numpy as np
from pyproj import Proj, transform
# 角度 -> 弧度
DEGREES_TO_RADIANS = np.pi / 180.
# 弧度 -> 角度
RADIANS_TO_DEGREES = 180. / np.pi
# 地球平均半径
EARTH_MEAN_RADIUS_KM = 6371.009
# 地球极半径
EARTH_POLAR_RADIUS_KM = 6356.752
# 地球赤道半径
EARTH_EQUATOR_RADIUS_KM = 6378.137
WGS84_A = 6378137.0
WGS84_F = 1.0 / 298.257223563
WGS84_B = WGS84_A * (1.0 - WGS84_F)
WGS84_E2 = 2 * WGS84_F - WGS84_F ** 2
# Rotational angular velocity of Earth in radians/sec from IERS
# Conventions (2003).
ANGVEL = 7.2921150e-5
def degree2meter(degree):
    """Convert an arc length given in degrees to meters along the equator."""
    meters = degree * np.pi * EARTH_EQUATOR_RADIUS_KM * 1000. / 180.
    return meters
def meter2degree(meter):
    """Convert a distance in meters along the equator to degrees of arc."""
    degrees = (meter * 180) / (np.pi * EARTH_EQUATOR_RADIUS_KM * 1000)
    return degrees
class ProjCore:
    """
    Common projection helper: wraps a proj4 projection string plus a regular
    output grid and converts between lon/lat and grid row/column indices.
    """
    def __init__(self, projstr, res, unit,
                 row=None, col=None, pt_tl=None, pt_br=None,):
        """
        [args]:
        projstr  proj4 projection parameter string
        res      grid resolution
        unit     resolution unit: m, km or deg — must match the projection
        row      number of grid rows
        col      number of grid columns
        pt_tl    (lon, lat) tuple of the top-left corner
        pt_br    (lon, lat) tuple of the bottom-right corner
        Exactly one of the pairs (row, col) or (pt_tl, pt_br) must be given
        to fix the grid size; they cannot both be None.
        projstr examples:
        1. equirectangular (lat/lon)
        "+init=epsg:4326" or "+proj=longlat +datum=WGS84 +no_defs"
        # "+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +datum=WGS84 +x_0=-half_res +y_0=half_res"
        2. polar stereographic
        "+proj=stere +ellps=clrk66 +lat_0=90 +lat_ts=70 +lon_0=0 +k_0=0.969858730377 +a=6371000 +units=m"
        3. Lambert azimuthal equal-area
        "+proj=laea +lat_0=-74.180000 +lon_0=-146.620000 +x_0=0 +y_0=0 +ellps=WGS84"
        4. Albers equal-area (commonly used for China)
        "+proj=aea +lat_0=0 +lon_0=105 +lat_1=25 +lat_2=47 +x_0=0 +y_0=0 +ellps=krass +a=6378245.0 +b=6356863.0"
        5. more to be added
        """
        self.proj4str = projstr
        self.pfunc = Proj(self.proj4str)  # forward/inverse transform function
        # Normalise the resolution to the unit the projection works in.
        if unit == "km":
            self.res = res * 1000
            self.unit = "m"
        elif unit == "deg":
            # NOTE(review): a "deg" resolution is converted to radians here —
            # this pairs with pyproj/proj4 lat-long projections returning
            # radians for forward transforms; confirm against the pyproj
            # version in use.
            self.res = np.deg2rad(res)
            self.unit = unit
        else:
            self.unit = unit
            self.res = res
        if row is not None and col is not None:
            # Grid size given directly; centre the grid on the projection origin.
            self.row = row
            self.col = col
            self.x_tl = -(self.col - 1) / 2 * self.res
            self.y_tl = (self.row - 1) / 2 * self.res
        elif pt_tl is not None and pt_br is not None:
            # Grid size derived from the projected corner points.
            self.x_tl, self.y_tl = self.pfunc(*pt_tl)
            x_br, y_br = self.pfunc(*pt_br)
            self.row = int(round((self.y_tl - y_br) / self.res)) + 1
            self.col = int(round((x_br - self.x_tl) / self.res)) + 1
        else:
            raise ValueError("row、 col 和 pt_tl、 pt_br 两对里必须传一对,用以确定网格大小, 不能都是None")
        self.lons = None
        self.lats = None
        self.grid_lonslats()  # precompute per-cell lon/lat arrays
    def lonslats2ij(self, lons, lats):
        """
        Convert lon/lat to grid row/column indices: lons, lats -> i, j.
        Accepts scalars or n-d arrays; returns indices of the same shape.
        """
        if isinstance(lons, (list, tuple)):
            lons = np.array(lons)
        if isinstance(lats, (list, tuple)):
            lats = np.array(lats)
        if isinstance(lons, np.ndarray):
            assert lons.shape == lats.shape, \
                "lons and lats must have same shape."
            args_shape = lons.shape
            # Flatten to 1-D: proj only accepts 1-D arguments.
            lons = lons.reshape((-1))
            lats = lats.reshape((-1))
            # Project to plane coordinates, then map y->row and x->column.
            x, y = self.pfunc(lons, lats)
            i = self.__y2i(y)
            j = self.__x2j(x)
            return i.reshape(args_shape), j.reshape(args_shape)
        else:
            x, y = self.pfunc(lons, lats)
            i = self.__y2i(y)
            j = self.__x2j(x)
            return i, j
    def __y2i(self, y):
        """
        Map projected y coordinate(s) to row index (row 0 at the top edge).
        """
        if isinstance(y, (list, tuple)):
            y = np.array(y)
        return np.rint((self.y_tl - y) / self.res).astype(int)
    def __x2j(self, x):
        """
        Map projected x coordinate(s) to column index (column 0 at the left edge).
        """
        if isinstance(x, (list, tuple)):
            x = np.array(x)
        return np.rint((x - self.x_tl) / self.res).astype(int)
    def grid_lonslats(self):
        """
        Compute self.lons / self.lats: the lon/lat of every grid cell.
        """
        # Build 2-D row/column index matrices.
        i, j = np.mgrid[0:self.row:1, 0:self.col:1]
        y = self.__i2y(i)
        x = self.__j2x(j)
        # Flatten x and y to 1-D: proj only accepts 1-D arguments.
        x = x.reshape((-1))
        y = y.reshape((-1))
        lons, lats = self.pfunc(x, y, inverse=True)
        # Reshape back to the 2-D grid.
        self.lons = lons.reshape((self.row, self.col))
        self.lats = lats.reshape((self.row, self.col))
    def __i2y(self, i):
        """
        Map row index to projected y coordinate.
        """
        if isinstance(i, (list, tuple)):
            i = np.array(i)
        y = self.y_tl - i * self.res
        return y
    def __j2x(self, j):
        """
        Map column index to projected x coordinate.
        """
        if isinstance(j, (list, tuple)):
            j = np.array(j)
        x = j * self.res + self.x_tl
        return x
    def create_lut(self, lons, lats):
        """
        Build a projection lookup table (dict) mapping each source pixel
        position to its position in the projected grid.
        """
        if isinstance(lons, (list, tuple)):
            lons = np.array(lons)
        if isinstance(lats, (list, tuple)):
            lats = np.array(lats)
        assert lons.shape == lats.shape, "Lons and Lats must have same shape."
        # Projected positions are always 2-D: rows prj_i, columns prj_j.
        prj_i, prj_j = self.lonslats2ij(lons, lats)
        #
        # valid_index = np.logical_and.reduce((prj_i >= 0, prj_i < self.row,
        #                                      prj_j >= 0, prj_j < self.col))
        if lons.ndim == 1:
            pre_n = np.arange(0, lons.size, 1, "i4")
            # # drop points falling outside the projected grid
            # prj_i = prj_i[valid_index]
            # prj_j = prj_j[valid_index]
            # pre_n = pre_n[valid_index]
            return {"pre_n": pre_n, "prj_i": prj_i, "prj_j": prj_j}
        elif lons.ndim == 2:
            pre_row, pre_col = lons.shape
            pre_i, pre_j = np.mgrid[0:pre_row:1, 0:pre_col:1]
            # # drop points falling outside the projected grid
            # prj_i = prj_i[valid_index]
            # prj_j = prj_j[valid_index]
            # pre_i = pre_i[valid_index]
            # pre_j = pre_j[valid_index]
            return {"pre_i": pre_i, "pre_j": pre_j, "prj_i": prj_i, "prj_j": prj_j}
    def transform2ij(self, proj_str1, x1, y1):
        """
        Convert coordinates from another projection into this grid's
        integer row/column indices.
        """
        args_shape = x1.shape
        x1 = np.array(x1).reshape((-1))  # flatten to 1-D
        y1 = np.array(y1).reshape((-1))
        p1 = Proj(proj_str1)
        x2, y2 = transform(p1, self.pfunc, x1, y1)
        i = self.__y2i(y2)
        j = self.__x2j(x2)
        return i.reshape(args_shape), j.reshape(args_shape)
class ProjGLL:
    """
    Regular equal-latitude/longitude (plate carrée) grid.

    Maps lon/lat coordinates to row/column indices of a regular grid
    covering [slat, nlat] x [wlon, elon], with row 0 / column 0 at the
    north-west corner.
    """
    def __init__(self, nlat=90., slat=-90., wlon=-180., elon=180., res_lat=None, res_lon=None, row_max=None,
                 col_max=None):
        """
        nlat, slat, wlon, elon: north / south / west / east bounds (degrees)
        res_lat: latitude resolution in degrees (or supply row_max instead)
        res_lon: longitude resolution in degrees (or supply col_max instead)
        Raises ValueError when neither member of a resolution/size pair is given.
        """
        self.nlat = float(nlat)  # northern bound
        self.slat = float(slat)  # southern bound
        self.wlon = float(wlon)  # western bound
        self.elon = float(elon)  # eastern bound
        if res_lat is None and row_max is None:
            raise ValueError("resLat and rowMax must set one")
        if res_lon is None and col_max is None:
            raise ValueError("resLon and colMax must set one")
        if res_lat is None:
            self.rowMax = int(row_max)
            self.resLat = (self.nlat - self.slat) / self.rowMax
        else:
            self.resLat = float(res_lat)
            self.rowMax = int(
                round((self.nlat - self.slat) / self.resLat))  # number of rows
        if res_lon is None:
            self.colMax = int(col_max)
            self.resLon = (self.elon - self.wlon) / self.colMax
        else:
            self.resLon = float(res_lon)
            self.colMax = int(
                round((self.elon - self.wlon) / self.resLon))  # number of columns
    def generate_lats_lons(self):
        """Return (lats, lons) 2-D arrays of the cell-centre coordinates."""
        lats, lons = np.mgrid[
            self.nlat - self.resLat / 2.: self.slat + self.resLat * 0.1:-self.resLat,
            self.wlon + self.resLon / 2.: self.elon - self.resLon * 0.1: self.resLon]
        return lats, lons
    def lonslats2ij(self, lons, lats):
        """Convert lon/lat to (row, column) grid indices."""
        j = self.lons2j(lons)
        i = self.lats2i(lats)
        return i, j
    def lons2j(self, lons):
        """
        lons: input longitude(s) in degrees
        ret: column index on the grid, column 0 at the west edge.
        Longitudes equal to 180 are wrapped to -180 before binning.
        """
        if isinstance(lons, (list, tuple)):
            lons = np.array(lons)
        if isinstance(lons, np.ndarray):
            # Work on a copy: the original wrote -180 into the CALLER'S
            # array in place, silently corrupting the input.
            lons = lons.copy()
            idx = np.isclose(lons, 180.)
            lons[idx] = -180.
        return np.floor((lons - self.wlon) / self.resLon).astype(int)  # column
    def lats2i(self, lats):
        """
        lats: input latitude(s) in degrees
        ret: row index on the grid, row 0 at the north edge.
        """
        if isinstance(lats, (list, tuple)):
            lats = np.array(lats)
        return np.floor((self.nlat - lats) / self.resLat).astype(int)  # row
def fill_2d(array2d, mask, use_from):
    """
    Fill invalid cells of a 2-D array in place from a neighbouring cell.

    array2d   2-D array, modified in place
    mask      boolean array, True where array2d is invalid
    use_from  'u'/'up', 'd'/'down', 'l'/'left', 'r'/'right': direction the
              replacement value is taken from

    Only invalid cells whose chosen neighbour is valid are filled; any
    other use_from value leaves the array untouched (as before).
    """
    assert len(array2d.shape) == 2, \
        "array2d must be 2d array."
    assert array2d.shape == mask.shape, \
        "array2d and mask must have same shape."
    # `condition` marks cells that are invalid AND whose neighbour is valid.
    # zeros_like (the original used uninitialised empty_like and then zeroed
    # the border rows by hand) guarantees untouched border cells are False.
    condition = np.zeros_like(mask)
    # fill from the valid point above
    if use_from == 'up' or use_from == 'u':
        condition[1:, :] = mask[1:, :] & (~mask)[:-1, :]
        index = np.where(condition)
        array2d[index[0], index[1]] = array2d[index[0] - 1, index[1]]
    # fill from the valid point to the right
    elif use_from == 'right' or use_from == 'r':
        condition[:, :-1] = mask[:, :-1] & (~mask)[:, 1:]
        index = np.where(condition)
        array2d[index[0], index[1]] = array2d[index[0], index[1] + 1]
    # fill from the valid point below
    elif use_from == 'down' or use_from == 'd':
        condition[:-1, :] = mask[:-1, :] & (~mask)[1:, :]
        index = np.where(condition)
        array2d[index[0], index[1]] = array2d[index[0] + 1, index[1]]
    # fill from the valid point to the left
    elif use_from == 'left' or use_from == 'l':
        condition[:, 1:] = mask[:, 1:] & (~mask)[:, :-1]
        index = np.where(condition)
        array2d[index[0], index[1]] = array2d[index[0], index[1] - 1]
def fill_points_2d(array2d, invalid_value=0):
    """
    Fill invalid cells of a 2-D array in place.
    array2d        2-D array, modified in place
    invalid_value  value marking an invalid cell
    """
    # Sweep once from each side — right, left, up, then down — recomputing
    # the invalid-cell mask before each pass so freshly filled cells count
    # as valid sources for the next pass.
    for direction in ('r', 'l', 'u', 'd'):
        mask = np.isclose(array2d, invalid_value)
        fill_2d(array2d, mask, direction)
def fill_points_2d_nan(array2d):
    """
    Fill NaN cells of a 2-D array in place.
    array2d  2-D array, modified in place; NaN marks an invalid cell
    """
    # Sweep once from each side — right, left, up, then down — recomputing
    # the NaN mask before each pass so freshly filled cells count as valid
    # sources for the next pass.
    for direction in ('r', 'l', 'u', 'd'):
        mask = np.isnan(array2d)
        fill_2d(array2d, mask, direction)
if __name__ == '__main__':
    # Smoke test: build a global ~4 km equal-lat/lon grid and print its size.
    ps = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
    r = meter2degree(4000)  # 4000 m expressed as degrees of arc
    print(r)
    p = ProjCore(ps, r, unit="deg", pt_tl=(-179.5, 89.5), pt_br=(179.5, -89.5))  # corner points sit at cell centres
    print(p.col)
    print(p.row)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 20:46:50 2018
@author: shubham
"""
# For each of n test cases, read N and print three y/n answers:
#   1) does N divide 360 evenly (equal slices of a circle)
#   2) is N at most 360
#   3) does the triangular sum 1+2+...+N fit within 360
n = int(input())
for _ in range(n):
    N = int(input())
    divides_circle = 'y' if 360 % N == 0 else 'n'
    fits_circle = 'y' if N <= 360 else 'n'
    triangular_fits = 'y' if (N * (N + 1) / 2) <= 360 else 'n'
    print(divides_circle, fits_circle, triangular_fits)
#!/usr/bin/env python
# Script for localizing the Campus Rover using April Tags. For this script to work, tags
# need to be postioned on a premade map and coordiantes need to be found (these are then stored
# in sendTransform call at the end of the script, currently hardcoded for testing).
import rospy
import tf
from tf import TransformerROS, Transformer
from geometry_msgs.msg import PoseStamped, Point, PoseWithCovarianceStamped
from apriltags2_ros.msg import AprilTagDetectionArray
from std_msgs.msg import Header
import numpy as np
# One-shot node initialisation: this file is a ROS node (Python 2), not an
# importable module.
rospy.init_node('tag_localization')
transformer = TransformerROS()  # NOTE(review): created but never used below — confirm before removing
listener = tf.TransformListener()  # NOTE(review): also unused in this script
def tag_detection_callback(msg):
    # Called for every AprilTagDetectionArray message; when a tag is seen,
    # publishes an estimated map-frame robot pose on /initialpose.
    print msg
    if msg.detections:
        # Pose of the first detected tag relative to the robot/camera frame.
        robot_to_tag_pose = PoseStamped(msg.detections[0].pose.header, msg.detections[0].pose.pose.pose)
        print robot_to_tag_pose
        pos = robot_to_tag_pose.pose.position
        orientation = robot_to_tag_pose.pose.orientation  # NOTE(review): unused below
        # The offsets are the hard-coded map coordinates of tag 14 (see the
        # tag table near the bottom of the file).
        robot_x = -7.38 + pos.x
        robot_y = -0.20 + pos.y
        # NOTE(review): angle/robot_angle are computed but never used in the
        # published pose (orientation.w is fixed to 1.0 below), and the
        # division raises when robot_y == 0 — confirm intended.
        angle = np.arctan(robot_x / robot_y)
        robot_angle = 90 - angle
        robot_pose = PoseWithCovarianceStamped()
        robot_pose.header = Header()
        robot_pose.header.stamp = rospy.Time.now()
        robot_pose.header.frame_id = 'map'
        robot_pose.pose.pose.position = Point(robot_x, robot_y, pos.z)
        robot_pose.pose.pose.orientation.w = 1.0  # identity orientation
        print "====="
        #print robot_pose
        robot_localization_pub.publish(robot_pose)
# Wire up the tag subscriber and the pose publisher, then broadcast the
# static map->tag_14 transform at 5 Hz until shutdown.
tag_detection_sub = rospy.Subscriber('/tag_detections', AprilTagDetectionArray, tag_detection_callback)
# NOTE(review): rospy.Publisher without queue_size triggers a deprecation
# warning and synchronous publishing — consider queue_size=10.
robot_localization_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped)
br = tf.TransformBroadcaster()
rate = rospy.Rate(5)
# Measured map coordinates of each tag (position + quaternion):
#tag 13; x=-1.47 y=1.61 z=0.52 Quaternion: -0.44 0.54 0.56 -0.43
#tag 14; x=-7.38 y=-0.20 z=0.58 Quaternion: 0.51 0.48 0.47 0.52
#tag 12; x=-5.20 y=-6.12 z=0.39 Quaternion: 0.04 0.71 0.70 0.08
#tag 10; x=-2.86 y=-3.53 z=0.51 Quaternion: -0.47 0.55 0.54 -0.43
#tag 7; x=1.00 y=3.82 z=0.49 Quaternion: 0.70 -0.10 -0.09 0.67
#tag 11; x=-1.55 y=1.88 z=0.37 Quaternion: 0.56 0.43 0.46 0.52
#tag 8: x=-2.91 y=-3.12 z=0.49 Q 0.53 0.47 0.46 0.52
while not rospy.is_shutdown():
    # NOTE(review): the broadcast z is -0.58 while the table above lists
    # +0.58 for tag 14 — confirm the sign.
    br.sendTransform(
        (-7.38, -0.20, -0.58),
        (0.51, 0.48, 0.47, 0.52),
        rospy.Time.now(),
        'tag_14',
        'map'
    )
    rate.sleep()
|
from collections import namedtuple
from suds import WebFault
from api_exception import api_exception
from entity import entity
import util
class enum_zone(entity):
    """An ENUM Zone object in BAM
    ENUM zones provide voice over IP (VoIP) functionality within a DNS server.
    The system requires DNS to manage the phone numbers associated with client end points; Address
    Manager provides an E164 or ENUM zone type for this purpose. The ENUM zone represents the area
    code for the phone prefixes and numbers stored in it. ENUM zones contain special sub-zones called
    prefixes that represent telephone exchanges and can contain the records for the actual devices.
    VoIP devices are addressed in several ways. A uniform resource identifier (URI) string provides custom
    forward locator references for these devices as covered in RFC 3401. Reverse DNS is used to discover
    the relevant information for a device based on its phone number. Name authority pointer (NAPTR) records
    are used to represent this information.
    """
    # One (service, uri, comment, ttl) record attached to an ENUM number.
    ServiceData = namedtuple('ServiceData', 'service, uri, comment, ttl')
    # Enumeration of the service identifiers BAM accepts for ENUM numbers.
    ServiceType = namedtuple('ServiceType',
                             "H323, \
                             SIP, \
                             ifax_mailto, \
                             pres, \
                             web_http, \
                             web_https, \
                             ft_ftp, \
                             email_mailto, \
                             fax_tel, \
                             sms_tel, \
                             sms_mailto, \
                             ems_tel, \
                             ems_mailto, \
                             mms_tel, \
                             mms_mailto, \
                             VPIM_MAILTO, \
                             VPIM_LDAP, \
                             voice_tel, \
                             pstn_tel, \
                             pstn_sip, \
                             xmpp, \
                             im"
                             )
    service_type = ServiceType(
        'H323',
        'SIP',
        'ifax mailto',
        'pres',
        'web http',
        'web https',
        'ft ftp',
        'email mailto',
        'fax tel',
        'sms tel',
        'sms mailto',
        'ems tel',
        'ems mailto',
        'mms tel',
        'mms mailto',
        'VPIM MAILTO',
        'VPIM LDAP',
        'voice tel',
        'pstn tel',
        'pstn sip',
        'xmpp',
        'im'
    )
    def __init__(self, api, soap_entity, soap_client):
        """Instantiate an ENUM Zone object.
        :param api: owning API wrapper used to resolve returned entity ids.
        :param soap_entity: raw SOAP entity this object wraps.
        :param soap_client: suds SOAP client used for BAM calls.
        """
        super(enum_zone, self).__init__(api, soap_entity, soap_client)
    def create_service_data(self, service_type, uri, comment='', ttl=-1):
        """Build a ServiceData tuple for add_enum_number().
        :param service_type: one of the enum_zone.service_type values.
        :param uri: URI the service resolves to.
        :param comment: optional free-text comment.
        :param ttl: record TTL; -1 means "inherit the zone default".
        :return: a ServiceData namedtuple.
        """
        return self.ServiceData(service_type, uri, comment, ttl)
    def add_enum_number(self, number, service_data, name='', **properties):
        """Add an ENUM number object to the ENUM zone.
        :param number: The ENUM phone number.
        :param service_data: List of enum_number.ServiceData objects.
        :param name: Optional name for the ENUM number.
        :param properties: Adds object properties, including user-defined fields.
        :return: An instance of the new enum_number.
        """
        data = ''
        for d in service_data:
            # Accumulate every ServiceData entry as comma-joined fields with
            # a trailing comma.  The original used '=' here, so each
            # iteration REPLACED the previous entry and only the last
            # ServiceData survived.
            data += ','.join([str(x) for x in d]) + ','
        properties['data'] = data
        if name:
            properties['name'] = name
        try:
            return self._api.get_entity_by_id(
                self._soap_client.service.addEnumNumber(self.get_id(), number,
                                                        util.parse_properties(properties))
            )
        except WebFault as e:
            raise api_exception(e.message)
    def get_enum_number(self, number):
        """Get an EnumNumber object
        :param number: The number of the EnumNumber object.
        :return An instance of enum_number.
        """
        # NOTE(review): self.EnumNumber is not defined in this class —
        # presumably inherited from entity as a type constant; confirm.
        return self.get_child_by_name(number, self.EnumNumber)
    def get_enum_numbers(self):
        """Get List of EnumNumber objects."""
        return self.get_children_of_type(self.EnumNumber)
class enum_number(entity):
    """An ENUM number in BAM
    ENUM number objects represent VoIP phone numbers within Address Manager. This functionality is
    provided as an alternative to using raw NAPTR records.
    """
    def __init__(self, api, soap_entity, soap_client):
        """Instantiate an ENUM Number object.
        :param api: owning API wrapper.
        :param soap_entity: raw SOAP entity this object wraps.
        :param soap_client: suds SOAP client used for BAM calls.
        """
        super(enum_number, self).__init__(api, soap_entity, soap_client)
    class service_type(object):
        """Services supported by ENUM number objects"""
        # Constant-name -> BAM service identifier string (note some values
        # contain spaces, so they cannot simply mirror the attribute name).
        H323 = 'H323'
        SIP = 'SIP'
        ifax_mailto = 'ifax mailto'
        pres = 'pres'
        web_http = 'web http'
        web_https = 'web https'
        ft_ftp = 'ft ftp'
        email_mailto = 'email mailto'
        fax_tel = 'fax tel'
        sms_tel = 'sms tel'
        sms_mailto = 'sms mailto'
        ems_tel = 'ems tel'
        ems_mailto = 'ems mailto'
        mms_tel = 'mms tel'
        mms_mailto = 'mms mailto'
        VPIM_MAILTO = 'VPIM MAILTO'
        VPIM_LDAP = 'VPIM LDAP'
        voice_tel = 'voice tel'
        pstn_tel = 'pstn tel'
        pstn_sip = 'pstn sip'
        xmpp = 'xmpp'
        im = 'im'
|
import os
from flask import Flask, render_template, redirect, url_for, escape, request
from datetime import datetime
import sqlite3 as sql
app = Flask(__name__)
@app.route('/')
def new():
    """Render the home page."""
    return render_template('home.html')
@app.route('/enternew')
def new_entry():
    """Render the form used to add a new grocery record."""
    return render_template('entry.html')
@app.route('/addrec',methods = ['POST', 'GET'])
def addrec():
    """Insert a grocery record posted from entry.html into the grp table.

    Renders result.html with a status message on success.
    NOTE(review): a plain GET falls through and returns None (HTTP 500);
    presumably the route is only ever reached via the entry form — confirm.
    """
    if request.method == 'POST':
        item = request.form['item']
        quant = request.form['quant']
        with sql.connect("mydatabase.db") as con:
            cur = con.cursor()
            # Parameterised query: safe against SQL injection.
            cur.execute("INSERT INTO grp (item,quant)VALUES (?,?)",(item,quant))
            con.commit()
            ms = "Record added"
        # sqlite3's context manager only commits/rolls back — it does NOT
        # close.  The original had con.close() after the return statement,
        # which was unreachable dead code; close before rendering instead.
        con.close()
        return render_template("result.html",ms = ms)
@app.route('/list')
def list():
    """Render list.html with every row from the grp table.

    NOTE(review): the view name shadows the builtin list(); kept unchanged
    so the registered endpoint name stays the same.
    """
    con = sql.connect("mydatabase.db")
    try:
        con.row_factory = sql.Row
        cur = con.cursor()
        cur.execute("select * from grp")
        rows = cur.fetchall()
        return render_template("list.html", rows=rows)
    finally:
        # The original never closed the connection, leaking one handle per
        # request; the rows are fully fetched, so closing here is safe.
        con.close()
if __name__ == "__main__":
    # NOTE(review): port 1000 is a privileged port on Unix (< 1024) — confirm
    # intended; debug=True combined with host 0.0.0.0 also exposes the
    # Werkzeug debugger to the whole network.
    app.run(debug=True,host='0.0.0.0', port=1000)
|
"""Packaging script for hello-zmq."""
# Import setup from setuptools rather than mixing distutils.core.setup with
# setuptools.find_packages: setuptools' setup understands install_requires.
from setuptools import find_packages, setup

setup(name='hello-zmq',
      version='0.1',
      description='python implementation of RFC-424242',
      packages=find_packages(),
      # install_requires (not the metadata-only distutils 'requires'
      # keyword) is what actually makes pip install these dependencies.
      install_requires=[
          'pyzmq',
          'docopt',
      ]
      )
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 17:16:11 2019
@author: sunyue
"""
"""
步骤:
1. 输入X,Y
2. 计算协方差矩阵
3. 将协方差矩阵进行特征值分解
4. 取特征值和特征向量。如取10个特征,就选择最大的10个特征值和其对应的特征向量 n_components = 10
5. 将得到的特征值和特征向量向原空间映射
"""
"""
下面代码是使用sklearn中的一个样例数据集进行PCA,将64维矩阵化为2维并展示出来
"""
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
import matplotlib.colors as colors
"""
生成PCA的类
"""
class PCA():
    """Plain-NumPy principal component analysis.

    Steps: centre the data, build the (biased, 1/m) covariance matrix,
    eigen-decompose it, keep the n_components leading eigenvectors, and
    project the data onto them.
    """
    def calculate_covariance_matrix(self, X, Y=None):
        """Return the biased covariance matrix of X with Y (or with X itself).

        :param X: (m, n) data matrix, one sample per row.
        :param Y: optional second data matrix of the same shape; defaults to X.
        """
        # 2. compute the covariance matrix
        m = X.shape[0]
        X = X - np.mean(X, axis=0)
        # 'Y is None', not 'Y == None': comparing an ndarray with == yields
        # an elementwise boolean array whose truth value raises ValueError.
        # (The leftover debug print of the shapes was removed.)
        Y = X if Y is None else Y - np.mean(Y, axis=0)
        return 1 / m * np.matmul(X.T, Y)
    def transform(self, X, n_components):
        """Project X (m, n) onto its n_components leading principal axes."""
        # 2. covariance matrix
        covariance_matrix = self.calculate_covariance_matrix(X)
        # 3. eigenvalues and eigenvectors
        eigenvalues, eigenvectors = np.linalg.eig(covariance_matrix)
        # 4. sort eigenvectors by descending eigenvalue, keep the top n_components
        idx = eigenvalues.argsort()[::-1]
        eigenvectors = eigenvectors[:, idx]
        eigenvectors = eigenvectors[:, :n_components]
        # 5. project the (uncentred) data, matching the original behaviour
        return np.matmul(X, eigenvectors)
"""
PCA结束
"""
# 1. load the data: sklearn's 8x8 digit images, flattened to 64 features
data = datasets.load_digits()
X = data.data
y = data.target
# PCA
# the original exercise asked for 10 components here
# nCoponentms = 2
nCoponentms = 2  # keep 2 components so the result can be plotted in 2-D
X_trans = PCA().transform(X, nCoponentms)
"""
PCA 效果展示
"""
# scatter the two principal components, coloured by digit class
x1 = X_trans[:, 0]
x2 = X_trans[:, 1]
cmap = plt.get_cmap('viridis')
# one colour per digit class (rebinds the imported matplotlib.colors name)
colors = [cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))]
class_distr = []
# Plot the different class distributions
for i, l in enumerate(np.unique(y)):
    _x1 = x1[y == l]
    _x2 = x2[y == l]
    _y = y[y == l]
    class_distr.append(plt.scatter(_x1, _x2, color=colors[i]))
# Add a legend
plt.legend(class_distr, y, loc=1)
# Axis labels
plt.suptitle("PCA Dimensionality Reduction")
plt.title("Digit Dataset")
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
# Draw graph of pseudo-delta functions: 1/(1+e^(k*x)) is a mirrored logistic
# curve whose transition at x=0 sharpens toward a step as k grows.
xs=np.arange(-5,5.1,.1)  # x samples from -5 to 5 inclusive, step 0.1
ks=[1,2,4]  # steepness values to compare
for k in ks:
    ys=[1/(1+np.exp(k*x)) for x in xs]
    plt.plot(xs,ys,label='k='+str(k))
plt.legend()
plt.grid()
plt.show()
|
# Generated by Django 3.0.8 on 2020-09-23 02:13
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Bids.Bid_No and refreshes two Time defaults."""
    dependencies = [
        ('auction', '0022_auto_20200921_2249'),
    ]
    operations = [
        migrations.AddField(
            model_name='bids',
            name='Bid_No',
            field=models.IntegerField(default=0),
        ),
        # NOTE(review): these defaults are the fixed timestamp captured when
        # makemigrations ran, not "now" — the model probably wants a callable
        # such as django.utils.timezone.now.  Left as-is here because
        # migration files record history and should not be edited.
        migrations.AlterField(
            model_name='listing',
            name='Time',
            field=models.TimeField(default=datetime.datetime(2020, 9, 22, 19, 13, 52, 224926)),
        ),
        migrations.AlterField(
            model_name='watch',
            name='Time',
            field=models.TimeField(default=datetime.datetime(2020, 9, 22, 19, 13, 52, 224926)),
        ),
    ]
|
#Importations And Value Table Creation#
from random import randint
bot_field = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
player_field = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
bot_sonar = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
player_sonar = [["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"]]
def printField(A):
    """Print the player's grids: "a" = ships only, "b" = ships plus tracer.

    Each cell is printed followed by a space; mode "b" appends the tracer
    grid to each row and a trailing blank line.
    """
    mode = str.lower(A)
    if mode == "a":
        print("\n---Players Ships---")
        for ship_row in player_field:
            # trailing "" reproduces the original per-cell trailing space
            print(*ship_row, "", sep=" ")
    if mode == "b":
        print("\n---Players Ships--------Tracer Field---")
        for ship_row, sonar_row in zip(player_field, player_sonar):
            print(*ship_row, *sonar_row, "", sep=" ")
        print()
def emptyShips(A):
    """Return True when every ship of the chosen side is sunk.

    "a" checks the bot's ship ledger, "b" the player's; any other value
    reports True, matching the original behaviour.
    """
    if A == "a":
        return all(sections == 0 for sections in bot_ships)
    if A == "b":
        return all(sections == 0 for sections in player_ships)
    return True
# Ship health ledgers: entry i holds the remaining un-hit sections of ship i
# (initially the ship's length); ship_names[i] is its display name.
bot_ships = [2, 3, 3, 4, 5]
player_ships = [2, 3, 3, 4, 5]
ship_names = ["Destroyer", "Submarine", "Cruiser", "Battleship", "Aircraft Carrier"]
#End#
#Intro#
print("Welcome to Battleship: Python Edition.")
print("\nIf you are new to the game of Battleship, get instructions by typing \"Help\".")
start_Condition = input("\nOtherwise, if you know how to play already then simply press the Enter key.")
if str.lower(start_Condition) == "help":
    print("""\nThe game of Battleship is simple. There are two players who each have 5 ships.
Setup:
These ships are your Destroyer - 2 Units long, Submarine - 3 Units long, Cruiser - 3 Units long,
Battleship - 4 Units long, and your Aircraft Carrier - 5 Units long. To start the game both
players begin by placing their ships onto their 10x10 field. You can place your ships either
horrizontally or vertically.
Gameplay:
Each tern a player may fire one shot at the enemy player at any location they choose. If you
hit one of the enimy players ships, they must announce \"Hit!\". After you hit the ship in
all of its sections then the ship is \"sunk\" and is no longer in play. After you have sunk
all of the other players ships then you have won. Likewise if the other player sinks all of
your ships then you lose. Both players also have a second map to keep track of where they have
shot before.""")
    input("\nPress Enter to continue. ")
#End#
#End#
#Bot Ship Placement#
# Randomly place the five bot ships: draw a position and orientation, and
# retry (without advancing i) until the ship fits and overlaps nothing.
i = 0
while i < 5:
    x = randint(0,9)#X-Location
    y = randint(0,9)#Y-Location
    z = randint(0,1)#Is Vertical?
    if z == 1:
        if y + bot_ships[i] <= 9:#Checks if there is enough vertical space.
            checkVerified = True
            for j in range(bot_ships[i]):#Checks if any other ships are present.
                if bot_field[y + j][x] != 0:
                    checkVerified = False
            if checkVerified == True:#Places current ship and increments i only if all previous checks were successful.
                for j in range(bot_ships[i]):
                    bot_field[y + j][x] = i + 1
                i += 1
    if z == 0:
        if x + bot_ships[i] <= 9:#Checks if there is enough horizontal space.
            checkVerified = True
            for j in range(bot_ships[i]):#Checks if any other ships are present.
                if bot_field[y][x + j] != 0:
                    checkVerified = False
            if checkVerified == True:#Places current ship and increments i only if all previous checks were successful.
                for j in range(bot_ships[i]):
                    bot_field[y][x + j] = i + 1
                i += 1
#End#
#End#
#Player Ship Placement#
# Interactively place the five player ships.  `suc` gates the retry loop for
# the current ship; `succ` tracks whether the overlap/bounds scan passed.
i = 0
for i in range(5):
    suc = False
    if i == 0:
        print("You may now begin placing your ships. Ships are placed onto your map using coordinates.")
        print("Remember, your coordinate plane is in the 4th quadrant. That means that your Y-Coordinate")
        print("increases as you go down on the plane, not up.\n")
    while suc == False:
        suc = True
        print("You may now place your", ship_names[i], "which is", player_ships[i], "units long.\n")
        try:
            x = int(input("Please choose your X-Coordinate (1 - 10): ")) - 1
            if x < 0:
                # negative indices would wrap around the list, bending the boat
                print("Error: You attempted to place a ship using a negative number. This would potentially result in boat wrapping and is not allowed.")
                suc = False
                continue
        except ValueError:
            print("Error: You entered something that was not an acceptable value. Integers only please.")
            suc = False
            continue
        try:
            y = int(input("Please choose your Y-Coordinate (1 - 10): ")) - 1
            if y < 0:
                print("Error: You attempted to place a ship using a negative number. This would potentially result in boat wrapping and is not allowed.")
                suc = False
                continue
        except ValueError:
            print("Error: You entered something that was not an acceptable value. Integers only please.")
            suc = False
            continue
        try:
            z = int(input("Are you placing horizontally(0) or vertically (1)?"))
            if z < 0 or z > 1:
                suc = False
                print("Error: You attempted to place a ship in the 3rd or higher dimension. This is a two dimensional game, please select one of the two.")
                continue
        except ValueError:
            print("Error: You entered something that was not an acceptable value. Integers only please.")
            suc = False
            continue
        if z == 1:
            succ = True
            for j in range(player_ships[i]):#Checks if any other ships are present.
                if suc == False:
                    continue
                try:
                    if player_field[y + j][x] != 0:
                        print("Error: You attempted to place a ship on top of an existing ship. Please choose another spot.")
                        succ = False
                        suc = False
                except IndexError:
                    # the ship ran off the bottom edge of the 10x10 grid
                    print("Error: You attempted to place a ship outside of the arena bounds.")
                    suc = False
                    succ = False
            if succ == True:
                for j in range(player_ships[i]):
                    player_field[y + j][x] = i + 1
        if z == 0:
            succ = True
            for j in range(player_ships[i]):#Checks if any other ships are present.
                if suc == False:
                    continue
                try:
                    if player_field[y][x + j] != 0:
                        print("Error: You attempted to place a ship on top of an existing ship. Please choose another spot.")
                        succ = False
                        suc = False
                except IndexError:
                    # the ship ran off the right edge of the 10x10 grid
                    print("Error: You attempted to place a ship outside of the arena bounds.")
                    suc = False
                    succ = False
            if succ == True:
                for j in range(player_ships[i]):
                    player_field[y][x + j] = i + 1
    if i != 4:
        # show the board after each placement except the last (the game
        # starts immediately after ship five)
        printField("a")
#End#
#Begin Gameplay#
# Main turn loop.  The bot uses a simple hunt/target strategy: firingMode 0
# is random hunting; modes 1-4 probe up/down/left/right from the first hit.
gameOver = False
playerWins = False
playerTurn = True
#Bot Values#
originalShot = [0, 0]   # [y, x] of the first hit on the ship being tracked
previousShot = [0, 0]   # [y, x] of the bot's most recent directed shot
firingMode = 0          # 0=random, 1=up, 2=down, 3=left, 4=right
#End#
while gameOver == False:
    #Begin Player Turn#
    if playerTurn == True:
        printField("b")
        print("It is now your turn.\n")
        try:
            x = int(input("Choose the X-Coordinate (1 - 10) for your shot: ")) - 1
            y = int(input("Choose the Y-Coordinate (1 - 10) for your shot: ")) - 1
        except ValueError:
            print("\nError: You entered something that was not an acceptable value. Integers only please.")
            continue
        if (x < 0 or x > 9) or (y < 0 or y > 9):
            print("\nError: You attempted to fire a shot outside of the arena bounds.")
            continue
        if bot_field[y][x] != 0:#If a ship was hit...
            player_sonar[y][x] = "X"#Updates Players Hit Map
            print("\nYou hit the bots", ship_names[bot_field[y][x] - 1] + ".")#Informs which ship was hit.
            bot_ships[bot_field[y][x] - 1] -= 1#Updates the ship ledger.
            if bot_ships[bot_field[y][x] - 1] == 0:#Checks if the ship was destroyed.
                print("\nYou have destroyed the bots", ship_names[bot_field[y][x] - 1] + ".")
                if emptyShips("a") == True:#Checks if all bot ships have been destroyed and ends the game if they have.
                    gameOver = True
                    playerWins = True
            bot_field[y][x] = 0#Updates the bots map.
            playerTurn = False
            continue
        else:
            player_sonar[y][x] = "O"
            print("\nYour shot missed.")
            playerTurn = False
            continue
    #End#
    #Begin Bot Turn#
    if playerTurn == False:
        # default: random hunting shot; overridden below when tracking a ship
        x = randint(0, 9)
        y = randint(0, 9)
        if firingMode == 1:#Fires Up
            x = previousShot[1]
            y = previousShot[0] - 1
            previousShot = [y, x]
        if firingMode == 2:#Fires Down
            x = previousShot[1]
            y = previousShot[0] + 1
            previousShot = [y, x]
        if firingMode == 3:#Fires Left
            x = previousShot[1] - 1
            y = previousShot[0]
            previousShot = [y, x]
        if firingMode == 4:#Fires Right
            x = previousShot[1] + 1
            y = previousShot[0]
            previousShot = [y, x]
        if (x < 0 or x > 9) or (y < 0 or y > 9):#Shot impossible, change firing mode
            firingMode += 1
            previousShot = originalShot
            continue
        if bot_sonar[y][x] == 0: #If the bot hasn't shot here before...
            if firingMode == 0: #This is a whole new ship.
                originalShot = [y, x]
                previousShot = [y, x]
            bot_sonar[y][x] = 1#Register shot.
            if player_field[y][x] != 0:#If A Hit...
                print("\nThe bot fired at ", x + 1,", ", y + 1, " and hit your ", ship_names[player_field[y][x] - 1], ".", sep = "")#Informs the player.
                player_ships[player_field[y][x] - 1] -= 1#Updates ship ledger.
                if firingMode == 0:
                    firingMode = 1#Updates firing mode.
                if player_ships[player_field[y][x] - 1] == 0:#Checks if ship was destroyed
                    print("\nThe bot has destroyed your", ship_names[player_field[y][x] - 1] + ".\n")
                    firingMode = 0
                if emptyShips("b") == True: #Checks if all players ships have been destroyed.
                    gameOver = True #If they have, then end the game.
                player_field[y][x] = 0 #Updates player map.
                playerTurn = True #Informs the swapper that it is now the players turn.
            else: #If the bot missed, then change the firing mode.
                print("\nThe bot fired at ", x + 1,", ", y + 1, " and missed.", sep = "")
                playerTurn = True #Informs the swapper that it is now the players turn.
                if firingMode != 0: #If the bot is already tracking a ship, then migrate to the next mode.
                    firingMode += 1
                    previousShot = originalShot #Resets previousShot values for new firing mode to use.
        else:#If the bot has shot there before, shoot the next spot.
            continue
    #End
#End
if playerWins == True:
print("\nGame Over: The Player Wins")
else:
print("\nGame Over: The Bot Wins")
input("\nPress enter to close.")#In terminal the program would close automatically.
exit()#The input is added to give a break point and the exit command will make the close out work in IDLE.
|
import unicodedata
import ast
import file_save_load as fsl
######################################################
# adding new budgets
######################################################
# NOTE(review): despite the header above, this script merges Wikipedia
# *plots* (not budgets) into the movie dataset. Python 2 only (print
# statements below).
fileNameDataset = 'imdb_dataset_v7_no_plots'
fileNameBudgets = '_wiki_plot_for_' + fileNameDataset
actors_amount = 6
movies = fsl.read_from_file(fileNameDataset, actors_amount)
# Each line of the plot file is a Python literal evaluated as (plot, title).
new_plots = {}
with open('files/' + fileNameBudgets) as file:
    for entry in file:
        temp = ast.literal_eval(entry)
        title = temp[1]
        plot = temp[0]
        new_plots[title] = plot
# Attach an ASCII-normalized plot to every movie we found a plot for,
# counting how many were matched.
c = 0
for title in movies:
    if title in new_plots:
        movies[title]["plot"] = unicodedata.normalize('NFKD', new_plots[title]).encode('ascii','ignore')
        c += 1
print "new plot found for:", c
print "amount of movies with plot" , len(movies)
fsl.save_to_dataset(movies,actors_amount)
|
import torch
from torch.autograd import grad
import funcsigs
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import truncnorm
import implementation.pytorch_autograd.aux_funcs_torch as fs
from scipy.io import loadmat, savemat
from implementation.pytorch_autograd.nuts import NUTS, Metropolis
### FUNCTIONS
def positive_logpost_wrap(par_value, par_name, other_pars):
    """Numpy-facing objective for a single parameter of ``log_posterior``.

    Substitutes ``par_value`` (converted to a grad-enabled tensor) for the
    argument named ``par_name``; every other argument is taken from
    ``other_pars``. Returns ``(log_posterior_value, gradient)`` as numpy
    arrays, the format the NUTS/Metropolis samplers expect.
    """
    names = funcsigs.signature(log_posterior).parameters.keys()
    target = torch.from_numpy(par_value).requires_grad_(True)
    # Assemble the full argument set, swapping in the tensor under study.
    pars = {n: (target if n == par_name else other_pars[n]) for n in names}
    # Forward pass through the full model.
    ll = log_posterior(pars['X'], pars['eta_t'], pars['alpha_t'], pars['c_t'], pars['gamma_t'],
                       pars['beta_t'], pars['B_t'], pars['tau_t'], pars['height_t'],
                       pars['steep_t'], pars['w'], pars['K'], pars['l_base'])
    # Backprop w.r.t. the injected tensor only.
    par_grad = grad(ll, target)[0]
    return ll.detach().numpy(), par_grad.detach().numpy()
def log_posterior(X, eta_t, alpha_t, c_t, gamma_t, beta_t, B_t, tau_t, height_t, steep_t, w, K, l_base=torch.tensor(5.0).double()):
    """Unnormalized log posterior of the pseudo-Voigt spectrum model.

    All ``*_t`` arguments live on an unconstrained ("transformed") scale;
    they are mapped back to their natural domains below, and the
    log-Jacobian of each transform is folded into the corresponding prior
    term. X is the (W, N) data matrix, w the wavenumber axis, K the number
    of peaks. Returns a scalar tensor differentiable w.r.t. the inputs.
    """
    W, N = X.size()
    # Fixed hyperparameters.
    mean_TB = torch.tensor([0.0]).double()
    #mean_TB = torch.median(X*X)
    alpha0 = torch.tensor(0.2)
    mu_c = torch.tensor(W/2.)
    tau_c = torch.tensor(0.005)
    #beta0 = torch.tensor(1.0)
    a_tau = torch.tensor(7.5)
    b_tau = torch.tensor(1.0)
    #l_base = torch.tensor(5).double().requires_grad_(False)
    # parameter transformations8
    alpha = torch.exp(alpha_t).reshape(K,N)  # amplitudes > 0
    gamma = torch.exp(gamma_t)  # peak widths > 0
    eta = fs.general_sigmoid(eta_t, 1, 1)  # Voigt mixing in (0, 1)
    beta = torch.exp(beta_t)  # background scaling > 0
    c = fs.general_sigmoid(c_t, W, 0.025)  # peak centers in (0, W)
    tau = torch.exp(tau_t)  # noise precision > 0
    height = fs.general_sigmoid(height_t, 1000, 0.007)
    steep = fs.general_sigmoid(steep_t, 2.0, 1.0)
    # Input-dependent GP length scale for the background kernel.
    l = fs.length_scale(c, 3*gamma,steep,w,height, base=l_base)
    sigma = 1/tau
    covB = fs.gibbs_kernel(w,l,sigma)
    cholB = torch.cholesky(covB)
    # Background is parameterized via whitened coefficients B_t.
    B = torch.mv(cholB, B_t) + mean_TB
    # likelihood
    V = fs.pseudo_voigt(w,c,gamma,eta)
    I = torch.mm(V,alpha) + torch.ger(B,beta)
    ll = torch.distributions.normal.Normal(I, 1/tau).log_prob(X).sum()
    # Priors on the natural scale; the trailing *_t sums are the
    # log-Jacobians of the exp/sigmoid transforms.
    prior_alpha = torch.distributions.exponential.Exponential(alpha0).log_prob(alpha).sum() + alpha_t.sum()
    #prior_alpha = fs.truncated_normal_lpdf(alpha, torch.tensor(5.0).double(), torch.tensor(1.5).double(), torch.tensor(0.0).double(), torch.tensor(float('Inf')).double()).sum() + \
    #alpha_t.sum()
    prior_gamma = fs.truncated_normal_lpdf(gamma, torch.tensor(10.).double(), torch.tensor(1.0/6.0).double(), torch.tensor(0.0).double(), torch.tensor(float('Inf')).double()).sum() + \
                  gamma_t.sum()
    prior_beta = fs.truncated_normal_lpdf(beta, torch.tensor(0.5), torch.tensor(0.02), 0, torch.tensor(float('Inf')).double()).sum() + beta_t.sum()
    # NOTE(review): adds tau_t (a tensor) rather than tau_t.sum(); fine for
    # scalar tau but inconsistent with the other terms — confirm shape.
    prior_tau = torch.distributions.gamma.Gamma(a_tau,b_tau).log_prob(tau).sum() + tau_t
    prior_eta = torch.log(fs.dgen_sigmoid(eta_t, 1,1)).sum()
    # torch.distributions.normal.Normal(torch.tensor(20.0), torch.tensor(5.0)).log_prob(height) +
    prior_height = torch.log(fs.dgen_sigmoid(height_t, 1000,0.007)).sum()
    # NOTE(review): prior_steep is not reduced with .sum() — relies on steep
    # being effectively scalar; confirm.
    prior_steep = fs.truncated_normal_lpdf(steep, torch.tensor(0.2).double(),torch.tensor(.5).double(),
                                           torch.tensor(0.).double(),torch.tensor(5.).double()) + torch.log(fs.dgen_sigmoid(steep_t, 2.0, 1.0))
    prior_B = -0.5 * torch.dot(B_t,B_t)
    prior_c = fs.truncated_normal_lpdf(c, mu_c, 1.0 / tau_c, 0, torch.tensor(W).double()).sum() + torch.log(fs.dgen_sigmoid(c_t, W, 0.025)).sum()
    #prior_c = torch.log(fs.dgen_sigmoid(c_t, W, 0.025)).sum()
    logpost = ll + prior_alpha + prior_gamma + prior_beta + prior_tau + prior_eta + \
              prior_height + prior_B + prior_c + prior_steep
    return logpost
#### SETUP
# Load the synthetic test data: X is the (W, N) spectra matrix, gendata the
# ground-truth parameters used below for comparison plots.
mats = loadmat('/home/david/Documents/Universitet/5_aar/PseudoVoigtMCMC/implementation/data/25x25x300_K1_2hot.mat')
X = torch.from_numpy(mats['X'].T).double()
gen = mats['gendata']
W, N = X.size()
K = 1
X.shape
K = 1
# Wavenumber axis 0..W-1.
w = torch.arange(X.shape[0]).double()
# Ground-truth parameters (single peak: theta holds [c, gamma, eta]).
true_alpha = gen['A'][0][0]
#true_vp = gen['vp'][0][0]
true_c = gen['theta'][0][0][0][0]
true_eta = gen['theta'][0][0][0][2]
true_gamma = gen['theta'][0][0][0][1]
true_B = gen['B'][0][0]
true_beta = gen['b'][0][0]
true_sigma = gen['sig'][0][0]
# Reconstruct the true pseudo-Voigt profile and plot the ground truth.
true_vp = fs.pseudo_voigt(w, torch.tensor([true_c]).double(), torch.tensor([true_gamma]).double(), torch.tensor([true_eta]).double())
plt.figure()
plt.plot(true_vp.numpy())
plt.title('True Voigt')
plt.show()
plt.figure()
plt.plot(true_alpha)
plt.title('True alpha')
plt.show()
plt.figure()
plt.plot(true_B)
plt.title('True background')
plt.show()
plt.figure()
plt.plot(true_beta)
plt.title('True beta')
plt.show()
print(f"True c: {true_c}")
print(f"True gamma: {true_gamma}")
print(f"True eta: {true_eta}")
print(f"True noise: {true_sigma}")
# convert to tensors
ta = torch.from_numpy(true_alpha.T).double()
tgamma = torch.from_numpy(np.array([true_gamma])).double()
tc = torch.from_numpy(np.array([true_c])).double()
teta = torch.from_numpy(np.array([true_eta])).double()
#tsig = 1.0 /torch.from_numpy(true_sigma[0]).double()
tsig = torch.tensor(1.0).double()
tB = torch.from_numpy(true_B.ravel()).double()
tbeta = torch.from_numpy(true_beta.ravel()).double()
#tV = torch.from_numpy(true_vp.T)
tV = true_vp
# Unconstrained ("transformed") starting values for the samplers.
#alpha_t = torch.log(ta)
alpha_t = torch.log(torch.tensor(np.random.exponential(0.2, size=K*N)).double())
gamma_t = torch.log(tgamma)
c_t = fs.inv_gen_sigmoid(tc, W, 0.025)
eta_t = fs.inv_gen_sigmoid(teta, 1, 1)
tau_t = torch.log(tsig)
beta_t = torch.log(tbeta)
height = torch.tensor(150).double()
height_t = torch.unsqueeze(fs.inv_gen_sigmoid(height, 1000, 0.007), 0).double()
steep = torch.tensor(.1).double()
steep_t = torch.unsqueeze(fs.inv_gen_sigmoid(steep, 2, 1), 0).double()
l_base = torch.tensor(60).double()
# Length-scale profile and GP prior sanity checks for the background.
lt = fs.length_scale(tc, 3* tgamma, steep, w, height, base=l_base)
plt.figure()
plt.plot(lt.detach().numpy())
covB = fs.gibbs_kernel(w, lt, tsig)
cholB = torch.cholesky(covB) # not the same in inference test
cholInv = torch.inverse(cholB)
mean_tB = torch.tensor(0).double()
GP = torch.distributions.multivariate_normal.MultivariateNormal(mean_tB.unsqueeze(0), covariance_matrix=covB.double())
print(GP.log_prob(tB))
plt.figure()
plt.plot(GP.sample([5]).numpy().T)
plt.title('samples')
plt.axvline((tc - 3*tgamma).numpy())
plt.axvline((tc + 3*tgamma).numpy())
plt.figure()
plt.plot(tB.numpy())
plt.title('True background')
# Whitened representation of the true background: tB = cholB @ B_t + mean.
B_t = torch.mv(cholInv, (tB-mean_tB))
plt.figure()
plt.plot(B_t.numpy())
plt.title('True background transformed')
plt.axvline((tc - 3*tgamma).numpy())
plt.axvline((tc + 3*tgamma).numpy())
plt.figure()
plt.plot((torch.mv(cholB, B_t) + mean_tB).numpy())
plt.title('hard coded')
# prior for transformed variable is prop to -0.5*(B_t - mu)^T* (B_t-mu)!
print(-0.5*torch.dot(B_t - mean_tB, (B_t - mean_tB)))
# Arguments handed to log_posterior via positive_logpost_wrap.
par_dict = {
    'eta_t': eta_t,
    'alpha_t': alpha_t,
    'c_t': c_t,
    'gamma_t': gamma_t,
    'beta_t': beta_t,
    #'B_t': B_t,
    'B_t' : torch.randn(W).double(),
    'tau_t': tau_t,
    'height_t': height_t,
    'steep_t': steep_t,
    'X': X,
    'w': w,
    'K': K,
    'l_base' : l_base
}
# NOTE(review): this is an alias, not a copy — mutating par_dict below also
# mutates par_dict_orig.
par_dict_orig = par_dict
#%% INFERENCE
par_dict = par_dict_orig
# DUAL AVERAGING
# Short NUTS warm-up runs used only to adapt per-parameter step sizes.
M_adapt = 30
NUTS_alpha = NUTS(positive_logpost_wrap, M_adapt+1,M_adapt, par_dict['alpha_t'].numpy().ravel(), 'alpha_t', par_dict)
NUTS_B = NUTS(positive_logpost_wrap, M_adapt+1, M_adapt,par_dict['B_t'].detach().numpy(), 'B_t', par_dict)
NUTS_beta = NUTS(positive_logpost_wrap, M_adapt+1, M_adapt, par_dict['beta_t'].numpy(), 'beta_t', par_dict)
#NUTS_gamma = NUTS(positive_logpost_wrap, M_adapt+1, M_adapt, par_dict['gamma_t'].numpy(), 'gamma_t', par_dict)
#NUTS_gamma.sample()
NUTS_alpha.sample()
NUTS_B.sample()
NUTS_beta.sample()
# Adapted step sizes. NOTE(review): eps_list[-20] is a single element, not
# the last 20 — probably meant eps_list[-20:].
eps_alpha = np.mean(NUTS_alpha.eps_list[-20])
eps_B = np.mean(NUTS_B.eps_list[-20])
eps_beta = np.mean(NUTS_beta.eps_list[-20])
#eps_gamma = np.mean(NUTS_gamma.eps_list[-20])
#%% SAMPLES
par_dict = par_dict_orig
def logp_height(h, par_dict):
    """Log posterior as a function of the natural-scale height parameter.

    Maps ``h`` to the unconstrained space via the inverse generalized
    sigmoid, then evaluates the wrapped objective; the gradient is dropped.
    """
    unconstrained = fs.inv_gen_sigmoid(torch.from_numpy(np.array(h)).double(), 1000, 0.007).double().detach().numpy()
    lp, _grad = positive_logpost_wrap(unconstrained, 'height_t', par_dict)
    return lp
def prop_h(h):
    """Independence proposal for height: a single Uniform(0, 200) draw.

    The argument (current state) is intentionally ignored.
    """
    #return np.random.normal(h,10)
    lo, hi = 0, 200
    return np.random.uniform(lo, hi)
def prop_c(c):
    """Independence proposal for the peak centers: iid Uniform(0, W) draws,
    one per current center."""
    #return np.random.multivariate_normal(c, 100*np.eye(len(c)))
    k = len(c)
    return np.random.uniform(0, W, size=k)
def logp_c(c, par_dict):
    """Log posterior as a function of the natural-scale peak centers ``c``."""
    unconstrained = fs.inv_gen_sigmoid(torch.from_numpy(c), W, 0.025).detach().numpy()
    lp, _grad = positive_logpost_wrap(unconstrained, 'c_t', par_dict)
    return lp
def logp_gamma(g, par_dict):
    """Log posterior as a function of the natural-scale widths ``g``
    (log-transformed internally, matching gamma_t = log(gamma))."""
    lp, _grad = positive_logpost_wrap(np.log(g), 'gamma_t', par_dict)
    return lp
def prop_gamma(g):
    """Random-walk proposal for gamma: truncated normal centered at ``g``
    with sd 10, truncated to (0.01, 100) on the natural scale.

    a/b are the truncation bounds standardized as scipy expects:
    a, b = (clip_lo - mean) / sd, (clip_hi - mean) / sd.
    """
    sd = 10
    lo, hi = (0.01 - g) / sd, (100 - g) / sd
    return truncnorm.rvs(lo, hi, g, sd, size=len(g))
def logp_eta(eta, par_dict):
    """Log posterior as a function of the natural-scale mixing weights eta."""
    unconstrained = fs.inv_gen_sigmoid(torch.from_numpy(eta), 1, 1).double().numpy()
    lp, _grad = positive_logpost_wrap(unconstrained, 'eta_t', par_dict)
    return lp
def prop_eta(eta):
    """Independence proposal for eta: iid Uniform(1e-8, 1) draws of the same
    length as the current state (which is otherwise ignored)."""
    k = len(eta)
    return np.random.uniform(1e-8, 1, size=k)
num_samples = 125
live_plot = True
# Metropolis samplers (independence/random-walk proposals) for c, gamma,
# height and eta; B, alpha and beta are updated with NUTS below.
metropolisC = Metropolis(logp_c, np.array([200.0]), prop_c, par_dict)
metropolisGamma = Metropolis(logp_gamma, np.array([10.0]), prop_gamma, par_dict)
metropolisH = Metropolis(logp_height, np.array([30.0]), prop_h, par_dict)
metropolisEta = Metropolis(logp_eta, np.array([0.5]), prop_eta, par_dict)
# Per-parameter chain storage, one row per Gibbs sweep.
samples_dict = {'c':np.zeros((num_samples,K)),
                'gamma':np.zeros((num_samples,K)),
                'B':np.zeros((num_samples,W)),
                'height':np.zeros((num_samples)),
                'alpha' : np.zeros((num_samples, K*N)),
                'beta' : np.zeros((num_samples, N)),
                'eta' : np.zeros((num_samples, K))}
# initial sample
#NUTS_gamma = NUTS(positive_logpost_wrap, 2,0,par_dict['gamma_t'].numpy(), 'gamma_t', par_dict, start_eps=0.5)
#NUTS_gamma.sample(override_M=2, override_Madapt=0)
NUTS_B = NUTS(positive_logpost_wrap, 2,0,par_dict['B_t'].detach().numpy(), 'B_t', par_dict, start_eps=eps_B)
NUTS_B.sample(override_M=2, override_Madapt=0)
#NUTS_alpha = NUTS(positive_logpost_wrap, 2,0, par_dict['alpha_t'].numpy().ravel(), 'alpha_t', par_dict, start_eps=eps_alpha)
NUTS_alpha = NUTS(positive_logpost_wrap, 2,0, np.random.exponential(0.2, size=(K*N)), 'alpha_t', par_dict, start_eps=eps_alpha)
NUTS_alpha.sample()
NUTS_beta = NUTS(positive_logpost_wrap, 2, 0, par_dict['beta_t'].numpy(), 'beta_t', par_dict, start_eps=eps_beta)
NUTS_beta.sample()
#NUTS_c = NUTS(positive_logpost_wrap, 2,0,fs.inv_gen_sigmoid(torch.tensor([1.0]).double(), W, 0.025).numpy(),'c_t', par_dict, start_eps=eps_c)
#NUTS_c.sample()
metropolisC.sample(override_M=1)
metropolisH.sample(override_M=1)
metropolisGamma.sample(override_M=1)
metropolisEta.sample(override_M=1)
# Row 0 of each chain is the initial draw (Metropolis draws are read from
# row 0, NUTS draws from row 1, matching the indexing in the loop below).
samples_dict['c'][0,:] = metropolisC.samples[0]
#samples_dict['c'][0,:] = fs.general_sigmoid(torch.from_numpy(NUTS_c.samples[1,:]), W,0.025)
samples_dict['height'][0] = metropolisH.samples[0]
samples_dict['B'][0,:] = NUTS_B.samples[1,:]
samples_dict['gamma'][0,:] = metropolisGamma.samples[0,:]
#samples_dict['gamma'][0,:] = np.exp(NUTS_gamma.samples[1,:])
samples_dict['alpha'][0,:] = NUTS_alpha.samples[1,:]
samples_dict['beta'][0,:] = NUTS_beta.samples[1,:]
samples_dict['eta'][0,:] = metropolisEta.samples[0,:]
if live_plot:
    # Plot pixel 117's true spectrum twice: once as a static reference, and
    # once as V_plot, which the sampling loop keeps replacing.
    plt.figure()
    plt.plot((ta[:, 117] * fs.pseudo_voigt(w, tc.double(),
                                           tgamma,
                                           teta)).numpy())
    V_plot = plt.plot((ta[:,117]*fs.pseudo_voigt(w, tc.double(),
                                                 tgamma,
                                                 teta)).numpy())
    plt.pause(0.5)
# Main Gibbs sweep: update each parameter block conditional on the others.
for s in range(1,num_samples):
    print(f'Iteration {s}')
    #NUTS_c = NUTS(positive_logpost_wrap, 2,0,fs.inv_gen_sigmoid(torch.from_numpy(samples_dict['c'][s-1,:]).double(),W,0.025).numpy(),'c_t', par_dict, start_eps=eps_c)
    #NUTS_c.sample()
    print("SAMPLE C\n\n")
    # Metropolis step for c (single-pass loop; retry-until-accept disabled).
    for ic in range(1):
        metropolisC = Metropolis(logp_c, samples_dict['c'][s-1,:], prop_c, par_dict)
        metropolisC.sample(override_M=1)
        #if metropolisC.acc_rate > 0:
        #    break
    # Fresh NUTS samplers restarted from the previous sweep's state.
    NUTS_B = NUTS(positive_logpost_wrap, 2, 0, samples_dict['B'][s-1,:], 'B_t', par_dict, start_eps=eps_B)
    NUTS_alpha = NUTS(positive_logpost_wrap, 2, 0, samples_dict['alpha'][s-1,:], 'alpha_t', par_dict,
                      start_eps=eps_alpha)
    NUTS_beta = NUTS(positive_logpost_wrap, 2, 0, samples_dict['beta'][s-1,:], 'beta_t', par_dict, start_eps=eps_beta)
    NUTS_beta.sample()
    #NUTS_gamma = NUTS(positive_logpost_wrap, 2,0, np.log(samples_dict['gamma'][s-1,:]), 'gamma_t', par_dict, start_eps=0.5)
    #NUTS_gamma.sample()
    print("SAMPLE GAMMA \n\n")
    for gi in range(1):
        metropolisGamma = Metropolis(logp_gamma, samples_dict['gamma'][s-1,:], prop_gamma, par_dict)
        metropolisGamma.sample(override_M=1)
        #if metropolisGamma.acc_rate > 0:
        #    break
    NUTS_B.sample()
    NUTS_alpha.sample()
    metropolisEta = Metropolis(logp_eta, samples_dict['eta'][s - 1, :], prop_eta, par_dict)
    metropolisH = Metropolis(logp_height, np.array([samples_dict['height'][s-1]]), prop_h, par_dict)
    print("SAMPLE H\n\n")
    metropolisH.sample(override_M=1)
    print("SAMPLE ETA\n\n")
    metropolisEta.sample(override_M=1)
    # Record this sweep (Metropolis draws read from row 0, NUTS from row 1).
    #samples_dict['c'][s, :] = fs.general_sigmoid(torch.from_numpy(NUTS_c.samples[1,:]),W,0.025).numpy()
    samples_dict['c'][s,:] = metropolisC.samples[0,:]
    print(samples_dict['c'][s,:])
    samples_dict['height'][s] = metropolisH.samples[0]
    samples_dict['B'][s, :] = NUTS_B.samples[1, :]
    print(NUTS_B.samples[1,:])
    samples_dict['gamma'][s,:] = metropolisGamma.samples[0, :]
    #samples_dict['gamma'] [s,:] = np.exp(NUTS_gamma.samples[1,:])
    samples_dict['alpha'][s, :] = NUTS_alpha.samples[1,:]
    samples_dict['beta'][s,:] = NUTS_beta.samples[1,:]
    samples_dict['eta'][s,:] = metropolisEta.samples[0,:]
    if live_plot:
        # Redraw the current reconstruction of pixel 117's spectrum.
        V_plot[0].remove()
        V = fs.pseudo_voigt(w,torch.from_numpy(samples_dict['c'][s,:]),torch.from_numpy((samples_dict['gamma'][s,:])), torch.from_numpy(samples_dict['eta'][s,:]))
        V_plot = plt.plot((torch.exp(torch.tensor(samples_dict['alpha'][s,117]))*V).numpy(),color='C1')
        print(f"alpha diff: {ta[:,117].numpy() - np.exp(samples_dict['alpha'][s,117])}\n")
        # V_plot = plt.plot((V).numpy(),color='C1')
        plt.draw()
        plt.pause(0.001)
    # Push the accepted state back into par_dict (on the transformed scale)
    # so the next sweep conditions on it.
    par_dict['c_t'] = fs.inv_gen_sigmoid(torch.from_numpy(samples_dict['c'][s,:]), W, 0.025)
    par_dict['gamma_t'] = torch.log(torch.from_numpy(samples_dict['gamma'][s,:]).double())
    #par_dict['gamma'] = torch.from_numpy(samples_dict['gamma'][s,:])
    par_dict['height_t'] = fs.inv_gen_sigmoid(torch.from_numpy(np.array(samples_dict['height'][s])), 1000, 0.007).double()
    par_dict['B_t'] = torch.from_numpy(samples_dict['B'][s,:])
    par_dict['alpha_t'] = torch.from_numpy(samples_dict['alpha'][s,:])
    par_dict['beta_t'] = torch.from_numpy(samples_dict['beta'][s,:])
    par_dict['eta_t'] = fs.general_sigmoid(torch.from_numpy(samples_dict['eta'][s,:]),1,1)
|
import uuid
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models import F
from django_extensions.db.models import TimeStampedModel
from django.utils.crypto import get_random_string
def generate_api_token():
    """Default factory for User.api_token: a random 64-character string."""
    token = get_random_string(64)
    return token
class Project(TimeStampedModel):
    """A shared Frida script project, owned by a user and filed under a
    category."""

    project_id = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0.
    owner = models.ForeignKey('frontend.User')
    category = models.ForeignKey('frontend.Category')
    project_name = models.TextField()
    project_source = models.TextField()
    description = models.TextField()
    views = models.IntegerField(default=0)
    hash = models.TextField()
    project_slug = models.TextField()
    latest_version = models.TextField()

    class Meta:
        # A user cannot have two projects with the same slug.
        unique_together = ("owner", "project_slug")

    def serialize(self):
        """Return a JSON-serializable dict of the project's public fields."""
        return {
            "id": str(self.project_id),
            "project_name": self.project_name,
            "description": self.description,
            "source": self.project_source,
            "slug": self.project_slug,
            "frida_version": self.latest_version
        }

    @staticmethod
    def generate_slug(name):
        """Build a URL slug from a project name (spaces -> dashes, lowercased)."""
        return name.replace(' ', '-').lower()

    def _human_format(self, num):
        """Format a count in compact form, e.g. 1234 -> '1K' (suffixes up to 'P')."""
        magnitude = 0
        while abs(num) >= 1000:
            magnitude += 1
            num /= 1000.0
        # add more suffixes if you need them
        return '%d%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])

    @property
    def vote_count(self):
        """Human-formatted number of likes (via the liked_by reverse M2M)."""
        return self._human_format(self.liked_by.count())

    @property
    def view_count(self):
        """Human-formatted number of views."""
        return self._human_format(self.views)

    @property
    def frida_command(self):
        """Shell command a user can copy to run this project with Frida."""
        # BUGFIX: the model has no `slug` attribute — the field is
        # `project_slug`; previously this raised AttributeError.
        return "$ frida --codeshare {}/{}".format(self.owner.nickname, self.project_slug)

    def increment_view(self):
        """Atomically bump the view counter (F() avoids read-modify-write races)."""
        self.views = F('views') + 1
        self.save()

    def is_owned_by(self, user):
        """True if `user` is this project's owner."""
        return user == self.owner

    def is_liked_by(self, user):
        """True if `user` has liked this project."""
        return user.id in self.liked_by.values_list('id', flat=True)
class User(AbstractUser, TimeStampedModel):
    """Site user; extends Django's AbstractUser with API and social fields."""

    # Public display name (shown e.g. in Project.frida_command).
    nickname = models.TextField(null=True)
    # Random 64-char token used to authenticate API requests.
    api_token = models.TextField(unique=True, default=generate_api_token)
    # Reverse accessor on Project is `liked_by`.
    liked_projects = models.ManyToManyField(Project, related_name="liked_by")

    def mark_as_admin(self):
        """Grant Django superuser + staff rights and persist immediately."""
        self.is_superuser = True
        self.is_staff = True
        self.save()
class Category(TimeStampedModel):
    """Browsing category a project can be filed under."""

    category_id = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    name = models.TextField()
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: weixin.py
@time: 16-7-13 下午6:20
"""
import json
import time
import hashlib
from config import APPID, APPSECRET, WECHAT_URL, WECHAT_TOKEN
from app.lib.sign import Sign
from flask import Blueprint, request, make_response, render_template, redirect, url_for, session
import xml.etree.ElementTree as ET
from requests import get, post
from urllib import quote_plus
from tools.wechat import get_access_token, get_jsapi_ticket, xml_rep_text, make_xml_response, dict_to_xml, xml_to_dict, check_required_params, create_nonce_str, create_sign, check_sign, get_prepay_id, get_js_api_parameters, create_oauth_url_for_code, create_oauth_url_for_openid
import logging
log = logging.getLogger('app')
import sys
reload(sys)
# HACK: Python 2 only — force the process-wide default encoding to UTF-8 so
# the Chinese strings in this module survive implicit str/unicode coercion.
sys.setdefaultencoding('utf-8')
# All routes below are mounted under the /weixin prefix.
weixin_bp = Blueprint('weixin', __name__, url_prefix='/weixin')
@weixin_bp.route('/')
def demo():
    """
    JS-SDK Demo
    http://zhanghe.ngrok.cc/weixin
    """
    # Sign the current page URL (fragment stripped) with the jsapi ticket.
    page_url = request.url.split('#')[0]
    signer = Sign(get_jsapi_ticket(), page_url)
    signer.sign()
    context = dict(
        appId=APPID,
        timestamp=signer.ret['timestamp'],
        nonceStr=signer.ret['nonceStr'],
        signature=signer.ret['signature'],
    )
    return render_template('demo.html', **context)
@weixin_bp.route('/callback', methods=['GET', 'POST'])
def callback():
    """
    WeChat server endpoint.

    GET: server-address validation — echoes ``echostr`` back when the
    signature checks out. Console configuration:
        URL    http://zhanghe.ngrok.cc/weixin/callback
        Token  wechat_token
    Example validation request:
    GET /weixin?signature=0a96c67c0adf58d79ee57d5ee6837f896f70f9ec&echostr=601962190953118907&timestamp=1467559097&nonce=1527000568 HTTP/1.0

    POST: receives message/event pushes as XML and replies with a text
    message (empty text for types that never set ``content``).
    """
    if request.method == 'GET':
        token = WECHAT_TOKEN  # your token
        query = request.args  # parameters attached to the GET request
        signature = query.get('signature', '')
        timestamp = query.get('timestamp', '')
        nonce = query.get('nonce', '')
        echostr = query.get('echostr', '')
        # WeChat signature scheme: sha1 over the sorted, concatenated
        # [timestamp, nonce, token].
        s = [timestamp, nonce, token]
        s.sort()
        s = ''.join(s)
        if hashlib.sha1(s).hexdigest() == signature:
            return make_response(echostr)
        else:
            return make_response(u'验证失败')
    if request.method == 'POST':
        # rec = request.stream.read()
        # xml_rec = ET.fromstring(rec)
        xml_rec = ET.fromstring(request.data)
        from_user = xml_rec.find('FromUserName').text
        to_user = xml_rec.find('ToUserName').text
        create_time = xml_rec.find('CreateTime').text
        msg_type = xml_rec.find('MsgType').text
        content = u''
        # Handle event pushes.
        if msg_type == 'event':
            event = xml_rec.find('Event').text
            # Subscribe / follow.
            if event == 'subscribe':
                content = u'欢迎关注本微信'
                log.info(u'欢迎关注本微信')
            # Unsubscribe / unfollow.
            if event == 'unsubscribe':
                content = u'我们会慢慢改进,欢迎您以后再来'
                log.info(u'我们会慢慢改进,欢迎您以后再来')
            # Event pushed when a menu item that pulls a message is clicked.
            if event == 'CLICK':
                event_key = xml_rec.find('EventKey').text
                print event_key  # KEY value from the custom-menu API
                log.info(event_key)
            # Event pushed when a menu item that opens a link is clicked.
            if event == 'VIEW':
                event_key = xml_rec.find('EventKey').text
                print event_key  # target URL
            # Geolocation report event.
            if event == 'LOCATION':
                latitude = xml_rec.find('Latitude').text  # latitude
                longitude = xml_rec.find('Longitude').text  # longitude
                precision = xml_rec.find('Precision').text  # precision
                print latitude
                print longitude
                print precision
            # Template-message delivery report.
            if event == 'TEMPLATESENDJOBFINISH':
                status = xml_rec.find('Status').text
                # 'success'                delivered
                # 'failed:user block'      user refused to receive it
                # 'failed: system failed'  send failed (not a user refusal)
                print status
        # Text message — echoed back as the reply content.
        if msg_type == "text":
            msg_id = xml_rec.find('MsgId').text
            content = xml_rec.find('Content').text
        # Image message.
        if msg_type == 'image':
            msg_id = xml_rec.find('MsgId').text
            media_id = xml_rec.find('MediaId').text
            pic_url = xml_rec.find('PicUrl').text
        # Voice message.
        if msg_type == 'voice':
            msg_id = xml_rec.find('MsgId').text
            media_id = xml_rec.find('MediaId').text
            Format = xml_rec.find('Format').text  # audio format, e.g. amr, speex
        # Video message.
        if msg_type == 'video':
            msg_id = xml_rec.find('MsgId').text
            media_id = xml_rec.find('MediaId').text  # media id of the video
            thumb_media_id = xml_rec.find('ThumbMediaId').text  # media id of its thumbnail
        # Short-video message.
        if msg_type == 'shortvideo':
            msg_id = xml_rec.find('MsgId').text
            media_id = xml_rec.find('MediaId').text  # media id of the video
            thumb_media_id = xml_rec.find('ThumbMediaId').text  # media id of its thumbnail
        # Location message.
        if msg_type == 'location':
            msg_id = xml_rec.find('MsgId').text
            location_x = xml_rec.find('Location_X').text  # latitude
            location_y = xml_rec.find('Location_Y').text  # longitude
            scale = xml_rec.find('Scale').text  # map zoom level
            label = xml_rec.find('Label').text  # location description
        # Link message.
        if msg_type == 'link':
            msg_id = xml_rec.find('MsgId').text
            title = xml_rec.find('Title').text
            description = xml_rec.find('Description').text
            url = xml_rec.find('Url').text
        # Reply with a plain-text XML message (content may be empty).
        return make_xml_response(xml_rep_text, from_user, to_user, str(int(time.time())), content)
@weixin_bp.route('/create_menu', methods=['GET', 'POST'])
def create_menu():
    """
    Create the official account's custom menu.
    http://zhanghe.ngrok.cc/weixin/create_menu
    Success response:
    {
        "errcode": 0,
        "errmsg": "ok"
    }
    """
    access_token = get_access_token()
    url = 'https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s' % access_token
    # Fixed demo menu layout: `click` buttons push a key event back to the
    # /callback handler, `view` buttons open a URL. Button labels are
    # user-facing strings and stay in Chinese.
    data = {
        'button': [
            {
                'type': 'click',
                'name': '今日歌曲',
                'key': 'V1001_TODAY_MUSIC'
            },
            {
                'type': 'click',
                'name': '歌手简介',
                'key': 'V1001_TODAY_SINGER'
            },
            {
                'name': '菜单',
                'sub_button': [
                    {
                        'type': 'view',
                        'name': '搜索',
                        'url': 'http://www.soso.com/'
                    },
                    {
                        'type': 'view',
                        'name': '视频',
                        'url': 'http://v.qq.com/'
                    },
                    {
                        'type': 'click',
                        'name': '赞一下我们',
                        'key': 'V1001_GOOD'
                    },
                    {
                        'type': 'view',
                        'name': 'demo',
                        'url': 'http://zhanghe.ngrok.cc/weixin'
                    }]
            }]
    }
    res = post(url, data=json.dumps(data, ensure_ascii=False))
    return json.dumps(res.json())
@weixin_bp.route('/oauth')
@weixin_bp.route('/oauth/<scope>')
def oauth(scope='snsapi_base'):
    """
    Entry page of the web-authorization (OAuth) flow.
    http://zhanghe.ngrok.cc/weixin/oauth
    :param scope: snsapi_base or snsapi_userinfo
    :return: redirect to the WeChat authorization URL
    """
    callback_url = url_for('.oauth_callback', _external=True)
    return redirect(create_oauth_url_for_code(callback_url, scope))
@weixin_bp.route('/oauth_callback')
def oauth_callback():
    """
    Web-authorization callback: exchange the OAuth ``code`` for
    openid / access_token and store the openid in the session.

    (Only reachable as the redirect target of the OAuth flow — the code is
    single-use and expires after 5 minutes, so it cannot be called alone.)

    Success response from WeChat:
    {
        "access_token": "...",
        "expires_in": 7200,
        "openid": "o9XD1weif6-0g_5MvZa7Bx6OkwxA",
        "refresh_token": "...",
        "scope": "snsapi_base"
    }
    Error response:
    {"errcode":40029,"errmsg":"invalid code"}
    """
    code = request.args.get('code')
    next_url = request.args.get('next_url')
    # User denied authorization -> no code; send them back home.
    # BUGFIX: previously returned the URL string as the response body
    # instead of actually redirecting.
    if code is None:
        return redirect(next_url or url_for('.demo'))
    oauth_callback_url = create_oauth_url_for_openid(code)
    res = get(oauth_callback_url)
    # Parse once, log the raw payload, and remember the user's openid.
    payload = res.json()
    log.info(json.dumps(payload))
    session['openid'] = payload.get('openid')
    return redirect(next_url or url_for('.demo'))
@weixin_bp.route('/get_code')
@weixin_bp.route('/get_code/<scope>')
def get_code(scope='snsapi_base'):
    """
    Web authorization, step 1: ask the user to authorize and obtain a code.
    http://zhanghe.ngrok.cc/weixin/get_code
    http://zhanghe.ngrok.cc/weixin/get_code/snsapi_base
    http://zhanghe.ngrok.cc/weixin/get_code/snsapi_userinfo

    The authorized callback domain must first be configured in the
    developer-center page.

    snsapi_base returns:     {'state': '', 'code': ''}
    snsapi_userinfo returns: {'state': '', 'code': '', 'nsukey': ''}
    """
    redirect_uri = url_for('.get_openid_access_token', _external=True)
    # WeChat validates the authorization link with a strict regex match;
    # the parameter order in the URL is fixed.
    params = (
        APPID,                     # APPID
        quote_plus(redirect_uri),  # REDIRECT_URI
        'code',                    # response_type
        scope,                     # SCOPE (snsapi_base/snsapi_userinfo)
        time.time()                # STATE
    )
    url = 'https://open.weixin.qq.com/connect/oauth2/authorize?appid=%s&redirect_uri=%s&response_type=%s&scope=%s&state=%s#wechat_redirect' % params
    # return url
    return redirect(url)
@weixin_bp.route('/get_openid_access_token')
def get_openid_access_token():
    """
    Exchange the OAuth ``code`` for openid / access_token.

    (Redirect target of the OAuth flow; cannot be called on its own — the
    code is single-use and expires after 5 minutes.)
    http://zhanghe.ngrok.cc/get_openid_access_token

    Success response:
    {
        "access_token": "...",
        "expires_in": 7200,
        "openid": "o9XD1weif6-0g_5MvZa7Bx6OkwxA",
        "refresh_token": "...",
        "scope": "snsapi_base"
    }
    Error response:
    {"errcode":40029,"errmsg":"invalid code"}
    """
    code = request.args.get('code')
    # User denied authorization -> back to the home page.
    # NOTE(review): this returns the URL as the response body instead of
    # issuing a redirect — probably meant redirect(url_for('.demo')).
    if code is None:
        return url_for('.demo', _external=True)
    url = 'https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%s&grant_type=authorization_code' % (
        APPID,
        APPSECRET,
        code
    )
    res = get(url)
    return json.dumps(res.json())
@weixin_bp.route('/get_user_info')
def get_user_info():
    """
    Fetch the user's profile via web authorization (the user must follow
    the official account for this to be permitted).
    http://zhanghe.ngrok.cc/weixin/get_user_info?access_token=ACCESS_TOKEN&openid=OPENID

    Success: the user-profile JSON (province, openid, headimgurl, language,
    city, privilege, country, nickname, sex, ...).
    Error: {"errcode":40003,"errmsg":"invalid openid"}
    """
    token = request.args.get('access_token')
    openid = request.args.get('openid')
    # Language of the country/region fields: zh_CN simplified, zh_TW
    # traditional, en English.
    lang = 'zh_CN'
    res = get('https://api.weixin.qq.com/sns/userinfo?access_token=%s&openid=%s&lang=%s' % (token, openid, lang))
    res.encoding = 'utf-8'  # must be set, otherwise the payload is garbled
    return json.dumps(res.json(), ensure_ascii=False)
@weixin_bp.route('/auth_access_token')
def auth_access_token():
    """
    Check whether an (access_token, openid) pair is still valid.
    http://zhanghe.ngrok.cc/weixin/auth_access_token?access_token=ACCESS_TOKEN&openid=OPENID
    Success: {"errcode":0,"errmsg":"ok"}
    Error:   {"errcode":40003,"errmsg":"invalid openid"}
    """
    args = request.args
    res = get('https://api.weixin.qq.com/sns/auth?access_token=%s&openid=%s'
              % (args.get('access_token'), args.get('openid')))
    return json.dumps(res.json())
@weixin_bp.route('/send_tpl_msg/<openid>', methods=['GET', 'POST'])
def send_tpl_msg(openid):
    """
    Send a template message to the given follower.
    http://zhanghe.ngrok.cc/weixin/send_tpl_msg/o9XD1weif6-0g_5MvZa7Bx6OkwxA
    Success response:
    {
        "msgid": 413348094,
        "errcode": 0,
        "errmsg": "ok"
    }
    """
    access_token = get_access_token()
    url = 'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token=%s' % access_token
    # Demo payload for a fixed purchase-confirmation template; the keys
    # under 'data' must match the template's placeholders, and the values
    # are user-facing strings (kept in Chinese).
    data = {
        'touser': str(openid),
        'template_id': '-5GfH3t-ofZooFA3CkPin8k-G0vb0_kJBcwcUmxcfEs',
        'url': 'http://weixin.qq.com/download',
        'data': {
            'first': {
                'value': '恭喜你购买成功!',
                'color': '#173177'
            },
            'product': {
                'value': '巧克力',
                'color': '#173177'
            },
            'price': {
                'value': '39.8元',
                'color': '#173177'
            },
            'time': {
                'value': '2014年9月22日',
                'color': '#173177'
            },
            'remark': {
                'value': '欢迎再次购买!',
                'color': '#173177'
            }
        }
    }
    res = post(url, data=json.dumps(data, ensure_ascii=False))
    return json.dumps(res.json())
@weixin_bp.route('/create_qrcode/<int:scene_id>', methods=['GET', 'POST'])
def create_qrcode(scene_id):
    """
    Account management — generate a parameterized QR code (temporary scene).
    http://zhanghe.ngrok.cc/weixin/create_qrcode/123

    Step 1: create a QR-code ticket. Success response:
    {
        "url": "http://weixin.qq.com/q/LDrqzO-kgnL7ZxnNsRQx",
        "expire_seconds": 604800,
        "ticket": "gQH47joAAAAAAAAAASxodHRwOi8vd2VpeGluLnFxLmNvbS9xL0xEcnF6Ty1rZ25MN1p4bk5zUlF4AAIEak96VwMEgDoJAA=="
    }
    Error response:
    {"errcode":40013,"errmsg":"invalid appid"}

    Step 2: exchange the ticket for the QR-code image (returned as JPEG).
    """
    access_token = get_access_token()
    # Step 1: create the QR-code ticket (temporary scene, 7-day expiry).
    url = 'https://api.weixin.qq.com/cgi-bin/qrcode/create?access_token=%s' % access_token
    data = {
        'expire_seconds': 604800,
        'action_name': 'QR_SCENE',
        'action_info': {
            'scene': {
                'scene_id': scene_id
            }
        }
    }
    res = post(url, data=json.dumps(data, ensure_ascii=False))
    result = res.json()
    if 'errcode' in result:
        return json.dumps(result)
    # Step 2: exchange the ticket for the QR-code image.
    url = 'https://mp.weixin.qq.com/cgi-bin/showqrcode?ticket=%s' % result['ticket']
    res = get(url)
    response = make_response(res.content)
    response.headers['Content-Type'] = 'image/jpeg'
    return response
@weixin_bp.route('/short_url', methods=['GET', 'POST'])
def short_url():
    """
    Long-URL to short-URL API.
    http://zhanghe.ngrok.cc/weixin/short_url?long_url=LONG_URL
    Success: {"errcode":0,"errmsg":"ok","short_url":"http:\/\/w.url.cn\/s\/AvCo6Ih"}
    Error:   {"errcode":40013,"errmsg":"invalid appid"}
    """
    api = 'https://api.weixin.qq.com/cgi-bin/shorturl?access_token=%s' % get_access_token()
    payload = {
        'action': 'long2short',
        'long_url': request.args.get('long_url', '')
    }
    res = post(api, data=json.dumps(payload, ensure_ascii=False))
    return json.dumps(res.json())
@weixin_bp.route('/order_unified', methods=['GET', 'POST'])
def order_unified():
    """
    WeChat Pay — create a prepay (unified) order.
    http://zhanghe.ngrok.cc/weixin/order_unified
    Reference: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_1

    Required fields: appid, mch_id, nonce_str, sign, body (128),
    out_trade_no (32), total_fee (Int, in cents), spbill_create_ip,
    notify_url (256), trade_type (JSAPI/NATIVE/APP).
    Optional fields: device_info (pass "WEB" for web / official-account
    payments), detail (JSON goods list, 6000), attach (127), fee_type
    (ISO 4217, default CNY), time_start / time_expire (yyyyMMddHHmmss),
    goods_tag, product_id (required when trade_type=NATIVE), limit_pay
    ("no_credit" disables credit cards), openid (required when
    trade_type=JSAPI).

    detail = {
        'goods_detail': [
            {
                'goods_id': '',        # String, required, 32 — goods number
                'wxpay_goods_id': '',  # String, optional, 32 — WeChat Pay goods id
                'goods_name': '',      # String, required, 256 — goods name
                'goods_num': '',       # Int, required — quantity
                'price': '',           # Int, required — unit price, in cents
                'goods_category': '',  # String, optional, 32 — category id
                'body': ''             # String, optional, 1000 — description
            }
        ]
    }
    """
    # NOTE(review): GET requests fall through and return None (a 500 in
    # Flask) — confirm this endpoint is only ever POSTed to.
    if request.method == 'POST':
        dict_data = request.form.to_dict()
        # Client IP, honoring a reverse proxy when present.
        dict_data['spbill_create_ip'] = request.headers.get('X-Forwarded-For', request.remote_addr)
        # Obtain the prepay session id.
        prepay_id = get_prepay_id(dict_data)
        # Build the JS-API payment parameters for the front end.
        js_api_parameters = get_js_api_parameters(prepay_id)
        return json.dumps(js_api_parameters)
@weixin_bp.route('/order_query', methods=['GET', 'POST'])
def order_query():
    """
    Query an order (stub -- not implemented yet).
    http://zhanghe.ngrok.cc/weixin/order_query
    Reference: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_2
    """
    url = 'https://api.mch.weixin.qq.com/pay/orderquery'  # endpoint for the future implementation
    pass
@weixin_bp.route('/order_close', methods=['GET', 'POST'])
def order_close():
    """
    Close an order (stub -- not implemented yet).
    Note: an order cannot be closed until at least 5 minutes after creation.
    (The previous docstring said "query order"; this route is the close-order API.)
    http://zhanghe.ngrok.cc/weixin/order_close
    Reference: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_3
    """
    url = 'https://api.mch.weixin.qq.com/pay/closeorder'  # endpoint for the future implementation
    pass
@weixin_bp.route('/refund', methods=['GET', 'POST'])
def refund():
    """
    Apply for a refund (stub -- not implemented yet).
    The request requires a two-way (client) TLS certificate.
    http://zhanghe.ngrok.cc/weixin/refund
    Reference: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_4
    """
    url = 'https://api.mch.weixin.qq.com/secapi/pay/refund'  # endpoint for the future implementation
    pass
@weixin_bp.route('/refund_query', methods=['GET', 'POST'])
def refund_query():
    """
    Query a refund (stub -- not implemented yet).
    (The previous docstring said "apply for a refund"; this route queries one.)
    http://zhanghe.ngrok.cc/weixin/refund_query
    Reference: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_5
    """
    url = 'https://api.mch.weixin.qq.com/pay/refundquery'  # endpoint for the future implementation
    pass
@weixin_bp.route('/download_bill', methods=['GET', 'POST'])
def download_bill():
    """
    Download the reconciliation (bill) file (stub -- not implemented yet).
    Per the official docs:
    1. Unsuccessful orders do not appear in the bill. Payments revoked after
       success do appear, with the original order number and bill_type REVOKED.
    2. WeChat generates the previous day's bill from 09:00; fetch after 10:00.
    3. Monetary fields in the bill are in yuan.
    4. Only bills from the last three months can be downloaded.
    http://zhanghe.ngrok.cc/weixin/download_bill
    Reference: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_6
    """
    url = 'https://api.mch.weixin.qq.com/pay/downloadbill'  # endpoint for the future implementation
    pass
@weixin_bp.route('/pay_notify_callback', methods=['GET', 'POST'])
def pay_notify_callback():
    """
    WeChat Pay notification callback.
    http://zhanghe.ngrok.cc/weixin/pay_notify_callback

    Parses the XML notification, verifies the business result and the
    signature, and always answers with an XML ack (SUCCESS or FAIL) so
    WeChat stops retrying.
    """
    if request.method == 'POST':
        result = {
            'return_code': 'SUCCESS'
        }
        res_dict = xml_to_dict(request.data)
        try:
            # Log the raw notification for auditing
            log.info(json.dumps(res_dict, ensure_ascii=False))
            # return_code is the transport status, not the business status
            if res_dict.get('return_code') == 'FAIL':
                raise Exception(u'请求服务失败')
            # result_code is the business status
            if res_dict.get('result_code') == 'FAIL':
                raise Exception(res_dict.get('err_code_des') or u'申请支付失败')
            # Verify the signature ('sign' itself is excluded from the digest)
            res_sign_str = res_dict.pop('sign')
            if create_sign(res_dict) != res_sign_str:
                raise Exception(u'签名失败')
            # Order paid successfully
            # todo
        except Exception as e:
            result['return_code'] = 'FAIL'
            # BUG FIX: Exception.message does not exist on Python 3 (and is
            # deprecated on Python 2) -- str(e) works on both.
            result['return_msg'] = str(e)
            # Order payment failed
            # todo
        # Moved out of `finally`: a return inside finally silently swallows
        # any in-flight exception (including KeyboardInterrupt). Behaviour
        # for the handled paths is unchanged: we always reply with XML.
        return dict_to_xml(result)
|
# Build a 3x3 matrix: 1 on the diagonal, (row+1)**2 elsewhere, then print it.
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for l in range(0, 3):
    # BUG FIX: both column loops stopped at range(0, 2), so the third column
    # of the declared 3x3 matrix was never filled nor printed.
    for c in range(0, 3):
        if l == c:
            matriz[l][c] = 1
        else:
            matriz[l][c] = (l+1) ** 2
print('=-'* 15)
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
    print()
|
#!/usr/bin/python3
# import mysql.connector as ms
import os
from termcolor import colored,cprint
from colorama import init
import pyfiglet
from basic_op import addrec,delrec,searchrec,disprec,mailfun
from modfun import modrec
from tqdm import tqdm,trange
from time import sleep
from filereader import freader
from prettytable import PrettyTable
from covidinfo import coronainfo
from plyer import notification as nt # Notification system
from beepy import beep #Notification sound
x=PrettyTable()  # shared table used to render employee records
init()  # colorama.init(): enables ANSI colour output on Windows cmd/powershell
# Column headers of the employee table
x.field_names=['EmpName','Gender','EmpId','Designation','Age','Emp_Mail','Salary']
def progressbar():
    """Render a ~2-second fake loading bar (100 ticks of 0.02s each)."""
    for i in trange(100):
        # BUG FIX: the sleep was outside the loop, so the bar completed
        # instantly and then paused once; sleep on every tick instead.
        sleep(0.02)
def menu():
    """Print the colourised top-level menu plus an ASCII-art 'MENU' banner."""
    cprint(colored('[1] Enter 1 to use Employee Management System ','white','on_red'))
    print()
    cprint(colored('[2] Enter 2 to use File Reader ','magenta','on_white'))
    print()
    cprint(colored('[3] Enter 3 to use Covid Notification System ','grey','on_green'))
    print()
    a=pyfiglet.figlet_format('MENU')
    print()
    print('=================================================')
    print(a)
# Show the main menu and dispatch on the user's (integer) choice below.
menu()
choice=int(input('Enter your choice:'))
print()
print('-------------------------------------------------')
if choice==1:
    # ----- [1] Employee Management System -----
    def sqlmenu():
        # Print the EMS operations menu.
        print(
            '''
            [1]. ADD RECORD
            [2]. DELETE RECORD
            [3]. MODIFY RECORD
            [4]. DISPLAY ALL RECORDS
            [5]. SEARCH RECORDS
            [6]. SEND A MAIL TO EMPLOYEE
            [7]. EXIT THE PROGRAM''')
    print()  # formatting
    print()  # formatting
    # Cosmetic start-up sequence (the sleeps/progress bar are for show only)
    print("[!] Connecting to Database !")
    sleep(1.0)
    print("[!] Initializing Database !")
    sleep(1)
    print('[!] Loading database records !')
    progressbar()
    sleep(1)
    print('DATABASE LOADED SUCCESSULLY !')
    cprint(colored('******EMPLOYEE MANAGEMENT SYSTEM******','white','on_red'))
    banner=pyfiglet.figlet_format('EMS')
    print(banner)
    cprint(colored("options:",'blue','on_white'))
    print()
    ans='y'
    # EMS loop: keep serving menu choices until the user declines.
    while ans=='y' or ans=='Y':
        sqlmenu()
        # NOTE(review): this rebinds the name of the menu() function defined
        # above; harmless here only because menu() is never called again.
        menu=int(input("Type your option : "))
        if menu==1:
            addrec()
            print()  # gap between records and the y/n prompt
        elif menu==2:
            delrec()
            print()  # gap between records and the y/n prompt
        elif menu==3:
            modrec()
            print()  # gap between records and the y/n prompt
        elif menu==4:
            # Display every record in the PrettyTable.
            try:
                L=disprec()  # L holds a nested list of records
                for data in L:
                    x.add_row(data)  # raises if there are no records; takes a list
                print(x)
                nt.notify(title='Success',message='RECORDS FOUND',app_icon='gtick.ico',timeout=3)
                beep(sound='ping')
                x.clear_rows()
            except Exception as e:  # runtime error when there are no records
                cprint(colored('ERROR!!','white','on_red'))
                nt.notify(title='Database Empty',message='No records to display',app_icon='error.ico',timeout=3)
                beep(sound='error')
            print()  # gap between records and the y/n prompt
        elif menu==5:
            # Search for a single record and show it.
            try:
                L=searchrec()  # L holds a list, otherwise None
                x.add_row(L)
                print(x)
                nt.notify(title='Success',message='RECORD FOUND',app_icon='gtick.ico',timeout=3)
                beep(sound='ping')
                x.clear_rows()
            except Exception as e:
                cprint(colored('ERROR!!','white','on_red'))
                print()  # gap between records and the y/n prompt
                nt.notify(title='Failure',message='Error',app_icon='error.ico',timeout=3)
                beep(sound='error')
        elif menu==6:
            try:
                mailfun()
            except Exception as e:
                cprint(colored("MAIL NOT SENT , PLEASE CONNECT TO INTERNET!",'white','on_blue'))
                nt.notify(title='Failure',message='Please connect to Internet',app_icon='error.ico',timeout=3)
                beep(sound="error")  # notification sound
        elif menu==7:
            # Exit path: notify, pause long enough to read, then quit.
            cprint(colored('BYE USER !!','white','on_red'))
            nt.notify(title='Exit',message='BYE USER',app_icon='redbell.ico',timeout=3)
            beep(sound='success')
            sleep(4)
            exit()
        else:
            print('Invalid option !!')
            print()  # gap between records and the y/n prompt
            nt.notify(title='Invalid ',message='Invalid option !',app_icon='error.ico',timeout=3)
            beep(sound='error')
        ans=input("Want to continue type[y], else[n] :")
elif choice==2:
    # ----- [2] File reader -----
    freader()  # FILE READER
elif choice==3:
print()
cprint(colored('COVID__INFO__NOTIFIER__SYSTEM','white','on_red'))
print()
coronainfo()
print('''After typing y you can close this program and
notification will be sent to you every 15 minutes ''')
print()
ch3=input("If you want notification every 15 minute type[y],else[n]:")
if ch3=='y' or 'Y':
os.system('scheduler.bat')
cprint(colored('Notification Scheduled for every 15 minutes !','white','on_red'))
else:
cprint(colored('Bye User','white','on_red'))
else:
    # Any other value: report the invalid choice visually and audibly.
    cprint(colored('Error , Invalid option!','white','on_red'))
    nt.notify(title='Invalid ',message='Invalid option !',app_icon='error.ico',timeout=3)
    beep(sound='error')
|
# Drop the bike we cannot afford from the list, but keep its name around
# so we can still mention it afterwards.
bicycles = ['trek', 'cannondale', 'readline', 'specialized']
too_expensive = 'readline'
bicycles.remove(too_expensive)
print('\nA ' + too_expensive + ' is too expensive to me.')
|
#!/usr/bin/env python3
# Count k-mer occurrences across 16S-rRNA-sized sequences in a FASTA file.
#
# Usage: script.py <input.fa> <kmer_length>
# Writes <base>.<k>mer_freq   (per-kmer target count and coverage %)
# and    <base>.<k>mer_missed (targets missed by high-coverage k-mers).
import sys
import os
import re
filename_fa = sys.argv[1]
filename_base = re.sub(r'.fa$', '', os.path.basename(filename_fa))
# K-mer length from the command line
kmer_len = int(sys.argv[2])
kmer_tag = '%dmer' % kmer_len
# header -> list of sequence lines
seq_list = dict()
f_fa = open(filename_fa, 'r')
for line in f_fa:
    if line.startswith('>'):
        seq_h = line.strip().lstrip('>')
        seq_list[seq_h] = []
    else:
        # NOTE(review): assumes the file starts with a '>' header line,
        # otherwise seq_h is unbound here -- confirm input format.
        seq_list[seq_h].append(line.strip())
f_fa.close()
# Only consider sequences in the typical 16S rRNA length range
min_seqlen = 1000
max_seqlen = 2000
total_target_list = []
kmer_list = dict()  # kmer -> list of sequence ids containing it
for tmp_h in seq_list.keys():
    tmp_seq = ''.join(seq_list[tmp_h])
    tmp_seqlen = len(tmp_seq)
    if tmp_seqlen < min_seqlen or tmp_seqlen > max_seqlen:
        continue
    tmp_seqId = tmp_h.split('.')[0]
    total_target_list.append(tmp_seqId)
    # BUG FIX: a sequence of length L contains L - k + 1 k-mers; the
    # original range stopped one short and silently dropped the last k-mer.
    for tmp_i in range(0, tmp_seqlen - kmer_len + 1):
        tmp_kmer = tmp_seq[tmp_i:tmp_i + kmer_len]
        if tmp_kmer not in kmer_list:
            kmer_list[tmp_kmer] = []
        kmer_list[tmp_kmer].append(tmp_seqId)
f_freq = open('%s.%s_freq' % (filename_base, kmer_tag), 'w')
f_missed = open('%s.%s_missed' % (filename_base, kmer_tag), 'w')
f_freq.write("#Kmer\tKmer_count\tKmer_coverage\n")
f_missed.write("#Kmer\tMissedTargets\n")
total_target_set = set(total_target_list)
len_total_target_list = len(list(total_target_set))
sys.stderr.write('Total Target Seq: %d\n' % len_total_target_list)
# Emit k-mers ordered by how many distinct targets they hit
for tmp_kmer in sorted(kmer_list.keys(),
                       key=lambda kmer: len(set(kmer_list[kmer])),
                       reverse=True):
    tmp_target_set = set(kmer_list[tmp_kmer])
    tmp_target_list = sorted(list(tmp_target_set))
    len_tmp_target_list = len(tmp_target_list)
    tmp_kmer_pct = len_tmp_target_list*100.0/len_total_target_list
    f_freq.write("%s\t%d\t%.2f\n" %
                 (tmp_kmer, len_tmp_target_list, tmp_kmer_pct))
    # For near-universal k-mers (>95% coverage) record which targets miss them
    if tmp_kmer_pct > 95:
        tmp_missed_set = total_target_set - tmp_target_set
        tmp_missed_list = sorted(list(tmp_missed_set))
        f_missed.write("%s\t%s\n" % (tmp_kmer, ';'.join(tmp_missed_list)))
f_missed.close()
f_freq.close()
|
#! /usr/bin/env python3
if __name__ == "__main__":
    # Read space-separated integers and print "quotient remainder" of the
    # first two; divmod replaces the separate // and % computations.
    values = list(map(int, input().split(' ')))
    quotient, remainder = divmod(values[0], values[1])
    print(' '.join([str(quotient), str(remainder)]))
import time
import krpc
import math

# PID gains for the translation controller
kp = 0.02
kd = 0.5
ki = 0.00001
s = (0, 0, 0)  # per-axis integral of the position error
target_port_selected = False

# Connect to the kRPC server
conn = krpc.connect(name='Docking Test')
vessel = conn.space_center.active_vessel
vessel_port = vessel.parts.docking_ports[0]
target_vessel = conn.space_center.target_vessel
#target_port = target_vessel.parts.docking_ports[0]
#target_port = conn.space_center.target_vessel.parts.docking_ports[0]
target_port = conn.space_center.target_docking_port
#target_frame = target_port.reference_frame
target_frame = target_vessel.reference_frame
vessel_frame = vessel_port.reference_frame
time.sleep(1)

# Point the active vessel's port straight at the target port ...
vessel.auto_pilot.engage()
v_dir = target_port.direction(vessel.auto_pilot.reference_frame)
vessel.auto_pilot.target_direction = (-v_dir[0], -v_dir[1], -v_dir[2])
vessel.auto_pilot.target_roll = 0
vessel.auto_pilot.wait()
vessel.auto_pilot.sas = True
time.sleep(0.5)
# ... and the target's port back at the active vessel.
target_vessel.auto_pilot.engage()
t_dir = vessel_port.direction(target_vessel.auto_pilot.reference_frame)
target_vessel.auto_pilot.target_direction = (-t_dir[0], -t_dir[1], -t_dir[2])
target_vessel.auto_pilot.target_roll = 0
target_vessel.auto_pilot.wait()
target_vessel.auto_pilot.sas = True
time.sleep(0.5)

print('开始对接')
if vessel_port.shielded:
    vessel_port.shielded = False
while True:
    if not target_port_selected:
        # NOTE(review): this prints only when target_port is None, which
        # looks inverted -- confirm whether it should be `is not None`.
        if target_port is None:
            print('选定对接目标')
        target_port_selected = True
    p = target_port.position(vessel_frame)
    #current_position = current.position(target.reference_frame)
    v = target_port.part.velocity(vessel_frame)
    #velocity = current.part.velocity(target.reference_frame)
    # BUG FIX: `s = s + p` concatenated the tuples (s grew in length while
    # s[0..2] stayed 0, so the integral term never accumulated); add the
    # error element-wise instead.
    s = (s[0] + p[0], s[1] + p[1], s[2] + p[2])
    time.sleep(0.2)
    f0 = (2 * kp * p[0] + kd * v[0] + ki * s[0])  # x axis, left/right
    f1 = (kp * p[1] + kd * v[1] + ki * s[1])      # y axis, fore/aft
    f2 = (2 * kp * p[2] + kd * v[2] + ki * s[2])  # z axis, up/down
    # BUG FIX: `control.foward` was a typo -- kRPC's attribute is `forward`,
    # so the fore/aft input was silently never applied.
    vessel.control.forward = 1 * f1
    vessel.control.up = 3 * f2
    vessel.control.right = -3 * f0
# NOTE(review): unreachable -- the loop above never breaks.
print('对接完成')
|
# steps caches the Collatz step count for every value reached so far.
steps = {1: 0}

def count(n):
    """Return the number of Collatz steps from n down to 1 (memoized)."""
    pending = []
    m = n
    # Walk the chain until we reach a value whose count is already cached.
    while m not in steps:
        pending.append(m)
        m = m // 2 if m % 2 == 0 else 3 * m + 1
    total = steps[m]
    # Unwind, filling the cache for every value visited on the way down.
    while pending:
        total += 1
        steps[pending.pop()] = total
    return steps[n]

# Starting number below one million with the longest chain.
best_n, best_count = 0, 0
for n in range(2, 1000000):
    c = count(n)
    if c > best_count:
        best_n, best_count = n, c
print(best_n)
|
from sklearn.model_selection import train_test_split, StratifiedKFold
import pandas as pd
from pytorch_tabular import TabularModel
from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig
from pytorch_tabular.models import TabNetModelConfig, TabNetModel, NodeConfig
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split, StratifiedKFold
from pytorch_tabular.utils import get_class_weighted_cross_entropy
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_predict
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, roc_auc_score
import random
import numpy as np
import os
from pytorch_tabular.utils import get_class_weighted_cross_entropy
from pytorch_tabular import TabularModel
from pytorch_tabular.models import CategoryEmbeddingModelConfig
from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig
from pytorch_tabnet.tab_model import TabNetClassifier
import torch
from pytorch_tabnet.pretraining import TabNetPretrainer
from sklearn.preprocessing import LabelEncoder
def data_load():
    """Load the Kaggle train/test CSVs and prepare model features.

    - Buckets rare categorical values (< 50 train occurrences) into a shared
      "low_frequency" value, then label-encodes each categorical column.
    - Maps unseen test categories to "low_frequency" (or the train mode).
    - Clips test numericals into the train range and adds 25-bin
      quantile-discretised q_* copies of every numerical column.

    Returns (df_train, df_test, CAT_COLS, NUM_COLS); CAT_COLS also contains
    the newly created q_* columns.
    """
    train_path = "Data/train.csv"
    test_path = "Data/test.csv"
    df_train = pd.read_csv(train_path, index_col=0)
    df_test = pd.read_csv(test_path, index_col=0)
    # data["target"] = data.target.astype(int)
    CAT_COLS = list(df_train.filter(like="cat").columns)
    NUM_COLS = list(df_train.filter(like="cont").columns)
    LOW_FREQ_THRESH = 50
    encoders = {}
    # Categorical features need to be LabelEncoded
    for cat_col in CAT_COLS:
        label_enc = LabelEncoder()
        # Group low frequencies into one value
        value_counts = df_train[cat_col].value_counts()
        is_low_frequency = value_counts < LOW_FREQ_THRESH
        low_freq_values = value_counts.index[is_low_frequency]
        if len(low_freq_values) > 0:
            df_train.loc[df_train[cat_col].isin(low_freq_values), cat_col] = "low_frequency"
            # update test set as well
            df_test.loc[df_test[cat_col].isin(low_freq_values), cat_col] = "low_frequency"
        df_train[cat_col] = label_enc.fit_transform(df_train[cat_col])
        encoders[cat_col] = label_enc
    # Encode test set with the encoders fitted on train
    for cat_col in CAT_COLS:
        label_enc = encoders[cat_col]
        le_dict = dict(zip(label_enc.classes_, label_enc.transform(label_enc.classes_)))
        # Replace unknown values by the most common value
        # Changing this to another value might make more sense
        if le_dict.get("low_frequency") is not None:
            default_val = le_dict["low_frequency"]
        else:
            default_val = df_train[cat_col].mode().values[0]
        df_test[cat_col] = df_test[cat_col].apply(lambda x: le_dict.get(x, default_val))
    # Clip numerical features in test set to match training set
    for num_col in NUM_COLS:
        df_test[num_col] = np.clip(df_test[num_col], df_train[num_col].min(), df_train[num_col].max())
        # Taken from https://www.kaggle.com/siavrez/kerasembeddings
        # NOTE(review): train uses qcut with categorical labels while test
        # uses cut with labels=False (plain ints) -- confirm the dtypes
        # line up downstream.
        df_train[f'q_{num_col}'], bins_ = pd.qcut(df_train[num_col], 25, retbins=True, labels=[i for i in range(25)])
        df_test[f'q_{num_col}'] = pd.cut(df_test[num_col], bins=bins_, labels=False, include_lowest=True)
        CAT_COLS.append(f'q_{num_col}')
    return df_train, df_test, CAT_COLS, NUM_COLS
def print_metrics(y_true, y_pred, tag):
    """Print accuracy and F1 for flat label arrays, prefixed with *tag*."""
    # Accept pandas containers as well as plain arrays.
    if isinstance(y_true, (pd.DataFrame, pd.Series)):
        y_true = y_true.values
    if isinstance(y_pred, (pd.DataFrame, pd.Series)):
        y_pred = y_pred.values
    # Flatten any extra dimensions before scoring.
    if y_true.ndim > 1:
        y_true = y_true.ravel()
    if y_pred.ndim > 1:
        y_pred = y_pred.ravel()
    val_acc = accuracy_score(y_true, y_pred)
    val_f1 = f1_score(y_true, y_pred)
    print(f"{tag} Acc: {val_acc} | {tag} F1: {val_f1}")
def main():
    """TabNet pipeline: load data, optionally pre-train on the test
    features, run 10-fold stratified CV training and write the averaged
    test predictions to the submission files."""
    # Load the prepared train/test frames plus feature lists
    data, test_data, cat_col_names, num_col_names = data_load()
    cat_dims = data[cat_col_names].nunique().to_list()
    cat_idxs = [(cat_col_names+num_col_names).index(cat_col) for cat_col in cat_col_names]
    # Embedding size per categorical: half its cardinality, clipped to [1, 50].
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24 --
    # use the builtin int. (Also dropped a dead duplicate assignment that
    # used np.log and was immediately overwritten.)
    cat_emb_dims = np.ceil(np.clip((np.array(cat_dims)) / 2, a_min=1, a_max=50)).astype(int).tolist()
    FEATURES = cat_col_names+num_col_names
    df_sub = pd.read_csv('Data/sample_submission.csv')
    bsize = 2500 * 2
    # ##########Define the Configs############
    N_D = 16
    N_A = 16
    N_INDEP = 2
    N_SHARED = 2
    N_STEPS = 1  # 2
    MASK_TYPE = "sparsemax"
    GAMMA = 1.5
    BS = 512
    MAX_EPOCH = 21  # 20
    PRETRAIN = True
    X = data[FEATURES].values
    y = data["target"].values
    X_test = test_data[FEATURES].values
    if PRETRAIN:
        # Self-supervised pre-training on the (unlabelled) test features
        pretrain_params = dict(n_d=N_D, n_a=N_A, n_steps=N_STEPS,  # 0.2,
                               n_independent=N_INDEP, n_shared=N_SHARED,
                               cat_idxs=cat_idxs,
                               cat_dims=cat_dims,
                               cat_emb_dim=cat_emb_dims,
                               gamma=GAMMA,
                               lambda_sparse=0., optimizer_fn=torch.optim.Adam,
                               optimizer_params=dict(lr=2e-2),
                               mask_type=MASK_TYPE,
                               scheduler_params=dict(mode="min",
                                                     patience=3,
                                                     min_lr=1e-5,
                                                     factor=0.5, ),
                               scheduler_fn=torch.optim.lr_scheduler.ReduceLROnPlateau,
                               verbose=1,
                               )
        pretrainer = TabNetPretrainer(**pretrain_params)
        pretrainer.fit(X_train=X_test,
                       eval_set=[X],
                       max_epochs=MAX_EPOCH,
                       patience=25, batch_size=BS, virtual_batch_size=BS,  # 128,
                       num_workers=0, drop_last=True,
                       pretraining_ratio=0.5  # The bigger your pretraining_ratio the harder it is to reconstruct
                       )
    # 10-fold stratified CV training
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=777)
    BS = 2048
    MAX_EPOCH = 20
    data['oof_preds'] = np.nan
    for fold_nb, (train_index, valid_index) in enumerate(cv.split(X, y)):
        X_train, X_valid = X[train_index], X[valid_index]
        y_train, y_valid = y[train_index], y[valid_index]
        tabnet_params = dict(n_d=N_D,
                             n_a=N_A,
                             n_steps=N_STEPS, gamma=GAMMA,
                             n_independent=N_INDEP, n_shared=N_SHARED,
                             lambda_sparse=1e-5,
                             seed=0,
                             clip_value=2,
                             cat_idxs=cat_idxs,
                             cat_dims=cat_dims,
                             cat_emb_dim=cat_emb_dims,
                             mask_type=MASK_TYPE,
                             device_name='auto',
                             optimizer_fn=torch.optim.Adam,
                             optimizer_params=dict(lr=5e-2, weight_decay=1e-5),
                             scheduler_params=dict(max_lr=5e-2,
                                                   steps_per_epoch=int(X_train.shape[0] / BS),
                                                   epochs=MAX_EPOCH,
                                                   # final_div_factor=100,
                                                   is_batch_level=True),
                             scheduler_fn=torch.optim.lr_scheduler.OneCycleLR,
                             # scheduler_params=dict(mode='max',
                             #                       factor=0.5,
                             #                       patience=5,
                             #                       is_batch_level=False,),
                             # scheduler_fn=torch.optim.lr_scheduler.ReduceLROnPlateau,
                             verbose=1)
        # Defining TabNet model; warm-start from the pretrainer when enabled
        model = TabNetClassifier(**tabnet_params)
        model.fit(X_train=X_train, y_train=y_train,
                  from_unsupervised=pretrainer if PRETRAIN else None,
                  eval_set=[(X_train, y_train), (X_valid, y_valid)],
                  eval_name=["train", "valid"],
                  eval_metric=["auc"],
                  batch_size=BS,
                  virtual_batch_size=256,
                  max_epochs=MAX_EPOCH,
                  drop_last=True,
                  pin_memory=True,
                  patience=10,
                  )
        val_preds = model.predict_proba(X_valid)[:, -1]
        print('auc:', roc_auc_score(y_true=y_valid, y_score=val_preds))
        # Write out-of-fold predictions via positional indexing on the frame
        # itself (the original chained `data['oof_preds'].iloc[...] = ...`
        # relied on the column Series being a view and triggers
        # SettingWithCopy ambiguity).
        data.iloc[valid_index, data.columns.get_loc('oof_preds')] = val_preds
        test_preds = model.predict_proba(X_test)[:, -1]
        df_sub[f"fold_{fold_nb+1}"] = test_preds
    # Average the per-fold predictions and write both submission files
    df_sub["target"] = df_sub.filter(like="fold_").mean(axis=1).values
    df_sub.to_csv("Analysis/submission_5_tabnet.csv", index=False)
    df_sub = pd.read_csv("Analysis/submission_5_tabnet.csv")
    # df_sub.target = df_sub.target.map(lambda x: 0 if x<=0.5 else 1)
    df_sub.loc[:, ["id", "target"]].to_csv("Analysis/submission_5_2_tabnet.csv", index=False)
if __name__ == '__main__':
    # Script entry point
    main()
|
# Average two exam grades and classify the result.
n1 = float(input("Digite a primeira nota: "))
n2 = float(input("Digite a segunda nota: "))
media = (n1 + n2) / 2
if media < 5:
    print("Reprovado!")
elif 5 <= media < 7:
    print("Recuperação")
elif media >= 7:
    print("Aprovado!")
|
import os
import yaml
from aiohttp import web
import aiohttp_cors
from utils.mongodb import MongoDB
from routes.animal import Animal

# Load config.yaml
configFile = os.path.abspath(os.path.expanduser('config.yaml'))
with open(configFile, 'r') as f:
    # BUG FIX: yaml.load() without an explicit Loader is unsafe (it can
    # construct arbitrary Python objects) and raises a TypeError on
    # PyYAML >= 6.0; safe_load is the correct call for plain config data.
    config = yaml.safe_load(f)
HTTP_SERVER = config['http-server']
MONGODB = config['mongodb']

# Connect MongoDB and hand the collection to the route handlers
collections = MongoDB.connect(config=MONGODB)
Animal(animal=collections['animal'])

# Set up the application
app = web.Application()

# CORS: allow any origin, with credentials and all headers
cors = aiohttp_cors.setup(app, defaults={
    "*": aiohttp_cors.ResourceOptions(
        allow_credentials=True,
        expose_headers="*",
        allow_headers="*",
    )
})

# Animal endpoints
cors.add(app.router.add_get('/animal_list', Animal.get_animal_list))
cors.add(app.router.add_get('/animal_count', Animal.get_animal_count))

# Run server
if __name__ == '__main__':
    web.run_app(app, host=HTTP_SERVER['host'], port=HTTP_SERVER['port'])
|
from pprint import pprint

# NBA champions by season.
champions = [
    (2014, 'San Antonio Spurs'),
    (2015, 'Golden State Warriors'),
    (2016, 'The Cleveland Cavaliers'),
    (2017, 'Golden State Warriors'),
    (2018, 'Golden State Warriors'),
]

# year -> team: keys are unique, nothing is lost
pprint({year: team for year, team in champions})

# Example: nonunique keys -- repeated teams collapse to their latest year
pprint({team: year for year, team in champions})
|
# Print the largest of three space-separated numbers.
a, b, c = input(" ").split()
# BUG FIX: the original compared the raw input strings, so e.g. '9' > '10'
# lexicographically, and exact ties fell through to the wrong branch.
# Compare numerically instead, printing the winning token as entered.
print(max((a, b, c), key=float))
|
import requests, re, os
from bs4 import BeautifulSoup
def stock_price(symbol: str = "ABCDEFGHIJKLMNOPQRSTUV") -> str:
    """Scrape the current quote for *symbol* from Yahoo Finance India.

    Relies on the page's CSS class names, which Yahoo changes without
    notice -- the find() chain raises AttributeError if the markup moves.
    """
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"  # quote container class as of writing
    return soup.find("div", class_=class_).find("span").text
def getraw_stock(symbol: str = "ABCDEFGHIJKLMNOPQRSTUV") -> str:
    """Fetch the Yahoo Finance quote page for *symbol* and dump the
    prettified HTML into <symbol>_raw.raw, overwriting any previous dump.

    Returns the name of the file written.
    """
    raw_path = f"{symbol}_raw.raw"
    if os.path.exists(raw_path):
        # A previous dump exists -- remove it so the write starts fresh.
        os.remove(raw_path)
    else:
        print("")
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    page = BeautifulSoup(requests.get(url).text, "html.parser")
    with open(raw_path, "w") as raw_file:
        raw_file.write(page.prettify())
    return raw_path  # resulting filename
if __name__ == "__main__":
print ("\n\nDump raw BeatifulSoup into file...\n")
for symbol in "CRAYON.OL ORK.OL".split():
print (f"..We are getting raw stocks for you.. {symbol:<15}")
print (f".................Saved into raw log.. {getraw_stock(symbol)}\n")
|
#!/usr/bin/env python
#
# Poll the Bitfinex order book and print to console.
#
# Author : Scott Barr
# Date : 29 Mar 2014
#
import os, sys
from datetime import datetime
import bitfinex
# Symbol whose order book we poll
symbol = 'btcusd'
# Limit the number of bids/asks returned per poll
parameters = {'limit_asks': 5, 'limit_bids': 5}
# API client
client = bitfinex.Client()

# Poll forever, redrawing the console on every iteration.
while True:
    # Latest ticker and order book snapshots
    ticker = client.ticker(symbol)
    orders = client.order_book(symbol, parameters)
    # Clear the terminal and redraw
    os.system('clear')
    print("# Bitfinex (Last Update : %s)" % (datetime.now()))
    print("## Last Ticker")
    print(ticker)
    for side, entries in orders.items():
        print("")
        print("%s %s" % ("## ", side))
        for entry in entries:
            print(entry)
|
#!/usr/bin/env python
'''
classify_video.py will classify a video using:
(1) singleFrame RGB model
(2) singleFrame flow model
(3) 0.5/0.5 singleFrame RGB/singleFrame flow fusion
(4) 0.33/0.67 singleFrame RGB/singleFrame flow fusion
(5) LRCN RGB model
(6) LRCN flow model
(7) 0.5/0.5 LRCN RGB/LRCN flow model
(8) 0.33/0.67 LRCN RGB/LRCN flow model
Before using, change RGB_video_path and flow_video_path.
Use: classify_video.py video, where video is the video you wish to classify.
If no video is specified, the video "v_Archery_g01_c01" will be classified.
'''
import numpy as np
import glob
caffe_root = '../../'
import sys
sys.path.insert(0,caffe_root + 'python')
import caffe
caffe.set_mode_gpu()  # run every net on the GPU
caffe.set_device(0)   # ... using device 0
import pickle
import os
import time
# Toggle for the debug prints sprinkled through the classifiers below
verbose = True
#Initialize transformers
def initialize_transformer(image_mean, is_flow):
    """Build a caffe data transformer for 227x227 inputs.

    Fills the mean blob channel by channel from image_mean, scales raw
    values to [0, 255], swaps RGB->BGR, moves channels first, and flags
    flow data for caffe's flow-specific handling.
    """
    shape = (10*16, 3, 227, 227)
    transformer = caffe.io.Transformer({'data': shape})
    channel_mean = np.zeros((3, 227, 227))
    for channel, mean_value in enumerate(image_mean):
        channel_mean[channel, ...] = mean_value
    transformer.set_mean('data', channel_mean)
    transformer.set_raw_scale('data', 255)
    transformer.set_channel_swap('data', (2, 1, 0))
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_is_flow('data', is_flow)
    return transformer
#classify video with LRCN model
def LRCN_classify_video(frames, net, transformer, is_flow):
if verbose:
print "[info] len(frames)={}".format(len(frames))
clip_length = 16
offset = 8
input_images = []
for im in frames:
input_im = caffe.io.load_image(im)
if (input_im.shape[0] < 240):
input_im = caffe.io.resize_image(input_im, (240,320))
input_images.append(input_im)
vid_length = len(input_images)
input_data = []
for i in range(0,vid_length,offset):
if (i + clip_length) < vid_length:
if verbose:
print "[info] input_data += input_images[{}:{}]".format(i,i+clip_length)
input_data.extend(input_images[i:i+clip_length])
else: #video may not be divisible by clip_length
input_data.extend(input_images[-clip_length:])
output_predictions = np.zeros((len(input_data),101))
if verbose:
print "[info] output_predictions.shape={}".format(output_predictions.shape)
print "[info] run forward with range(0,len(input_data)={}, clip_length={})".format(
len(input_data), clip_length)
for i in range(0,len(input_data),clip_length):
clip_input = input_data[i:i+clip_length]
clip_input = caffe.io.oversample(clip_input,[227,227])
clip_clip_markers = np.ones((clip_input.shape[0],1,1,1))
clip_clip_markers[0:10,:,:,:] = 0
# if is_flow: #need to negate the values when mirroring
# clip_input[5:,:,:,0] = 1 - clip_input[5:,:,:,0]
caffe_in = np.zeros(np.array(clip_input.shape)[[0,3,1,2]], dtype=np.float32)
for ix, inputs in enumerate(clip_input):
caffe_in[ix] = transformer.preprocess('data',inputs)
print "[info] i={}, ix={}, caffe_in[ix].shape={}".format(i, ix, caffe_in[ix].shape)
if verbose:
print "[info] caffe_in.shape={}".format(caffe_in.shape)
out = net.forward_all(data=caffe_in, clip_markers=np.array(clip_clip_markers))
output_predictions[i:i+clip_length] = np.mean(out['probs'],1)
return np.mean(output_predictions,0).argmax(), output_predictions
#classify video with singleFrame model
def singleFrame_classify_video(frames, net, transformer, is_flow):
    # Classify a frame list with the single-frame net, 16 frames per batch;
    # returns (predicted_class_index, per-frame prediction matrix).
    batch_size = 16
    input_images = []
    for im in frames:
        input_im = caffe.io.load_image(im)
        if (input_im.shape[0] < 240):
            # Upscale small frames to the expected 240x320
            input_im = caffe.io.resize_image(input_im, (240,320))
        input_images.append(input_im)
    vid_length = len(input_images)
    output_predictions = np.zeros((len(input_images),101))
    for i in range(0,len(input_images), batch_size):
        clip_input = input_images[i:min(i+batch_size, len(input_images))]
        # oversample produces 10 crops/mirrors per frame
        clip_input = caffe.io.oversample(clip_input,[227,227])
        clip_clip_markers = np.ones((clip_input.shape[0],1,1,1))
        clip_clip_markers[0:10,:,:,:] = 0
        if is_flow: #need to negate the values when mirroring
            clip_input[5:,:,:,0] = 1 - clip_input[5:,:,:,0]
        # NHWC -> NCHW buffer for caffe
        caffe_in = np.zeros(np.array(clip_input.shape)[[0,3,1,2]], dtype=np.float32)
        for ix, inputs in enumerate(clip_input):
            caffe_in[ix] = transformer.preprocess('data',inputs)
        net.blobs['data'].reshape(caffe_in.shape[0], caffe_in.shape[1], caffe_in.shape[2], caffe_in.shape[3])
        out = net.forward_all(data=caffe_in)
        # Average over the 10 crops per frame.
        # NOTE: relies on Python 2 integer division in caffe_in.shape[0]/10.
        output_predictions[i:i+batch_size] = np.mean(out['probs'].reshape(10,caffe_in.shape[0]/10,101),0)
    return np.mean(output_predictions,0).argmax(), output_predictions
def compute_fusion(RGB_pred, flow_pred, p):
    # Weighted late fusion: average each stream's per-frame predictions,
    # mix them p : (1 - p), and return the winning class index.
    rgb_avg = np.mean(RGB_pred, 0)
    flow_avg = np.mean(flow_pred, 0)
    return np.argmax(p * rgb_avg + (1 - p) * flow_avg)
def main():
#RGB_video_path = 'frames/'
#flow_video_path = 'flow_images/'
RGB_video_path = '/media/6TB/Videos/UCF-101'
flow_video_path = '/media/6TB/Videos/ucf101_flow_img_tvl1_gpu'
if len(sys.argv) > 1:
video = sys.argv[1]
else:
video = 'Archery/v_Archery_g02_c04'
ucf_mean_RGB = np.zeros((3,1,1))
ucf_mean_flow = np.zeros((3,1,1))
ucf_mean_flow[:,:,:] = 128
ucf_mean_RGB[0,:,:] = 103.939
ucf_mean_RGB[1,:,:] = 116.779
ucf_mean_RGB[2,:,:] = 128.68
transformer_RGB = initialize_transformer(ucf_mean_RGB, False)
transformer_flow = initialize_transformer(ucf_mean_flow,True)
# Extract list of frames in video
RGB_frames = glob.glob('{}/*.jpg'.format(os.path.join(RGB_video_path, video)))
flow_frames = glob.glob('{}/*.jpg'.format(os.path.join(flow_video_path, video)))
if verbose:
print "[debug] RGB_frames={}".format(RGB_frames)
print "[debug] flow_frames={}".format(flow_frames)
if not RGB_frames:
print "[fatal] no RGB images found"
sys.exit(-1)
if not flow_frames:
print "[fatal] no flow images found"
sys.exit(-1)
#Models and weights
singleFrame_model = 'deploy_singleFrame.prototxt'
lstm_model = 'deploy_lstm.prototxt'
RGB_singleFrame = 'single_frame_all_layers_hyb_RGB_iter_5000.caffemodel'
flow_singleFrame = 'single_frame_all_layers_hyb_flow_iter_50000.caffemodel'
RGB_lstm = 'RGB_lstm_model_iter_30000.caffemodel'
flow_lstm = 'flow_lstm_model_iter_50000.caffemodel'
#RGB_singleFrame_net = caffe.Net(singleFrame_model, RGB_singleFrame, caffe.TEST)
#start_time = time.time()
#class_RGB_singleFrame, predictions_RGB_singleFrame = \
# singleFrame_classify_video(
# RGB_frames,
# RGB_singleFrame_net,
# transformer_RGB,
# is_flow=False)
#RGB_singleFrame_processing_time = (time.time() - start_time)
#del RGB_singleFrame_net
#flow_singleFrame_net = caffe.Net(singleFrame_model, flow_singleFrame, caffe.TEST)
#start_time = time.time()
#class_flow_singleFrame, predictions_flow_singleFrame = \
# singleFrame_classify_video(
# flow_frames,
# flow_singleFrame_net,
# transformer_flow,
# is_flow=True)
#flow_singleFrame_processing_time = (time.time() - start_time)
#del flow_singleFrame_net
RGB_lstm_net = caffe.Net(lstm_model, RGB_lstm, caffe.TEST)
start_time = time.time()
class_RGB_LRCN, predictions_RGB_LRCN = \
LRCN_classify_video(
RGB_frames,
RGB_lstm_net,
transformer_RGB,
is_flow=False)
RGB_lstm_processing_time = (time.time() - start_time)
del RGB_lstm_net
#flow_lstm_net = caffe.Net(lstm_model, flow_lstm, caffe.TEST)
#start_time = time.time()
#class_flow_LRCN, predictions_flow_LRCN = \
# LRCN_classify_video(
# flow_frames,
# flow_lstm_net,
# transformer_flow,
# is_flow=True)
#flow_lstm_processing_time = (time.time() - start_time)
#del flow_lstm_net
#Load activity label hash
action_hash = pickle.load(open('action_hash_rev.p','rb'))
print "RGB single frame model classified video as: {} (took {}s).\n".format(action_hash[class_RGB_singleFrame], RGB_singleFrame_processing_time)
print "Flow single frame model classified video as: {} (took {}s).\n".format(action_hash[class_flow_singleFrame], flow_singleFrame_processing_time)
print "RGB LRCN model classified video as: {} (took {}s).\n".format(action_hash[class_RGB_LRCN], RGB_lstm_processing_time)
print "Flow LRCN frame model classified video as: {} (took {}s).\n".format(action_hash[class_flow_LRCN], flow_lstm_processing_time)
print "1:1 single frame fusion model classified video as: %s.\n" %(action_hash[compute_fusion(predictions_RGB_singleFrame, predictions_flow_singleFrame, 0.5)])
print "1:2 single frame fusion model classified video as: %s.\n" %(action_hash[compute_fusion(predictions_RGB_singleFrame, predictions_flow_singleFrame, 0.33)])
print "1:1 LRCN fusion model classified video as: %s.\n" %(action_hash[compute_fusion(predictions_RGB_LRCN, predictions_flow_LRCN, 0.5)])
print "1:2 LRCN fusion model classified video as: %s.\n" %(action_hash[compute_fusion(predictions_RGB_LRCN, predictions_flow_LRCN, 0.33)])
if __name__ == '__main__':
main()
|
# Collect, as strings, every number in [2000, 3200] divisible by 7 but not by 5.
l = [str(n) for n in range(2000, 3201) if n % 7 == 0 and n % 5 != 0]
print(l)
'''
Takeaway: how to accumulate loop results into a list (here with a comprehension).
'''
import cx_Oracle
# Ad-hoc crawl progress report: connects to an Oracle instance and prints
# collection totals (in units of 10,000 rows) plus duplicate-id counts.
# NOTE(review): credentials are hard-coded in the DSN — move to config/env.
cx = cx_Oracle.connect('jiayuan/jiayuan@192.168.1.38/orcl')
sql = cx.cursor()
print()
print()
# Total collected ids across the four get_id_* shards (divided by 10k).
rst1 = sql.execute('select sum(cnt)/10000 from (select count(*) as cnt from get_id_1 union all select count(*) from get_id_2 union all select count(*) from get_id_3 union all select count(*) from get_id_4)')
print('已经收集 : ' + str(rst1.fetchone()[0]) + ' w')
# Rows that actually have profile data, across the four jy_data_* shards.
rst2 = sql.execute('select sum(cnt)/10000 from (select count(*) as cnt from jy_data_1 union all select count(*) from jy_data_2 union all select count(*) from jy_data_3 union all select count(*) from jy_data_4)')
print('有记录的 : ' + str(rst2.fetchone()[0]) + ' w')
# Per-shard count of ids that appear more than once (duplicates).
rst3 = sql.execute("select 'get_id_1', count(id) from (select id from get_id_1 group by id having count(*)>1) union all select 'get_id_2', count(id) from (select id from get_id_2 group by id having count(*)>1) union all select 'get_id_3', count(id) from (select id from get_id_3 group by id having count(*)>1) union all select 'get_id_4', count(id) from (select id from get_id_4 group by id having count(*)>1)")
print()
print('重复情况')
for line in rst3:
    name = line[0]
    count = line[1]
    print(str(name) + ' : ' + str(count))
print()
print()
sql.close()
cx.close()
import socket
import sys

# Battleship guess client: sends one (x, y) shot to the server and records the
# result on the locally stored view of the opponent's board.
ip = sys.argv[1]
port = int(sys.argv[2])
x = int(sys.argv[3])
y = int(sys.argv[4])

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
print("Connected to server!")

# Read the stored board in (one 10-char row per line).
file = 'opponent_board.txt'
with open(file) as f:
    board = [row.rstrip('\n') for row in f.readlines()]

# NOTE(review): the protocol sends the raw character whose code point is the
# coordinate (chr(x)); presumably the server decodes with ord() — confirm.
xs = chr(x)
ys = chr(y)
s.send(xs.encode('utf-8'))
s.send(ys.encode('utf-8'))

# Get the shot result from the server.
data = s.recv(200)
output = data.decode('utf-8')
row = list(board[y])
if output == 'You won!':
    print(output)
elif output == 'Miss':
    row[x] = 'X'
    print(output)
elif output == 'Hit':
    row[x] = 'O'
    print(output)
else:
    print(output)
# BUG FIX: the modified row was never written back, so hits/misses were lost
# when the board was saved below.
board[y] = ''.join(row)

# Write the updated board back to storage.
with open(file, "w") as g:
    for line in board:
        g.write(line + '\n')

# Print the current board with row indices.
# BUG FIX: the original called `f.close` without parentheses, leaking the handle.
with open(file, "r") as f:
    for i in range(10):
        print(i, end=" ")
        print(f.readline(), end="")
print('')
print(' 0123456789')
s.close()
"""
[1,2,3,4,5,6]
1. Reverse the list [6,5,4,3,2,1]
2. Print Last element from list
3. Print element third and forth element from list
"""
list = [1,2,3,4,5,6]
# Reverse the list
print(list[::-1])
# Print Last element from list
print(list[-1])
# Print element third and forth element from list
print(list[2:4])
|
import logging
import re
import pytest
import requests
from helpers.cluster import ClickHouseCluster
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance(
"node",
main_configs=[
"configs/config.d/storage_conf.xml",
"configs/log.xml",
"configs/query_log.xml",
"configs/ssl_conf.xml",
],
with_minio=True,
)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
yield cluster
finally:
cluster.shutdown()
def get_s3_events(instance):
    """Return {event_name: value} for all S3-related counters in system.events."""
    raw = instance.query(
        "SELECT event, value FROM system.events WHERE event LIKE '%S3%'"
    )
    counters = dict()
    for row in raw.split("\n"):
        fields = row.split("\t")
        # Skip the trailing empty line and any malformed rows.
        if len(fields) == 2:
            counters[fields[0]] = int(fields[1])
    return counters
def get_minio_stat(cluster):
    """Scrape MinIO's Prometheus endpoint and aggregate S3 request/traffic counters.

    Returns a dict with get_requests, set_requests, errors, rx_bytes, tx_bytes.
    NOTE: MinIO reports rx/tx from its own point of view; the values are swapped
    here so rx/tx are from the client's (ClickHouse's) perspective.
    """
    result = {
        "get_requests": 0,
        "set_requests": 0,
        "errors": 0,
        "rx_bytes": 0,
        "tx_bytes": 0,
    }
    stat = requests.get(
        url="http://{}:{}/minio/prometheus/metrics".format(
            cluster.minio_ip, cluster.minio_port
        )
    ).text.split("\n")
    for line in stat:
        x = re.search(r"s3_requests_total(\{.*\})?\s(\d+)(\s.*)?", line)
        if x is not None:
            # FIX: the label group is optional; guard against x.group(1) being
            # None (re.search on None raises TypeError).
            labels = x.group(1) or ""
            y = re.search('.*api="(get|list|head|select).*', labels)
            if y is not None:
                result["get_requests"] += int(x.group(2))
            else:
                result["set_requests"] += int(x.group(2))
        x = re.search(r"s3_errors_total(\{.*\})?\s(\d+)(\s.*)?", line)
        if x is not None:
            result["errors"] += int(x.group(2))
        x = re.search(r"s3_rx_bytes_total(\{.*\})?\s([\d\.e\+\-]+)(\s.*)?", line)
        if x is not None:
            result["tx_bytes"] += float(x.group(2))
        x = re.search(r"s3_tx_bytes_total(\{.*\})?\s([\d\.e\+\-]+)(\s.*)?", line)
        if x is not None:
            result["rx_bytes"] += float(x.group(2))
    return result
def get_query_stat(instance, hint):
    """Sum S3-related ProfileEvents over all query_log entries matching *hint*."""
    result = dict()
    instance.query("SYSTEM FLUSH LOGS")
    rows = instance.query(
        """
        SELECT ProfileEvents.keys, ProfileEvents.values
        FROM system.query_log
        ARRAY JOIN ProfileEvents
        WHERE type != 1 AND query LIKE '%{}%'
        """.format(
            hint.replace("'", "\\'")
        )
    ).split("\n")
    for row in rows:
        fields = row.split("\t")
        if len(fields) != 2:
            continue
        key, value = fields
        if "S3" in key:
            # Accumulate: the same event may appear for several log entries.
            result[key] = result.get(key, 0) + int(value)
    return result
def get_minio_size(cluster):
    """Return the total size in bytes of every object under data/ in the test bucket."""
    objects = cluster.minio_client.list_objects(
        cluster.minio_bucket, prefix="data/", recursive=True
    )
    return sum(obj.size for obj in objects)
def test_profile_events(cluster):
    """End-to-end check that ClickHouse S3 profile events match MinIO's own counters.

    For CREATE TABLE / INSERT / SELECT on an S3-backed MergeTree table, the deltas
    of system.events S3 counters must equal the request deltas observed by MinIO,
    and the per-query ProfileEvents must equal the global deltas.
    """
    instance = cluster.instances["node"]
    instance.query("SYSTEM FLUSH LOGS")
    instance.query("DROP TABLE IF EXISTS test_s3.test_s3")
    instance.query("DROP DATABASE IF EXISTS test_s3")
    instance.query("CREATE DATABASE IF NOT EXISTS test_s3")
    # Baseline snapshots before the table is created.
    metrics0 = get_s3_events(instance)
    minio0 = get_minio_stat(cluster)
    query1 = "CREATE TABLE test_s3.test_s3 (key UInt32, value UInt32) ENGINE=MergeTree PRIMARY KEY key ORDER BY key SETTINGS storage_policy = 's3'"
    instance.query(query1)
    size1 = get_minio_size(cluster)
    metrics1 = get_s3_events(instance)
    minio1 = get_minio_stat(cluster)
    assert (
        metrics1["S3ReadRequestsCount"] - metrics0["S3ReadRequestsCount"]
        == minio1["get_requests"] - minio0["get_requests"] - 1
    )  # 1 from get_minio_size
    assert (
        metrics1["S3WriteRequestsCount"] - metrics0["S3WriteRequestsCount"]
        == minio1["set_requests"] - minio0["set_requests"]
    )
    stat1 = get_query_stat(instance, query1)
    # Per-query ProfileEvents must account for the whole global delta.
    for metric in stat1:
        assert stat1[metric] == metrics1.get(metric, 0) - metrics0.get(metric, 0)
    assert (
        metrics1["WriteBufferFromS3Bytes"] - metrics0["WriteBufferFromS3Bytes"] == size1
    )
    query2 = "INSERT INTO test_s3.test_s3 VALUES"
    instance.query(query2 + " (1,1)")
    size2 = get_minio_size(cluster)
    metrics2 = get_s3_events(instance)
    minio2 = get_minio_stat(cluster)
    assert (
        metrics2["S3ReadRequestsCount"] - metrics1["S3ReadRequestsCount"]
        == minio2["get_requests"] - minio1["get_requests"] - 1
    )  # 1 from get_minio_size
    assert (
        metrics2["S3WriteRequestsCount"] - metrics1["S3WriteRequestsCount"]
        == minio2["set_requests"] - minio1["set_requests"]
    )
    stat2 = get_query_stat(instance, query2)
    for metric in stat2:
        assert stat2[metric] == metrics2.get(metric, 0) - metrics1.get(metric, 0)
    # The INSERT must have written exactly the observed growth of the bucket.
    assert (
        metrics2["WriteBufferFromS3Bytes"] - metrics1["WriteBufferFromS3Bytes"]
        == size2 - size1
    )
    query3 = "SELECT * from test_s3.test_s3"
    assert instance.query(query3) == "1\t1\n"
    metrics3 = get_s3_events(instance)
    minio3 = get_minio_stat(cluster)
    assert (
        metrics3["S3ReadRequestsCount"] - metrics2["S3ReadRequestsCount"]
        == minio3["get_requests"] - minio2["get_requests"]
    )
    assert (
        metrics3["S3WriteRequestsCount"] - metrics2["S3WriteRequestsCount"]
        == minio3["set_requests"] - minio2["set_requests"]
    )
    stat3 = get_query_stat(instance, query3)
    # With async reads profile events are not updated fully because reads are done in a separate thread.
    # for metric in stat3:
    #    print(metric)
    #    assert stat3[metric] == metrics3.get(metric, 0) - metrics2.get(metric, 0)
|
# Iterating a dict three ways: keys, values, and (key, value) pairs.
we = {'金': 'Fri', '土': 'Sat', '日': 'Sun'}

for key in we:
    print(key)

for val in we.values():
    print(val)

for pair in we.items():
    print(pair)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-04 12:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the unused `recommend` field from PostModel."""
    dependencies = [
        ('social', '0019_auto_20170804_1712'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='postmodel',
            name='recommend',
        ),
    ]
|
from numpy import*
# Reads a Python list literal from stdin (e.g. "[1, 2, 3]") and prints basic stats.
# SECURITY NOTE(review): eval() on raw stdin executes arbitrary code — replace
# with ast.literal_eval or explicit parsing before using on untrusted input.
vetor = array(eval(input()))
med = sum(vetor)/size(vetor)
print(size(vetor))
print(vetor[0])
print(vetor[-1])
print(max(vetor))
print(min(vetor))
print(sum(vetor))
print(round(med,2))
import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
@app.route("/")
@app.route("/get_jobs")
def get_jobs():
jobs = list(mongo.db.jobs.find())
return render_template("jobs.html", jobs=jobs)
@app.route("/search_bar", methods=["GET", "POST"])
def search_bar():
querying = request.form.get("querying")
jobs = list(mongo.db.jobs.find({"$text": {"$search": querying}}))
return render_template("jobs.html", jobs=jobs)
@app.route("/userprofile/<username>", methods=["GET", "POST"])
def userprofile(username):
if session["user"]:
jobs = mongo.db.jobs.find({"created_by": username})
username = mongo.db.user.find_one({
"username": session["user"]})["username"]
return render_template(
"userprofile.html", username=username, jobs=jobs)
return render_template("userprofile.html", username=username, jobs=jobs)
@app.route("/registration", methods=["GET", "POST"])
def registration():
if request.method == "POST":
# does user exists
existing_user = mongo.db.user.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
flash("Username already exists")
return redirect(url_for("registration"))
registration = {
"username": request.form.get("username").lower(),
"password": generate_password_hash(request.form.get("password"))
}
mongo.db.user.insert_one(registration)
session["user"] = request.form.get("username").lower()
flash("Your Registration Was Successful! Go to the homepage")
return redirect(url_for("userprofile", username=session["user"]))
return render_template("registration.html")
# LOGIN FUNCTION
@app.route("/login", methods=["GET", "POST"])
def login():
    """Authenticate an existing user and start a session."""
    if request.method == "POST":
        user_record = mongo.db.user.find_one(
            {"username": request.form.get("username").lower()}
        )
        # Same message for unknown user and bad password, so the response does
        # not leak which usernames exist.
        if not user_record or not check_password_hash(
                user_record["password"], request.form.get("password")):
            flash("That's an incorrect Username and/or Password")
            return redirect(url_for("login"))
        session["user"] = request.form.get("username").lower()
        flash("Hello there, {}".format(request.form.get("username")))
        return redirect(url_for(
            "userprofile", username=session["user"]))
    return render_template("login.html")
@app.route("/loggingout")
def loggingout():
flash("You are logged out")
session.pop("user")
return redirect(url_for("login"))
@app.route("/credentials.html", methods=["GET", "POST"])
def credentials():
if request.method == "POST":
credentials = {
"image_url": request.form.get("image_url"),
"first_name": request.form.get("first_name"),
"last_name": request.form.get("last_name"),
"profession": request.form.get("profession"),
"available_now": request.form.get("available_now"),
"available_date": request.form.get("available_date"),
"telephone": request.form.get("telephone"),
"skills": request.form.get("skills"),
"locations": request.form.get("locations"),
"created_by": session["user"]
}
mongo.db.jobs.insert_one(credentials)
flash("Your Credentials Have Been Successfully Added")
return redirect(url_for("get_jobs"))
job = mongo.db.jobs.find().sort("first", 1)
professions = mongo.db.professions.find().sort("profession_type", 1)
return render_template(
"credentials.html", job=job, professions=professions)
@app.route("/edit_jobs/<job_id>", methods=["GET", "POST"])
def edit_jobs(job_id):
if request.method == "POST":
submitting = {
"image_url": request.form.get("image_url"),
"first_name": request.form.get("first_name"),
"last_name": request.form.get("last_name"),
"profession": request.form.get("profession"),
"available_now": request.form.get("available_now"),
"available_date": request.form.get("available_date"),
"telephone": request.form.get("telephone"),
"skills": request.form.get("skills"),
"locations": request.form.get("locations"),
"created_by": session["user"]
}
mongo.db.jobs.update({"_id": ObjectId(job_id)}, submitting)
flash("Your Credentials Have Been Successfully Updated")
job = mongo.db.jobs.find_one({"_id": ObjectId(job_id)})
professions = mongo.db.professions.find().sort("profession_type", 1)
return render_template("edit_jobs.html", job=job, professions=professions)
@app.route("/delete_job/<job_id>")
def delete_job(job_id):
mongo.db.jobs.remove({"_id": ObjectId(job_id)})
flash("Profile Successfully Deleted")
return redirect(url_for("get_jobs"))
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page with the proper HTTP status code."""
    # FIX: without the explicit status the handler responded 200 OK.
    return render_template("404.html"), 404
if __name__ == "__main__":
app.run(host=os.environ.get("IP"),
port=int(os.environ.get("PORT")),
debug=False) |
from oop.advanced_oop.class_method import Spam
class Sub(Spam):
    """Subclass demonstrating how to extend an inherited classmethod."""
    num_instances = 0

    @classmethod
    def print_num_instances(cls):
        # Add extra output, then delegate the actual count display to Spam.
        print('Extra Stuff...', cls)
        Spam.print_num_instances()
class Other(Spam):
    # Inherits Spam's counter and classmethod unchanged.
    pass
# Create one instance of each class, then call the classmethod through each;
# presumably Spam.print_num_instances reports a shared/overridden counter —
# confirm in oop.advanced_oop.class_method.
x = Sub()
y = Spam()
x.print_num_instances()
y.print_num_instances()
z = Other()
z.print_num_instances()
|
import tensorflow as tf
# TF1 / Python 2 demo: multiply a 2x3 by a 3x2 constant matrix and log which
# device each op is placed on (log_device_placement=True).
a=tf.constant([1,2,3,4,5,6], shape=[2,3], name="a")
b=tf.constant([1,2,3,4,5,6], shape=[3,2], name="b")
c=tf.matmul(a,b)
sess=tf.Session(config=tf.ConfigProto(log_device_placement=True))
print sess.run(c)
#print sess.run(a)
#print sess.run(b)
|
import time
from api.base.base_api import BaseApi
from api.base.dto.api_output import ApiOutput
"""
所有 API 的统一入口
"""
class ApiExecutor:
    """Single entry point that resolves, runs, and times all API handlers."""
    @staticmethod
    def execute(request) -> ApiOutput:
        """Resolve the API matching *request*, run it, and return its ApiOutput."""
        start_time = time.time()
        # Build the API input-parameter object
        api_params = ApiExecutor.build_api_params(request)
        # Create the API instance
        api = ApiExecutor.create_api_instance(request)
        if api is None:
            return ApiOutput.fail(-1, '未找到 api:' + request.path)
        output: ApiOutput = None
        try:
            # Run the API
            output = api.run(api_params)
        except Exception as e:
            # Convert any handler failure into a failed ApiOutput instead of propagating.
            output = ApiOutput.fail(-1, str(e))
        # Record elapsed time in milliseconds
        output.spend = int((time.time() - start_time) * 1000)
        return output
    @staticmethod
    def create_api_instance(request) -> BaseApi:
        """
        Create an API instance from the request path.

        Maps e.g. "/Foo/Bar" to module "api.foo.bar_api" and instantiates its
        `Api` class. Returns None if no such module exists.
        """
        # Lowercase the path so callers may use any casing
        api_path = request.path.lower()
        # Tolerate backslashes
        api_path = api_path.replace('\\', '/')
        # Strip leading/trailing slashes
        api_path = api_path.strip('/')
        # Assemble the API module path
        array = api_path.split('/')
        array.insert(0, 'api')
        api_file_name = '.'.join(array) + '_api'
        # Create the API instance
        api: BaseApi = None
        try:
            module_meta = __import__(api_file_name, globals(), locals(), ['Api', 'Input'])
            api_class_meta = getattr(module_meta, 'Api')
            api = api_class_meta()
        except ModuleNotFoundError as e:
            # Unknown path: leave api as None so execute() can report the failure.
            print(e.msg)
        return api
    @staticmethod
    def build_api_params(request) -> dict:
        """Merge GET query args and the JSON POST body into one parameter dict."""
        # GET parameters
        get_params = request.args
        # POST parameters (JSON body, may be None)
        post_params = request.get_json()
        # Merge; POST values override GET values on key collision.
        all_params = get_params.to_dict()
        if post_params is not None:
            all_params.update(post_params)
        return all_params
|
import matplotlib.pyplot as plt
class MultiplePlot:
    """Helper for laying out several images as subplots in one matplotlib figure."""
    def __init__(self, size, _dimensions):
        # size: figure size in inches; _dimensions: (rows, cols) of the subplot grid.
        self.ax = []
        self.images_num = 0
        self.fig = plt.figure(figsize=size)
        self.dimensions = _dimensions
    def add(self, image, title, _cmap='gray'):
        """Append *image* as the next subplot cell, titled *title*."""
        self.images_num += 1
        self.ax.append(self.fig.add_subplot(self.dimensions[0], self.dimensions[1], self.images_num))
        self.ax[-1].set_title(title)
        plt.imshow(image, cmap=_cmap)
    @staticmethod
    def wait_key():
        # Block until a key or mouse button is pressed in the figure window.
        plt.waitforbuttonpress()
    @staticmethod
    def show():
        plt.show()
def show(image, _cmap='gray'):
    """Module-level convenience: display a single image (grayscale by default) and block."""
    plt.imshow(image, cmap=_cmap)
    plt.show()
|
import matplotlib.pyplot as pl
import sys
import math
def allBinaryStrings(t):
    """Return every binary string of length t, in binary-counting order."""
    strings = [""]
    # Extend each prefix by "0" then "1", t times — same order as the
    # recursive formulation.
    for _ in range(t):
        strings = [prefix + bit for prefix in strings for bit in ("0", "1")]
    return strings
def countSwitches(s, originalChar="0"):
    """Count adjacent character transitions in s, with originalChar prepended."""
    if len(s) == 0:
        return 0
    # Pair each character with its predecessor (originalChar precedes s[0]).
    padded = originalChar + s
    return sum(1 for prev, cur in zip(padded, s) if prev != cur)
def allBinaryStringsWithAtMostPSwitches(t, p, originalChar="0"):
    """Length-t binary strings with at most p transitions (counting from originalChar)."""
    return [s for s in allBinaryStrings(t)
            if countSwitches(s, originalChar) <= p]
def allBinaryStringsWithAtMostPBits(t, p):
    """Length-t binary strings containing at most p ones."""
    # s.count("1") equals sum(int(x) for x in s) for binary strings.
    return [s for s in allBinaryStrings(t) if s.count("1") <= p]
# Compare (as integers on a number line) the strings limited by switch count
# versus those limited by set-bit count. Python 2 file (print statements).
switches = allBinaryStringsWithAtMostPSwitches(7, 3)
bits = allBinaryStringsWithAtMostPBits(7, 3)
pl.plot([int(x, 2) for x in switches], [0.4]*len(switches), "bo")
pl.plot([int(x, 2) for x in bits], [0.6]*len(bits), "ro")
pl.show()
print allBinaryStringsWithAtMostPSwitches(5, 2)
print allBinaryStringsWithAtMostPBits(5, 2)
|
# Generated by Django 2.0.4 on 2018-04-08 13:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates Codelab and CodelabDetail tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Codelab',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.CharField(max_length=50)),
                ('user_name', models.CharField(max_length=50)),
                ('user_email', models.EmailField(max_length=50)),
                ('title', models.CharField(max_length=100)),
                ('image', models.ImageField(help_text='대표이미지를 선택해주세요.', upload_to='codelab/%Y/%m/%d')),
                ('slug', models.SlugField(allow_unicode=True, max_length=200, unique=True)),
                ('desc', models.CharField(max_length=200)),
                ('favorite', models.IntegerField(default=0)),
                ('isview', models.BooleanField(default=True)),
                ('idate', models.DateTimeField(auto_now_add=True)),
                ('mdate', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'codelab',
                'ordering': ['-idate'],
            },
        ),
        migrations.CreateModel(
            name='CodelabDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('contents', models.TextField()),
                ('idate', models.DateTimeField(auto_now_add=True)),
                ('mdate', models.DateTimeField(auto_now=True)),
                # PROTECT: a Codelab with details cannot be deleted.
                ('codelab', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='codelab_codelab_set', to='codelab.Codelab')),
            ],
            options={
                'db_table': 'codelab_detail',
                'ordering': ('-codelab',),
            },
        ),
    ]
|
import os
import torch
import numpy as np
import pandas as pd
import torch.nn.functional as F
from dataset.data_functions import SS3_CLASSES, SS8_CLASSES, get_angle_degree, get_unnorm_asa, get_unnorm_asa_new
def classification(data_loader, model1, model2, model3, model4, model5, mean, std, device):
    """Run the 5-model ensemble over data_loader and collect per-protein SS3/SS8 predictions.

    Features are z-normalized with the provided mean/std; the five model logits
    are averaged, then split into 3-state (first 3 channels) and 8-state
    (remaining channels) softmax distributions.
    Returns (names, sequences, ss3_labels, ss8_labels, ss3_probs, ss8_probs).
    NOTE(review): models 2-4 take (feats, length) while 1 and 5 take feats only —
    presumably different architectures; confirm against their definitions.
    """
    model1 = model1.to(device)
    model2 = model2.to(device)
    model3 = model3.to(device)
    model4 = model4.to(device)
    model5 = model5.to(device)
    model1.eval()
    model2.eval()
    model3.eval()
    model4.eval()
    model5.eval()
    ss3_pred_list = []
    ss8_pred_list = []
    ss3_prob_list = []
    ss8_prob_list = []
    names_list = []
    seq_list = []
    for i, data in enumerate(data_loader):
        feats, length, name, seq = data
        length = torch.tensor(length).to(device)
        # Z-normalize the input features before inference.
        feats = (feats - mean) / std
        feats = feats.to(device, dtype=torch.float)
        # print(feats.shape)
        pred1 = model1(feats)
        pred2 = model2(feats, length)
        pred3 = model3(feats, length)
        pred4 = model4(feats, length)
        pred5 = model5(feats)[None, :, :]
        # Simple average ensemble of the five model outputs.
        pred = (pred1 + pred2 + pred3 + pred4 + pred5) / 5
        ss3_pred = F.softmax(pred[:, :, 0:3], dim=2)
        ss8_pred = F.softmax(pred[:, :, 3:], dim=2)
        name = list(name)
        # NOTE: the inner loop intentionally shadows the outer `i`; names are
        # indexed per-protein within the batch.
        for i, prot_len in enumerate(list(length)):
            prot_len_int = int(prot_len)
            # Truncate to the real (unpadded) protein length before argmax.
            ss3_pred_single = ss3_pred[i, :prot_len_int, :]
            ss3_pred_single = torch.squeeze(ss3_pred_single, dim=0).cpu().detach().numpy()
            ss3_indices = np.argmax(ss3_pred_single, axis=1)
            ss3_pred_aa = np.array([SS3_CLASSES[aa] for aa in ss3_indices])[:, None]
            ss3_pred_list.append(ss3_pred_aa)
            ss3_prob_list.append(ss3_pred_single)
            ss8_pred_single = ss8_pred[i, :prot_len_int, :]
            ss8_pred_single = torch.squeeze(ss8_pred_single, dim=0).cpu().detach().numpy()
            ss8_indices = np.argmax(ss8_pred_single, axis=1)
            ss8_pred_aa = np.array([SS8_CLASSES[aa] for aa in ss8_indices])[:, None]
            ss8_pred_list.append(ss8_pred_aa)
            ss8_prob_list.append(ss8_pred_single)
            names_list.append(name[i])
        # Store each residue sequence as an (L, 1) array of characters.
        for seq in list(seq):
            seq_list.append(np.array([i for i in seq])[:, None])
    return names_list, seq_list, ss3_pred_list, ss8_pred_list, ss3_prob_list, ss8_prob_list
def write_csv(class_out, save_dir):
    """Write one CSV per protein: residue, SS3/SS8 labels, and class probabilities."""
    names, seqs, ss3_list, ss8_list, ss3_probs, ss8_probs = class_out
    column_names = ["AA", "SS3", "SS8", "P3C", "P3E", "P3H", "P8C", "P8S", "P8T",
                    "P8H", "P8G", "P8I", "P8E", "P8B"]
    for seq, ss3, ss8, ss3_prob, ss8_prob, name in zip(seqs, ss3_list, ss8_list,
                                                       ss3_probs, ss8_probs, names):
        # Column-wise concatenation: (L, 1+1+1+3+8) = (L, 14) per protein.
        table = np.concatenate((seq, ss3, ss8, ss3_prob, ss8_prob), axis=1)
        out_path = os.path.join(save_dir, name + ".csv")
        pd.DataFrame(table).to_csv(out_path, header=column_names)
    return print(f'please find the results saved at {save_dir} with .csv extention')
if __name__ == '__main__':
    # This module only provides helpers; the real entry point is spot_ss.py.
    print("Please run the spot_ss.py instead")
|
#!/usr/bin/env python
import tornado.ioloop
import tornado.iostream
import tornado.netutil
import hexdump
import socket
class ServerOnline(object):
    """Async TCP prober (Python 2): prints '<host>:<port> OK' once a connection succeeds."""
    def on_connected(self):
        # Callback fired by the IOStream when the connect completes.
        print '%s:%d OK' % (self.host, self.port)
    def connect(self, host, port):
        """Start a non-blocking connection attempt to host:port."""
        self.host = host
        self.port = port
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        stream = tornado.iostream.IOStream(s);
        # Keep a reference so the stream is not garbage-collected mid-connect.
        self.target_stream = stream
        stream.connect((self.host, port), self.on_connected)
# Probe every host listed (one per line) in server.list on port 800, then start
# the IOLoop so the asynchronous connects can run.
for line in open('server.list'):
    line = line.strip()
    ServerOnline().connect(line, 800)
tornado.ioloop.IOLoop.instance().start()
|
# -*- coding: utf-8 -*-
from utils import equals,sprint
from avepayoff import avePayoff_cal
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
# generate possible strategies of q with increment of 0.1
def candvector():
    """Candidate probabilities 0.0..1.0 in 0.1 steps, nudged off the exact boundaries."""
    epsilon = 0.001
    values = []
    for step in range(0, 11):
        value = step / 10.0
        # Shift exact 0 and 1 by epsilon to keep probabilities strictly interior.
        if equals(value, 0.0):
            value = value + epsilon
        if equals(value, 1.0):
            value = value - epsilon
        values.append(value)
    return values
# calculate best response to strategy p
def allresponse(q):
    """Average payoffs of every candidate 4-component strategy p against fixed q."""
    # file = open(r'../../data/response8462.csv','w')
    result = []
    candidates = [ 0.001, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.98, 0.999 ]
    for p1 in candidates:
        for p2 in candidates:
            for p3 in candidates:
                for p4 in candidates:
                    # NOTE(review): this guard can never fire — candidates
                    # contains neither exactly 1 nor exactly 0; presumably it
                    # predates the epsilon-shifted candidate list. Confirm.
                    if p1 == 1 and p2 ==1 and p3 == 0 and p4 == 0:
                        continue;
                    p=[p1,p2,p3,p4]
                    payoff = avePayoff_cal(p,q)
                    result = result + [payoff]
                    # record = sprint(q)+':'+str(payoff[0])+','+str(payoff[1])+'\n'
                    # print(record)
                    # file.write(record)
    # file.close()
    return result
def main():
    """Plot the convex hull of achievable payoff pairs against the fixed strategy q."""
    q = [0.9, 0.5, 0.2, 0.1]
    result = allresponse(q)
    result = np.array(result)
    points = result
    # print(result)
    hull = ConvexHull(result)
    # print(hull)
    plt.xlabel("$s_X$")
    plt.ylabel("$s_Y$")
    plt.axis([1.5, 2.4, 0.5, 4])
    plt.plot(result[:,0], result[:,1], 'c.', label="$(s_X,s_Y)$")
    # Draw the hull boundary segment by segment.
    for simplex in hull.simplices:
        lines = plt.plot(points[simplex, 0], points[simplex, 1], 'b-', linewidth = 2)
    # Mark the fully-cooperative strategy p = (1,1,1,1) on the plot.
    p = [1,1,1,1]
    corner = avePayoff_cal(p,q)
    print(corner[0],corner[1])
    plt.plot(corner[0],corner[1], 'g^', markersize=14, label = "$p=(1,1,1,1)$")
    plt.legend()
    plt.show()
if __name__== "__main__":
    main()
# Codeforces 379A - New Year Candles: a candles burn one hour each, and every
# b burnt-out stubs can be melted into one fresh candle.
a, b = map(int, input().split())
total_hours = a
stubs = a
while stubs >= b:
    fresh = stubs // b
    total_hours += fresh
    # Leftover stubs plus the stubs from the freshly-burnt candles.
    stubs = fresh + stubs % b
print(total_hours)
|
# Generated by Django 2.0.3 on 2019-11-14 06:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: sets default/verbose_name on Article.is_addtimeline."""
    dependencies = [
        ('blog', '0014_article_is_addtimeline'),
    ]
    operations = [
        migrations.AlterField(
            model_name='article',
            name='is_addtimeline',
            field=models.BooleanField(default=False, verbose_name='是否添加时间线'),
        ),
    ]
|
# Lucas numbers: L(0)=2, L(1)=1, L(i)=L(i-1)+L(i-2); print the N-th term.
N = int(input())
lucas = [0] * 100
lucas[0] = 2
lucas[1] = 1
for i in range(2, 100):
    lucas[i] = lucas[i - 1] + lucas[i - 2]
print(lucas[N])
"""
* Copyright 2020, Departamento de sistemas y Computación
* Universidad de Los Andes
*
*
* Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Contribución de:
*
* Dario Correal
*
"""
import sys
import os
import config as cf
assert cf
from DISClib.ADT.graph import gr
from DISClib.ADT import map as m
from DISClib.DataStructures import listiterator as it
from DISClib.ADT import list as lt
from App import model
import csv
from DISClib.ADT import queue as qe
"""
El controlador se encarga de mediar entre la vista y el modelo.
Existen algunas operaciones en las que se necesita invocar
el modelo varias veces o integrar varias de las respuestas
del modelo en una sola respuesta. Esta responsabilidad
recae sobre el controlador.
"""
# ___________________________________________________
# Inicializacion del catalogo
# ___________________________________________________
def init():
    """
    Calls the model's initialization function and returns the analyzer.
    """
    # analyzer is used to interact with the model
    analyzer = model.newAnalyzer()
    return analyzer
# ___________________________________________________
# Funciones para la carga de datos y almacenamiento
# de datos en los modelos
# ___________________________________________________
def loadTrips(citibike):
    """Load every .csv file found in the data directory into the analyzer."""
    for fname in os.listdir(cf.data_dir):
        if fname.endswith('.csv'):
            print('Cargando archivo: ' + fname)
            loadServices(citibike, fname)
    return citibike
def loadServices(citibike, tripfile):
    """Parse one trip CSV file and add each trip record to the model."""
    path = cf.data_dir + tripfile
    reader = csv.DictReader(open(path, encoding="utf-8"), delimiter=",")
    for trip in reader:
        model.addTrip(citibike, trip)
    return citibike
# ___________________________________________________
# Funciones para consultas
# ___________________________________________________
def totalStops(analyzer):
    """
    Total number of stations (vertices) in the graph.
    """
    return model.totalStops(analyzer)
def totalConnections(analyzer):
    """
    Total number of connections (edges) between stations.
    """
    return model.totalConnections(analyzer)
def f3(analyzer, s1, s2):
    """Queue the SCC count and whether stations s1 and s2 share a cluster."""
    respuesta = qe.newQueue()
    qe.enqueue(respuesta, "Hay " + str(model.numSCC(analyzer)) + " clústeres en el grafo")
    # Keep the strict `== False` comparison: sameCC's return type is defined in
    # the model and may not be a plain bool.
    if model.sameCC(analyzer, s1, s2) == False:
        qe.enqueue(respuesta, "Las dos estaciones NO pertenecen al mismo clúster")
    else:
        qe.enqueue(respuesta, "Las dos estaciones SI pertenecen al mismo clúster")
    return respuesta
def f4(cont,s1,tMIN,tMAX):
    """Queue circular routes from s1 whose expected duration falls in the (tMIN, tMAX) window."""
    cola = qe.newQueue()
    qe.enqueue(cola,"Nota: se parte del supuesto de que un turista toma 20 minutos conociendo los alrededores en cada parada.")
    # Candidate stations that could close a loop back to s1.
    listaCand = model.CandidatasCirculares(cont,s1)
    if lt.isEmpty(listaCand):
        qe.enqueue(cola,"No se encontraron rutas.")
        return cola
    listaFinal = model.buscarEstaciónesFinales(cont,s1,listaCand)
    if lt.isEmpty(listaFinal):
        qe.enqueue(cola,"No se encontraron rutas.")
        return cola
    qe.enqueue(cola,"Se encontraron las siguientes rutas: ")
    iterador = it.newIterator(listaFinal)
    C = True
    while C:
        # Each element is a single-entry dict {station: data}; unpack key and value.
        dixx = it.next(iterador)
        llave = list(dixx.keys())[0]
        valor = list(dixx.values())[0]
        tupla = model.CostoMinimoCircular(cont,s1,llave,valor)
        # NOTE(review): tupla[1] is divided by 60 to print minutes, while the
        # bounds use tMIN*60/tMAX*60 — units look like hours vs seconds; confirm.
        if (tMIN*60)<tupla[1]<(tMAX*60):
            qe.enqueue(cola,(tupla[0] + " , duración esperada en minutos: " + str(round(tupla[1]/60)) ))
        if not it.hasNext(iterador):
            C = False
    return cola
def f5(cont):
    """Queue a report of the top-3 arrival, departure, and least-used stations."""
    respuesta = qe.newQueue()
    salida = model.Top3Salida(cont)
    llegada = model.Top3Llegada(cont)
    total = model.Top3Total(cont)
    qe.enqueue(respuesta, "Las 3 estaciones principales de llegada (en orden) son: " + llegada[0] + " " + llegada[1] + " " + llegada[2])
    qe.enqueue(respuesta, "Las 3 estaciones principales de salida (en orden) son: " + salida[0] + " " + salida[1] + " " + salida[2])
    # NOTE(review): Top3Total is indexed 1..3 while the others use 0..2 —
    # presumably its element 0 carries something else; confirm in model.py.
    qe.enqueue(respuesta, "Las 3 estaciones menos usadas en total (en orden) son: " + total[1] + " " + total[2] + " " + total[3])
    return respuesta
def f6(cont, s1, tMAX):
    """Queue every station reachable from s1 within tMAX, with its expected duration."""
    respuesta = qe.newQueue()
    grupos = model.buscarEstacionesBFS(cont, s1, tMAX)
    for grupo in grupos:
        # Each entry pairs a destination station with its expected travel time.
        for par in grupo:
            qe.enqueue(respuesta, s1 + "-->" + str(par[0]) + ". La duración esperada de esta ruta es " + str(par[1]) + " minutos")
    return respuesta
def f7(cont,age):
    """Queue the recommended route for the given age group."""
    cola = qe.newQueue()
    qe.enqueue(cola,model.RutaEdad(cont,age))
    return cola
def f8(cont, lat1, lon1, lat2, lon2):
    """Queue each step of the tourist route between two geographic coordinates."""
    respuesta = qe.newQueue()
    for paso in model.RutaTuristica(cont, lat1, lon1, lat2, lon2):
        qe.enqueue(respuesta, paso)
    return respuesta
def f9(cont, age):
    """Queue the adjacent stations most used by an age group with 3-day subscriptions."""
    respuesta = qe.newQueue()
    qe.enqueue(respuesta, "Las estaciones adyacentes que más utilizan las personas de este grupo de edad, con suscripción de 3 días son: ")
    for estacion in model.idEstPublicidad(cont, age):
        qe.enqueue(respuesta, estacion)
    return respuesta
#! /usr/bin/python3.6
# coding: utf-8
from bot_app import app
if __name__ == "__main__":
app.run(debug=True)
|
import requests
from bs4 import BeautifulSoup
from urllib.parse import unquote
import json
import re
# Embed-page URL template for a bare playlist id, and a browser User-Agent.
base_url = "https://open.spotify.com/embed/playlist/{}"
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
class SpotifyWebsiteParserException(Exception):
    """Raised when the Spotify embed page cannot be fetched or parsed."""
    pass
def get_songs_from_spotify_website(playlist):
    """Parse a Spotify playlist from the public embed page.

    playlist: playlist url or playlist id as string
      e.g. https://open.spotify.com/playlist/0wl9Q3oedquNlBAJ4MGZtS
      e.g. https://open.spotify.com/embed/0wl9Q3oedquNlBAJ4MGZtS
      e.g. 0wl9Q3oedquNlBAJ4MGZtS

    Returns a list of "artist title" strings.
    Raises SpotifyWebsiteParserException if fetching or parsing fails.
    """
    return_data = []
    if playlist.startswith("http"):
        url = playlist.replace("spotify.com/playlist", "spotify.com/embed/playlist")
    else:
        url = base_url.format(playlist)
    # FIX: the module-level `headers` (browser User-Agent) was defined but never
    # sent; pass it so the site serves the same markup it serves browsers.
    req = requests.get(url, headers=headers)
    if req.status_code != 200:
        raise SpotifyWebsiteParserException("ERROR: {} gave us not a 200. Instead: {}".format(url, req.status_code))
    bs = BeautifulSoup(req.text, 'html.parser')
    try:
        songs_txt = bs.find('script', {'id': 'resource'}).string.strip()
    except AttributeError:
        # The embed page lacks the expected JSON <script> — likely a bad id.
        raise SpotifyWebsiteParserException("ERROR: Could not get songs from Spotify website. Wrong Playlist id? Tried {}".format(url))
    songs_json = json.loads(unquote(songs_txt))
    for track in songs_json['tracks']['items']:
        artist = track['track']['artists'][0]['name']
        song = track['track']['name']
        full = "{} {}".format(artist, song)
        # Strip parentheticals like "(Radio Version)" for better Deezer search hits.
        full = re.sub(r'\([^)]*\)', '', full)
        return_data.append(full)
    return return_data
if __name__ == '__main__':
    #playlist = "21wZXvtrERELL0bVtKtuUh"
    # Smoke test with a known playlist id (query string is tolerated by the parser).
    playlist = "0wl9Q3oedquNlBAJ4MGZtS?si=jmdvnQSyRYCxDTWzrZARJg"
    get_songs_from_spotify_website(playlist)
|
#!/usr/bin/env python
import os
import time
from ConfigParser import SafeConfigParser
import tarfile
#This script is based on:
#http://codepoets.co.uk/2010/python-script-to-backup-mysql-databases-on-debian/
#http://stackoverflow.com/questions/5849999/how-to-make-tar-backup-using-python
# Read connection and path settings from pybackup.cnf.
config = SafeConfigParser()
config.read("pybackup.cnf")

username = config.get('mysql', 'user')
password = config.get('mysql', 'password')
hostname = config.get('mysql', 'host')
web_dir = config.get('webs', 'web_dir')
backup_dir = config.get('webs', 'backup_dir')

# One backup target per direct subdirectory of web_dir.
webs_dirs = [name for name in os.listdir(web_dir) if os.path.isdir(os.path.join(web_dir, name))]
filestamp = time.strftime('%Y-%m-%d')

for web in webs_dirs:
    # BUG FIX: the original referenced undefined names `home` and `directory`
    # here, so the tar loop could never run.
    full_dir = os.path.join(web_dir, web)
    tar = tarfile.open(os.path.join(backup_dir, web + '.tar.gz'), 'w:gz')
    tar.add(full_dir)
    tar.close()

# SECURITY NOTE(review): credentials are interpolated into shell commands and
# visible in the process list; prefer a MySQL option file and subprocess with
# an argument list (shell=False).
database_list_command = "mysql -u %s -p%s -h %s --silent -N -e 'show databases'" % (username, password, hostname)
for database in os.popen(database_list_command).readlines():
    database = database.strip()
    # Skip MySQL's internal schemas.
    if database == 'information_schema':
        continue
    if database == 'performance_schema':
        continue
    filename = backup_dir + "%s-%s.sql" % (database, filestamp)
    # BUG FIX: removed a stray "</pre>" HTML fragment that trailed this line.
    os.popen("mysqldump -u %s -p%s -h %s -e --opt -c %s | gzip -c > %s.gz" % (username, password, hostname, database, filename))
|
from typing import List
class Solution:
    """LeetCode 323: count connected components in an undirected graph."""

    def countComponents(self, n: int, edges: List[List[int]]) -> int:
        """Return the number of connected components among nodes 0..n-1.

        Builds an adjacency list and BFS-floods from every unvisited node.
        FIX: the original used list.pop(0) as a queue (O(v) per pop); a
        collections.deque makes the traversal O(n + e) overall.
        """
        from collections import defaultdict, deque

        graph = defaultdict(list)
        visited = set()
        for u, v in edges:
            graph[u].append(v)
            graph[v].append(u)

        def bfs(start):
            # Flood-fill all nodes reachable from `start`, marking them visited.
            queue = deque([start])
            while queue:
                node = queue.popleft()
                for neighbor in graph[node]:
                    if neighbor not in visited:
                        visited.add(neighbor)
                        queue.append(neighbor)

        components = 0
        for node in range(n):
            if node not in visited:
                components += 1
                visited.add(node)
                bfs(node)
        return components
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.