markdown | code | output | license | path | repo_name
|---|---|---|---|---|---|
**Load embeddings** | # ***********************
# *** LOAD EMBEDDINGS ***
# ***********************
embedding_weights = []
vocab_size = len(tk.word_index)
embedding_weights.append(np.zeros(vocab_size))
for char, i in tk.word_index.items():
onehot = np.zeros(vocab_size)
onehot[i-1] = 1
embedding_weights.append(onehot)
embedding_weights = np.array(embedding_weights)
print("Vocabulary size: ",vocab_size)
print("Embedding weights: ", embedding_weights) | Vocabulary size: 27
Embedding weights: [28x27 matrix, condensed: row 0 is all zeros (the padding row); rows 1-27 form the 27x27 identity, one one-hot row per character in tk.word_index]
| MIT | models/Character_Level_CNN.ipynb | TheBlueEngineer/Serene-1.0 |
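For context, `tk` above is presumably a Keras `Tokenizer` fitted at the character level; that setup is not shown in this notebook, so the sketch below is an assumption:

```python
from keras.preprocessing.text import Tokenizer

# Hypothetical reconstruction of the missing tokenizer setup
tk = Tokenizer(char_level=True, lower=True)
tk.fit_on_texts(train_texts)  # train_texts: the raw training documents (assumed variable)
# tk.word_index maps each character to an index starting at 1, which is why the
# loop above sets onehot[i-1] and prepends an all-zero row for the padding index 0.
```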
**Build the CNN model** | def KerasModel():
# ***************************************
# *****| BUILD THE NEURAL NETWORK |******
# ***************************************
embedding_layer = Embedding(vocab_size+1,
embedding_size,
input_length = input_size,
weights = [embedding_weights])
# Input layer
inputs = Input(shape=(input_size,), name='input', dtype='int64')
# Embedding layer
x = embedding_layer(inputs)
# Convolution
for filter_num, filter_size, pooling_size in conv_layers:
x = Conv1D(filter_num, filter_size)(x)
x = Activation('relu')(x)
if pooling_size != -1:
x = MaxPooling1D( pool_size = pooling_size)(x)
x = Flatten()(x)
# Fully Connected layers
for dense_size in fully_connected_layers:
x = Dense( dense_size, activation='relu')(x)
x = Dropout( dropout_p)(x)
# Output Layer
predictions = Dense(num_of_classes, activation = 'softmax')(x)
# BUILD MODEL
model = Model( inputs = inputs, outputs = predictions)
model.compile(optimizer = optimizer, loss = loss, metrics = ['accuracy'])
model.summary()
return model | _____no_output_____ | MIT | models/Character_Level_CNN.ipynb | TheBlueEngineer/Serene-1.0 |
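The model summary printed further below pins down most of the hyperparameters this function expects; a configuration consistent with those layer shapes would be the following (the dropout rate, optimizer, and loss are assumptions, everything else is implied by the printed shapes):

```python
input_size = 1000
embedding_size = 27  # the 756-parameter embedding implies (vocab_size+1) * 27 = 28 * 27
conv_layers = [
    [256, 7, 3],   # [filters, kernel_size, pool_size]; pool_size == -1 means no pooling
    [256, 7, 3],
    [256, 3, -1],
    [256, 3, -1],
    [256, 3, -1],
    [256, 3, 3],
]
fully_connected_layers = [1024, 1024]
num_of_classes = 2
dropout_p = 0.5                                        # assumption
optimizer, loss = 'adam', 'categorical_crossentropy'   # assumptions
```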
**Train the CNN** | #with tf.device("/gpu:0"):
# history = model.fit(x_train, y_train,
# validation_data = ( x_test, y_test),
# epochs = 10,
# batch_size = batch,
# verbose = True)
with tf.device("/gpu:0"):
grid = KerasClassifier(build_fn = KerasModel, epochs = 15, verbose= True)
param_grid = dict(
epochs = [15]
)
#grid = GridSearchCV(estimator = model,
# param_grid = param_grid,
# cv = 5,
# verbose = 10,
# return_train_score = True)
grid_result = grid.fit(x_train, y_train) | Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) (None, 1000) 0
_________________________________________________________________
embedding_1 (Embedding) (None, 1000, 27) 756
_________________________________________________________________
conv1d_1 (Conv1D) (None, 994, 256) 48640
_________________________________________________________________
activation_1 (Activation) (None, 994, 256) 0
_________________________________________________________________
max_pooling1d_1 (MaxPooling1 (None, 331, 256) 0
_________________________________________________________________
conv1d_2 (Conv1D) (None, 325, 256) 459008
_________________________________________________________________
activation_2 (Activation) (None, 325, 256) 0
_________________________________________________________________
max_pooling1d_2 (MaxPooling1 (None, 108, 256) 0
_________________________________________________________________
conv1d_3 (Conv1D) (None, 106, 256) 196864
_________________________________________________________________
activation_3 (Activation) (None, 106, 256) 0
_________________________________________________________________
conv1d_4 (Conv1D) (None, 104, 256) 196864
_________________________________________________________________
activation_4 (Activation) (None, 104, 256) 0
_________________________________________________________________
conv1d_5 (Conv1D) (None, 102, 256) 196864
_________________________________________________________________
activation_5 (Activation) (None, 102, 256) 0
_________________________________________________________________
conv1d_6 (Conv1D) (None, 100, 256) 196864
_________________________________________________________________
activation_6 (Activation) (None, 100, 256) 0
_________________________________________________________________
max_pooling1d_3 (MaxPooling1 (None, 33, 256) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 8448) 0
_________________________________________________________________
dense_1 (Dense) (None, 1024) 8651776
_________________________________________________________________
dropout_1 (Dropout) (None, 1024) 0
_________________________________________________________________
dense_2 (Dense) (None, 1024) 1049600
_________________________________________________________________
dropout_2 (Dropout) (None, 1024) 0
_________________________________________________________________
dense_3 (Dense) (None, 2) 2050
=================================================================
Total params: 10,999,286
Trainable params: 10,999,286
Non-trainable params: 0
_________________________________________________________________
| MIT | models/Character_Level_CNN.ipynb | TheBlueEngineer/Serene-1.0 |
**Test the CNN** | #loss, accuracy = model.evaluate( x_train, y_train, verbose = True)
#print("Training Accuracy: {:.4f}".format( accuracy))
#loss, accuracy = model.evaluate( x_test, y_test, verbose = True)
#print("Testing Accuracy: {:.4f}".format( accuracy))
from sklearn.metrics import classification_report, confusion_matrix
y_predict = grid.predict( x_test)
# Build the confusion matrix
y_tested = y_test
print( type(y_test))
print(y_tested)
y_tested = np.argmax( y_tested, axis = 1)
print(y_tested)
confMatrix = confusion_matrix(y_tested, y_predict)
tn, fp, fn, tp = confMatrix.ravel()
# Build a classification report
classification_reports = classification_report( y_tested, y_predict, target_names = ['Non-depressed', 'Depressed'], digits=3)
print(confMatrix)
print(classification_reports) | _____no_output_____ | MIT | models/Character_Level_CNN.ipynb | TheBlueEngineer/Serene-1.0 |
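Since the cell above already unpacks `tn, fp, fn, tp` with `ravel()`, the headline rates can be derived directly from those four counts; a small sketch:

```python
# Rates from the binary confusion matrix (positive class = 'Depressed')
precision   = tp / (tp + fp)   # of predicted positives, how many were right
recall      = tp / (tp + fn)   # of true positives, how many were found
specificity = tn / (tn + fp)   # of true negatives, how many were found
print("precision={:.3f} recall={:.3f} specificity={:.3f}".format(precision, recall, specificity))
```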
constitutive vs variable | def add_genetype(coverage):
"""function to add gene type to the df, and remove random genes"""
select_genes_file = '../../data/genomes/ara_housekeeping_list.out'
select_genes = pd.read_table(select_genes_file, sep='\t', header=None)
cols = ['gene','gene_type']
select_genes.columns = cols
merged = pd.merge(coverage, select_genes, on='gene')
merged_renamed = merged.copy()
merged_renamed.gene_type.replace('housekeeping','constitutive', inplace=True)
merged_renamed.gene_type.replace('highVar','variable', inplace=True)
merged_renamed.gene_type.replace('randCont','random', inplace=True)
# no_random = merged_renamed[merged_renamed.gene_type != 'random']
# no_random.reset_index(drop=True, inplace=True)
return merged_renamed
roots_merged = add_genetype(root_coverage)
no_random_roots = roots_merged[roots_merged.gene_type != 'random']
shoots_merged = add_genetype(shoot_coverage)
no_random_shoots = shoots_merged[shoots_merged.gene_type != 'random']
rootsshootsintersect_merged = add_genetype(rootshootintersect_coverage)
no_random_rootsshoots = rootsshootsintersect_merged[rootsshootsintersect_merged.gene_type != 'random']
#how many have open chromatin??
print('root openchromatin present:')
print(len(no_random_roots)-len(no_random_roots[no_random_roots.percentage_bases_covered == 0]))
print('shoot openchromatin present:')
print(len(no_random_shoots)-len(no_random_shoots[no_random_shoots.percentage_bases_covered == 0]))
print('root-shoot intersect openchromatin present:')
print(len(no_random_rootsshoots)-len(no_random_rootsshoots[no_random_rootsshoots.percentage_bases_covered == 0]))
#how many have open chromatin, split by gene type?
for tissue, df_cov in [('root', no_random_roots),
                       ('shoot', no_random_shoots),
                       ('root-shoot intersect', no_random_rootsshoots)]:
    for gene_type in ['variable', 'constitutive']:
        subset = df_cov[df_cov.gene_type == gene_type]
        print('{} openchromatin present {} promoters:'.format(tissue, gene_type))
        print(len(subset) - len(subset[subset.percentage_bases_covered == 0]))
sns.catplot(x="gene_type", y="percentage_bases_covered", data=roots_merged) #.savefig('../../data/plots/TFBS_coverage/responsive_bp_covered.pdf', format='pdf')
sns.catplot(x="gene_type", y="percentage_bases_covered", data=shoots_merged) #.savefig('../../data/plots/TFBS_coverage/responsive_bp_covered.pdf', format='pdf')
#roots
plot = sns.catplot(x="gene_type", y="percentage_bases_covered", kind='box', data=no_random_roots)
#plot points
ax = sns.swarmplot(x="gene_type", y="percentage_bases_covered", data=no_random_roots, color=".25")
plt.ylabel('Percentage bases covered')
plt.xlabel('Gene type');
#ax.get_figure() #.savefig('../../data/plots/TFBS_coverage/responsive_bp_covered_boxplot.pdf', format='pdf')
#shoots
plot = sns.catplot(x="gene_type", y="percentage_bases_covered", kind='box', data=no_random_shoots)
#plot points
ax = sns.swarmplot(x="gene_type", y="percentage_bases_covered", data=no_random_shoots, color=".25")
plt.ylabel('Percentage bases covered')
plt.xlabel('Gene type');
#ax.get_figure() #.savefig('../../data/plots/TFBS_coverage/responsive_bp_covered_boxplot.pdf', format='pdf')
#roots-shoots intersect
plot = sns.catplot(x="gene_type", y="percentage_bases_covered", kind='box', data=no_random_rootsshoots)
#plot points
ax = sns.swarmplot(x="gene_type", y="percentage_bases_covered", data=no_random_rootsshoots, color=".25")
plt.ylabel('Percentage bases covered')
plt.xlabel('Gene type');
#ax.get_figure() #.savefig('../../data/plots/TFBS_coverage/responsive_bp_covered_boxplot.pdf', format='pdf')
def normality(input_proms):
    """function to test normality of data - returns test statistic, p-value"""
    #Get the names of each promoter gene type
    names = input_proms.gene_type.unique()
    for name in names:
        print('{}: {}'.format(name, stats.shapiro(input_proms.percentage_bases_covered[input_proms.gene_type == name])))
def variance(input_proms):
"""function to test variance of data"""
#test variance
constitutive = input_proms[input_proms.gene_type == 'constitutive']
#reset indexes so residuals can be calculated later
constitutive.reset_index(inplace=True)
responsive = input_proms[input_proms.gene_type == 'variable']
responsive.reset_index(inplace=True)
control = input_proms[input_proms.gene_type == 'random']
control.reset_index(inplace=True)
#Levene's test compares the constitutive and variable groups only
print(stats.levene(constitutive.percentage_bases_covered, responsive.percentage_bases_covered))
normality(no_random_roots)
normality(no_random_shoots)
normality(no_random_rootsshoots) | variable: (0.8546600937843323, 2.263117515610702e-08)
constitutive: (0.8711197376251221, 9.823354929494599e-08)
| MIT | src/plotting/OpenChromatin_plotsold.ipynb | Switham1/PromoterArchitecture |
Not normal | variance(no_random_roots)
variance(no_random_shoots)
variance(no_random_rootsshoots) | LeveneResult(statistic=0.00041366731166758155, pvalue=0.9837939970964911)
| MIT | src/plotting/OpenChromatin_plotsold.ipynb | Switham1/PromoterArchitecture |
unequal variance for shoots | def kruskal_test(input_data):
"""function to do kruskal-wallis test on data"""
#print('\033[1m' +promoter + '\033[0m')
print(kruskal(data=input_data, dv='percentage_bases_covered', between='gene_type'))
#print('')
no_random_roots
kruskal_test(no_random_roots)
kruskal_test(no_random_shoots)
kruskal_test(no_random_rootsshoots) | Source ddof1 H p-unc
Kruskal gene_type 1 22.450983 0.000002
| MIT | src/plotting/OpenChromatin_plotsold.ipynb | Switham1/PromoterArchitecture |
try gat enrichment | #add Chr to linestart of chromatin bed files
add_chr_linestart('../../data/ATAC-seq/potter2018/Shoots_NaOH_peaks_all.bed','../../data/ATAC-seq/potter2018/Shoots_NaOH_peaks_all_renamed.bed')
add_chr_linestart('../../data/ATAC-seq/potter2018/Roots_NaOH_peaks_all.bed','../../data/ATAC-seq/potter2018/Roots_NaOH_peaks_all_renamed.bed')
add_chr_linestart('../../data/ATAC-seq/potter2018/intersectRootsShoots_PeaksInBoth.bed','../../data/ATAC-seq/potter2018/intersectRootsShoots_PeaksInBoth_renamed.bed')
#create a bed file containing all 100 constitutive/responsive promoters with the fourth column annotating whether it's constitutive or responsive
proms_file = '../../data/genes/constitutive-variable-random_100_each.csv'
promoters = pd.read_csv(proms_file)
promoters
promoters_df = promoters[['promoter_AGI','gene_type']]
promoters_no_random = promoters_df.copy()
#drop randCont rows
promoters_no_random = promoters_df[~(promoters_df.gene_type == 'randCont')]
promoters_no_random
#merge promoters with genetype selected
promoterbedfile = '../../data/FIMO/responsivepromoters.bed'
promoters_bed = pd.read_table(promoterbedfile, sep='\t', header=None)
cols = ['chr', 'start', 'stop', 'promoter_AGI', 'score', 'strand', 'source', 'feature_name', 'dot2', 'attributes']
promoters_bed.columns = cols
merged = pd.merge(promoters_bed,promoters_no_random, on='promoter_AGI')
#add gene_type to column3
merged = merged[['chr','start','stop','gene_type','promoter_AGI', 'score', 'strand', 'source', 'feature_name', 'dot2', 'attributes']]
#write to bed file
promoter_file = '../../data/promoter_analysis/old1000bpproms_variable_constitutive_workspace.bed'
with open(promoter_file,'w') as f:
merged.to_csv(f,index=False,sep='\t',header=None)
# new_merged = merged.astype({'start': 'int'})
# new_merged = merged.astype({'stop': 'int'})
# new_merged = merged.astype({'chr': 'int'})
#add Chr to linestart of promoter bed file
add_chr_linestart('../../data/promoter_analysis/old1000bpproms_variable_constitutive_workspace.bed','../../data/promoter_analysis/old1000bpproms_variable_constitutive_workspace_renamed.bed')
#create separate variable and constitutive and gat workspace
promoter_file_renamed = '../../data/promoter_analysis/old1000bpproms_variable_constitutive_workspace_renamed.bed'
promoters = pd.read_table(promoter_file_renamed, sep='\t', header=None)
#make a new gat workspace file with all promoters (first 3 columns)
bed = BedTool.from_dataframe(promoters[[0,1,2]]).saveas('../../data/promoter_analysis/chromatin/variable_constitutive_promoters_1000bp_workspace.bed')
#select only variable promoters
variable_promoters = promoters[promoters[3] == 'highVar']
sorted_variable = variable_promoters.sort_values([0,1])
bed = BedTool.from_dataframe(sorted_variable).saveas('../../data/promoter_analysis/chromatin/variable_promoters_1000bp.bed')
#make a constitutive only file
constitutive_promoters = promoters[promoters[3] == 'housekeeping']
sorted_constitutive = constitutive_promoters.sort_values([0,1])
bed = BedTool.from_dataframe(sorted_constitutive).saveas('../../data/promoter_analysis/chromatin/constitutive_promoters_1000bp.bed') | _____no_output_____ | MIT | src/plotting/OpenChromatin_plotsold.ipynb | Switham1/PromoterArchitecture |
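With the workspace and the per-class files written, gat can then be invoked; a sketch of the call (the flag names follow my recollection of the gat documentation and should be treated as assumptions):

```python
# Sketch: test whether open chromatin peaks are enriched in variable promoters
# relative to the all-promoter workspace (flag names are assumptions).
import subprocess
subprocess.run([
    'gat-run.py',
    '--segments=../../data/ATAC-seq/potter2018/intersectRootsShoots_PeaksInBoth_renamed.bed',
    '--annotations=../../data/promoter_analysis/chromatin/variable_promoters_1000bp.bed',
    '--workspace=../../data/promoter_analysis/chromatin/variable_constitutive_promoters_1000bp_workspace.bed',
    '--num-samples=1000',
], check=True)
```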
now I will do the plots with non-overlapping promoters including the 5'UTR | #merge promoters with genetype selected
promoter_UTR = '../../data/FIMO/non-overlapping_includingbidirectional_all_genes/promoters_5UTR_renamedChr.bed'
promoters_bed = pd.read_table(promoter_UTR, sep='\t', header=None)
cols = ['chr', 'start', 'stop', 'promoter_AGI', 'score', 'strand', 'source', 'feature_name', 'dot2', 'attributes']
promoters_bed.columns = cols
merged = pd.merge(promoters_bed,promoters_no_random, on='promoter_AGI')
#how many constitutive genes left after removed/shortened overlapping
len(merged[merged.gene_type == 'housekeeping'])
#how many variable genes left after removed/shortened overlapping
len(merged[merged.gene_type == 'highVar'])
merged['length'] = (merged.start - merged.stop).abs()
merged.sort_values('length',ascending=True)
#plot of lengths
dist_plot = merged['length']
#create figure with no transparency
dist_plot_fig = sns.distplot(dist_plot).get_figure()
#remove 2 genes from constitutive group so equal sample size to variable
#random sample of 98, using seed 1
merged[merged.gene_type == 'housekeeping'] = merged[merged.gene_type == 'housekeeping'].sample(98, random_state=1)
#drop rows with at least 2 NaNs
merged = merged.dropna(thresh=2)
merged
#write to bed file so can run OpenChromatin_coverage.py
new_promoter_file = '../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive.bed'
cols = ['chr', 'start', 'stop', 'promoter_AGI', 'score', 'strand', 'source', 'feature_name', 'dot2', 'attributes']
#remove trailing decimal .0 from start and stop
merged = merged.astype({'start': 'int'})
merged = merged.astype({'stop': 'int'})
merged = merged.astype({'chr': 'int'})
merged_coverage = merged[cols]
with open(new_promoter_file,'w') as f:
merged_coverage.to_csv(f,index=False,sep='\t',header=None)
#write to bed file so can run gat
new_promoter_file_gat = '../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive_gat.bed'
cols_gat = ['chr', 'start', 'stop', 'gene_type','promoter_AGI', 'score', 'strand', 'source', 'feature_name', 'dot2', 'attributes']
merged_gat = merged[cols_gat]
with open(new_promoter_file_gat,'w') as f:
merged_gat.to_csv(f,index=False,sep='\t',header=None)
#Read in new files
RootChomatin_bp_covered = '../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutiveRootOpenChrom.bp_covered.txt'
ShootChomatin_bp_covered = '../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutiveShootOpenChrom.bp_covered.txt'
RootShootIntersect_bp_covered = '../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutiveShootRootIntersectOpenChrom.bp_covered.txt'
root_coverage = percent_coverage(RootChomatin_bp_covered)
shoot_coverage = percent_coverage(ShootChomatin_bp_covered)
rootshootintersect_coverage = percent_coverage(RootShootIntersect_bp_covered)
#add Chr to linestart of promoter bed file
add_chr_linestart('../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive_gat.bed','../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive_gat_renamed.bed')
#create separate variable and constitutive and gat workspace
promoter_file_renamed = '../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive_gat_renamed.bed'
promoters = pd.read_table(promoter_file_renamed, sep='\t', header=None)
#make a new gat workspace file with all promoters (first 3 columns)
bed = BedTool.from_dataframe(promoters[[0,1,2]]).saveas('../../data/promoter_analysis/chromatin/non-overlapping_includingbidirectional_variable_constitutive_workspace.bed')
#select only variable promoters
variable_promoters = promoters[promoters[3] == 'highVar']
sorted_variable = variable_promoters.sort_values([0,1])
bed = BedTool.from_dataframe(sorted_variable).saveas('../../data/promoter_analysis/chromatin/non-overlapping_includingbidirectional_variable_promoters.bed')
#make a constitutive only file
constitutive_promoters = promoters[promoters[3] == 'housekeeping']
sorted_constitutive = constitutive_promoters.sort_values([0,1])
bed = BedTool.from_dataframe(sorted_constitutive).saveas('../../data/promoter_analysis/chromatin/non-overlapping_includingbidirectional_constitutive_promoters.bed')
#show distribution of the distance from the closest end of the open chromatin peak to the ATG (if overlapping already then distance is 0)
root_peaks_bed = '../../data/ATAC-seq/potter2018/Roots_NaOH_peaks_all_renamed.bed'
shoot_peaks_bed = '../../data/ATAC-seq/potter2018/Shoots_NaOH_peaks_all_renamed.bed'
rootshootintersect_peaks_bed = '../../data/ATAC-seq/potter2018/intersectRootsShoots_PeaksInBoth_renamed.bed'
promoters_bed = '../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive_renamed.bed'
promoter_openchrom_intersect = '../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive_chromintersect.bed'
add_chr_linestart('../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive.bed','../../data/promoter_analysis/non-overlapping_includingbidirectional_variable_constitutive_renamed.bed')
def distr_distance_ATG(peaks_bed, promoter_bed, output_file):
"""function to show the distribution of the distance rom the closest end
of the open chromatin peak to the ATG (if overlapping already then distance is 0)"""
# peaks = pd.read_table(peaks_bed, sep='\t', header=None)
# cols = ['chr','start', 'stop']
# peaks.columns = cols
# promoters = pd.read_table(promoter_bed, sep='\t', header=None)
# cols_proms = ['chr', 'start', 'stop', 'gene_type','promoter_AGI', 'score', 'strand', 'source', 'feature_name', 'dot2', 'attributes']
# promoters.columns = cols_proms
proms = BedTool(promoter_bed) #read in files using BedTools
peaks = BedTool(peaks_bed)
#report chromosome position of overlapping feature, along with the promoter which overlaps it (only reports the overlapping nucleotides, not the whole promoter length. Can use u=True to get whole promoter length)
#f, the minimum overlap as fraction of A. F, nucleotide fraction of B (genes) that need to be overlapping with A (promoters)
#wa, Write the original entry in A for each overlap.
#wo, Write the original A and B entries plus the number of base pairs of overlap between the two features. Only A features with overlap are reported.
#u, write original A entry only once even if more than one overlap
intersect = proms.intersect(peaks, wo=True) #could add u=True which indicates we want to see the promoters that overlap features in the genome
#Write to output_file
with open(output_file, 'w') as output:
#Each line in the file contains bed entry a and bed entry b that it overlaps plus the number of bp in the overlap so 19 columns
output.write(str(intersect))
#read in intersect bed file
overlapping_proms = pd.read_table(output_file, sep='\t', header=None)
cols = ['chrA', 'startA', 'stopA', 'promoter_AGI','dot1','strand','source','type','dot2','attributes','chrB', 'startB','stopB','bp_overlap']
overlapping_proms.columns = cols
#add empty openchrom_distance_from_ATG column
overlapping_proms['openchrom_distance_from_ATG'] = int()
for i, v in overlapping_proms.iterrows():
#if positive strand feature A
if overlapping_proms.loc[i,'strand'] == '+':
#if end of open chromatin is downstream or equal to ATG, distance is 0
if overlapping_proms.loc[i,'stopA'] <= overlapping_proms.loc[i, 'stopB']:
overlapping_proms.loc[i,'openchrom_distance_from_ATG'] = 0
#else if upstream and chromatin stop is after promoter start, add distance from chromatin stop to ATG
elif overlapping_proms.loc[i,'startA'] <= overlapping_proms.loc[i, 'stopB']:
overlapping_proms.loc[i,'openchrom_distance_from_ATG'] = overlapping_proms.loc[i,'stopA'] - overlapping_proms.loc[i, 'stopB']
elif overlapping_proms.loc[i,'strand'] == '-':
#if end of open chromatin is downstream or equal to ATG, distance is 0
if overlapping_proms.loc[i,'startA'] >= overlapping_proms.loc[i, 'startB']:
overlapping_proms.loc[i,'openchrom_distance_from_ATG'] = 0
#else if upstream of the ATG and the chromatin start is before the promoter stop, add distance from chromatin start to ATG
elif overlapping_proms.loc[i,'stopA'] >= overlapping_proms.loc[i, 'startB']:
overlapping_proms.loc[i,'openchrom_distance_from_ATG'] = overlapping_proms.loc[i, 'startB'] - overlapping_proms.loc[i,'startA']
return overlapping_proms
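# Worked example of the '+' strand rule above (hypothetical coordinates):
#   promoter A: startA=100, stopA=1100 (the ATG sits at stopA)
#   open-chromatin peak B: startB=600, stopB=900
#   stopA (1100) > stopB (900) and startA (100) <= stopB (900), so the peak ends
#   upstream of the ATG and the distance is stopA - stopB = 200 bp.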
#show length of open chromatin peaks
rootshootintersect_peaks = pd.read_table(rootshootintersect_peaks_bed, sep='\t', header=None, usecols=[0,1,2], names=['chr','start','stop'])
rootshootintersect_peaks['length'] = (rootshootintersect_peaks.start - rootshootintersect_peaks.stop).abs()
rootshootintersect_peaks.sort_values('length', ascending=True)
rootshootintersect = distr_distance_ATG(rootshootintersect_peaks_bed,promoters_bed,promoter_openchrom_intersect)
rootshootintersect
rootshootintersect.sort_values('openchrom_distance_from_ATG',ascending=True)
#plot of distances of chomatin to ATG
dist_plot = rootshootintersect['openchrom_distance_from_ATG']
#create figure with no transparency
dist_plot_fig = sns.distplot(dist_plot).get_figure()
#now split constitutive and variable
merged_distances = pd.merge(merged, rootshootintersect, on='promoter_AGI')
merged_distances.gene_type
#VARIABLE
#plot of distances of chomatin to ATG
dist_plot = merged_distances[merged_distances.gene_type=='highVar']['openchrom_distance_from_ATG']
#create figure with no transparency
dist_plot_fig = sns.distplot(dist_plot).get_figure()
merged_distances[merged_distances.gene_type=='housekeeping']['openchrom_distance_from_ATG']
#CONSTITUTIVE
#plot of distances of chomatin to ATG
dist_plot = merged_distances[merged_distances.gene_type=='housekeeping']['openchrom_distance_from_ATG']
#create figure with no transparency
dist_plot_fig = sns.distplot(dist_plot).get_figure() | /home/witham/opt/anaconda3/envs/PromoterArchitecturePipeline/lib/python3.7/site-packages/seaborn/distributions.py:369: UserWarning: Default bandwidth for data is 0; skipping density estimation.
warnings.warn(msg, UserWarning)
| MIT | src/plotting/OpenChromatin_plotsold.ipynb | Switham1/PromoterArchitecture |
Create, train, and predict with models | n,D = X.train.shape
m_v = 25
m_u, Q = 50, D
Z_v = (m_v,D)
Z_u = (m_u,Q)
sample_size = 200 | _____no_output_____ | Apache-2.0 | main.ipynb | spectraldani/DeepMahalanobisGP |
SGPR | models['sgpr'] = gpflow.models.SGPR(X.train, y.train, gpflow.kernels.RBF(D, ARD=True), initial_inducing_points(X.train, m_u))
train_model('sgpr')
y_pred[('sgpr','mean')], y_pred[('sgpr','var')] = models['sgpr'].predict_y(X.test) | _____no_output_____ | Apache-2.0 | main.ipynb | spectraldani/DeepMahalanobisGP |
Deep Mahalanobis GP | reset_seed()
with gpflow.defer_build():
models['dvmgp'] = deep_vmgp.DeepVMGP(
X.train, y.train, Z_u, Z_v,
[gpflow.kernels.RBF(D,ARD=True) for i in range(Q)],
full_qcov=False, diag_qmu=False
)
models['dvmgp'].compile()
train_model('dvmgp')
y_pred[('dvmgp','mean')], y_pred[('dvmgp','var')] = models['dvmgp'].predict_y(X.test) | _____no_output_____ | Apache-2.0 | main.ipynb | spectraldani/DeepMahalanobisGP |
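For orientation, my reading of the model (an interpretation, not text from this notebook): DVMGP places GP priors on the rows of an input-dependent linear map $W(x)$ and feeds the warped input to a second GP, roughly

$$f(x) = g\big(W(x)\,x\big), \qquad g \sim \mathcal{GP}, \quad W_{q\cdot}(\cdot) \sim \mathcal{GP}, \; q = 1,\dots,Q,$$

which matches the later cell that visualizes $Wx^\intercal$ as the learned first-layer warping.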
Show scores | for m in models.index:
scaled_y_test = scalers.y.inverse_transform(y.test)
scaled_y_pred = [
scalers.y.inverse_transform(y_pred[m].values[:,[0]]),
scalers.y.var_ * y_pred[m].values[:,[1]]
]
results.at[m,'MRAE'] = metrics.mean_relative_absolute_error(scaled_y_test, scaled_y_pred[0]).squeeze()
results.at[m,'RMSE'] = metrics.root_mean_squared_error(scaled_y_test, scaled_y_pred[0]).squeeze()
results.at[m,'NLPD'] = metrics.negative_log_predictive_density(scaled_y_test, *scaled_y_pred).squeeze()
results | _____no_output_____ | Apache-2.0 | main.ipynb | spectraldani/DeepMahalanobisGP |
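Assuming `metrics.negative_log_predictive_density` implements the standard Gaussian NLPD (an assumption about this repo's helper module), the quantity reported above is

$$\mathrm{NLPD} = \frac{1}{n}\sum_{i=1}^{n}\left[\frac{\left(y_i-\mu_i\right)^2}{2\sigma_i^2} + \frac{1}{2}\log\left(2\pi\sigma_i^2\right)\right],$$

with $\mu_i, \sigma_i^2$ the predictive mean and variance after undoing the target scaling (note the code rescales the predictive variance by `scalers.y.var_`).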
Plot results | class MidpointNormalize(mpl.colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
mpl.colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
f = plt.figure()
ax = plt.gca()
ax.scatter(scalers.x.transform(X_all)[:,0],scalers.x.transform(X_all)[:,1],edgecolors='white',facecolors='none')
lims = (ax.get_xlim(), ax.get_ylim())
plt.close(f)
n = 50
grid_points = np.dstack(np.meshgrid(np.linspace(*lims[0],n), np.linspace(*lims[1],n))).reshape(-1,2)
grid_y = np.empty((len(models.index),grid_points.shape[0]))
for i,m in enumerate(models.index):
reset_seed()
grid_pred = models[m].predict_y(grid_points, sample_size)[0]
if len(grid_pred.shape) == 3:
grid_y[i] = grid_pred.mean(axis=0)[:,0]
else:
grid_y[i] = grid_pred[:,0]
grid_points = grid_points.reshape(n,n,2)
grid_y = grid_y.reshape(-1,n,n)
f = plt.figure(constrained_layout=True,figsize=(8,7))
gs = f.add_gridspec(ncols=4, nrows=2)
axs = np.empty(3,dtype=object)
axs[0] = f.add_subplot(gs[0,0:2])
axs[1] = f.add_subplot(gs[0,2:4],sharey=axs[0])
axs[2] = f.add_subplot(gs[1,1:3])
axs[1].yaxis.set_visible(False)
axs[2].yaxis.set_visible(False)
axs[0].set_title('SGPR')
axs[1].set_title('DVMGP')
axs[2].set_title('Full Dataset')
ims = np.empty((2,4),dtype=object)
for i,m in enumerate(['sgpr', 'dvmgp']):
ax = axs[i]
ims[0,i] = ax.contourf(grid_points[:,:,0],grid_points[:,:,1],grid_y[i],30)
# Plot features
Z = None
if m == 'dgp':
Z = models[m].layers[0].feature.Z.value
elif m in ['sgpr','vmgp']:
Z = models[m].feature.Z.value
elif m == 'dvmgp':
Z = models[m].Z_v.Z.value
if Z is not None:
ax.scatter(Z[:,0],Z[:,1],marker='^',edgecolors='white',facecolors='none')
# ims[1,i] = ax.scatter(X.test[:,0],X.test[:,1],edgecolors='white',c=y.test)
ims[0,3] = axs[2].scatter(X.test[:,0],X.test[:,1],c=y.test)
ims[1,3] = axs[2].scatter(X.train[:,0],X.train[:,1],c=y.train)
for ax in axs:
ax.set_xlim(lims[0]);
ax.set_ylim(lims[1]);
clim = np.array([i.get_clim() for i in ims.flat if i is not None])
clim = (clim.min(), clim.max())
norm = mpl.colors.Normalize(vmin=clim[0], vmax=clim[1])
# norm = MidpointNormalize(vmin=clim[0], vmax=clim[1], midpoint=0)
for im in ims.flat:
if im is not None:
im.set_norm(norm)
f.colorbar(ims[0,0], ax=axs, orientation='vertical', fraction=1, aspect=50)
for im in ims[0,:3].flat:
if im is not None:
for c in im.collections:
c.set_edgecolor("face")
f.savefig('./figs/outputs.pdf')
n = 50
grid_points = np.dstack(np.meshgrid(np.linspace(*lims[0],n), np.linspace(*lims[1],n))).reshape(-1,2)
grid_y = np.empty((grid_points.shape[0],2))
grid_y = models['dvmgp'].enquire_session().run(tf.matmul(
tf.transpose(models['dvmgp'].compute_qW(grid_points)[0][...,0],[2,0,1]),grid_points[:,:,None]
)[:,:,0])
grid_points = grid_points.reshape(n,n,2)
grid_y = grid_y.reshape(n,n,2)
f = plt.figure(constrained_layout=True,figsize=(8,4))
gs = f.add_gridspec(ncols=2, nrows=1)
axs = np.empty(4,dtype=object)
axs[0] = f.add_subplot(gs[0,0])
axs[1] = f.add_subplot(gs[0,1])
extent = (*lims[0], *lims[1])
colorspace = 'cielab'
alpha = 0.7
axs[0].imshow(
cplot.get_srgb1(grid_points[:,:,0] + grid_points[:,:,1]*1j, colorspace=colorspace, alpha=alpha),
origin='lower',
extent=extent,
aspect='auto',
interpolation='gaussian'
)
axs[0].set_title('Identity map')
axs[1].imshow(
cplot.get_srgb1(grid_y[:,:,0] + grid_y[:,:,1]*1j, colorspace=colorspace, alpha=alpha),
origin='lower',
extent=extent,
aspect='auto',
interpolation='gaussian'
)
axs[1].set_title('DVMGP: $Wx^\intercal$');
f.savefig('./figs/layers.pdf')
dvmgp_var = np.array([k.variance.value for k in models['dvmgp'].w_kerns])
f,ax = plt.subplots(1,1,figsize=(3,3))
ax.bar(np.arange(2), dvmgp_var/dvmgp_var.max(), color='C2')
ax.set_ylabel('1st layer variance\nrelative to largest value')
ax.set_xlabel('Latent dimension')
ax.set_xticks([])
ax.set_title('DVMGP')
f.tight_layout()
f.savefig('./figs/dims.pdf') | _____no_output_____ | Apache-2.0 | main.ipynb | spectraldani/DeepMahalanobisGP |
`sasum(N, SX, INCX)`
Computes the sum of absolute values of elements of the vector $x$. Operates on single-precision real valued arrays. Input vector $\mathbf{x}$ is represented as a [strided array](../strided_arrays.ipynb) `SX`, spaced by `INCX`. Vector $\mathbf{x}$ is of size `N`. Example usage | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(''), "..", "..")))
import numpy as np
from pyblas.level1 import sasum
x = np.array([1, 2, 3], dtype=np.single)
N = len(x)
incx = 1
sasum(N, x, incx) | _____no_output_____ | BSD-3-Clause | docs/level1/sasum.ipynb | timleslie/pyblas |
Docstring | help(sasum) | Help on function sasum in module pyblas.level1.sasum:
sasum(N, SX, INCX)
Computes the sum of absolute values of elements of the vector x
Parameters
----------
N : int
Number of elements in input vector
SX : numpy.ndarray
A single precision real array, dimension (1 + (`N` - 1)*abs(`INCX`))
INCX : int
Storage spacing between elements of `SX`
Returns
-------
numpy.single
See Also
--------
dasum : Double-precision sum of absolute values
Notes
-----
Online PyBLAS documentation: https://nbviewer.jupyter.org/github/timleslie/pyblas/blob/main/docs/sasum.ipynb
Reference BLAS documentation: https://github.com/Reference-LAPACK/lapack/blob/v3.9.0/BLAS/SRC/sasum.f
Examples
--------
>>> x = np.array([1, 2, 3], dtype=np.single)
>>> N = len(x)
>>> incx = 1
>>> print(sasum(N, x, incx))
6.
| BSD-3-Clause | docs/level1/sasum.ipynb | timleslie/pyblas |
Source code | sasum?? | _____no_output_____ | BSD-3-Clause | docs/level1/sasum.ipynb | timleslie/pyblas |
Notebook for PAN - Authorship Attribution - 2018 | %matplotlib inline
#python basic libs
import os;
from os.path import join as pathjoin;
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import UndefinedMetricWarning
warnings.simplefilter(action='ignore', category=UndefinedMetricWarning)
import re;
import json;
import codecs;
from collections import defaultdict;
from pprint import pprint
from time import time
import logging
#data analysis libs
import numpy as np;
import pandas as pd;
from pandas.plotting import scatter_matrix;
import matplotlib.pyplot as plt;
import random;
#machine learning libs
#feature extraction
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
#preprocessing and transformation
from sklearn import preprocessing
from sklearn.preprocessing import normalize, MaxAbsScaler, RobustScaler;
from sklearn.decomposition import PCA;
from sklearn.base import BaseEstimator, ClassifierMixin
#classifiers
from sklearn import linear_model;
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from sklearn.neural_network import MLPClassifier
#
from sklearn import feature_selection;
from sklearn import ensemble;
from sklearn.model_selection import train_test_split;
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
#model valuation
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, accuracy_score;
import seaborn as sns;
sns.set(color_codes=True);
import spacy
get_ipython().config.get('IPKernelApp', {})['parent_appname'] = "" #spacy causes a bug on pandas and this line fixes it
import platform;
import sklearn;
import scipy;
print("|%-15s|%-40s|"%("PACK","VERSION"))
print("|%-15s|%-40s|"%('-'*15,'-'*40))
print('\n'.join(
"|%-15s|%-40s|" % (pack, version)
for pack, version in
zip(['SO','NumPy','SciPy','Scikit-Learn','seaborn','spacy'],
[platform.platform(), np.__version__, scipy.__version__, sklearn.__version__, sns.__version__, spacy.__version__])
))
np.set_printoptions(precision=4)
pd.options.display.float_format = '{:,.4f}'.format
#externalizing codes that is used in many notebooks and it is not experiment specific
import pan
#convert a sparse matrix into a dense for being used on PCA
from skleanExtensions import DenseTransformer;
#convert an array of text into an array of tokenized texts each token must contain text, tag_, pos_, dep_
from skleanExtensions import POSTagTransformer | _____no_output_____ | Apache-2.0 | 2019/PAN_AA_2018-POS-tag.ipynb | jeleandro/PANAA2018 |
paths configuration | baseDir = '/Users/joseeleandrocustodio/Dropbox/mestrado/02 - Pesquisa/code';
inputDir= pathjoin(baseDir,'pan18aa');
outputDir= pathjoin(baseDir,'out',"oficial");
if not os.path.exists(outputDir):
os.mkdir(outputDir); | _____no_output_____ | Apache-2.0 | 2019/PAN_AA_2018-POS-tag.ipynb | jeleandro/PANAA2018 |
loading the dataset | problems = pan.readCollectionsOfProblems(inputDir);
print(problems[0]['problem'])
print(problems[0].keys())
pd.DataFrame(problems)
def cachingPOSTAG(problem, taggingVersion='TAG'):
import json;
print ("Tagging: %s, language: %s, " %(problem['problem'],problem['language']), end=' ');
if not os.path.exists('POSTAG_cache'):
os.makedirs('POSTAG_cache');
_id = problem['problem']+problem['language'];
filename = os.path.join('POSTAG_cache',taggingVersion+'_'+_id+'.json')
if not os.path.exists(filename):
lang = problem['language'];
if lang == 'sp':
lang = 'es';
elif lang =='pl':
print(lang, ' not supported');
return ;
train_docs, train_labels, _ = zip(*problem['candidates'])
problem['training_docs_size'] = len(train_docs);
test_docs, _, test_filename = zip(*problem['unknown'])
t0 = time()
tagger = POSTagTransformer(language=lang);
train_docs = tagger.fit_transform(train_docs);
test_docs = tagger.fit_transform(test_docs);
print("Annotation time %0.3fs" % (time() - t0))
with open(filename,'w') as f:
json.dump({
'train':train_docs,
'train_labels':train_labels,
'test':test_docs,
'test_filename':test_filename
},f);
else:
with open(filename,'r') as f:
data = json.load(f);
train_docs = data['train'];
train_labels = data['train_labels'];
test_docs = data['test'];
test_filename = data['test_filename'];
print('tagged')
return train_docs, train_labels, test_docs, test_filename;
for problem in problems:
cachingPOSTAG(problem)
train_docs, train_labels, test_docs, test_filename = cachingPOSTAG(problem)
class FilterTagTransformer(BaseEstimator):
def __init__(self,token='POS', parts=None):
self.token = token;
self.parts = parts;
def transform(self, X, y=None):
""" Return An array of tokens
Parameters
----------
X : {array-like}, shape = [n_samples, n_tokens]
Array documents, where each document consists of a list of node
and each node consist of a token and its correspondent tag
[
[('a','TAG1'),('b','TAG2')],
[('a','TAG1')]
]
y : array-like, shape = [n_samples] (default: None)
Returns
---------
X_dense : dense version of the input X array.
"""
if self.token == 'TAG':
X = [' '.join([d[1].split('__')[0] for d in doc]) for doc in X]
elif self.token == 'POS':
if self.parts is None:
X = [' '.join([d[2] for d in doc]) for doc in X];
else:
X = [' '.join([d[0] for d in doc if d[2] in self.parts]) for doc in X]
elif self.token == 'DEP':
X = [' '.join([d[3] for d in doc]) for doc in X]
elif self.token == 'word_POS':
if self.parts is None:
X = [' '.join([d[0]+'/'+d[2] for d in doc]) for doc in X]
elif self.token == 'filter':
if self.parts is None:
X = [' '.join([d[2] for d in doc]) for doc in X];
else:
X = [' '.join([d[0] for d in doc if d[2] in self.parts]) for doc in X]
else:
X = [' '.join([d[0] for d in doc]) for doc in X]
return np.array(X);
def fit(self, X, y=None):
self.is_fitted = True
return self
def fit_transform(self, X, y=None):
return self.transform(X=X, y=y) | _____no_output_____ | Apache-2.0 | 2019/PAN_AA_2018-POS-tag.ipynb | jeleandro/PANAA2018 |
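A toy call to make the node layout concrete (the tagged tokens below are made up):

```python
# Each node is (text, tag_, pos_, dep_), as produced by POSTagTransformer
doc = [('The', 'DT', 'DET', 'det'),
       ('cat', 'NN', 'NOUN', 'nsubj'),
       ('sleeps', 'VBZ', 'VERB', 'ROOT')]
print(FilterTagTransformer(token='POS').transform([doc]))                          # ['DET NOUN VERB']
print(FilterTagTransformer(token='TAG').transform([doc]))                          # ['DT NN VBZ']
print(FilterTagTransformer(token='POS', parts=['NOUN', 'VERB']).transform([doc]))  # ['cat sleeps']
```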
analyzing the remaining parameters | def spaceTokenizer(x):
return x.split(" ");
def runML(problem):
print ("\nProblem: %s, language: %s, " %(problem['problem'],problem['language']), end=' ');
lang = problem['language'];
if lang == 'sp':
lang = 'es';
elif lang =='pl':
print(lang, ' not supported');
return None,None,None,None;
train_docs, train_labels, test_docs, test_filename = cachingPOSTAG(problem)
problem['training_docs_size'] = len(train_docs);
t0 = time()
pipeline = Pipeline([
('filter',FilterTagTransformer(token='TAG')),
('vect', CountVectorizer(
tokenizer=spaceTokenizer,
min_df=0.01,
lowercase=False
)),
('tfidf', TfidfTransformer()),
('scaler', MaxAbsScaler()),
('dense', DenseTransformer()),
('transf', PCA(0.999)),
('clf', LogisticRegression(random_state=0,multi_class='multinomial', solver='newton-cg')),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__ngram_range' :((1,1),(1,2),(1,3),(1,5)),
'tfidf__use_idf' :(True, False),
'tfidf__sublinear_tf':(True, False),
'tfidf__norm':('l1','l2'),
'clf__C':(0.1,1,10),
}
grid_search = GridSearchCV(pipeline,
parameters,
cv=4,
iid=False,
n_jobs=-1,
verbose=False,
scoring='f1_macro')
t0 = time()
grid_search.fit(train_docs, train_labels)
print("Gridsearh %0.3fs" % (time() - t0), end=' ')
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
train_pred=grid_search.predict(train_docs);
test_pred=grid_search.predict(test_docs);
# Writing output file
out_data=[]
for i,v in enumerate(test_pred):
out_data.append({'unknown-text': test_filename[i],'predicted-author': v})
answerFile = pathjoin(outputDir,'answers-'+problem['problem']+'.json');
with open(answerFile, 'w') as f:
json.dump(out_data, f, indent=4)
#calculating the performance using PAN evaluation code
f1,precision,recall,accuracy=pan.evaluate(
pathjoin(inputDir, problem['problem'], 'ground-truth.json'),
answerFile)
return {
'problem-name' : problem['problem'],
"language" : problem['language'],
'AuthorCount' : len(set(train_labels)),
'macro-f1' : round(f1,3),
'macro-precision': round(precision,3),
'macro-recall' : round(recall,3),
'micro-accuracy' : round(accuracy,3),
}, grid_search.cv_results_,best_parameters, grid_search.best_estimator_;
result = [];
cv_result = [];
best_parameters = [];
estimators = [];
for problem in problems:
with warnings.catch_warnings():
warnings.filterwarnings("ignore");
r, c, b, e = runML(problem);
if r is None:
continue;
result.append(r);
cv_result.append(c);
estimators.append(e);
b['problem'] = problem['problem'];
best_parameters.append(b);
df=pd.DataFrame(result)[['problem-name',
"language",
'AuthorCount',
'macro-f1','macro-precision','macro-recall' ,'micro-accuracy']]
df
df[['macro-f1']].mean()
languages={
'en':'inglesa',
'sp':'espanhola',
'it':'italiana',
'pl':'polonesa',
'fr':'francesa'
}
cv_result2 = [];
dfCV = pd.DataFrame();
for i, c in enumerate(cv_result):
temp = pd.DataFrame(c);
temp['AuthorCount'] = result[i]['AuthorCount']
temp['problem'] = int(re.sub('\D','',result[i]['problem-name']));
temp['language'] = languages[result[i]['language']]
dfCV = dfCV.append(temp);
for p in [
'mean_test_score','std_test_score','mean_train_score',
'split0_test_score',
'split1_test_score',
'split2_test_score']:
dfCV[p]=dfCV[p].astype(np.float32);
dfCV =dfCV[[
'problem',
'language',
'rank_test_score',
'param_vect__ngram_range',
'param_tfidf__sublinear_tf',
'param_tfidf__norm',
'param_clf__C',
'mean_test_score',
'std_test_score',
'split0_test_score',
'split1_test_score',
'split2_test_score',
'mean_score_time',
'mean_fit_time',
'std_fit_time',
'std_score_time',
'std_train_score',
]];
dfCV.rename(columns={
'param_vect__ngram_range':'ngram_range',
'param_tfidf__sublinear_tf':'sublinear_tf',
'param_tfidf__smooth_idf':'smooth_idf',
'param_tfidf__norm':'norm',
'param_clf__C':'regularization',
},inplace=True);
#print('\',\n\''.join(dfCV.columns))
dfCV.head() | _____no_output_____ | Apache-2.0 | 2019/PAN_AA_2018-POS-tag.ipynb | jeleandro/PANAA2018 |
Saving the model | dfCV.to_csv('PANAA2018_POSTAG.csv', index=False)
dfCV = pd.read_csv('PANAA2018_POSTAG.csv', na_values='')
import pickle;
with open("PANAA2018_POSTAG.pkl","wb") as f:
pickle.dump(estimators,f) | _____no_output_____ | Apache-2.0 | 2019/PAN_AA_2018-POS-tag.ipynb | jeleandro/PANAA2018 |
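To reuse the fitted pipelines later without re-running the grid search:

```python
import pickle
with open("PANAA2018_POSTAG.pkl", "rb") as f:
    estimators = pickle.load(f)
```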
understanding the model with reports. We can see that, for the same problem, more than one configuration is possible.
(dfCV[dfCV.rank_test_score == 1]).drop_duplicates()[
['problem',
'language',
'mean_test_score',
'std_test_score',
'ngram_range',
'sublinear_tf',
'norm']
].sort_values(by=[
'problem',
'mean_test_score',
'std_test_score',
'ngram_range',
'sublinear_tf'
], ascending=[True, False,True,False,False])
dfCV.pivot_table(
index=['problem','language','norm','sublinear_tf'],
columns=[ 'ngram_range','regularization'],
values='mean_test_score'
) | _____no_output_____ | Apache-2.0 | 2019/PAN_AA_2018-POS-tag.ipynb | jeleandro/PANAA2018 |
The returned score comes from the cross-validation test folds, not from the test set | pd.options.display.precision = 3
print(u"\\begin{table}[h]\n\\centering\n\\caption{Medida F1 para os parâmetros }")
print(re.sub(r'[ ]{2,}',' ',dfCV.pivot_table(
index=['problem','language','sublinear_tf','norm'],
columns=['ngram_range'],
values='mean_test_score'
).to_latex()))
print ("\label{tab:modelocaracter}")
print(r"\end{table}")
d = dfCV.copy()
d = d.rename(columns={'language':u'Língua', 'sublinear_tf':'TF Sublinear'})
d = d [ d.norm.isna() == False]
d['autorNumber'] = d.problem.map(lambda x: 20 if x % 2==0 else 5)
d.problem = d.apply(lambda x: x[u'Língua'] +" "+ str(x[u'problem']), axis=1)
#d.ngram_range = d.apply(lambda x: str(x[u'ngram_range'][0]) +" "+ str(x[u'ngram_range'][1]), axis=1)
d.std_test_score =d.std_test_score / d.std_test_score.quantile(0.95) *500;
d.std_test_score +=1;
d.std_test_score = d.std_test_score.astype(np.int64)
g = sns.FacetGrid(d, col='Língua', hue='TF Sublinear', row="regularization", height=3,palette="Set1")
g.map(plt.scatter, "ngram_range", "mean_test_score",s=d.std_test_score.values).add_legend();
#sns.pairplot(d, hue="TF Sublinear", vars=["autorNumber", "mean_test_score"])
g = sns.FacetGrid(d, row='autorNumber', hue='TF Sublinear', col=u"Língua", height=3,palette="Set1")
g.map(plt.scatter, "ngram_range", "mean_test_score", alpha=0.5, s=d.std_test_score.values).add_legend();
sns.distplot(dfCV.std_test_score, bins=25);
import statsmodels.api as sm
d = dfCV[['mean_test_score','problem', 'language','sublinear_tf','norm','ngram_range']].copy();
d.sublinear_tf=d.sublinear_tf.apply(lambda x: 1 if x else 0)
d.norm=d.norm.apply(lambda x: 1 if x=='l1' else 0)
d['autorNumber'] = d.problem.map(lambda x: 20 if x % 2==0 else 5)
d.norm.fillna(value='None', inplace=True);
_, d['ngram_max'] = zip(*d.ngram_range.str.replace(r'[^\d,]','').str.split(',').values.tolist())
#d.ngram_min = d.ngram_min.astype(np.uint8);
d.ngram_max = d.ngram_max.astype(np.uint8);
d.drop(columns=['ngram_range','problem'], inplace=True)
#d['intercept'] = 1;
d=pd.get_dummies(d, columns=['language'])
d.describe()
mod = sm.OLS( d.iloc[:,0], d.iloc[:,1:])
res = mod.fit()
res.summary()
sns.distplot(res.predict()-d.iloc[:,0].values, bins=25)
sns.jointplot(x='F1',y='F1-estimated',data=pd.DataFrame({'F1':d.iloc[:,0].values, 'F1-estimated':res.predict()})); | _____no_output_____ | Apache-2.0 | 2019/PAN_AA_2018-POS-tag.ipynb | jeleandro/PANAA2018 |
tests | problem = problems[0]
print ("\nProblem: %s, language: %s, " %(problem['problem'],problem['language']), end=' ');
def d(estimator, n_features=5):
from IPython.display import Markdown, display, HTML
names = np.array(estimator.named_steps['vect'].get_feature_names());
classes_ = estimator.named_steps['clf'].classes_;
weights = estimator.named_steps['clf'].coef_;
def tag(tag, content, attrib=''):
if attrib != '':
attrib = ' style="' + attrib+'"';
return ''.join(['<',tag,attrib,' >',content,'</',tag,'>']);
def color(baseColor, intensity):
r,g,b = baseColor[0:2],baseColor[2:4],baseColor[4:6]
r,g,b = int(r, 16), int(g, 16), int(b, 16)
f= (1-np.abs(intensity))/2;
r = r + int((255-r)*f)
g = g + int((255-g)*f)
b = b + int((255-b)*f)
rgb = '#%02x%02x%02x' % (r, g, b);
#print(baseColor,rgb,r,g,b,intensity,f)
return rgb
spanStyle ='border-radius: 5px;margin:4px;padding:3px; color:#FFF !important;';
lines = '<table>'+tag('thead',tag('th','Classes')+tag('th','positive')+tag('th','negative'))
lines += '<tbody>'
for i,c in enumerate(weights):
c = np.round(c / np.abs(c).max(),2);
positive = names[np.argsort(-c)][:n_features];
positiveV = c[np.argsort(-c)][:n_features]
negative = names[np.argsort(c)][:n_features];
negativeV = c[np.argsort(c)][:n_features]
lines += tag('tr',
tag('td', re.sub('\D0*','',classes_[i]))
+ tag('td',''.join([tag('span',d.upper()+' '+str(v),spanStyle+'background:'+color('51A3DD',v)) for d,v in zip(positive,positiveV)]))
+ tag('td',''.join([tag('span',d.upper()+' '+str(v),spanStyle+'background:'+color('DD5555',v)) for d,v in zip(negative,negativeV)]))
)
lines+= '</tbody></table>'
display(HTML(lines))
#print(lines)
d(estimators[0])
%%HTML
<!-- Tag reference, condensed from the spaCy documentation. -->
<!-- Universal POS tags: ADJ adjective; ADP adposition; ADV adverb; AUX auxiliary; CONJ/CCONJ conjunction;
     DET determiner; INTJ interjection; NOUN noun; NUM numeral; PART particle; PRON pronoun; PROPN proper noun;
     PUNCT punctuation; SCONJ subordinating conjunction; SYM symbol; VERB verb; X other; SPACE space. -->
<!-- English fine-grained tags (Penn-Treebank style) map onto the universal set, e.g. DT->DET, IN->ADP,
     JJ/JJR/JJS->ADJ, NN/NNS->NOUN, NNP/NNPS->PROPN, PRP->PRON, CD->NUM, CC->CONJ, MD->VERB, RB->ADV. -->
class="c-table__cell u-text u-text-small">adverb</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>RBR</code></td><td class="c-table__cell u-text"><code>ADV</code></td><td class="c-table__cell u-text"> <code>Degree=comp</code></td><td class="c-table__cell u-text u-text-small">adverb, comparative</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>RBS</code></td><td class="c-table__cell u-text"><code>ADV</code></td><td class="c-table__cell u-text"> <code>Degree=sup</code></td><td class="c-table__cell u-text u-text-small">adverb, superlative</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>RP</code></td><td class="c-table__cell u-text"><code>PART</code></td><td class="c-table__cell u-text"></td><td class="c-table__cell u-text u-text-small">adverb, particle</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>_SP</code></td><td class="c-table__cell u-text"><code>SPACE</code></td><td class="c-table__cell u-text"></td><td class="c-table__cell u-text u-text-small">space</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>SYM</code></td><td class="c-table__cell u-text"><code>SYM</code></td><td class="c-table__cell u-text"></td><td class="c-table__cell u-text u-text-small">symbol</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>TO</code></td><td class="c-table__cell u-text"><code>PART</code></td><td class="c-table__cell u-text"> <code>PartType=inf</code> <code>VerbForm=inf</code></td><td class="c-table__cell u-text u-text-small">infinitival to</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>UH</code></td><td class="c-table__cell u-text"><code>INTJ</code></td><td class="c-table__cell u-text"></td><td class="c-table__cell u-text u-text-small">interjection</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>VB</code></td><td class="c-table__cell u-text"><code>VERB</code></td><td class="c-table__cell u-text"> <code>VerbForm=inf</code></td><td class="c-table__cell u-text u-text-small">verb, base form</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>VBD</code></td><td class="c-table__cell u-text"><code>VERB</code></td><td class="c-table__cell u-text"> <code>VerbForm=fin</code> <code>Tense=past</code></td><td class="c-table__cell u-text u-text-small">verb, past tense</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>VBG</code></td><td class="c-table__cell u-text"><code>VERB</code></td><td class="c-table__cell u-text"> <code>VerbForm=part</code> <code>Tense=pres</code> <code>Aspect=prog</code></td><td class="c-table__cell u-text u-text-small">verb, gerund or present participle</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>VBN</code></td><td class="c-table__cell u-text"><code>VERB</code></td><td class="c-table__cell u-text"> <code>VerbForm=part</code> <code>Tense=past</code> <code>Aspect=perf</code></td><td class="c-table__cell u-text u-text-small">verb, past participle</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>VBP</code></td><td class="c-table__cell u-text"><code>VERB</code></td><td class="c-table__cell u-text"> <code>VerbForm=fin</code> <code>Tense=pres</code></td><td class="c-table__cell u-text u-text-small">verb, non-3rd person singular present</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>VBZ</code></td><td class="c-table__cell u-text"><code>VERB</code></td><td class="c-table__cell u-text"> 
<code>VerbForm=fin</code> <code>Tense=pres</code> <code>Number=sing</code> <code>Person=3</code></td><td class="c-table__cell u-text u-text-small">verb, 3rd person singular present</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>WDT</code></td><td class="c-table__cell u-text"><code>ADJ</code></td><td class="c-table__cell u-text"> <code>PronType=int</code> <code>rel</code></td><td class="c-table__cell u-text u-text-small">wh-determiner</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>WP</code></td><td class="c-table__cell u-text"><code>NOUN</code></td><td class="c-table__cell u-text"> <code>PronType=int</code> <code>rel</code></td><td class="c-table__cell u-text u-text-small">wh-pronoun, personal</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>WP$</code></td><td class="c-table__cell u-text"><code>ADJ</code></td><td class="c-table__cell u-text"> <code>Poss=yes PronType=int</code> <code>rel</code></td><td class="c-table__cell u-text u-text-small">wh-pronoun, possessive</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>WRB</code></td><td class="c-table__cell u-text"><code>ADV</code></td><td class="c-table__cell u-text"> <code>PronType=int</code> <code>rel</code></td><td class="c-table__cell u-text u-text-small">wh-adverb</td></tr><tr class="c-table__row"><td class="c-table__cell u-text"><code>XX</code></td><td class="c-table__cell u-text"><code>X</code></td><td class="c-table__cell u-text"></td><td class="c-table__cell u-text u-text-small">unknown</td></tr></tbody></table>
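As an illustrative sketch of how these tags surface in spaCy (assuming the `en_core_web_sm` model is installed; the example sentence is ours, not from the original):

```python
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("The quick brown fox jumped over the lazy dog.")
for token in doc:
    # token.tag_ is the fine-grained tag from the table above;
    # token.pos_ is the mapped (Universal) part-of-speech tag.
    print(f"{token.text:10} {token.tag_:6} {token.pos_}")
```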
**German**

The German part-of-speech tagger uses the [TIGER Treebank](http://www.ims.uni-stuttgart.de/forschung/ressourcen/korpora/TIGERCorpus/annotation/index.html) annotation scheme. We also map the tags to the simpler Google Universal POS tag set.

| Tag | POS | Morphology | Description |
| --- | --- | --- | --- |
| `$(` | `PUNCT` | `PunctType=brck` | other sentence-internal punctuation mark |
| `$,` | `PUNCT` | `PunctType=comm` | comma |
| `$.` | `PUNCT` | `PunctType=peri` | sentence-final punctuation mark |
| `ADJA` | `ADJ` | | adjective, attributive |
| `ADJD` | `ADJ` | `Variant=short` | adjective, adverbial or predicative |
| `ADV` | `ADV` | | adverb |
| `APPO` | `ADP` | `AdpType=post` | postposition |
| `APPR` | `ADP` | `AdpType=prep` | preposition; circumposition left |
| `APPRART` | `ADP` | `AdpType=prep` `PronType=art` | preposition with article |
| `APZR` | `ADP` | `AdpType=circ` | circumposition right |
| `ART` | `DET` | `PronType=art` | definite or indefinite article |
| `CARD` | `NUM` | `NumType=card` | cardinal number |
| `FM` | `X` | `Foreign=yes` | foreign language material |
| `ITJ` | `INTJ` | | interjection |
| `KOKOM` | `CONJ` | `ConjType=comp` | comparative conjunction |
| `KON` | `CONJ` | | coordinate conjunction |
| `KOUI` | `SCONJ` | | subordinate conjunction with "zu" and infinitive |
| `KOUS` | `SCONJ` | | subordinate conjunction with sentence |
| `NE` | `PROPN` | | proper noun |
| `NNE` | `PROPN` | | proper noun |
| `NN` | `NOUN` | | noun, singular or mass |
| `PAV` | `ADV` | `PronType=dem` | pronominal adverb |
| `PROAV` | `ADV` | `PronType=dem` | pronominal adverb |
| `PDAT` | `DET` | `PronType=dem` | attributive demonstrative pronoun |
| `PDS` | `PRON` | `PronType=dem` | substituting demonstrative pronoun |
| `PIAT` | `DET` | `PronType=ind\|neg\|tot` | attributive indefinite pronoun without determiner |
| `PIDAT` | `DET` | `AdjType=pdt` `PronType=ind\|neg\|tot` | attributive indefinite pronoun with determiner |
| `PIS` | `PRON` | `PronType=ind\|neg\|tot` | substituting indefinite pronoun |
| `PPER` | `PRON` | `PronType=prs` | non-reflexive personal pronoun |
| `PPOSAT` | `DET` | `Poss=yes` `PronType=prs` | attributive possessive pronoun |
| `PPOSS` | `PRON` | `PronType=rel` | substituting possessive pronoun |
| `PRELAT` | `DET` | `PronType=rel` | attributive relative pronoun |
| `PRELS` | `PRON` | `PronType=rel` | substituting relative pronoun |
| `PRF` | `PRON` | `PronType=prs` `Reflex=yes` | reflexive personal pronoun |
| `PTKA` | `PART` | | particle with adjective or adverb |
| `PTKANT` | `PART` | `PartType=res` | answer particle |
| `PTKNEG` | `PART` | `Negative=yes` | negative particle |
| `PTKVZ` | `PART` | `PartType=vbp` | separable verbal particle |
| `PTKZU` | `PART` | `PartType=inf` | "zu" before infinitive |
| `PWAT` | `DET` | `PronType=int` | attributive interrogative pronoun |
| `PWAV` | `ADV` | `PronType=int` | adverbial interrogative or relative pronoun |
| `PWS` | `PRON` | `PronType=int` | substituting interrogative pronoun |
| `TRUNC` | `X` | `Hyph=yes` | word remnant |
| `VAFIN` | `AUX` | `Mood=ind` `VerbForm=fin` | finite verb, auxiliary |
| `VAIMP` | `AUX` | `Mood=imp` `VerbForm=fin` | imperative, auxiliary |
| `VAINF` | `AUX` | `VerbForm=inf` | infinitive, auxiliary |
| `VAPP` | `AUX` | `Aspect=perf` `VerbForm=part` | perfect participle, auxiliary |
| `VMFIN` | `VERB` | `Mood=ind` `VerbForm=fin` `VerbType=mod` | finite verb, modal |
| `VMINF` | `VERB` | `VerbForm=inf` `VerbType=mod` | infinitive, modal |
| `VMPP` | `VERB` | `Aspect=perf` `VerbForm=part` `VerbType=mod` | perfect participle, modal |
| `VVFIN` | `VERB` | `Mood=ind` `VerbForm=fin` | finite verb, full |
| `VVIMP` | `VERB` | `Mood=imp` `VerbForm=fin` | imperative, full |
| `VVINF` | `VERB` | `VerbForm=inf` | infinitive, full |
| `VVIZU` | `VERB` | `VerbForm=inf` | infinitive with "zu", full |
| `VVPP` | `VERB` | `Aspect=perf` `VerbForm=part` | perfect participle, full |
| `XY` | `X` | | non-word containing non-letter |
| `SP` | `SPACE` | | space |

| _____no_output_____ | Apache-2.0 | 2019/PAN_AA_2018-POS-tag.ipynb | jeleandro/PANAA2018 |
**Spark NLP Quick Start** How to use Spark NLP pretrained pipelines. [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/quick_start_google_colab.ipynb) We will first set up the runtime environment, then load a pretrained entity recognition model and a sentiment analysis model and give them a quick test. Feel free to test the models on your own sentences / datasets. | !wget http://setup.johnsnowlabs.com/colab.sh -O - | bash
import sparknlp
spark = sparknlp.start()
print("Spark NLP version: {}".format(sparknlp.version()))
print("Apache Spark version: {}".format(spark.version))
from sparknlp.pretrained import PretrainedPipeline | _____no_output_____ | Apache-2.0 | jupyter/spark_nlp_model.ipynb | akashmavle5/--akash |
Let's use a Spark NLP pre-trained pipeline for `named entity recognition`: | pipeline = PretrainedPipeline('recognize_entities_dl', 'en')
result = pipeline.annotate('President Biden represented Delaware for 36 years in the U.S. Senate before becoming the 47th Vice President of the United States.')
print(result['ner'])
print(result['entities']) | ['O', 'B-PER', 'O', 'B-LOC', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'O', 'B-ORG', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', 'O']
['Biden', 'Delaware', 'U.S', 'Senate', 'United States']
| Apache-2.0 | jupyter/spark_nlp_model.ipynb | akashmavle5/--akash |
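A small follow-up sketch: pairing each token with its predicted IOB tag. This assumes the result dictionary also contains a `token` field, which the standard pretrained NER pipelines return alongside `ner`:

```python
# Print only the tokens that were tagged as part of an entity
# (assumes 'token' is present in the annotate() result, as it is
# for the standard pretrained NER pipelines).
for token, tag in zip(result['token'], result['ner']):
    if tag != 'O':
        print(token, tag)
```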
Let's try another Spark NLP pre-trained pipeline for `named entity recognition` | pipeline = PretrainedPipeline('onto_recognize_entities_bert_tiny', 'en')
result = pipeline.annotate("Johnson first entered politics when elected in 2001 as a member of Parliament. He then served eight years as the mayor of London, from 2008 to 2016, before rejoining Parliament.")
print(result['ner'])
print(result['entities']) | onto_recognize_entities_bert_tiny download started this may take some time.
Approx size to download 30.2 MB
[OK!]
['B-PERSON', 'B-ORDINAL', 'O', 'O', 'O', 'O', 'O', 'B-DATE', 'O', 'O', 'O', 'O', 'B-ORG', 'O', 'O', 'O', 'B-DATE', 'I-DATE', 'O', 'O', 'O', 'O', 'B-GPE', 'O', 'B-DATE', 'O', 'B-DATE', 'O', 'O', 'O', 'B-ORG']
['Johnson', 'first', '2001', 'Parliament.', 'eight years', 'London,', '2008', '2016', 'Parliament.']
| Apache-2.0 | jupyter/spark_nlp_model.ipynb | akashmavle5/--akash |
Let's use a Spark NLP pre-trained pipeline for `sentiment` analysis: | pipeline = PretrainedPipeline('analyze_sentimentdl_glove_imdb', 'en')
result = pipeline.annotate("Harry Potter is a great movie.")
print(result['sentiment']) | ['pos']
| Apache-2.0 | jupyter/spark_nlp_model.ipynb | akashmavle5/--akash |
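As a hedged sketch, `annotate` can also score several texts in one call; this assumes your Spark NLP version accepts a list of strings (recent releases do):

```python
# Score a small batch of (made-up) reviews in one call.
texts = [
    "Harry Potter is a great movie.",
    "This was a dull and disappointing film.",
]
results = pipeline.annotate(texts)
for text, r in zip(texts, results):
    print(text, '->', r['sentiment'])
```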
Please check our [Models Hub](https://nlp.johnsnowlabs.com/models) for more pretrained models and pipelines! 😊 | _____no_output_____ | Apache-2.0 | jupyter/spark_nlp_model.ipynb | akashmavle5/--akash | |
**Movie review text classification** This notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of a *binary* (two-class) classification problem, an important and widely applicable kind of machine learning problem. We will use the [IMDB dataset](https://tensorflow.google.cn/api_docs/python/tf/keras/datasets/imdb) from the [Internet Movie Database](https://www.imdb.com/), which contains the text of 50,000 movie reviews. 25,000 reviews from this dataset are used for training and the other 25,000 for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews. This notebook uses [tf.keras](https://tensorflow.google.cn/guide/keras), a high-level API for building and training models in TensorFlow. For a more advanced text classification tutorial using `tf.keras`, see the [MLCC Text Classification Guide](https://developers.google.com/machine-learning/guides/text-classification/). | from __future__ import absolute_import, division, print_function, unicode_literals
try:
# Colab only
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(tf.__version__) | 2.0.0
| Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
**Download the IMDB dataset** The IMDB dataset comes packaged with TensorFlow. It has already been preprocessed such that the reviews (sequences of words) have been converted to sequences of integers, where each integer represents a specific word in a dictionary. The following code downloads the IMDB dataset to your machine (or copies it from the cache if you have already downloaded it): | imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000) | _____no_output_____ | Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
The argument `num_words=10000` keeps the 10,000 most frequently occurring words in the training data. Rare words are discarded to keep the size of the data manageable. **Explore the data** Let's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review. Each label is an integer value of 0 or 1, where 0 is a negative review and 1 is a positive review. | print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
| Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
The review texts have been converted to integers, where each integer represents a specific word in a dictionary. Here is what the first review looks like: | print(train_data[0])
| Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
Movie reviews may be different lengths. The code below shows the number of words in the first and second reviews. Since inputs to a neural network must be the same length, we will need to resolve this later. | len(train_data[0]), len(train_data[1])
**Convert the integers back to words** It may be useful to know how to convert integers back to text. Here we will create a helper function that queries a dictionary object containing the integer-to-string mapping: | # A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text]) | Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb_word_index.json
1646592/1641221 [==============================] - 0s 0us/step
| Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
Now we can use the `decode_review` function to display the text of the first review: | decode_review(train_data[0])
**Prepare the data** The reviews (arrays of integers) must be converted to tensors before being fed into the neural network. This conversion can be done in two ways: * Convert the arrays into vectors of 0s and 1s indicating word occurrence, similar to one-hot encoding. For example, the sequence [3, 5] would become a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones. Then make this the first layer in the network: a dense layer that can handle floating-point vector data. This approach is memory intensive, though, requiring a matrix of size `num_words * num_reviews`. * Alternatively, we can pad the arrays so they all have the same length, then create an integer tensor of shape `max_length * num_reviews`. We can use an embedding layer capable of handling this shape as the first layer in our network. In this tutorial, we will use the second approach. Since the movie reviews must be the same length, we will use the [pad_sequences](https://tensorflow.google.cn/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) function to standardize the lengths: | train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256) | _____no_output_____ | Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
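For contrast, here is a minimal sketch of the first (multi-hot) approach described above. It is not used in the rest of this notebook, and the helper name is ours:

```python
import numpy as np

def multi_hot_encode(sequences, dimension=10000):
    # Create an all-zero matrix of shape (len(sequences), dimension)
    results = np.zeros((len(sequences), dimension))
    for i, word_indices in enumerate(sequences):
        # Set the positions named by the word indices to 1.0
        results[i, word_indices] = 1.0
    return results

# The sequence [3, 5] becomes a 10,000-dimensional vector that is
# all zeros except for indices 3 and 5.
encoded = multi_hot_encode([[3, 5]])
print(encoded.shape, encoded[0, :8])
```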
Now let's look at the length of the examples: | len(train_data[0]), len(train_data[1])
And inspect the (now padded) first review: | print(train_data[0])
4 173 36 256 5 25 100 43 838 112 50 670 2 9
35 480 284 5 150 4 172 112 167 2 336 385 39 4
172 4536 1111 17 546 38 13 447 4 192 50 16 6 147
2025 19 14 22 4 1920 4613 469 4 22 71 87 12 16
43 530 38 76 15 13 1247 4 22 17 515 17 12 16
626 18 2 5 62 386 12 8 316 8 106 5 4 2223
5244 16 480 66 3785 33 4 130 12 16 38 619 5 25
124 51 36 135 48 25 1415 33 6 22 12 215 28 77
52 5 14 407 16 82 2 8 4 107 117 5952 15 256
4 2 7 3766 5 723 36 71 43 530 476 26 400 317
46 7 4 2 1029 13 104 88 4 381 15 297 98 32
2071 56 26 141 6 194 7486 18 4 226 22 21 134 476
26 480 5 144 30 5535 18 51 36 28 224 92 25 104
4 226 65 16 38 1334 88 12 16 283 5 16 4472 113
103 32 15 16 5345 19 178 32 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0]
| Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
**Build the model** The neural network is created by stacking layers. This requires two main architectural decisions: * How many layers to use in the model? * How many *hidden units* to use for each layer? In this example, the input data consists of arrays of word indices. The labels to predict are either 0 or 1. Let's build a model for this problem: | # Input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary() | Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, None, 16) 160000
_________________________________________________________________
global_average_pooling1d (Gl (None, 16) 0
_________________________________________________________________
dense (Dense) (None, 16) 272
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 160,289
Trainable params: 160,289
Non-trainable params: 0
_________________________________________________________________
| Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
The layers are stacked sequentially to build the classifier: 1. The first layer is an `Embedding` layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word index. These vectors are learned as the model trains. The vectors add a dimension to the output array; the resulting dimensions are `(batch, sequence, embedding)`. 2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length in the simplest way possible. 3. This fixed-length output vector is piped through a fully connected (`Dense`) layer with 16 hidden units. 4. The last layer is densely connected with a single output node. Using the `sigmoid` activation function, its value is a float between 0 and 1, representing a probability or confidence level. **Hidden units** The above model has two intermediate or "hidden" layers between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer; in other words, the amount of freedom the network is allowed when learning an internal representation. If a model has more hidden units (a higher-dimensional representation space) and/or more layers, the network can learn more complex representations. However, this makes the network more computationally expensive and may lead to learning unwanted patterns: patterns that improve performance on the training data but not on the test data. This is called *overfitting*, and we will explore it later. **Loss function and optimizer** A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we will use the `binary_crossentropy` loss function. This is not the only choice of loss function; you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities: it measures the "distance" between probability distributions, or in our case, between the ground-truth distribution and the predictions. Later, when we explore regression problems (say, predicting the price of a house), we will see how to use another loss function called mean squared error. Now, configure the model to use an optimizer and a loss function: | model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy']) | _____no_output_____ | Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
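For illustration only (do not run this in place of the cell above), the alternative loss mentioned earlier would be a one-line change:

```python
# Generally a worse fit for probabilities, as discussed above:
model.compile(optimizer='adam',
              loss='mean_squared_error',
              metrics=['accuracy'])
```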
**Create a validation set** When training, we want to check the accuracy of the model on data it hasn't seen before. Create a *validation set* by setting apart 10,000 examples from the original training data. (Why not use the testing set now? Our goal is to develop and tune our model using only the training data, then use the test data just once to evaluate the accuracy.) | x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:] | _____no_output_____ | Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
**Train the model** Train the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations over all samples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 samples from the validation set: | history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1) | Train on 15000 samples, validate on 10000 samples
Epoch 1/40
15000/15000 [==============================] - 1s 99us/sample - loss: 0.6921 - accuracy: 0.5437 - val_loss: 0.6903 - val_accuracy: 0.6241
Epoch 2/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.6870 - accuracy: 0.7057 - val_loss: 0.6833 - val_accuracy: 0.7018
Epoch 3/40
15000/15000 [==============================] - 1s 54us/sample - loss: 0.6760 - accuracy: 0.7454 - val_loss: 0.6694 - val_accuracy: 0.7501
Epoch 4/40
15000/15000 [==============================] - 1s 53us/sample - loss: 0.6563 - accuracy: 0.7659 - val_loss: 0.6467 - val_accuracy: 0.7571
Epoch 5/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.6270 - accuracy: 0.7837 - val_loss: 0.6155 - val_accuracy: 0.7793
Epoch 6/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.5882 - accuracy: 0.7993 - val_loss: 0.5762 - val_accuracy: 0.7960
Epoch 7/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.5420 - accuracy: 0.8219 - val_loss: 0.5336 - val_accuracy: 0.8106
Epoch 8/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.4955 - accuracy: 0.8367 - val_loss: 0.4930 - val_accuracy: 0.8262
Epoch 9/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.4507 - accuracy: 0.8522 - val_loss: 0.4542 - val_accuracy: 0.8393
Epoch 10/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.4107 - accuracy: 0.8667 - val_loss: 0.4218 - val_accuracy: 0.8478
Epoch 11/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.3766 - accuracy: 0.8779 - val_loss: 0.3957 - val_accuracy: 0.8551
Epoch 12/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.3483 - accuracy: 0.8843 - val_loss: 0.3741 - val_accuracy: 0.8613
Epoch 13/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.3238 - accuracy: 0.8925 - val_loss: 0.3573 - val_accuracy: 0.8667
Epoch 14/40
15000/15000 [==============================] - 1s 54us/sample - loss: 0.3027 - accuracy: 0.8977 - val_loss: 0.3439 - val_accuracy: 0.8678
Epoch 15/40
15000/15000 [==============================] - 1s 54us/sample - loss: 0.2850 - accuracy: 0.9032 - val_loss: 0.3318 - val_accuracy: 0.8737
Epoch 16/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.2695 - accuracy: 0.9071 - val_loss: 0.3231 - val_accuracy: 0.8744
Epoch 17/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.2549 - accuracy: 0.9124 - val_loss: 0.3151 - val_accuracy: 0.8790
Epoch 18/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.2421 - accuracy: 0.9166 - val_loss: 0.3086 - val_accuracy: 0.8807
Epoch 19/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.2307 - accuracy: 0.9201 - val_loss: 0.3035 - val_accuracy: 0.8794
Epoch 20/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.2201 - accuracy: 0.9243 - val_loss: 0.2994 - val_accuracy: 0.8802
Epoch 21/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.2103 - accuracy: 0.9271 - val_loss: 0.2953 - val_accuracy: 0.8825
Epoch 22/40
15000/15000 [==============================] - 1s 53us/sample - loss: 0.2014 - accuracy: 0.9306 - val_loss: 0.2926 - val_accuracy: 0.8834
Epoch 23/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.1923 - accuracy: 0.9352 - val_loss: 0.2901 - val_accuracy: 0.8848
Epoch 24/40
15000/15000 [==============================] - 1s 53us/sample - loss: 0.1845 - accuracy: 0.9395 - val_loss: 0.2907 - val_accuracy: 0.8852
Epoch 25/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.1770 - accuracy: 0.9426 - val_loss: 0.2875 - val_accuracy: 0.8838
Epoch 26/40
15000/15000 [==============================] - 1s 52us/sample - loss: 0.1696 - accuracy: 0.9459 - val_loss: 0.2870 - val_accuracy: 0.8849
Epoch 27/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.1628 - accuracy: 0.9492 - val_loss: 0.2868 - val_accuracy: 0.8849
Epoch 28/40
15000/15000 [==============================] - 1s 65us/sample - loss: 0.1563 - accuracy: 0.9513 - val_loss: 0.2876 - val_accuracy: 0.8842
Epoch 29/40
15000/15000 [==============================] - 1s 65us/sample - loss: 0.1505 - accuracy: 0.9534 - val_loss: 0.2881 - val_accuracy: 0.8849
Epoch 30/40
15000/15000 [==============================] - 1s 62us/sample - loss: 0.1450 - accuracy: 0.9553 - val_loss: 0.2878 - val_accuracy: 0.8857
Epoch 31/40
15000/15000 [==============================] - 1s 60us/sample - loss: 0.1389 - accuracy: 0.9584 - val_loss: 0.2879 - val_accuracy: 0.8862
Epoch 32/40
15000/15000 [==============================] - 1s 62us/sample - loss: 0.1347 - accuracy: 0.9595 - val_loss: 0.2907 - val_accuracy: 0.8849
Epoch 33/40
15000/15000 [==============================] - 1s 61us/sample - loss: 0.1286 - accuracy: 0.9626 - val_loss: 0.2908 - val_accuracy: 0.8859
Epoch 34/40
15000/15000 [==============================] - 1s 59us/sample - loss: 0.1244 - accuracy: 0.9645 - val_loss: 0.2926 - val_accuracy: 0.8864
Epoch 35/40
15000/15000 [==============================] - 1s 59us/sample - loss: 0.1192 - accuracy: 0.9664 - val_loss: 0.2945 - val_accuracy: 0.8850
Epoch 36/40
15000/15000 [==============================] - 1s 61us/sample - loss: 0.1149 - accuracy: 0.9688 - val_loss: 0.2959 - val_accuracy: 0.8847
Epoch 37/40
15000/15000 [==============================] - 1s 60us/sample - loss: 0.1107 - accuracy: 0.9699 - val_loss: 0.2998 - val_accuracy: 0.8833
Epoch 38/40
15000/15000 [==============================] - 1s 59us/sample - loss: 0.1065 - accuracy: 0.9703 - val_loss: 0.3007 - val_accuracy: 0.8844
Epoch 39/40
15000/15000 [==============================] - 1s 62us/sample - loss: 0.1029 - accuracy: 0.9722 - val_loss: 0.3042 - val_accuracy: 0.8827
Epoch 40/40
15000/15000 [==============================] - 1s 69us/sample - loss: 0.0995 - accuracy: 0.9736 - val_loss: 0.3074 - val_accuracy: 0.8817
| Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
**Evaluate the model** Let's see how the model performs. Two values will be returned: loss (a number representing our error; lower is better) and accuracy. | results = model.evaluate(test_data, test_labels, verbose=2)
print(results) | 25000/1 - 1s - loss: 0.3459 - accuracy: 0.8727
[0.325805940823555, 0.87268]
| Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%. **Create a graph of accuracy and loss over time** `model.fit()` returns a `History` object that contains a dictionary with everything that happened during training: | history_dict = history.history
history_dict.keys() | _____no_output_____ | Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
There are four entries: one for each monitored metric during training and validation. We can use these entries to plot the training and validation loss, as well as the training and validation accuracy, for comparison. | import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# "b" is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear the figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show() | _____no_output_____ | Apache-2.0 | chapter2/2.3.2-text_classification.ipynb | wangxingda/Tensorflow-Handbook |
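As a short closing sketch (assuming the trained `model` and the padded `test_data` from the cells above are still in scope), we can score an individual review:

```python
# Predict the sentiment of the first test review; the model outputs
# P(positive) as a float between 0 and 1.
prediction = model.predict(test_data[:1])
print("P(positive) = {:.3f}, true label = {}".format(
    float(prediction[0][0]), test_labels[0]))
```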
**Just Plot It!** **Introduction: The System** In this course we will work with a set of "experimental" data to illustrate going from "raw" measurement (or simulation) data through exploratory visualization to an (almost) paper-ready figure. In this scenario, we have fabricated (or simulated) 25 cantilevers. There is some value (suggestively called "control") that varies between the cantilevers, and we want to see how the properties of the cantilever are affected by "control". To see what this will look like physically, take apart a "clicky" pen. Hold one end of the spring in your fingers and flick the free end. Or just watch this cat: | from IPython.display import YouTubeVideo
YouTubeVideo('4aTagDSnclk?start=19') | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
Springs, and our cantilevers, are part of a class of systems known as (damped) harmonic oscillators. To measure the natural frequency and damping rate, we deflect each cantilever by the same amount and then observe its position as a function of time as the vibrations damp out. **The Tools** We are going to make use of: - [jupyter](https://jupyter.org) - [numpy](https://numpy.org) - [matplotlib](https://matplotlib.org) - [scipy](https://www.scipy.org/scipylib/index.html) - [xarray](http://xarray.pydata.org/en/stable/index.html) - [pandas](https://pandas.pydata.org/docs/) We are only going to scratch the surface of what any of these libraries can do! For the purposes of this course we assume you know numpy and Matplotlib at least to the level of LINKS TO OTHER COURSES. We will only be using one aspect (least-squares fitting) from scipy, so no prior familiarity is needed. Similarly, we will only be superficially making use of pandas and xarray to provide access to structured data. No prior familiarity is required, and if you want to learn more see LINK TO OTHER COURSES. | # interactive figures, requires ipympl!
%matplotlib widget
#%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import xarray as xa | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
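As a quick reminder (a standard textbook form, stated here as an assumption about what `get_data` produces rather than taken from the course materials), the under-damped ring-down we expect each measurement to follow is

```latex
y(t) = a\, e^{-b t} \sin(\omega t + \phi)
```

where $a$ is the initial deflection, $b$ is the damping rate, $\omega$ is the natural (angular) frequency, and $\phi$ is a phase offset. These are the quantities the `fit` helper imported below is presumably estimating.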
**Philosophy** While this course uses Matplotlib for the visualization, the high-level lessons are transferable to any plotting tool (in any language). At its core, programming is the process of taking existing tools (libraries) and building new tools more fit to your purpose. This course will walk through a concrete example, starting with a pile of data and ending with a paper figure, of how to think about and design scientific visualization tools tuned to exactly *your* data and questions. **The Data: Accessing data** As a rule of thumb, I/O logic should be kept out of the inner loops of analysis or plotting. This will, in the medium term, lead to more re-usable and maintainable code. Remember: your most frequent collaborator is yourself in 6 months. Be kind to your (future) self and write re-usable, maintainable, and understandable code now ;) In this case, we have a data (simulation) function `get_data` that simulates the experiment and returns an [`xarray.DataArray`](http://xarray.pydata.org/en/stable/quick-overview.html#create-a-dataarray). An `xarray.DataArray` is (roughly) an N-dimensional numpy array enriched by the concept of coordinates and indexes on the axes, plus metadata. `xarray` has much more functionality than we will use in this course! | # make the helper scripts in ../scripts importable
import sys
sys.path.append('../scripts')
from data_gen import get_data, fit | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
**First look** Using the function `get_data` we can pull an `xarray.DataArray` into our namespace and then use the HTML repr from xarray to get a first look at the data: | d = get_data(25)
d | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
From this we can see that we have a, more or less, 2D array with 25 rows, each of which is a measurement that is a 4,112-point time series. Because this is a DataArray it also carries **coordinates** giving the value of **control** for each row and the time for each column. If we pull out just one row we can see a single experimental measurement. | d[6]
We can see that the **control** coordinate now gives 1 value, but the **time** coordinate is still a vector. We can access these values via attribute access (which we will use later): | d[6].control
d[6].time | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
**The Plotting** **Plot it?** Looking at (truncated) lists of numbers is not intuitive or informative for most people; to get a better sense of what this data looks like, let's plot it! We know that `Axes.plot` can plot multiple lines at once, so let's try naively throwing `d` at `ax.plot`! | fig, ax = plt.subplots()
ax.plot(d); | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
While this does look sort of cool, it is not *useful*. What has happened is that Matplotlib has looked at our `(25, 4_112)` array and said "Clearly, you have a table that is 4k columns wide and 25 rows long. What you want is each column plotted!". Thus, what we are seeing is "the deflection at a fixed time as a function of cantilever ID number". This plot does accurately reflect the data that we passed in, but it is a nearly meaningless plot! Visualization, just like writing, is a tool for communication and you need to think about the story you want to tell as you make the plots. **Sidebar: Explicit vs Implicit Matplotlib API** There are two related but distinct APIs for using Matplotlib: the "Explicit" (née "Object Oriented") and the "Implicit" (née "pyplot/pylab"). The Implicit API is implemented using the Explicit API; anything you can do with the Implicit API you can do with the Explicit API, but there is some functionality of the Explicit API that is not exposed through the Implicit API. It is also possible, but with one exception not suggested, to mix the two APIs. The core conceptual difference is that in the Implicit API Matplotlib has a notion of the "current figure" and "current axes" to which all of the calls are redirected. For example, the implementation of `plt.plot` (once you scroll past the docstring) is only 1 line: | ?? plt.plot
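A minimal sketch contrasting the two APIs (the data here is made up for illustration):

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)

# Implicit (pyplot) API: calls are routed to the "current" figure/axes.
plt.figure()
plt.plot(x, y)

# Explicit API: we hold references to the Figure and Axes objects
# and call methods on them directly.
fig, ax = plt.subplots()
ax.plot(x, y)
```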
While the Implicit API reduces the boilerplate required to get some things done and is convenient when working in a terminal, it comes at the cost of Matplotlib maintaining the global state of which Axes is currently active! When scripting, this can quickly become a headache to manage. When using Matplotlib with one of the GUI backends, we do need to, at the library level, keep track of some global state so that the plot windows remain responsive. If you are embedding Matplotlib in your own GUI application you are responsible for this, but when working at an IPython prompt, `pyplot` takes care of this for you. This course is going to, with the exception of creating new figures, always use the Explicit API. **Plot it!** What we really want to see is the transpose of the above (a line per experiment as a function of time): | fig, ax = plt.subplots()
ax.plot(d.T); | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
Which is better! If we squint a bit (or zoom in if we are using `ipympl` or a GUI backend) we can sort of see each of the individual oscillators ringing down over time. **Just one at a time** To make it easier to see, let's plot just one of the curves: | fig, ax = plt.subplots()
ax.plot(d[6]); | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
**Pass freshman physics** While we do have just one line on the axes and can see what is going on, this plot would, rightly, be given little-to-no credit if turned in as part of a freshman physics lab! We have no meaningful values on the x-axis, no legend, and no axis labels! | fig, ax = plt.subplots()
m = d[6]
ax.plot(m.time, m, label=f'control = {float(m.control):.1f}')
ax.set_xlabel('time (ms)')
ax.set_ylabel('displacement (mm)')
ax.legend(); | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
At this point we have a minimally acceptable plot! It shows us one curve with axis labels (with units!) and a legend. **Sidebar: xarray plotting** Because xarray knows more about the structure of your data than a couple of numpy arrays in your local namespace or a dictionary, it can make smarter choices about the automatic visualization: | fig, ax = plt.subplots()
m.plot(ax=ax) | _____no_output_____ | MIT | notebooks/00_just_plot_it.ipynb | NFAcademy/2021_course_dev-tacaswell |
8. Classification[Data Science Playlist on YouTube](https://www.youtube.com/watch?v=VLKEj9EN2ew&list=PLLBUgWXdTBDg1Qgmwt4jKtVn9BWh5-zgy)[](https://www.youtube.com/watch?v=VLKEj9EN2ew&list=PLLBUgWXdTBDg1Qgmwt4jKtVn9BWh5-zgy "Python Data Science")**Classification** predicts *discrete labels (outcomes)* such as `yes`/`no`, `True`/`False`, or any number of discrete levels such as a letter from text recognition, or a word from speech recognition. There are two main methods for training classifiers: unsupervised and supervised learning. The difference between the two is that unsupervised learning does not use labels while supervised learning uses labels to build the classifier. The goal of unsupervised learning is to cluster input features but without labels to guide the grouping.  Supervised Learning to Classify Numbers A dataset that is included with sklearn is a set of 1797 images of numbers that are 64 pixels (8x8) each. Each image is labeled with the correct answer. A Support Vector Classifier is trained on the first half of the images. | from sklearn import datasets, svm
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
# train classifier
digits = datasets.load_digits()
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
svc = svm.SVC(gamma=0.001)
X_train, X_test, y_train, y_test = train_test_split(
data, digits.target, test_size=0.5, shuffle=False)
svc.fit(X_train, y_train)
print('SVC Trained') | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 Test Number Classifier The classifier is tested on 10 randomly selected images from the other half of the data set to evaluate the training. Run the classifier test until you observe a misclassified number. | plt.figure(figsize=(10,4))
for i in range(10):
n = np.random.randint(int(n_samples/2),n_samples)
predict = svc.predict(digits.data[n:n+1])[0]
plt.subplot(2,5,i+1)
plt.imshow(digits.images[n], cmap=plt.cm.gray_r, interpolation='nearest')
plt.text(0,7,'Actual: ' + str(digits.target[n]),color='r')
plt.text(0,1,'Predict: ' + str(predict),color='b')
if predict==digits.target[n]:
plt.text(0,4,'Correct',color='g')
else:
plt.text(0,4,'Incorrect',color='orange')
plt.show() | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
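Beyond spot-checking ten images, the overall accuracy on the full held-out half gives a quick summary (a minimal sketch, assuming the `svc`, `X_test`, and `y_test` objects from the training cell above): | from sklearn import metrics
# Accuracy over the entire held-out half of the digits data
predicted = svc.predict(X_test)
print('Test accuracy:', metrics.accuracy_score(y_test, predicted)) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |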
 Classification with Supervised Learning Select a data set option: `moons`, `circles`, or `blobs`. Run the following cell to generate the data that will be used to test the classifiers. | option = 'moons' # moons, circles, or blobs
n = 2000 # number of data points
X = np.random.random((n,2))
mixing = 0.0 # add random mixing element to data
xplot = np.linspace(0,1,100)
if option=='moons':
X, y = datasets.make_moons(n_samples=n,noise=0.1)
yplot = xplot*0.0
elif option=='circles':
X, y = datasets.make_circles(n_samples=n,noise=0.1,factor=0.5)
yplot = xplot*0.0
elif option=='blobs':
X, y = datasets.make_blobs(n_samples=n,centers=[[-5,3],[5,-3]],cluster_std=2.0)
yplot = xplot*0.0
# Split into train and test subsets (50% each)
XA, XB, yA, yB = train_test_split(X, y, test_size=0.5, shuffle=False)
# Plot regression results
def assess(P):
plt.figure()
plt.scatter(XB[P==1,0],XB[P==1,1],marker='^',color='blue',label='True')
plt.scatter(XB[P==0,0],XB[P==0,1],marker='x',color='red',label='False')
plt.scatter(XB[P!=yB,0],XB[P!=yB,1],marker='s',color='orange',\
alpha=0.5,label='Incorrect')
plt.legend() | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 S.1 Logistic Regression **Definition:** Logistic regression is a machine learning algorithm for classification. In this algorithm, the probabilities describing the possible outcomes of a single trial are modelled using a logistic function. **Advantages:** Logistic regression is designed for this purpose (classification), and is most useful for understanding the influence of several independent variables on a single outcome variable. **Disadvantages:** Works only when the predicted variable is binary, assumes all predictors are independent of each other, and assumes data is free of missing values. | from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(solver='lbfgs')
lr.fit(XA,yA)
yP = lr.predict(XB)
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
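For reference, the logistic (sigmoid) function that converts the linear score into a probability, $P(y{=}1|x) = 1/(1+e^{-(w \cdot x + b)})$, can be sketched directly (assuming `np` from the imports above): | # The sigmoid squashes any real-valued score into the interval (0, 1)
sigmoid = lambda z: 1.0/(1.0 + np.exp(-z))
print(sigmoid(0.0), sigmoid(4.0), sigmoid(-4.0))  # 0.5, ~0.98, ~0.02 | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |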
 S.2 Naïve Bayes **Definition:** The Naive Bayes algorithm is based on Bayes’ theorem with the assumption of independence between every pair of features. Naive Bayes classifiers work well in many real-world situations such as document classification and spam filtering. **Advantages:** This algorithm requires a small amount of training data to estimate the necessary parameters. Naive Bayes classifiers are extremely fast compared to more sophisticated methods. **Disadvantages:** Naive Bayes is known to be a poor estimator of class probabilities. | from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(XA,yA)
yP = nb.predict(XB)
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 S.3 Stochastic Gradient Descent **Definition:** Stochastic gradient descent is a simple and very efficient approach to fit linear models. It is particularly useful when the number of samples is very large. It supports different loss functions and penalties for classification. **Advantages:** Efficiency and ease of implementation. **Disadvantages:** Requires a number of hyper-parameters and it is sensitive to feature scaling. | from sklearn.linear_model import SGDClassifier
sgd = SGDClassifier(loss='modified_huber', shuffle=True,random_state=101)
sgd.fit(XA,yA)
yP = sgd.predict(XB)
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 S.4 K-Nearest Neighbours **Definition:** Neighbours-based classification is a type of lazy learning as it does not attempt to construct a general internal model, but simply stores instances of the training data. Classification is computed from a simple majority vote of the k nearest neighbours of each point. **Advantages:** This algorithm is simple to implement, robust to noisy training data, and effective if training data is large. **Disadvantages:** Need to determine the value of `K`, and the computation cost is high as it needs to compute the distance of each instance to all the training samples. One possible solution to determine `K` is to add a feedback loop to determine the number of neighbors. | from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(XA,yA)
yP = knn.predict(XB)
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 S.5 Decision Tree **Definition:** Given data with attributes together with their classes, a decision tree produces a sequence of rules that can be used to classify the data. **Advantages:** Decision Tree is simple to understand and visualise, requires little data preparation, and can handle both numerical and categorical data. **Disadvantages:** Decision tree can create complex trees that do not generalise well, and decision trees can be unstable because small variations in the data might result in a completely different tree being generated. | from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(max_depth=10,random_state=101,\
max_features=None,min_samples_leaf=5)
dtree.fit(XA,yA)
yP = dtree.predict(XB)
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 S.6 Random Forest **Definition:** Random forest classifier is a meta-estimator that fits a number of decision trees on various sub-samples of datasets and uses averaging to improve the predictive accuracy of the model and control over-fitting. The sub-sample size is always the same as the original input sample size but the samples are drawn with replacement. **Advantages:** Reduction in over-fitting, and the random forest classifier is more accurate than decision trees in most cases. **Disadvantages:** Slow real-time prediction, difficult to implement, and a complex algorithm. | from sklearn.ensemble import RandomForestClassifier
rfm = RandomForestClassifier(n_estimators=70,oob_score=True,\
n_jobs=1,random_state=101,max_features=None,\
min_samples_leaf=3) #change min_samples_leaf from 30 to 3
rfm.fit(XA,yA)
yP = rfm.predict(XB)
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 S.7 Support Vector Classifier **Definition:** Support vector machine is a representation of the training data as points in space separated into categories by a clear gap that is as wide as possible. New examples are then mapped into that same space and predicted to belong to a category based on which side of the gap they fall. **Advantages:** Effective in high dimensional spaces and uses a subset of training points in the decision function so it is also memory efficient. **Disadvantages:** The algorithm does not directly provide probability estimates; these are calculated using an expensive five-fold cross-validation. | from sklearn.svm import SVC
svm = SVC(gamma='scale', C=1.0, random_state=101)
svm.fit(XA,yA)
yP = svm.predict(XB)
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 S.8 Neural Network The `MLPClassifier` implements a multi-layer perceptron (MLP) algorithm that trains using backpropagation. **Definition:** A neural network is a set of neurons (activation functions) in layers that are processed sequentially to relate an input to an output. **Advantages:** Effective in nonlinear spaces where the structure of the relationship is not linear. No prior knowledge or specialized equation structure is required, although there are different network architectures that may lead to a better result. **Disadvantages:** Neural networks do not extrapolate well outside of the training domain. They may also take longer to train, adjusting the parameter weights to minimize a loss (objective) function. It is also more challenging to explain the outcome of the training, and changes in initialization or number of epochs (iterations) may lead to different results. Too many epochs may lead to overfitting, especially if there are excess parameters beyond the minimum needed to capture the input-to-output relationship. MLP trains on two arrays: array X of size (n_samples, n_features), which holds the training samples represented as floating point feature vectors; and array y of size (n_samples,), which holds the target values (class labels) for the training samples. MLP can fit a non-linear model to the training data; `clf.coefs_` contains the weight matrices that constitute the model parameters. Currently, MLPClassifier supports only the cross-entropy loss function, which allows probability estimates by running the `predict_proba` method. Training uses some form of gradient descent, with the gradients calculated using backpropagation. For classification, it minimizes the cross-entropy loss function, giving a vector of probability estimates. MLPClassifier supports multi-class classification by applying softmax as the output function. Further, the model supports multi-label classification in which a sample can belong to more than one class. For each class, the raw output passes through the logistic function; values larger than or equal to 0.5 are rounded to 1, otherwise to 0. For a predicted output of a sample, the indices where the value is 1 represent the assigned classes of that sample. | from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(solver='lbfgs',alpha=1e-5,max_iter=200,activation='relu',\
hidden_layer_sizes=(10,30,10), random_state=1, shuffle=True)
clf.fit(XA,yA)
yP = clf.predict(XB)
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
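To see the probability outputs and 0.5 thresholding described above, a small illustration using the fitted `clf` (a sketch, not part of the original notebook): | # Probability estimates from the trained MLP and a 0.5 threshold
proba = clf.predict_proba(XB[:3])   # one row per sample, one column per class
print(proba)
print((proba >= 0.5).astype(int))   # round at 0.5 as described above | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |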
 Unsupervised Classification Additional examples show the potential for unsupervised learning to classify the groups. Unsupervised learning does not use the labels (`True`/`False`), so the results may need to be switched to align with the test set using `if len(XB[yP!=yB]) > n/4: yP = 1 - yP`  U.1 K-Means Clustering **Definition:** Specify how many possible clusters (or K) there are in the dataset. The algorithm then iteratively moves the K centers and selects the datapoints that are closest to that centroid in the cluster. **Advantages:** The most common and simplest clustering algorithm. **Disadvantages:** Must specify the number of clusters, although this can typically be determined by increasing the number of clusters until the objective function does not change significantly. | from sklearn.cluster import KMeans
km = KMeans(n_clusters=2)
km.fit(XA)
yP = km.predict(XB)
if len(XB[yP!=yB]) > n/4: yP = 1 - yP
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 U.2 Gaussian Mixture Model **Definition:** Data points that exist at the boundary of clusters may simply have similar probabilities of belonging to either cluster. A mixture model predicts a probability instead of a hard classification such as K-Means clustering. **Advantages:** Incorporates uncertainty into the solution. **Disadvantages:** Uncertainty may not be desirable for some applications. This method is not as common as the K-Means method for clustering. | from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(n_components=2)
gmm.fit(XA)
yP = gmm.predict_proba(XB) # produces probabilities
if len(XB[np.round(yP[:,0])!=yB]) > n/4: yP = 1 - yP
assess(np.round(yP[:,0])) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 U.3 Spectral Clustering **Definition:** Spectral clustering is known as segmentation-based object categorization. It is a technique with roots in graph theory, where communities of nodes in a graph are identified based on the edges connecting them. The method is flexible and allows clustering of non-graph data as well. It uses information from the eigenvalues of special matrices built from the graph or the data set. **Advantages:** Flexible approach for finding clusters when data doesn’t meet the requirements of other common algorithms. **Disadvantages:** For large-sized graphs, the second eigenvalue of the (normalized) graph Laplacian matrix is often ill-conditioned, leading to slow convergence of iterative eigenvalue solvers. Spectral clustering is computationally expensive unless the graph is sparse and the similarity matrix can be efficiently constructed. | from sklearn.cluster import SpectralClustering
sc = SpectralClustering(n_clusters=2,eigen_solver='arpack',\
affinity='nearest_neighbors')
yP = sc.fit_predict(XB) # No separation between fit and predict calls
# need to fit and predict on same dataset
if len(XB[yP!=yB]) > n/4: yP = 1 - yP
assess(yP) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
 TCLab Activity Train a classifier to predict if the heater is on (100%) or off (0%). Generate 10 minutes of data at 1-second intervals. If you do not have a TCLab, use one of the sample data sets.- [Sample Data Set 1 (10 min)](http://apmonitor.com/do/uploads/Main/tclab_data5.txt): http://apmonitor.com/do/uploads/Main/tclab_data5.txt - [Sample Data Set 2 (60 min)](http://apmonitor.com/do/uploads/Main/tclab_data6.txt): http://apmonitor.com/do/uploads/Main/tclab_data6.txt | # 10 minute data collection
import tclab, time
import numpy as np
import pandas as pd
with tclab.TCLab() as lab:
n = 600; on=100; t = np.linspace(0,n-1,n)
Q1 = np.zeros(n); T1 = np.zeros(n)
Q2 = np.zeros(n); T2 = np.zeros(n)
Q1[20:41]=on; Q1[60:91]=on; Q1[150:181]=on
Q1[190:206]=on; Q1[220:251]=on; Q1[260:291]=on
Q1[300:316]=on; Q1[340:351]=on; Q1[400:431]=on
    Q1[500:521]=on; Q1[540:571]=on
print('Time Q1 Q2 T1 T2')
for i in range(n):
T1[i] = lab.T1; T2[i] = lab.T2
lab.Q1(Q1[i])
if i%5==0:
print(int(t[i]),Q1[i],Q2[i],T1[i],T2[i])
time.sleep(1)
data = np.column_stack((t,Q1,Q2,T1,T2))
data8 = pd.DataFrame(data,columns=['Time','Q1','Q2','T1','T2'])
data8.to_csv('08-tclab.csv',index=False) | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
Use the data file `08-tclab.csv` to train and test the classifier. Select and scale (0-1) the features of the data including `T1`, `T2`, and the 1st and 2nd derivatives of `T1`. Use the measured temperatures, derivatives, and heater value label to create a classifier that predicts when the heater is on or off. Validate the classifier with new data that was not used for training. Starting code is provided below but does not include `T2` as a feature input. **Add `T2` as an input feature to the classifier. Does it improve the classifier performance?** (A sketch of this change follows the starter code below.) | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
try:
data = pd.read_csv('08-tclab.csv')
except:
print('Warning: Unable to load 08-tclab.csv, using online data')
url = 'http://apmonitor.com/do/uploads/Main/tclab_data5.txt'
data = pd.read_csv(url)
# Input Features: Temperature and 1st / 2nd Derivatives
# Cubic polynomial fit of temperature using 10 data points
data['dT1'] = np.zeros(len(data))
data['d2T1'] = np.zeros(len(data))
for i in range(len(data)):
if i<len(data)-10:
x = data['Time'][i:i+10]-data['Time'][i]
y = data['T1'][i:i+10]
p = np.polyfit(x,y,3)
# evaluate derivatives at mid-point (5 sec)
t = 5.0
data['dT1'][i] = 3.0*p[0]*t**2 + 2.0*p[1]*t+p[2]
data['d2T1'][i] = 6.0*p[0]*t + 2.0*p[1]
else:
data['dT1'][i] = np.nan
data['d2T1'][i] = np.nan
# Remove last 10 values
X = np.array(data[['T1','dT1','d2T1']][0:-10])
y = np.array(data[['Q1']][0:-10])
# Scale data
# Input features (Temperature and 2nd derivative at 5 sec)
s1 = MinMaxScaler(feature_range=(0,1))
Xs = s1.fit_transform(X)
# Output labels (heater On / Off)
ys = [True if y[i]>50.0 else False for i in range(len(y))]
# Split into train and test subsets (50% each)
XA, XB, yA, yB = train_test_split(Xs, ys, \
test_size=0.5, shuffle=False)
# Supervised Classification
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
# Create supervised classification models
lr = LogisticRegression(solver='lbfgs') # Logistic Regression
nb = GaussianNB() # Naïve Bayes
sgd = SGDClassifier(loss='modified_huber', shuffle=True,\
random_state=101) # Stochastic Gradient Descent
knn = KNeighborsClassifier(n_neighbors=5) # K-Nearest Neighbors
dtree = DecisionTreeClassifier(max_depth=10,random_state=101,\
max_features=None,min_samples_leaf=5) # Decision Tree
rfm = RandomForestClassifier(n_estimators=70,oob_score=True,n_jobs=1,\
random_state=101,max_features=None,min_samples_leaf=3) # Random Forest
svm = SVC(gamma='scale', C=1.0, random_state=101) # Support Vector Classifier
clf = MLPClassifier(solver='lbfgs',alpha=1e-5,max_iter=200,\
activation='relu',hidden_layer_sizes=(10,30,10),\
random_state=1, shuffle=True) # Neural Network
models = [lr,nb,sgd,knn,dtree,rfm,svm,clf]
# Supervised learning
yP = [None]*(len(models)+3) # 3 for unsupervised learning
for i,m in enumerate(models):
m.fit(XA,yA)
yP[i] = m.predict(XB)
# Unsupervised learning modules
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.cluster import SpectralClustering
km = KMeans(n_clusters=2)
gmm = GaussianMixture(n_components=2)
sc = SpectralClustering(n_clusters=2,eigen_solver='arpack',\
affinity='nearest_neighbors')
km.fit(XA)
yP[8] = km.predict(XB)
gmm.fit(XA)
yP[9] = gmm.predict_proba(XB)[:,0]
yP[10] = sc.fit_predict(XB)
plt.figure(figsize=(10,7))
gs = gridspec.GridSpec(3, 1, height_ratios=[1,1,5])
plt.subplot(gs[0])
plt.plot(data['Time']/60,data['T1'],'r-',\
label='Temperature (°C)')
plt.ylabel('T (°C)')
plt.legend()
plt.subplot(gs[1])
plt.plot(data['Time']/60,data['dT1'],'b:',\
label='dT/dt (°C/sec)')
plt.plot(data['Time']/60,data['d2T1'],'k--',\
label=r'$d^2T/dt^2$ ($°C^2/sec^2$)')
plt.ylabel('Derivatives')
plt.legend()
plt.subplot(gs[2])
plt.plot(data['Time']/60,data['Q1']/100,'k-',\
label='Heater (On=1/Off=0)')
t2 = data['Time'][len(yA):-10].values
desc = ['Logistic Regression','Naïve Bayes','Stochastic Gradient Descent',\
'K-Nearest Neighbors','Decision Tree','Random Forest',\
'Support Vector Classifier','Neural Network',\
'K-Means Clustering','Gaussian Mixture Model','Spectral Clustering']
for i in range(11):
plt.plot(t2/60,yP[i]-i-1,label=desc[i])
plt.ylabel('Heater')
plt.legend()
plt.xlabel(r'Time (min)')
plt.show() | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |
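One possible way to add `T2` as an input feature, as referenced in the exercise statement (a sketch that assumes the starter script above has already run; only the feature matrix changes, and the scaling, split, and model fits are then repeated unchanged): | # Hypothetical modification: include T2 alongside T1 and its derivatives
X = np.array(data[['T1','T2','dT1','d2T1']][0:-10])
y = np.array(data[['Q1']][0:-10])
# Rescale and relabel exactly as before
s1 = MinMaxScaler(feature_range=(0,1))
Xs = s1.fit_transform(X)
ys = [True if y[i]>50.0 else False for i in range(len(y))]
XA, XB, yA, yB = train_test_split(Xs, ys, test_size=0.5, shuffle=False)
# Re-run the supervised and unsupervised fits above with the new XA/XB | _____no_output_____ | MIT | 08. Classification.ipynb | monocilindro/data_science |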
Data Science - Linear Regression Bonus Importing our model | import pickle
modelo = open('../Exercicio/modelo_preço','rb')
lm_new = pickle.load(modelo)
modelo.close()
area = 38
garagem = 2
banheiros = 4
lareira = 4
marmore = 0
andares = 1
entrada = [[area, garagem, banheiros, lareira, marmore, andares]]
print('$ {0:.2f}'.format(lm_new.predict(entrada)[0])) | _____no_output_____ | MIT | reg-linear/Bonus/Simulador Interativo.ipynb | DiegoVialle/Regressao-Linear-Testando-Relacoes-e-Prevendo-Resultados |
Example of an interactive simulator for Jupyter https://ipywidgets.readthedocs.io/en/stable/index.html https://github.com/jupyter-widgets/ipywidgets | # Importing libraries
from ipywidgets import widgets, HBox, VBox
from IPython.display import display
# Creating the form controls
area = widgets.Text(description="Área")
garagem = widgets.Text(description="Garagem")
banheiros = widgets.Text(description="Banheiros")
lareira = widgets.Text(description="Lareira")
marmore = widgets.Text(description="Mármore?")
andares = widgets.Text(description="Andares?")
botao = widgets.Button(description="Simular")
# Positioning the controls
left = VBox([area, banheiros, marmore])
right = VBox([garagem, lareira, andares])
inputs = HBox([left, right])
# Simulation function
def simulador(sender):
entrada=[[
float(area.value if area.value else 0),
float(garagem.value if garagem.value else 0),
float(banheiros.value if banheiros.value else 0),
float(lareira.value if lareira.value else 0),
float(marmore.value if marmore.value else 0),
float(andares.value if andares.value else 0)
]]
print('$ {0:.2f}'.format(lm_new.predict(entrada)[0]))
# Attaching the "simulador" function to the button's click event
botao.on_click(simulador)
display(inputs, botao) | _____no_output_____ | MIT | reg-linear/Bonus/Simulador Interativo.ipynb | DiegoVialle/Regressao-Linear-Testando-Relacoes-e-Prevendo-Resultados |
Installing & importing necessary libs | !pip3 install ktrain
import numpy as np
import pandas as pd
from sklearn import metrics
import transformers
import torch
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from transformers import AlbertTokenizer, AlbertModel, AlbertConfig
from tqdm.notebook import tqdm
from transformers import get_linear_schedule_with_warmup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
torch.cuda.get_device_name(0) | _____no_output_____ | MIT | albert-base/albert-baseline.ipynb | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon |
Data Preprocessing | df = pd.read_csv("../input/avjantahack/data/train.csv")
df['list'] = df[df.columns[3:]].values.tolist()
new_df = df[['ABSTRACT', 'list']].copy()
new_df.head() | _____no_output_____ | MIT | albert-base/albert-baseline.ipynb | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon |
Model configurations | # Defining some key variables that will be used later on in the training
MAX_LEN = 512
TRAIN_BATCH_SIZE = 16
VALID_BATCH_SIZE = 8
EPOCHS = 5
LEARNING_RATE = 3e-05
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') | _____no_output_____ | MIT | albert-base/albert-baseline.ipynb | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon |
Custom Dataset Class | class CustomDataset(Dataset):
def __init__(self, dataframe, tokenizer, max_len):
self.tokenizer = tokenizer
self.data = dataframe
self.abstract = dataframe.ABSTRACT
self.targets = self.data.list
self.max_len = max_len
def __len__(self):
return len(self.abstract)
def __getitem__(self, index):
abstract = str(self.abstract[index])
abstract = " ".join(abstract.split())
inputs = self.tokenizer.encode_plus(
abstract,
None,
add_special_tokens = True,
max_length = self.max_len,
pad_to_max_length = True,
return_token_type_ids=True,
truncation = True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
token_type_ids = inputs['token_type_ids']
return{
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
'targets': torch.tensor(self.targets[index], dtype=torch.float)
}
train_size = 0.8
train_dataset=new_df.sample(frac=train_size,random_state=200)
test_dataset=new_df.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)
print("FULL Dataset: {}".format(new_df.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(test_dataset.shape))
training_set = CustomDataset(train_dataset, tokenizer, MAX_LEN)
testing_set = CustomDataset(test_dataset, tokenizer, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE,
'shuffle': True,
'num_workers': 0
}
test_params = {'batch_size': VALID_BATCH_SIZE,
'shuffle': True,
'num_workers': 0
}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params) | _____no_output_____ | MIT | albert-base/albert-baseline.ipynb | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon |
Albert model | class AlbertClass(torch.nn.Module):
def __init__(self):
super(AlbertClass, self).__init__()
self.albert = transformers.AlbertModel.from_pretrained('albert-base-v2')
self.drop = torch.nn.Dropout(0.1)
self.linear = torch.nn.Linear(768, 6)
def forward(self, ids, mask, token_type_ids):
        _, output = self.albert(ids, attention_mask=mask, token_type_ids=token_type_ids)
output = self.drop(output)
output = self.linear(output)
return output
model = AlbertClass()
model.to(device) | _____no_output_____ | MIT | albert-base/albert-baseline.ipynb | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon |
Hyperparameters & Loss function | def loss_fn(outputs, targets):
return torch.nn.BCEWithLogitsLoss()(outputs, targets)
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.001,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(optimizer_parameters, lr=1e-5)
num_training_steps = int(len(train_dataset) / TRAIN_BATCH_SIZE * EPOCHS)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps = 0,
num_training_steps = num_training_steps
) | _____no_output_____ | MIT | albert-base/albert-baseline.ipynb | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon |
Train & Eval Functions | def train(epoch):
model.train()
for _,data in tqdm(enumerate(training_loader, 0), total=len(training_loader)):
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
token_type_ids = data['token_type_ids'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.float)
outputs = model(ids, mask, token_type_ids)
loss = loss_fn(outputs, targets)
if _%1000==0:
print(f'Epoch: {epoch}, Loss: {loss.item()}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
def validation(epoch):
model.eval()
fin_targets=[]
fin_outputs=[]
with torch.no_grad():
for _, data in tqdm(enumerate(testing_loader, 0), total=len(testing_loader)):
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
token_type_ids = data['token_type_ids'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.float)
outputs = model(ids, mask, token_type_ids)
fin_targets.extend(targets.cpu().detach().numpy().tolist())
fin_outputs.extend(torch.sigmoid(outputs).cpu().detach().numpy().tolist())
return fin_outputs, fin_targets | _____no_output_____ | MIT | albert-base/albert-baseline.ipynb | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon |
Training Model | MODEL_PATH = "/kaggle/working/albert-multilabel-model.bin"
best_micro = 0
for epoch in range(EPOCHS):
train(epoch)
outputs, targets = validation(epoch)
outputs = np.array(outputs) >= 0.5
accuracy = metrics.accuracy_score(targets, outputs)
f1_score_micro = metrics.f1_score(targets, outputs, average='micro')
f1_score_macro = metrics.f1_score(targets, outputs, average='macro')
print(f"Accuracy Score = {accuracy}")
print(f"F1 Score (Micro) = {f1_score_micro}")
print(f"F1 Score (Macro) = {f1_score_macro}")
if f1_score_micro > best_micro:
torch.save(model.state_dict(), MODEL_PATH)
best_micro = f1_score_micro
def predict(id, abstract):
MAX_LENGTH = 512
inputs = tokenizer.encode_plus(
abstract,
None,
add_special_tokens=True,
        max_length=MAX_LENGTH,
pad_to_max_length=True,
return_token_type_ids=True,
truncation = True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
token_type_ids = inputs['token_type_ids']
ids = torch.tensor(ids, dtype=torch.long).unsqueeze(0)
mask = torch.tensor(mask, dtype=torch.long).unsqueeze(0)
token_type_ids = torch.tensor(token_type_ids, dtype=torch.long).unsqueeze(0)
ids = ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
with torch.no_grad():
outputs = model(ids, mask, token_type_ids)
outputs = torch.sigmoid(outputs).squeeze()
outputs = np.round(outputs.cpu().numpy())
out = np.insert(outputs, 0, id)
return out
def submit():
test_df = pd.read_csv('../input/avjantahack/data/test.csv')
sample_submission = pd.read_csv('../input/avjantahack/data/sample_submission_UVKGLZE.csv')
y = []
for id, abstract in tqdm(zip(test_df['ID'], test_df['ABSTRACT']),
total=len(test_df)):
out = predict(id, abstract)
y.append(out)
y = np.array(y)
submission = pd.DataFrame(y, columns=sample_submission.columns).astype(int)
return submission
submission = submit()
submission
submission.to_csv('/kaggle/working/alberta-tuned-lr-ws-dr.csv', index=False) | _____no_output_____ | MIT | albert-base/albert-baseline.ipynb | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon |
Droplet Evaporation | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
# Ethyl Acetate
#time_in_sec = np.array([0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110])
#diameter = np.array([2.79,2.697,2.573,2.542,2.573,2.48,2.449,2.449,2.387,2.356,2.263,2.232,2.201,2.139,1.82,1.426,1.178,1.085,0.992,0.496,0.403,0.372,0.11])
# Gasoline
#time_in_min = np.array([0,15,30,45,60,75,90,105,120,135,150,165,180,210,235,250,265])
#diameter = np.array([2,1.85,1.82,1.8,1.77,1.74,1.72,1.68,1.57,1.3,1.166,1.091,0.94,0.81,0.74,0.66,0.59]) | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
Ethyl Acetate | time_in_sec = np.array([0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110])
diameter = np.array([2.79,2.697,2.573,2.542,2.573,2.48,2.449,2.449,2.387,2.356,2.263,2.232,2.201,2.139,1.82,1.426,1.178,1.085,0.992,0.496,0.403,0.372,0.11])
x = time_in_sec.tolist()
y = diameter.tolist()
polynomial_coeff_1=np.polyfit(x,y,1)
polynomial_coeff_2=np.polyfit(x,y,2)
polynomial_coeff_3=np.polyfit(x,y,3)
xnew=np.linspace(0,110 ,100)
ynew_1=np.poly1d(polynomial_coeff_1)
ynew_2=np.poly1d(polynomial_coeff_2)
ynew_3=np.poly1d(polynomial_coeff_3)
plt.plot(x,y,'o')
plt.plot(xnew,ynew_1(xnew))
plt.plot(xnew,ynew_2(xnew))
plt.plot(xnew,ynew_3(xnew))
print(ynew_1)
print(ynew_2)
print(ynew_3)
plt.title("Diameter vs Time(s)")
plt.xlabel("Time(s)")
plt.ylabel("Diameter")
plt.show()
# Coeficients
# LINEAR : -0.02386 x + 3.139
# QUADRATIC : -0.0002702 x^2 + 0.005868 x + 2.619
# CUBIC : -4.771e-07 x^3 - 0.0001915 x^2 + 0.002481 x + 2.646
#
# Using Desmos to find the roots of the best fit polynomials
# Root of linear fit = 131.559
# Root of quadratic fit = 109.908
# Root of cubic fit = 109.414
def d_square_law(x, C, n):
y = C/(x**n)
return y | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
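As a cross-check on the fits, the classical d-square law states that the droplet diameter squared decreases linearly in time, $d^2(t) = d_0^2 - \lambda t$, so the vaporization time scales as $t_{vap} = d_0^2/\lambda$; in the power-law form $t = C d^{-n}$ fitted below, that corresponds to $n \approx -2$. The roots quoted from Desmos in the comments can also be reproduced programmatically (a sketch using the polynomial coefficients computed above): | # Positive real root of each best-fit polynomial = extrapolated vaporization time
for label, coeffs in [('linear', polynomial_coeff_1),
                      ('quadratic', polynomial_coeff_2),
                      ('cubic', polynomial_coeff_3)]:
    roots = np.roots(coeffs)
    positive = [r.real for r in roots if abs(r.imag) < 1e-9 and r.real > 0]
    print(label, positive) | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |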
Linear Fit | # Calculating time taken for vaporization for different diameters. (LINEAR FIT)
diameter = np.array([2.79,2.697,2.573,2.542,2.573,2.48,2.449,2.449,2.387,2.356,2.263,2.232,2.201,2.139,1.82,1.426,1.178,1.085,0.992,0.496,0.403,0.372,0.11])
time_in_sec = np.array([0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110])
t_vap = time_in_sec
t_vap = t_vap*0
t_vap = t_vap + 131.559
t_vap = t_vap - time_in_sec
print(t_vap.tolist())
# Finding C and n for d-square law
#initial_diameter = np.array([2.79,2.697,2.573,2.542,2.573,2.48,2.449,2.449,2.387,2.356,2.263,2.232,2.201,2.139,1.82,1.426,1.178,1.085,0.992,0.496,0.403,0.372,0.11])
#vap_time = np.array([109.908, 104.908, 99.908, 94.908, 89.908, 84.908, 79.908, 74.908, 69.908, 64.908, 59.908, 54.908, 49.908, 44.908, 39.908, 34.908, 29.908, 24.908, 19.908, 14.908000000000001, 9.908000000000001, 4.908000000000001, -0.09199999999999875])
# Linear
initial_diameter = np.array([2.79,2.697,2.573,2.542,2.573,2.48,2.449,2.449,2.387,2.356,2.263,2.232,2.201,2.139,1.82,1.426,1.178,1.085,0.992,0.496,0.403,0.372,0.11])
vap_time_lin = np.array([131.559, 126.559, 121.559, 116.559, 111.559, 106.559, 101.559, 96.559, 91.559, 86.559, 81.559, 76.559, 71.559, 66.559, 61.559, 56.559, 51.559, 46.559, 41.559, 36.559, 31.558999999999997, 26.558999999999997, 21.558999999999997])
# Linear
parameters_lin = optimize.curve_fit(d_square_law, xdata = initial_diameter, ydata = vap_time_lin)[0]
print("Linear : ",parameters_lin)
#C = parameters_lin[0]
#n = parameters_lin[1] | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
Quadratic Fit | # Calculating time taken for vaporization for different diameters. (QUADRATIC FIT)
diameter = np.array([2.79,2.697,2.573,2.542,2.573,2.48,2.449,2.449,2.387,2.356,2.263,2.232,2.201,2.139,1.82,1.426,1.178,1.085,0.992,0.496,0.403,0.372,0.11])
time_in_sec = np.array([0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110])
t_vap = time_in_sec
t_vap = t_vap*0
t_vap = t_vap + 109.908
t_vap = t_vap - time_in_sec
print(t_vap.tolist())
# Quadratic Fit
initial_diameter = np.array([2.79,2.697,2.573,2.542,2.573,2.48,2.449,2.449,2.387,2.356,2.263,2.232,2.201,2.139,1.82,1.426,1.178,1.085,0.992,0.496,0.403,0.372])
vap_time_quad = np.array([109.908, 104.908, 99.908, 94.908, 89.908, 84.908, 79.908, 74.908, 69.908, 64.908, 59.908, 54.908, 49.908, 44.908, 39.908, 34.908, 29.908, 24.908, 19.908, 14.908000000000001, 9.908000000000001, 4.908000000000001])
# Quadratic
parameters_quad = optimize.curve_fit(d_square_law, xdata = initial_diameter, ydata = vap_time_quad)[0]
print("Linear : ",parameters_quad)
#C = parameters_lin[0]
#n = parameters_lin[1] | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
 Ethyl Acetate - After fitting the d-square law | # Linear
C = 41.72856231
n = -0.97941652
# Quadratic
# C = 11.6827828
# n = -2.13925924
x = vap_time_lin.tolist()
y = initial_diameter.tolist()
ynew=np.linspace(0,3 ,100)
xnew=[]
for item in ynew:
v1 = C/(item**n)
xnew.append(v1)
plt.plot(x,y,'o')
plt.plot(xnew,ynew)
plt.title("Initial Diameter vs Vaporization Time(s)")
plt.xlabel("Vaporization Time(s)")
plt.ylabel("Initial Diameter")
plt.show() | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
Gasoline | time_in_min = np.array([0,15,30,45,60,75,90,105,120,135,150,165,180,210,235,250,265])
diameter = np.array([2,1.85,1.82,1.8,1.77,1.74,1.72,1.68,1.57,1.3,1.166,1.091,0.94,0.81,0.74,0.66,0.59])
x = time_in_min.tolist()
y = diameter.tolist()
polynomial_coeff_1=np.polyfit(x,y,1)
polynomial_coeff_2=np.polyfit(x,y,2)
polynomial_coeff_3=np.polyfit(x,y,3)
xnew=np.linspace(0,300 ,100)
ynew_1=np.poly1d(polynomial_coeff_1)
ynew_2=np.poly1d(polynomial_coeff_2)
ynew_3=np.poly1d(polynomial_coeff_3)
plt.plot(x,y,'o')
plt.plot(xnew,ynew_1(xnew))
plt.plot(xnew,ynew_2(xnew))
plt.plot(xnew,ynew_3(xnew))
print(ynew_1)
print(ynew_2)
print(ynew_3)
plt.title("Diameter vs Time(min)")
plt.xlabel("Time(min)")
plt.ylabel("Diameter")
plt.show()
# Coeficients
# LINEAR : -0.005637 x + 2.074
# QUADRATIC : -6.67e-06 x^2 - 0.003865 x + 2
# CUBIC : 1.481e-07 x^3 - 6.531e-05 x^2 + 0.00207 x + 1.891
#
# Using Desmos to find the roots of the best fit polynomials
# Root of linear fit = 367.926
# Root of quadratic fit = 329.781
# Root of cubic fit = No Positive Root | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
Linear Fit | # Calculating time taken for vaporization for different diameters. (LINEAR FIT)
time_in_min = np.array([0,15,30,45,60,75,90,105,120,135,150,165,180,210,235,250,265])
diameter = np.array([2,1.85,1.82,1.8,1.77,1.74,1.72,1.68,1.57,1.3,1.166,1.091,0.94,0.81,0.74,0.66,0.59])
t_vap = time_in_min
t_vap = t_vap*0
t_vap = t_vap + 367.926
t_vap = t_vap - time_in_min
print(t_vap.tolist())
initial_diameter_g_lin = np.array([2,1.85,1.82,1.8,1.77,1.74,1.72,1.68,1.57,1.3,1.166,1.091,0.94,0.81,0.74,0.66,0.59])
vap_time_g_lin = np.array([367.926, 352.926, 337.926, 322.926, 307.926, 292.926, 277.926, 262.926, 247.926, 232.926, 217.926, 202.926, 187.926, 157.926, 132.926, 117.92599999999999, 102.92599999999999])
parameters_g_lin = optimize.curve_fit(d_square_law, xdata = initial_diameter_g_lin, ydata = vap_time_g_lin)[0]
print(parameters_g_lin)
C_g = parameters_g_lin[0]
n_g = parameters_g_lin[1] | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
Quadratic Fit | # Calculating time taken for vaporization for different diameters.
time_in_min = np.array([0,15,30,45,60,75,90,105,120,135,150,165,180,210,235,250,265])
diameter = np.array([2,1.85,1.82,1.8,1.77,1.74,1.72,1.68,1.57,1.3,1.166,1.091,0.94,0.81,0.74,0.66,0.59])
t_vap = time_in_min
t_vap = t_vap*0
t_vap = t_vap + 329.781
t_vap = t_vap - time_in_min
print(t_vap.tolist())
initial_diameter_g_quad = np.array([2,1.85,1.82,1.8,1.77,1.74,1.72,1.68,1.57,1.3,1.166,1.091,0.94,0.81,0.74,0.66,0.59])
vap_time_g_quad = np.array([329.781, 314.781, 299.781, 284.781, 269.781, 254.781, 239.781, 224.781, 209.781, 194.781, 179.781, 164.781, 149.781, 119.781, 94.781, 79.781, 64.781])
parameters_g_quad = optimize.curve_fit(d_square_law, xdata = initial_diameter_g_quad, ydata = vap_time_g_quad)[0]
print(parameters_g_quad)
C_g = parameters_g_quad[0]
n_g = parameters_g_quad[1] | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
 Gasoline - After finding Vaporization Time Data | # Linear
C_g = 140.10666889
n_g = -1.1686059
# Quadratic: substitute the values printed from parameters_g_quad above
x_g = vap_time_g_lin.tolist()
y_g = initial_diameter_g_lin.tolist()
ynew_g=np.linspace(0,2.2 ,100)
xnew_g=[]
for item in ynew_g:
v1 = C_g/(item**n_g)
xnew_g.append(v1)
print(ynew_g)
print(xnew_g)
plt.plot(x_g,y_g,'o')
plt.plot(xnew_g,ynew_g)
plt.title("Initial Diameter vs Vaporization Time(min)")
plt.xlabel("Vaporization Time(min)")
plt.ylabel("Initial Diameter")
plt.show() | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
Optimization Methods (IGNORE) | import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
time_in_sec = np.array([5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110])
diameter = np.array([2.697,2.573,2.542,2.573,2.48,2.449,2.449,2.387,2.356,2.263,2.232,2.201,2.139,1.82,1.426,1.178,1.085,0.992,0.496,0.403,0.372,0.11])
def func(x, a, b):
y = a/(x**b)
return y
parameters = optimize.curve_fit(func, xdata = time_in_sec, ydata = diameter)[0]
print(parameters)
C = parameters[0]
n = parameters[1]
plt.plot(time_in_sec,diameter,'o',label='data')
y_new = []
for val in time_in_sec:
v1 = C/(val**n)
y_new.append(v1)
plt.plot(time_in_sec,y_new,'-',label='fit')
log_time = np.log(time_in_sec)
log_d = np.log(diameter)
print(log_d)
print(log_time)
x = log_time.tolist()
y = log_d.tolist()
polynomial_coeff=np.polyfit(x,y,1)
xnew=np.linspace(2.5,6,100)
ynew=np.poly1d(polynomial_coeff)
plt.plot(xnew,ynew(xnew),x,y,'o')
print(ynew)
plt.title("log(diameter) vs log(Time(s))")
plt.xlabel("log(Time(s))")
plt.ylabel("log(diameter)")
plt.show() | _____no_output_____ | MIT | AS2520 Propulsion Lab/Experiment 6 - Droplet Evaporation/re-work-notebook.ipynb | kirtan2605/Coursework-Codes |
NLP with Bert for Sentiment Analysis Importing Libraries | !pip3 install ktrain
import os.path
import numpy as np
import pandas as pd
import tensorflow as tf
import ktrain
from ktrain import text | _____no_output_____ | Unlicense | Special. NLP_with_BERT.ipynb | Samrath49/AI_ML_DL |
Part 1: Data Preprocessing Loading the IMDB dataset | dataset = tf.keras.utils.get_file(fname = "aclImdb_v1.tar",
origin = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar",
extract = True)
IMDB_DATADIR = os.path.join(os.path.dirname(dataset), 'aclImdb')
print(os.path.dirname(dataset))
print(IMDB_DATADIR) | /root/.keras/datasets
/root/.keras/datasets/aclImdb
| Unlicense | Special. NLP_with_BERT.ipynb | Samrath49/AI_ML_DL |
Creating the training & test sets | (X_train, y_train), (X_test, y_test), preproc = text.texts_from_folder(datadir = IMDB_DATADIR,
classes = ['pos','neg'],
maxlen = 500,
train_test_names = ['train', 'test'],
preprocess_mode = 'bert') | detected encoding: utf-8
downloading pretrained BERT model (uncased_L-12_H-768_A-12.zip)...
[██████████████████████████████████████████████████]
extracting pretrained BERT model...
done.
cleanup downloaded zip...
done.
preprocessing train...
language: en
| Unlicense | Special. NLP_with_BERT.ipynb | Samrath49/AI_ML_DL |
Part 2: Building the BERT model | model = text.text_classifier(name = 'bert',
train_data = (X_train, y_train),
preproc = preproc) | Is Multi-Label? False
maxlen is 500
done.
| Unlicense | Special. NLP_with_BERT.ipynb | Samrath49/AI_ML_DL |
Part 3: Training the BERT model | learner = ktrain.get_learner(model = model,
train_data = (X_train, y_train),
val_data = (X_test, y_test),
batch_size = 6)
learner.fit_onecycle(lr=2e-5,
epochs = 1) |
begin training using onecycle policy with max lr of 2e-05...
4167/4167 [==============================] - 3436s 820ms/step - loss: 0.3313 - accuracy: 0.8479 - val_loss: 0.1619 - val_accuracy: 0.9383
| Unlicense | Special. NLP_with_BERT.ipynb | Samrath49/AI_ML_DL |
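 Part 4: Making Predictions A possible next step (not in the original notebook) is to wrap the trained learner in a ktrain predictor for inference on raw text; `ktrain.get_predictor` and `predictor.predict` are the standard ktrain calls for this: | # Bundle the trained model with its preprocessing for raw-text inference
predictor = ktrain.get_predictor(learner.model, preproc)
sample_review = "The movie was absolutely wonderful, I enjoyed every minute."
print(predictor.predict(sample_review))  # expected label: 'pos' | _____no_output_____ | Unlicense | Special. NLP_with_BERT.ipynb | Samrath49/AI_ML_DL |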
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.