rr committed on
Commit
eca1edf
·
1 Parent(s): 937f294

Delete jijifi.txt

Browse files
Files changed (1) hide show
  1. jijifi.txt +0 -247
jijifi.txt DELETED
@@ -1,247 +0,0 @@
# K-Means clustering with an elbow plot to choose k.
# Importing libraries.
from sklearn.cluster import KMeans
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt

# Import dataset. NOTE(review): the original path "C:datasets\.csv" has no
# file stem and used an invalid "\." escape; the raw string keeps the exact
# same bytes without the DeprecationWarning — confirm the real file name.
df = pd.read_csv(r"C:datasets\.csv")
df.head()

# Raw scatter of the two features we will cluster on.
plt.scatter(df.Age, df.Income)
plt.xlabel('Age')
plt.ylabel('Income')

# Scale both features to [0, 1] so neither dominates the distance metric.
# fit_transform replaces the original separate fit + transform pairs.
scaler = MinMaxScaler()
df['Income'] = scaler.fit_transform(df[['Income']])
df['Age'] = scaler.fit_transform(df[['Age']])

# Elbow plot: sum of squared errors (inertia) for k = 1..9.
sse = []
k_rng = range(1, 10)
for k in k_rng:
    km = KMeans(n_clusters=k)
    km.fit(df[['Age', 'Income']])
    sse.append(km.inertia_)
plt.xlabel('K')
plt.ylabel('Sum of squared error')
plt.plot(k_rng, sse)

# Final model with k = 3, chosen from the elbow plot above.
km = KMeans(n_clusters=3)
y_predicted = km.fit_predict(df[['Age', 'Income']])
y_predicted
km.cluster_centers_
df['cluster'] = y_predicted
df.head()

# One frame per cluster, plotted in distinct colours with the centroids.
df1 = df[df.cluster == 0]
df2 = df[df.cluster == 1]
df3 = df[df.cluster == 2]

plt.scatter(df1.Age, df1.Income, color='black')
plt.scatter(df2.Age, df2.Income, color='orange')
plt.scatter(df3.Age, df3.Income, color='maroon')
plt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
            color='purple', marker='*', label='centroid')
plt.xlabel('Age')
plt.ylabel('Income in $')
# Association Rule Mining with the Apriori algorithm (mlxtend).
# (If mlxtend is missing: pip install --no-binary :all: mlxtend)
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules

# Five example market-basket transactions.
dataset = [["Milk", "Cola", "Beer"],
           ["Milk", "Pepsi", "Juice"],
           ["Milk", "Beer"],
           ["Cola", "Juice"],
           ["Milk", "Pepsi", "Beer"]]
dataset

# One-hot encode the transactions into a boolean item matrix.
encoder = TransactionEncoder()
onehot = encoder.fit(dataset).transform(dataset)
df = pd.DataFrame(onehot, columns=encoder.columns_)
df

# Frequent itemsets: keep those appearing in at least half of the baskets.
fq_is = apriori(df, min_support=0.5, use_colnames=True)
fq_is

# Derive confidence-based rules from the frequent itemsets.
rules_ap = association_rules(fq_is, metric="confidence", min_threshold=0.01)
rules_ap
## Decision tree classifier on the lung-cancer dataset.
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn import tree  # duplicate import of tree removed below

df = pd.read_csv('C:/Users/datasets/lung cancer.csv')
df.head()
df.isnull().any()  # sanity check for missing values

# Encode the two categorical columns as integers.
le_GENDER = LabelEncoder()
le_LUNG_CANCER = LabelEncoder()

df['Gender'] = le_GENDER.fit_transform(df['GENDER'])
df['LungCancer'] = le_LUNG_CANCER.fit_transform(df['LUNG_CANCER'])

df.head()
df = df.drop(['GENDER', 'LUNG_CANCER'], axis=1)
df.head()

# Split into feature matrix and target column.
inputs = df.drop('LungCancer', axis=1)
target = df['LungCancer']
inputs.head()
print(target)

model = tree.DecisionTreeClassifier()
model.fit(inputs, target)
# NOTE(review): scoring on the training data overstates accuracy;
# a held-out test split would give an honest estimate.
model.score(inputs, target)

# Predict for one hand-written sample (15 feature values in column order).
print(model.predict([[63, 1, 2, 2, 1, 2, 1, 1, 2, 1, 2, 1, 1, 2, 2]]))
tree.plot_tree(model)
## SVM on a bank-loan dataset, with confusion matrix and ROC/AUC plot.
# Libraries.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import sklearn
from sklearn import model_selection
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc

# read_csv already returns a DataFrame, so the old pd.DataFrame(bl) wrapper
# was redundant and has been dropped.
bl = pd.read_csv("C:/fifi.csv")
# Drop columns not used as features. axis must be a keyword: the positional
# second argument to DataFrame.drop was removed in pandas 2.0.
bl_data = bl.drop(['address', 'ed', 'debtinc', 'employ'], axis=1)
bl_data.head(3)

x = bl_data.drop('default', axis=1)
y = bl_data.default
x.head()
y.head()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=0)

# Linear SVM with penalty C=10 and gamma='auto'; probability=True enables
# predict_proba, which the ROC curve below requires.
svct = SVC(kernel='linear',
           C=10, gamma='auto',
           probability=True).fit(x_train, y_train)

print(svct)
y_pred = svct.predict(x_test)
y_pred
cnf = confusion_matrix(y_test, y_pred)
cnf

# ROC Curve with AUC plotting.
# Probability of the positive (1) class for each test row.
preds1 = svct.predict_proba(x_test)[:, 1]
print(preds1)
fpr1, tpr1, thresholds1 = metrics.roc_curve(y_test, preds1)
fpr1
tpr1
thresholds1
df1 = pd.DataFrame(dict(fpr=fpr1, tpr=tpr1))
# Named roc_auc so it no longer shadows the imported auc() function.
roc_auc = metrics.auc(fpr1, tpr1)

# Plotting.
plt.figure()
plt.plot(fpr1, tpr1, color='darkorange', label='ROC CURVE(area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC example')
plt.legend(loc='lower right')
plt.show()
## SVM on the iris dataset (two features only).
from sklearn import svm
import pandas as pd

df = pd.read_csv("C:datasets/iris_df.csv")
df.head(5)
# Rename columns, then keep only X1/X2 as features and Y as the label.
df.columns = ['X4', 'X3', 'X1', 'X2', 'Y']
# axis must be a keyword: positional axis was removed in pandas 2.0.
df = df.drop(['X4', 'X3'], axis=1)
df.head()
from sklearn.model_selection import train_test_split

x = df.drop("Y", axis=1)
y = df.Y

support = svm.SVC()

# 70/30 random split, then fit and report held-out accuracy.
trainx, testx, trainy, testy = train_test_split(x, y, test_size=0.3)
support.fit(trainx, trainy)
# Fixed '/n' -> '\n': the original printed a literal "/n" instead of a newline.
print('Accuracy:\n', support.score(testx, testy))

pred = support.predict(testx)
## Simple linear regression: house price as a function of area.
import pandas as pd
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt

df = pd.read_csv("C:datasets/house_price.csv")
df.head()

# Scatter of the raw data.
plt.xlabel('area')
plt.ylabel('price')
plt.scatter(df.area, df.price, color='blue')

# Fit price ~ area.
reg = linear_model.LinearRegression()
reg.fit(df[['area']], df.price)

# Predict the price of a 3100-unit area; inspect slope and intercept.
reg.predict([[3100]])
reg.coef_
reg.intercept_

# Replot the data together with the fitted regression line.
plt.xlabel('area')
plt.ylabel('price')
plt.scatter(df.area, df.price, color='green')
plt.plot(df.area, reg.predict(df[['area']]), color='red')
## Linear Regression with multiple variables (area, bedrooms, age).
# pandas / numpy are already imported earlier in this script.
from sklearn import linear_model

df = pd.read_csv("C:/house_pricemodel.csv")
df.head()
df.isnull()

# Fill missing bedroom counts with the column median.
df.bedrooms.median()
df.bedrooms = df.bedrooms.fillna(df.bedrooms.median())
df

# Fit price against every remaining column.
reg = linear_model.LinearRegression()
reg.fit(df.drop('price', axis=1), df.price)
reg.coef_

# Predict: 3200 area units, 3 bedrooms, 10 years old.
reg.predict([[3200, 3, 10]])
## Simple logistic regression: did the customer buy insurance, given age?
# pandas / matplotlib are already imported earlier in this script.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

df = pd.read_csv("C:/Users//Datasets/insurance_age.csv")
df.head()
plt.scatter(df.age, df.bought_insurance, marker='*', color='purple')

# 80/20 train/test split on the single feature "age".
x_train, x_test, y_train, y_test = train_test_split(df[['age']], df.bought_insurance, train_size=0.8)

model = LogisticRegression()
model.fit(x_train, y_train)

# Held-out predictions, class probabilities, and accuracy.
y_predicted = model.predict(x_test)
model.predict_proba(x_test)
model.score(x_test, y_test)
y_predicted

# Single prediction for a 45-year-old.
model.predict([[45]])
## Gradient descent for simple linear regression (y = m*x + b).
import numpy as np
import matplotlib.pyplot as plt


def gradient_descent(x, y):
    """Fit y = m*x + b by batch gradient descent and return (m, b).

    x, y : 1-D numpy arrays of equal length.

    Prints m, b, cost at every iteration (as the original did) and now also
    RETURNS the final coefficients — the original discarded them, which made
    the fit unusable programmatically.
    """
    m_curr = b_curr = 0
    iterations = 10000
    n = len(x)  # number of samples
    learning_rate = 0.001

    for i in range(iterations):
        y_predicted = m_curr * x + b_curr
        # Mean squared error at the current coefficients.
        cost = (1 / n) * sum([val ** 2 for val in (y - y_predicted)])
        md = -(2 / n) * sum(x * (y - y_predicted))  # dCost/dm
        bd = -(2 / n) * sum(y - y_predicted)        # dCost/db
        m_curr = m_curr - learning_rate * md
        b_curr = b_curr - learning_rate * bd
        # Print each iteration's values.
        print("m {}, b {}, cost {} iteration {}".format(m_curr, b_curr, cost, i))
    return m_curr, b_curr


x = np.array([1, 2, 3, 4, 5])
y = np.array([5, 7, 9, 11, 13])  # exact relation: y = 2*x + 3
gradient_descent(x, y)