# CHD.py: coronary heart disease (CHD) classification on the Framingham heart study data.
import pandas as pd
import numpy as np

# Load the data; the last column is the ten-year CHD outcome used as the target.
dataset = pd.read_csv("framingham_heart_disease.csv")
# dataset = dataset.dropna()  # alternative to the imputation below: drop rows with missing values
from sklearn.feature_selection import SelectKBest, f_classif
# Features: the first 15 columns; target: column 15 (TenYearCHD), kept as a 1-D series.
X = dataset.iloc[:, :15]
Y = dataset.iloc[:, 15]
# Drop columns not used as predictors.
X = X.drop(columns=['education', 'currentSmoker'])
# Fill missing values in all columns after the first with each column's most frequent value.
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy='most_frequent')
X.iloc[:, 1:] = imputer.fit_transform(X.iloc[:, 1:])
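# Added sanity check (not in the original script): confirm the imputed columns no
# longer contain any missing values before fitting the models below.
assert not X.iloc[:, 1:].isna().any().any(), "imputation left missing values behind"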
'''Feature Selection 1: univariate ANOVA F-test scores'''
bestfeatures = SelectKBest(score_func=f_classif, k='all')
fit = bestfeatures.fit(X, Y)
scores1 = pd.DataFrame(fit.scores_)
scores1.to_csv('scorereport1.csv', index=False)
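# Added sketch (not in the original script): the same F-scores paired with their
# column names, which makes the report easier to read. The output file name
# 'scorereport1_labelled.csv' is arbitrary.
labelled_f_scores = pd.DataFrame({'feature': X.columns, 'f_score': fit.scores_})
labelled_f_scores.to_csv('scorereport1_labelled.csv', index=False)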
'''Feature Selection 2: ExtraTrees feature importances'''
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier()
model.fit(X, Y)
scores2 = pd.DataFrame(model.feature_importances_)
scores2.to_csv('fs2.csv', index=False)
'''Feature Selection 3: mutual information between each feature and the target'''
from sklearn.feature_selection import mutual_info_classif
mi_scores = mutual_info_classif(X, Y, discrete_features='auto', n_neighbors=3, copy=True, random_state=None)
scores3 = pd.DataFrame(mi_scores)
scores3.to_csv('fs3.csv', index=False)
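# Added sketch (not in the original script): one labelled table collecting all three
# feature-selection scores side by side so the rankings can be compared directly.
# The output file name 'feature_scores_combined.csv' is arbitrary.
combined_scores = pd.DataFrame({
    'feature': X.columns,
    'anova_f': fit.scores_,
    'tree_importance': model.feature_importances_,
    'mutual_info': mi_scores,
})
combined_scores.to_csv('feature_scores_combined.csv', index=False)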
# Optional: also drop the weakest predictors suggested by the score reports (left disabled;
# 'education' is already dropped above).
# X = X.drop(columns=['heartRate', 'cigsPerDay'])
# Hold out 20% of the rows as a test set.
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
'''
Optional standardisation (disabled). If enabled, fit the scaler on the training data
only and apply the same transform to the test data:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
'''
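# Added sketch (not in the original script, kept disabled like the block above): if
# scaling is wanted, a Pipeline keeps the fit-on-train / transform-on-test discipline
# automatic. Shown with logistic regression purely as an illustration.
# from sklearn.pipeline import make_pipeline
# from sklearn.preprocessing import StandardScaler
# from sklearn.linear_model import LogisticRegression
# scaled_lr = make_pipeline(StandardScaler(), LogisticRegression(random_state=0))
# scaled_lr.fit(X_train, Y_train)
# print(scaled_lr.score(X_test, Y_test))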
# Fit three candidate models. Note that `classifier` is re-bound each time, so the
# evaluation below reports only the last model fitted (the decision tree).
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, Y_train)

from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(random_state=0)
classifier.fit(X_train, Y_train)

from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(random_state=0)
classifier.fit(X_train, Y_train)
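# Added sketch (not in the original script): evaluate all three candidates on the
# held-out set instead of only the last one fitted. Names and structure here are an
# illustration, not part of the original pipeline.
candidate_models = {
    'logistic_regression': LogisticRegression(random_state=0),
    'random_forest': RandomForestClassifier(random_state=0),
    'decision_tree': DecisionTreeClassifier(random_state=0),
}
for name, candidate in candidate_models.items():
    candidate.fit(X_train, Y_train)
    print(name, 'test accuracy:', candidate.score(X_test, Y_test))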
# Evaluate the last fitted classifier with accuracy, a confusion matrix and a
# classification report on both the training and the test split.
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report, f1_score
from sklearn.metrics import confusion_matrix

acc_train = accuracy_score(Y_train, classifier.predict(X_train))
cm_train = confusion_matrix(Y_train, classifier.predict(X_train))
print(classification_report(Y_train, classifier.predict(X_train)))

Y_pred = classifier.predict(X_test)
cm_test = confusion_matrix(Y_test, Y_pred)
acc_test = accuracy_score(Y_test, Y_pred)
print(classification_report(Y_test, Y_pred))
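# Added sketch (not in the original script): the positive CHD class is typically the
# minority class in this dataset, so accuracy alone can look flattering. ROC-AUC on
# predicted probabilities gives a threshold-independent complement, shown here for the
# last fitted model.
from sklearn.metrics import roc_auc_score
print('test ROC-AUC:', roc_auc_score(Y_test, classifier.predict_proba(X_test)[:, 1]))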