# Bagging ensemble: 10 random forests trained on bootstrap resamples of the
# training set; predictions are aggregated by majority vote.
# FIX: the `base_estimator` keyword was deprecated in scikit-learn 1.2 and
# removed in 1.4 — the current keyword is `estimator`.
bagging_model = BaggingClassifier(estimator=RandomForestClassifier(), n_estimators=10)
bagging_model.fit(X_train, y_train)
bagging_predictions = bagging_model.predict(X_test)
# AdaBoost ensemble of 50 sequentially reweighted decision trees.
# FIX: the `base_estimator` keyword was deprecated in scikit-learn 1.2 and
# removed in 1.4 — the current keyword is `estimator`.
# NOTE(review): an unrestricted DecisionTreeClassifier grows full-depth trees;
# AdaBoost's own default is a depth-1 stump (max_depth=1). Confirm full-depth
# base learners are intended here — they often overfit under boosting.
adaboost_model = AdaBoostClassifier(estimator=DecisionTreeClassifier(), n_estimators=50)
adaboost_model.fit(X_train, y_train)
adaboost_predictions = adaboost_model.predict(X_test)
# Gradient boosting: 100 depth-1 stumps with an aggressive learning rate of
# 1.0; random_state pins the fit for reproducibility.
gb_params = {
    "n_estimators": 100,
    "learning_rate": 1.0,
    "max_depth": 1,
    "random_state": 42,
}
gradient_boost_model = GradientBoostingClassifier(**gb_params)
gradient_boost_model.fit(X_train, y_train)
gradient_boost_predictions = gradient_boost_model.predict(X_test)
# XGBoost classifier: 100 depth-3 trees at a conservative 0.1 learning rate;
# random_state pins the fit. `.fit()` returns the estimator itself, so the
# construct-and-train step is chained into one expression.
xgboost_model = XGBClassifier(
    n_estimators=100,
    learning_rate=0.1,
    max_depth=3,
    random_state=42,
).fit(X_train, y_train)
xgboost_predictions = xgboost_model.predict(X_test)
# Heterogeneous base learners, stacked under a logistic-regression meta-model
# that learns how to combine their out-of-fold predictions.
# base_models is also reused below for the voting ensemble.
_names = ("rf", "svc", "lr")
_learners = (RandomForestClassifier(), SVC(), LogisticRegression())
base_models = list(zip(_names, _learners))
stacking_model = StackingClassifier(
    estimators=base_models,
    final_estimator=LogisticRegression(),
)
stacking_model.fit(X_train, y_train)
stacking_predictions = stacking_model.predict(X_test)
# Feed-forward network with dropout regularisation: 4 input features ->
# 128-unit ReLU layer -> 50% dropout -> 64-unit ReLU layer -> 3-way softmax.
# Built layer by layer with .add(), which is equivalent to passing the layer
# list to the Sequential constructor.
dropout_model = Sequential()
dropout_model.add(Dense(128, input_dim=4, activation='relu'))
dropout_model.add(Dropout(0.5))
dropout_model.add(Dense(64, activation='relu'))
dropout_model.add(Dense(3, activation='softmax'))
# Integer class labels -> sparse categorical cross-entropy.
dropout_model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
# verbose=0 keeps the 50-epoch fit silent.
dropout_model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
# evaluate returns [loss, accuracy]; the loss is discarded.
_, dropout_accuracy = dropout_model.evaluate(X_test, y_test)
# Hard-voting ensemble over the same base learners used for stacking:
# each estimator casts one vote and the majority class wins.
# `.fit()` returns the estimator, so construction and training are chained.
voting_model = VotingClassifier(
    estimators=base_models,
    voting='hard',
).fit(X_train, y_train)
voting_predictions = voting_model.predict(X_test)
# Baseline support-vector classifier with default hyperparameters,
# trained and kept as a standalone comparison model.
svm_model = SVC().fit(X_train, y_train)
svm_predictions = svm_model.predict(X_test)
# Baseline single decision tree with default hyperparameters, kept as a
# standalone comparison against the ensembles above.
dt_model = DecisionTreeClassifier().fit(X_train, y_train)
dt_predictions = dt_model.predict(X_test)