diff --git a/02_activities/assignments/assignment_2.ipynb b/02_activities/assignments/assignment_2.ipynb
index cae78bbbe..5008f2cdd 100644
--- a/02_activities/assignments/assignment_2.ipynb
+++ b/02_activities/assignments/assignment_2.ipynb
@@ -403,10 +403,34 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 53,
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test Performance Metrics:\n",
+      "{'neg_log_loss': 0.3248655516773495, 'roc_auc': np.float64(0.9101514010597728), 'accuracy': 0.8592486436687481, 'balanced_accuracy': np.float64(0.7513226869137615)}\n"
+     ]
+    }
+   ],
+   "source": [
+    "model_pipeline.fit(X_train, Y_train)\n",
+    "Y_pred_proba = model_pipeline.predict_proba(X_test)\n",
+    "\n",
+    "# Calculating the required metrics\n",
+    "test_metrics = {\n",
+    "    'neg_log_loss': log_loss(Y_test, Y_pred_proba),\n",
+    "    'roc_auc': roc_auc_score(Y_test, Y_pred_proba[:, 1]),\n",
+    "    'accuracy': accuracy_score(Y_test, model_pipeline.predict(X_test)),\n",
+    "    'balanced_accuracy': balanced_accuracy_score(Y_test, model_pipeline.predict(X_test))\n",
+    "}\n",
+    "\n",
+    "# Displaying the test metrics as a dictionary\n",
+    "print(\"Test Performance Metrics:\")\n",
+    "print(test_metrics)"
+   ]
   },
   {
    "cell_type": "markdown",