{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "_uuid": "a2ef2622d9d982f44f73097c44a0618969909c4c", "execution": { "iopub.execute_input": "2023-02-14T00:29:11.369209Z", "iopub.status.busy": "2023-02-14T00:29:11.368879Z", "iopub.status.idle": "2023-02-14T00:29:14.248926Z", "shell.execute_reply": "2023-02-14T00:29:14.247959Z", "shell.execute_reply.started": "2023-02-14T00:29:11.369151Z" } }, "outputs": [ { "data": { "application/javascript": [ "(function(root) {\n", " function now() {\n", " return new Date();\n", " }\n", "\n", " var force = true;\n", " var py_version = '3.4.3'.replace('rc', '-rc.').replace('.dev', '-dev.');\n", " var reloading = false;\n", " var Bokeh = root.Bokeh;\n", "\n", " if (typeof (root._bokeh_timeout) === \"undefined\" || force) {\n", " root._bokeh_timeout = Date.now() + 5000;\n", " root._bokeh_failed_load = false;\n", " }\n", "\n", " function run_callbacks() {\n", " try {\n", " root._bokeh_onload_callbacks.forEach(function(callback) {\n", " if (callback != null)\n", " callback();\n", " });\n", " } finally {\n", " delete root._bokeh_onload_callbacks;\n", " }\n", " console.debug(\"Bokeh: all callbacks have finished\");\n", " }\n", "\n", " function load_libs(css_urls, js_urls, js_modules, js_exports, callback) {\n", " if (css_urls == null) css_urls = [];\n", " if (js_urls == null) js_urls = [];\n", " if (js_modules == null) js_modules = [];\n", " if (js_exports == null) js_exports = {};\n", "\n", " root._bokeh_onload_callbacks.push(callback);\n", "\n", " if (root._bokeh_is_loading > 0) {\n", " console.debug(\"Bokeh: BokehJS is being loaded, scheduling callback at\", now());\n", " return null;\n", " }\n", " if (js_urls.length === 0 && js_modules.length === 0 && Object.keys(js_exports).length === 0) {\n", " run_callbacks();\n", " return null;\n", " }\n", " if (!reloading) {\n", " console.debug(\"Bokeh: BokehJS not loaded, scheduling load and callback at\", now());\n", " }\n", "\n", " function on_load() {\n", " 
root._bokeh_is_loading--;\n", " if (root._bokeh_is_loading === 0) {\n", " console.debug(\"Bokeh: all BokehJS libraries/stylesheets loaded\");\n", " run_callbacks()\n", " }\n", " }\n", " window._bokeh_on_load = on_load\n", "\n", " function on_error() {\n", " console.error(\"failed to load \" + url);\n", " }\n", "\n", " var skip = [];\n", " if (window.requirejs) {\n", " window.requirejs.config({'packages': {}, 'paths': {}, 'shim': {}});\n", " root._bokeh_is_loading = css_urls.length + 0;\n", " } else {\n", " root._bokeh_is_loading = css_urls.length + js_urls.length + js_modules.length + Object.keys(js_exports).length;\n", " }\n", "\n", " var existing_stylesheets = []\n", " var links = document.getElementsByTagName('link')\n", " for (var i = 0; i < links.length; i++) {\n", " var link = links[i]\n", " if (link.href != null) {\n", "\texisting_stylesheets.push(link.href)\n", " }\n", " }\n", " for (var i = 0; i < css_urls.length; i++) {\n", " var url = css_urls[i];\n", " if (existing_stylesheets.indexOf(url) !== -1) {\n", "\ton_load()\n", "\tcontinue;\n", " }\n", " const element = document.createElement(\"link\");\n", " element.onload = on_load;\n", " element.onerror = on_error;\n", " element.rel = \"stylesheet\";\n", " element.type = \"text/css\";\n", " element.href = url;\n", " console.debug(\"Bokeh: injecting link tag for BokehJS stylesheet: \", url);\n", " document.body.appendChild(element);\n", " } var existing_scripts = []\n", " var scripts = document.getElementsByTagName('script')\n", " for (var i = 0; i < scripts.length; i++) {\n", " var script = scripts[i]\n", " if (script.src != null) {\n", "\texisting_scripts.push(script.src)\n", " }\n", " }\n", " for (var i = 0; i < js_urls.length; i++) {\n", " var url = js_urls[i];\n", " if (skip.indexOf(url) !== -1 || existing_scripts.indexOf(url) !== -1) {\n", "\tif (!window.requirejs) {\n", "\t on_load();\n", "\t}\n", "\tcontinue;\n", " }\n", " var element = document.createElement('script');\n", " element.onload = 
on_load;\n", " element.onerror = on_error;\n", " element.async = false;\n", " element.src = url;\n", " console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n", " document.head.appendChild(element);\n", " }\n", " for (var i = 0; i < js_modules.length; i++) {\n", " var url = js_modules[i];\n", " if (skip.indexOf(url) !== -1 || existing_scripts.indexOf(url) !== -1) {\n", "\tif (!window.requirejs) {\n", "\t on_load();\n", "\t}\n", "\tcontinue;\n", " }\n", " var element = document.createElement('script');\n", " element.onload = on_load;\n", " element.onerror = on_error;\n", " element.async = false;\n", " element.src = url;\n", " element.type = \"module\";\n", " console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n", " document.head.appendChild(element);\n", " }\n", " for (const name in js_exports) {\n", " var url = js_exports[name];\n", " if (skip.indexOf(url) >= 0 || root[name] != null) {\n", "\tif (!window.requirejs) {\n", "\t on_load();\n", "\t}\n", "\tcontinue;\n", " }\n", " var element = document.createElement('script');\n", " element.onerror = on_error;\n", " element.async = false;\n", " element.type = \"module\";\n", " console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n", " element.textContent = `\n", " import ${name} from \"${url}\"\n", " window.${name} = ${name}\n", " window._bokeh_on_load()\n", " `\n", " document.head.appendChild(element);\n", " }\n", " if (!js_urls.length && !js_modules.length) {\n", " on_load()\n", " }\n", " };\n", "\n", " function inject_raw_css(css) {\n", " const element = document.createElement(\"style\");\n", " element.appendChild(document.createTextNode(css));\n", " document.body.appendChild(element);\n", " }\n", "\n", " var js_urls = [\"https://cdn.bokeh.org/bokeh/release/bokeh-3.4.3.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-gl-3.4.3.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-widgets-3.4.3.min.js\", 
\"https://cdn.bokeh.org/bokeh/release/bokeh-tables-3.4.3.min.js\", \"https://cdn.holoviz.org/panel/1.4.5/dist/panel.min.js\"];\n", " var js_modules = [];\n", " var js_exports = {};\n", " var css_urls = [];\n", " var inline_js = [ function(Bokeh) {\n", " Bokeh.set_log_level(\"info\");\n", " },\n", "function(Bokeh) {} // ensure no trailing comma for IE\n", " ];\n", "\n", " function run_inline_js() {\n", " if ((root.Bokeh !== undefined) || (force === true)) {\n", " for (var i = 0; i < inline_js.length; i++) {\n", "\ttry {\n", " inline_js[i].call(root, root.Bokeh);\n", "\t} catch(e) {\n", "\t if (!reloading) {\n", "\t throw e;\n", "\t }\n", "\t}\n", " }\n", " // Cache old bokeh versions\n", " if (Bokeh != undefined && !reloading) {\n", "\tvar NewBokeh = root.Bokeh;\n", "\tif (Bokeh.versions === undefined) {\n", "\t Bokeh.versions = new Map();\n", "\t}\n", "\tif (NewBokeh.version !== Bokeh.version) {\n", "\t Bokeh.versions.set(NewBokeh.version, NewBokeh)\n", "\t}\n", "\troot.Bokeh = Bokeh;\n", " }} else if (Date.now() < root._bokeh_timeout) {\n", " setTimeout(run_inline_js, 100);\n", " } else if (!root._bokeh_failed_load) {\n", " console.log(\"Bokeh: BokehJS failed to load within specified timeout.\");\n", " root._bokeh_failed_load = true;\n", " }\n", " root._bokeh_is_initializing = false\n", " }\n", "\n", " function load_or_wait() {\n", " // Implement a backoff loop that tries to ensure we do not load multiple\n", " // versions of Bokeh and its dependencies at the same time.\n", " // In recent versions we use the root._bokeh_is_initializing flag\n", " // to determine whether there is an ongoing attempt to initialize\n", " // bokeh, however for backward compatibility we also try to ensure\n", " // that we do not start loading a newer (Panel>=1.0 and Bokeh>3) version\n", " // before older versions are fully initialized.\n", " if (root._bokeh_is_initializing && Date.now() > root._bokeh_timeout) {\n", " root._bokeh_is_initializing = false;\n", " 
root._bokeh_onload_callbacks = undefined;\n", " console.log(\"Bokeh: BokehJS was loaded multiple times but one version failed to initialize.\");\n", " load_or_wait();\n", " } else if (root._bokeh_is_initializing || (typeof root._bokeh_is_initializing === \"undefined\" && root._bokeh_onload_callbacks !== undefined)) {\n", " setTimeout(load_or_wait, 100);\n", " } else {\n", " root._bokeh_is_initializing = true\n", " root._bokeh_onload_callbacks = []\n", " var bokeh_loaded = Bokeh != null && (Bokeh.version === py_version || (Bokeh.versions !== undefined && Bokeh.versions.has(py_version)));\n", " if (!reloading && !bokeh_loaded) {\n", "\troot.Bokeh = undefined;\n", " }\n", " load_libs(css_urls, js_urls, js_modules, js_exports, function() {\n", "\tconsole.debug(\"Bokeh: BokehJS plotting callback run at\", now());\n", "\trun_inline_js();\n", " });\n", " }\n", " }\n", " // Give older versions of the autoload script a head-start to ensure\n", " // they initialize before we start loading newer version.\n", " setTimeout(load_or_wait, 100)\n", "}(window));" ], "application/vnd.holoviews_load.v0+json": "(function(root) {\n function now() {\n return new Date();\n }\n\n var force = true;\n var py_version = '3.4.3'.replace('rc', '-rc.').replace('.dev', '-dev.');\n var reloading = false;\n var Bokeh = root.Bokeh;\n\n if (typeof (root._bokeh_timeout) === \"undefined\" || force) {\n root._bokeh_timeout = Date.now() + 5000;\n root._bokeh_failed_load = false;\n }\n\n function run_callbacks() {\n try {\n root._bokeh_onload_callbacks.forEach(function(callback) {\n if (callback != null)\n callback();\n });\n } finally {\n delete root._bokeh_onload_callbacks;\n }\n console.debug(\"Bokeh: all callbacks have finished\");\n }\n\n function load_libs(css_urls, js_urls, js_modules, js_exports, callback) {\n if (css_urls == null) css_urls = [];\n if (js_urls == null) js_urls = [];\n if (js_modules == null) js_modules = [];\n if (js_exports == null) js_exports = {};\n\n 
root._bokeh_onload_callbacks.push(callback);\n\n if (root._bokeh_is_loading > 0) {\n console.debug(\"Bokeh: BokehJS is being loaded, scheduling callback at\", now());\n return null;\n }\n if (js_urls.length === 0 && js_modules.length === 0 && Object.keys(js_exports).length === 0) {\n run_callbacks();\n return null;\n }\n if (!reloading) {\n console.debug(\"Bokeh: BokehJS not loaded, scheduling load and callback at\", now());\n }\n\n function on_load() {\n root._bokeh_is_loading--;\n if (root._bokeh_is_loading === 0) {\n console.debug(\"Bokeh: all BokehJS libraries/stylesheets loaded\");\n run_callbacks()\n }\n }\n window._bokeh_on_load = on_load\n\n function on_error() {\n console.error(\"failed to load \" + url);\n }\n\n var skip = [];\n if (window.requirejs) {\n window.requirejs.config({'packages': {}, 'paths': {}, 'shim': {}});\n root._bokeh_is_loading = css_urls.length + 0;\n } else {\n root._bokeh_is_loading = css_urls.length + js_urls.length + js_modules.length + Object.keys(js_exports).length;\n }\n\n var existing_stylesheets = []\n var links = document.getElementsByTagName('link')\n for (var i = 0; i < links.length; i++) {\n var link = links[i]\n if (link.href != null) {\n\texisting_stylesheets.push(link.href)\n }\n }\n for (var i = 0; i < css_urls.length; i++) {\n var url = css_urls[i];\n if (existing_stylesheets.indexOf(url) !== -1) {\n\ton_load()\n\tcontinue;\n }\n const element = document.createElement(\"link\");\n element.onload = on_load;\n element.onerror = on_error;\n element.rel = \"stylesheet\";\n element.type = \"text/css\";\n element.href = url;\n console.debug(\"Bokeh: injecting link tag for BokehJS stylesheet: \", url);\n document.body.appendChild(element);\n } var existing_scripts = []\n var scripts = document.getElementsByTagName('script')\n for (var i = 0; i < scripts.length; i++) {\n var script = scripts[i]\n if (script.src != null) {\n\texisting_scripts.push(script.src)\n }\n }\n for (var i = 0; i < js_urls.length; i++) {\n var url = 
js_urls[i];\n if (skip.indexOf(url) !== -1 || existing_scripts.indexOf(url) !== -1) {\n\tif (!window.requirejs) {\n\t on_load();\n\t}\n\tcontinue;\n }\n var element = document.createElement('script');\n element.onload = on_load;\n element.onerror = on_error;\n element.async = false;\n element.src = url;\n console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n document.head.appendChild(element);\n }\n for (var i = 0; i < js_modules.length; i++) {\n var url = js_modules[i];\n if (skip.indexOf(url) !== -1 || existing_scripts.indexOf(url) !== -1) {\n\tif (!window.requirejs) {\n\t on_load();\n\t}\n\tcontinue;\n }\n var element = document.createElement('script');\n element.onload = on_load;\n element.onerror = on_error;\n element.async = false;\n element.src = url;\n element.type = \"module\";\n console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n document.head.appendChild(element);\n }\n for (const name in js_exports) {\n var url = js_exports[name];\n if (skip.indexOf(url) >= 0 || root[name] != null) {\n\tif (!window.requirejs) {\n\t on_load();\n\t}\n\tcontinue;\n }\n var element = document.createElement('script');\n element.onerror = on_error;\n element.async = false;\n element.type = \"module\";\n console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n element.textContent = `\n import ${name} from \"${url}\"\n window.${name} = ${name}\n window._bokeh_on_load()\n `\n document.head.appendChild(element);\n }\n if (!js_urls.length && !js_modules.length) {\n on_load()\n }\n };\n\n function inject_raw_css(css) {\n const element = document.createElement(\"style\");\n element.appendChild(document.createTextNode(css));\n document.body.appendChild(element);\n }\n\n var js_urls = [\"https://cdn.bokeh.org/bokeh/release/bokeh-3.4.3.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-gl-3.4.3.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-widgets-3.4.3.min.js\", 
\"https://cdn.bokeh.org/bokeh/release/bokeh-tables-3.4.3.min.js\", \"https://cdn.holoviz.org/panel/1.4.5/dist/panel.min.js\"];\n var js_modules = [];\n var js_exports = {};\n var css_urls = [];\n var inline_js = [ function(Bokeh) {\n Bokeh.set_log_level(\"info\");\n },\nfunction(Bokeh) {} // ensure no trailing comma for IE\n ];\n\n function run_inline_js() {\n if ((root.Bokeh !== undefined) || (force === true)) {\n for (var i = 0; i < inline_js.length; i++) {\n\ttry {\n inline_js[i].call(root, root.Bokeh);\n\t} catch(e) {\n\t if (!reloading) {\n\t throw e;\n\t }\n\t}\n }\n // Cache old bokeh versions\n if (Bokeh != undefined && !reloading) {\n\tvar NewBokeh = root.Bokeh;\n\tif (Bokeh.versions === undefined) {\n\t Bokeh.versions = new Map();\n\t}\n\tif (NewBokeh.version !== Bokeh.version) {\n\t Bokeh.versions.set(NewBokeh.version, NewBokeh)\n\t}\n\troot.Bokeh = Bokeh;\n }} else if (Date.now() < root._bokeh_timeout) {\n setTimeout(run_inline_js, 100);\n } else if (!root._bokeh_failed_load) {\n console.log(\"Bokeh: BokehJS failed to load within specified timeout.\");\n root._bokeh_failed_load = true;\n }\n root._bokeh_is_initializing = false\n }\n\n function load_or_wait() {\n // Implement a backoff loop that tries to ensure we do not load multiple\n // versions of Bokeh and its dependencies at the same time.\n // In recent versions we use the root._bokeh_is_initializing flag\n // to determine whether there is an ongoing attempt to initialize\n // bokeh, however for backward compatibility we also try to ensure\n // that we do not start loading a newer (Panel>=1.0 and Bokeh>3) version\n // before older versions are fully initialized.\n if (root._bokeh_is_initializing && Date.now() > root._bokeh_timeout) {\n root._bokeh_is_initializing = false;\n root._bokeh_onload_callbacks = undefined;\n console.log(\"Bokeh: BokehJS was loaded multiple times but one version failed to initialize.\");\n load_or_wait();\n } else if (root._bokeh_is_initializing || (typeof 
root._bokeh_is_initializing === \"undefined\" && root._bokeh_onload_callbacks !== undefined)) {\n setTimeout(load_or_wait, 100);\n } else {\n root._bokeh_is_initializing = true\n root._bokeh_onload_callbacks = []\n var bokeh_loaded = Bokeh != null && (Bokeh.version === py_version || (Bokeh.versions !== undefined && Bokeh.versions.has(py_version)));\n if (!reloading && !bokeh_loaded) {\n\troot.Bokeh = undefined;\n }\n load_libs(css_urls, js_urls, js_modules, js_exports, function() {\n\tconsole.debug(\"Bokeh: BokehJS plotting callback run at\", now());\n\trun_inline_js();\n });\n }\n }\n // Give older versions of the autoload script a head-start to ensure\n // they initialize before we start loading newer version.\n setTimeout(load_or_wait, 100)\n}(window));" }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/javascript": [ "\n", "if ((window.PyViz === undefined) || (window.PyViz instanceof HTMLElement)) {\n", " window.PyViz = {comms: {}, comm_status:{}, kernels:{}, receivers: {}, plot_index: []}\n", "}\n", "\n", "\n", " function JupyterCommManager() {\n", " }\n", "\n", " JupyterCommManager.prototype.register_target = function(plot_id, comm_id, msg_handler) {\n", " if (window.comm_manager || ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null))) {\n", " var comm_manager = window.comm_manager || Jupyter.notebook.kernel.comm_manager;\n", " comm_manager.register_target(comm_id, function(comm) {\n", " comm.on_msg(msg_handler);\n", " });\n", " } else if ((plot_id in window.PyViz.kernels) && (window.PyViz.kernels[plot_id])) {\n", " window.PyViz.kernels[plot_id].registerCommTarget(comm_id, function(comm) {\n", " comm.onMsg = msg_handler;\n", " });\n", " } else if (typeof google != 'undefined' && google.colab.kernel != null) {\n", " google.colab.kernel.comms.registerTarget(comm_id, (comm) => {\n", " var messages = comm.messages[Symbol.asyncIterator]();\n", " function processIteratorResult(result) {\n", " var message = 
result.value;\n", " console.log(message)\n", " var content = {data: message.data, comm_id};\n", " var buffers = []\n", " for (var buffer of message.buffers || []) {\n", " buffers.push(new DataView(buffer))\n", " }\n", " var metadata = message.metadata || {};\n", " var msg = {content, buffers, metadata}\n", " msg_handler(msg);\n", " return messages.next().then(processIteratorResult);\n", " }\n", " return messages.next().then(processIteratorResult);\n", " })\n", " }\n", " }\n", "\n", " JupyterCommManager.prototype.get_client_comm = function(plot_id, comm_id, msg_handler) {\n", " if (comm_id in window.PyViz.comms) {\n", " return window.PyViz.comms[comm_id];\n", " } else if (window.comm_manager || ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null))) {\n", " var comm_manager = window.comm_manager || Jupyter.notebook.kernel.comm_manager;\n", " var comm = comm_manager.new_comm(comm_id, {}, {}, {}, comm_id);\n", " if (msg_handler) {\n", " comm.on_msg(msg_handler);\n", " }\n", " } else if ((plot_id in window.PyViz.kernels) && (window.PyViz.kernels[plot_id])) {\n", " var comm = window.PyViz.kernels[plot_id].connectToComm(comm_id);\n", " comm.open();\n", " if (msg_handler) {\n", " comm.onMsg = msg_handler;\n", " }\n", " } else if (typeof google != 'undefined' && google.colab.kernel != null) {\n", " var comm_promise = google.colab.kernel.comms.open(comm_id)\n", " comm_promise.then((comm) => {\n", " window.PyViz.comms[comm_id] = comm;\n", " if (msg_handler) {\n", " var messages = comm.messages[Symbol.asyncIterator]();\n", " function processIteratorResult(result) {\n", " var message = result.value;\n", " var content = {data: message.data};\n", " var metadata = message.metadata || {comm_id};\n", " var msg = {content, metadata}\n", " msg_handler(msg);\n", " return messages.next().then(processIteratorResult);\n", " }\n", " return messages.next().then(processIteratorResult);\n", " }\n", " }) \n", " var sendClosure = (data, metadata, buffers, disposeOnDone) => {\n", 
" return comm_promise.then((comm) => {\n", " comm.send(data, metadata, buffers, disposeOnDone);\n", " });\n", " };\n", " var comm = {\n", " send: sendClosure\n", " };\n", " }\n", " window.PyViz.comms[comm_id] = comm;\n", " return comm;\n", " }\n", " window.PyViz.comm_manager = new JupyterCommManager();\n", " \n", "\n", "\n", "var JS_MIME_TYPE = 'application/javascript';\n", "var HTML_MIME_TYPE = 'text/html';\n", "var EXEC_MIME_TYPE = 'application/vnd.holoviews_exec.v0+json';\n", "var CLASS_NAME = 'output';\n", "\n", "/**\n", " * Render data to the DOM node\n", " */\n", "function render(props, node) {\n", " var div = document.createElement(\"div\");\n", " var script = document.createElement(\"script\");\n", " node.appendChild(div);\n", " node.appendChild(script);\n", "}\n", "\n", "/**\n", " * Handle when a new output is added\n", " */\n", "function handle_add_output(event, handle) {\n", " var output_area = handle.output_area;\n", " var output = handle.output;\n", " if ((output.data == undefined) || (!output.data.hasOwnProperty(EXEC_MIME_TYPE))) {\n", " return\n", " }\n", " var id = output.metadata[EXEC_MIME_TYPE][\"id\"];\n", " var toinsert = output_area.element.find(\".\" + CLASS_NAME.split(' ')[0]);\n", " if (id !== undefined) {\n", " var nchildren = toinsert.length;\n", " var html_node = toinsert[nchildren-1].children[0];\n", " html_node.innerHTML = output.data[HTML_MIME_TYPE];\n", " var scripts = [];\n", " var nodelist = html_node.querySelectorAll(\"script\");\n", " for (var i in nodelist) {\n", " if (nodelist.hasOwnProperty(i)) {\n", " scripts.push(nodelist[i])\n", " }\n", " }\n", "\n", " scripts.forEach( function (oldScript) {\n", " var newScript = document.createElement(\"script\");\n", " var attrs = [];\n", " var nodemap = oldScript.attributes;\n", " for (var j in nodemap) {\n", " if (nodemap.hasOwnProperty(j)) {\n", " attrs.push(nodemap[j])\n", " }\n", " }\n", " attrs.forEach(function(attr) { newScript.setAttribute(attr.name, attr.value) });\n", " 
newScript.appendChild(document.createTextNode(oldScript.innerHTML));\n", " oldScript.parentNode.replaceChild(newScript, oldScript);\n", " });\n", " if (JS_MIME_TYPE in output.data) {\n", " toinsert[nchildren-1].children[1].textContent = output.data[JS_MIME_TYPE];\n", " }\n", " output_area._hv_plot_id = id;\n", " if ((window.Bokeh !== undefined) && (id in Bokeh.index)) {\n", " window.PyViz.plot_index[id] = Bokeh.index[id];\n", " } else {\n", " window.PyViz.plot_index[id] = null;\n", " }\n", " } else if (output.metadata[EXEC_MIME_TYPE][\"server_id\"] !== undefined) {\n", " var bk_div = document.createElement(\"div\");\n", " bk_div.innerHTML = output.data[HTML_MIME_TYPE];\n", " var script_attrs = bk_div.children[0].attributes;\n", " for (var i = 0; i < script_attrs.length; i++) {\n", " toinsert[toinsert.length - 1].childNodes[1].setAttribute(script_attrs[i].name, script_attrs[i].value);\n", " }\n", " // store reference to server id on output_area\n", " output_area._bokeh_server_id = output.metadata[EXEC_MIME_TYPE][\"server_id\"];\n", " }\n", "}\n", "\n", "/**\n", " * Handle when an output is cleared or removed\n", " */\n", "function handle_clear_output(event, handle) {\n", " var id = handle.cell.output_area._hv_plot_id;\n", " var server_id = handle.cell.output_area._bokeh_server_id;\n", " if (((id === undefined) || !(id in PyViz.plot_index)) && (server_id !== undefined)) { return; }\n", " var comm = window.PyViz.comm_manager.get_client_comm(\"hv-extension-comm\", \"hv-extension-comm\", function () {});\n", " if (server_id !== null) {\n", " comm.send({event_type: 'server_delete', 'id': server_id});\n", " return;\n", " } else if (comm !== null) {\n", " comm.send({event_type: 'delete', 'id': id});\n", " }\n", " delete PyViz.plot_index[id];\n", " if ((window.Bokeh !== undefined) & (id in window.Bokeh.index)) {\n", " var doc = window.Bokeh.index[id].model.document\n", " doc.clear();\n", " const i = window.Bokeh.documents.indexOf(doc);\n", " if (i > -1) {\n", " 
window.Bokeh.documents.splice(i, 1);\n", " }\n", " }\n", "}\n", "\n", "/**\n", " * Handle kernel restart event\n", " */\n", "function handle_kernel_cleanup(event, handle) {\n", " delete PyViz.comms[\"hv-extension-comm\"];\n", " window.PyViz.plot_index = {}\n", "}\n", "\n", "/**\n", " * Handle update_display_data messages\n", " */\n", "function handle_update_output(event, handle) {\n", " handle_clear_output(event, {cell: {output_area: handle.output_area}})\n", " handle_add_output(event, handle)\n", "}\n", "\n", "function register_renderer(events, OutputArea) {\n", " function append_mime(data, metadata, element) {\n", " // create a DOM node to render to\n", " var toinsert = this.create_output_subarea(\n", " metadata,\n", " CLASS_NAME,\n", " EXEC_MIME_TYPE\n", " );\n", " this.keyboard_manager.register_events(toinsert);\n", " // Render to node\n", " var props = {data: data, metadata: metadata[EXEC_MIME_TYPE]};\n", " render(props, toinsert[0]);\n", " element.append(toinsert);\n", " return toinsert\n", " }\n", "\n", " events.on('output_added.OutputArea', handle_add_output);\n", " events.on('output_updated.OutputArea', handle_update_output);\n", " events.on('clear_output.CodeCell', handle_clear_output);\n", " events.on('delete.Cell', handle_clear_output);\n", " events.on('kernel_ready.Kernel', handle_kernel_cleanup);\n", "\n", " OutputArea.prototype.register_mime_type(EXEC_MIME_TYPE, append_mime, {\n", " safe: true,\n", " index: 0\n", " });\n", "}\n", "\n", "if (window.Jupyter !== undefined) {\n", " try {\n", " var events = require('base/js/events');\n", " var OutputArea = require('notebook/js/outputarea').OutputArea;\n", " if (OutputArea.prototype.mime_types().indexOf(EXEC_MIME_TYPE) == -1) {\n", " register_renderer(events, OutputArea);\n", " }\n", " } catch(err) {\n", " }\n", "}\n" ], "application/vnd.holoviews_load.v0+json": "\nif ((window.PyViz === undefined) || (window.PyViz instanceof HTMLElement)) {\n window.PyViz = {comms: {}, comm_status:{}, kernels:{}, 
receivers: {}, plot_index: []}\n}\n\n\n function JupyterCommManager() {\n }\n\n JupyterCommManager.prototype.register_target = function(plot_id, comm_id, msg_handler) {\n if (window.comm_manager || ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null))) {\n var comm_manager = window.comm_manager || Jupyter.notebook.kernel.comm_manager;\n comm_manager.register_target(comm_id, function(comm) {\n comm.on_msg(msg_handler);\n });\n } else if ((plot_id in window.PyViz.kernels) && (window.PyViz.kernels[plot_id])) {\n window.PyViz.kernels[plot_id].registerCommTarget(comm_id, function(comm) {\n comm.onMsg = msg_handler;\n });\n } else if (typeof google != 'undefined' && google.colab.kernel != null) {\n google.colab.kernel.comms.registerTarget(comm_id, (comm) => {\n var messages = comm.messages[Symbol.asyncIterator]();\n function processIteratorResult(result) {\n var message = result.value;\n console.log(message)\n var content = {data: message.data, comm_id};\n var buffers = []\n for (var buffer of message.buffers || []) {\n buffers.push(new DataView(buffer))\n }\n var metadata = message.metadata || {};\n var msg = {content, buffers, metadata}\n msg_handler(msg);\n return messages.next().then(processIteratorResult);\n }\n return messages.next().then(processIteratorResult);\n })\n }\n }\n\n JupyterCommManager.prototype.get_client_comm = function(plot_id, comm_id, msg_handler) {\n if (comm_id in window.PyViz.comms) {\n return window.PyViz.comms[comm_id];\n } else if (window.comm_manager || ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null))) {\n var comm_manager = window.comm_manager || Jupyter.notebook.kernel.comm_manager;\n var comm = comm_manager.new_comm(comm_id, {}, {}, {}, comm_id);\n if (msg_handler) {\n comm.on_msg(msg_handler);\n }\n } else if ((plot_id in window.PyViz.kernels) && (window.PyViz.kernels[plot_id])) {\n var comm = window.PyViz.kernels[plot_id].connectToComm(comm_id);\n comm.open();\n if (msg_handler) {\n comm.onMsg = 
msg_handler;\n }\n } else if (typeof google != 'undefined' && google.colab.kernel != null) {\n var comm_promise = google.colab.kernel.comms.open(comm_id)\n comm_promise.then((comm) => {\n window.PyViz.comms[comm_id] = comm;\n if (msg_handler) {\n var messages = comm.messages[Symbol.asyncIterator]();\n function processIteratorResult(result) {\n var message = result.value;\n var content = {data: message.data};\n var metadata = message.metadata || {comm_id};\n var msg = {content, metadata}\n msg_handler(msg);\n return messages.next().then(processIteratorResult);\n }\n return messages.next().then(processIteratorResult);\n }\n }) \n var sendClosure = (data, metadata, buffers, disposeOnDone) => {\n return comm_promise.then((comm) => {\n comm.send(data, metadata, buffers, disposeOnDone);\n });\n };\n var comm = {\n send: sendClosure\n };\n }\n window.PyViz.comms[comm_id] = comm;\n return comm;\n }\n window.PyViz.comm_manager = new JupyterCommManager();\n \n\n\nvar JS_MIME_TYPE = 'application/javascript';\nvar HTML_MIME_TYPE = 'text/html';\nvar EXEC_MIME_TYPE = 'application/vnd.holoviews_exec.v0+json';\nvar CLASS_NAME = 'output';\n\n/**\n * Render data to the DOM node\n */\nfunction render(props, node) {\n var div = document.createElement(\"div\");\n var script = document.createElement(\"script\");\n node.appendChild(div);\n node.appendChild(script);\n}\n\n/**\n * Handle when a new output is added\n */\nfunction handle_add_output(event, handle) {\n var output_area = handle.output_area;\n var output = handle.output;\n if ((output.data == undefined) || (!output.data.hasOwnProperty(EXEC_MIME_TYPE))) {\n return\n }\n var id = output.metadata[EXEC_MIME_TYPE][\"id\"];\n var toinsert = output_area.element.find(\".\" + CLASS_NAME.split(' ')[0]);\n if (id !== undefined) {\n var nchildren = toinsert.length;\n var html_node = toinsert[nchildren-1].children[0];\n html_node.innerHTML = output.data[HTML_MIME_TYPE];\n var scripts = [];\n var nodelist = 
html_node.querySelectorAll(\"script\");\n for (var i in nodelist) {\n if (nodelist.hasOwnProperty(i)) {\n scripts.push(nodelist[i])\n }\n }\n\n scripts.forEach( function (oldScript) {\n var newScript = document.createElement(\"script\");\n var attrs = [];\n var nodemap = oldScript.attributes;\n for (var j in nodemap) {\n if (nodemap.hasOwnProperty(j)) {\n attrs.push(nodemap[j])\n }\n }\n attrs.forEach(function(attr) { newScript.setAttribute(attr.name, attr.value) });\n newScript.appendChild(document.createTextNode(oldScript.innerHTML));\n oldScript.parentNode.replaceChild(newScript, oldScript);\n });\n if (JS_MIME_TYPE in output.data) {\n toinsert[nchildren-1].children[1].textContent = output.data[JS_MIME_TYPE];\n }\n output_area._hv_plot_id = id;\n if ((window.Bokeh !== undefined) && (id in Bokeh.index)) {\n window.PyViz.plot_index[id] = Bokeh.index[id];\n } else {\n window.PyViz.plot_index[id] = null;\n }\n } else if (output.metadata[EXEC_MIME_TYPE][\"server_id\"] !== undefined) {\n var bk_div = document.createElement(\"div\");\n bk_div.innerHTML = output.data[HTML_MIME_TYPE];\n var script_attrs = bk_div.children[0].attributes;\n for (var i = 0; i < script_attrs.length; i++) {\n toinsert[toinsert.length - 1].childNodes[1].setAttribute(script_attrs[i].name, script_attrs[i].value);\n }\n // store reference to server id on output_area\n output_area._bokeh_server_id = output.metadata[EXEC_MIME_TYPE][\"server_id\"];\n }\n}\n\n/**\n * Handle when an output is cleared or removed\n */\nfunction handle_clear_output(event, handle) {\n var id = handle.cell.output_area._hv_plot_id;\n var server_id = handle.cell.output_area._bokeh_server_id;\n if (((id === undefined) || !(id in PyViz.plot_index)) && (server_id !== undefined)) { return; }\n var comm = window.PyViz.comm_manager.get_client_comm(\"hv-extension-comm\", \"hv-extension-comm\", function () {});\n if (server_id !== null) {\n comm.send({event_type: 'server_delete', 'id': server_id});\n return;\n } else if (comm !== 
null) {\n comm.send({event_type: 'delete', 'id': id});\n }\n delete PyViz.plot_index[id];\n if ((window.Bokeh !== undefined) & (id in window.Bokeh.index)) {\n var doc = window.Bokeh.index[id].model.document\n doc.clear();\n const i = window.Bokeh.documents.indexOf(doc);\n if (i > -1) {\n window.Bokeh.documents.splice(i, 1);\n }\n }\n}\n\n/**\n * Handle kernel restart event\n */\nfunction handle_kernel_cleanup(event, handle) {\n delete PyViz.comms[\"hv-extension-comm\"];\n window.PyViz.plot_index = {}\n}\n\n/**\n * Handle update_display_data messages\n */\nfunction handle_update_output(event, handle) {\n handle_clear_output(event, {cell: {output_area: handle.output_area}})\n handle_add_output(event, handle)\n}\n\nfunction register_renderer(events, OutputArea) {\n function append_mime(data, metadata, element) {\n // create a DOM node to render to\n var toinsert = this.create_output_subarea(\n metadata,\n CLASS_NAME,\n EXEC_MIME_TYPE\n );\n this.keyboard_manager.register_events(toinsert);\n // Render to node\n var props = {data: data, metadata: metadata[EXEC_MIME_TYPE]};\n render(props, toinsert[0]);\n element.append(toinsert);\n return toinsert\n }\n\n events.on('output_added.OutputArea', handle_add_output);\n events.on('output_updated.OutputArea', handle_update_output);\n events.on('clear_output.CodeCell', handle_clear_output);\n events.on('delete.Cell', handle_clear_output);\n events.on('kernel_ready.Kernel', handle_kernel_cleanup);\n\n OutputArea.prototype.register_mime_type(EXEC_MIME_TYPE, append_mime, {\n safe: true,\n index: 0\n });\n}\n\nif (window.Jupyter !== undefined) {\n try {\n var events = require('base/js/events');\n var OutputArea = require('notebook/js/outputarea').OutputArea;\n if (OutputArea.prototype.mime_types().indexOf(EXEC_MIME_TYPE) == -1) {\n register_renderer(events, OutputArea);\n }\n } catch(err) {\n }\n}\n" }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "" ] }, "metadata": {}, "output_type": "display_data" 
}, { "data": { "application/vnd.holoviews_exec.v0+json": "", "text/html": [ "
\n", "
\n", "
\n", "" ] }, "metadata": { "application/vnd.holoviews_exec.v0+json": { "id": "p1002" } }, "output_type": "display_data" } ], "source": [ "import pandas as pd\n", "import numpy as np\n", "from pprint import pprint\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", "\n", "import holoviews as hv\n", "hv.extension('bokeh', 'matplotlib', logo=False)\n", "\n", "# Avoid warnings to show up (trick for the final notebook on kaggle)\n", "import warnings\n", "warnings.filterwarnings('ignore')" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "043e78ba82baf8748dcc07876d7b2f4a8a0678ee" }, "source": [ "# Credit risk case study\n", "*DISCLAIMER: This is not 100% my own code*\n", "\n", "## Table of content\n", "\n", "* [Dataset overview](#ds)\n", "* [Exploratory analysis](#explo)\n", " * [Descritive statistics for PAID loans](#descp)\n", " * [Descritive statistics for DEFAULT loans](#descd)\n", " * [DEFAULT as a function of reason for aquiring the loans](#reason)\n", " * [DEFAULT as a function of occupation](#occupation)\n", " * [Graphical overview](#graph)\n", " * [Violin plot](#violin)\n", " * [Correlation matrix](#corr)\n", "* [Test of default classifiers](#classification)\n", "* [Model evaluation](#eval)\n", " * [Precision & recall](#per)\n", " * [F1](#f1)\n", " * [Receiver operating characteristic](#roc)\n", " * [Confusion matrix](#confusion)\n", " * [Classification probability](#prob)\n", "* [Logistic regression](#logit)\n", "* [SGD classifier](#sgd)\n", "* [Supporting vector classifier](#svc)\n", "* [Gradient boosting classifier](#gbrt)\n", "* [Forest of randomized tree](#frt)\n", " * [Randm forest classifier](#rfc)\n", " * [Extremely randomized tree](#ert)\n", "* [Model comparison and conclusion](#conclusion)\n", "\n", "## Dataset overview\n", "\n", "\n", "The dataset contains baseline and loan performance information for 5,960 recent home equity loans. 
A home equity loan is a loan where the obligor uses the equity of his or her home as the underlying collateral. The target (BAD) is a binary variable indicating whether an applicant eventually defaulted or was seriously delinquent. This adverse outcome occurred in 1,189 cases (20%). \n", "\n", "For each applicant, 11 input variables were recorded:\n", "\n", "* BAD: 1 = applicant defaulted on loan or seriously delinquent; 0 = applicant paid loan\n", "* LOAN: Amount of the loan request\n", "* MORTDUE: Amount due on existing mortgage\n", "* VALUE: Value of current property\n", "* REASON: DebtCon = debt consolidation; HomeImp = home improvement\n", "* JOB: Occupational categories\n", "* YOJ: Years at present job\n", "* DEROG: Number of major derogatory reports\n", "* DELINQ: Number of delinquent credit lines\n", "* CLAGE: Age of oldest credit line in months\n", "* NINQ: Number of recent credit inquiries\n", "* CLNO: Number of credit lines" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "_uuid": "99f7547021739168db64988549a9535a5bc06a72", "execution": { "iopub.execute_input": "2023-02-14T00:30:25.487839Z", "iopub.status.busy": "2023-02-14T00:30:25.487260Z", "iopub.status.idle": "2023-02-14T00:30:25.536426Z", "shell.execute_reply": "2023-02-14T00:30:25.535269Z", "shell.execute_reply.started": "2023-02-14T00:30:25.487777Z" } }, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
BADLOANMORTDUEVALUEREASONJOBYOJDEROGDELINQCLAGENINQCLNODEBTINC
01110025860.039025.0HomeImpOther10.50.00.094.3666671.09.0NaN
11130070053.068400.0HomeImpOther7.00.02.0121.8333330.014.0NaN
21150013500.016700.0HomeImpOther4.00.00.0149.4666671.010.0NaN
311500NaNNaNNaNNaNNaNNaNNaNNaNNaNNaNNaN
40170097800.0112000.0HomeImpOffice3.00.00.093.3333330.014.0NaN
\n", "
" ], "text/plain": [ " BAD LOAN MORTDUE VALUE REASON JOB YOJ DEROG DELINQ \\\n", "0 1 1100 25860.0 39025.0 HomeImp Other 10.5 0.0 0.0 \n", "1 1 1300 70053.0 68400.0 HomeImp Other 7.0 0.0 2.0 \n", "2 1 1500 13500.0 16700.0 HomeImp Other 4.0 0.0 0.0 \n", "3 1 1500 NaN NaN NaN NaN NaN NaN NaN \n", "4 0 1700 97800.0 112000.0 HomeImp Office 3.0 0.0 0.0 \n", "\n", " CLAGE NINQ CLNO DEBTINC \n", "0 94.366667 1.0 9.0 NaN \n", "1 121.833333 0.0 14.0 NaN \n", "2 149.466667 1.0 10.0 NaN \n", "3 NaN NaN NaN NaN \n", "4 93.333333 0.0 14.0 NaN " ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df=pd.read_csv('data/hmeq.csv')\n", "df.head()" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "689f230ab38b172609ebbf62c0810b6403b2cb14" }, "source": [ "## Exploratory analysis\n", "\n", "\n", "I summarize the main characteristics of the dataset with visual methods and summary statistics. I use the target variable (BAD) to divide the data set into sub-samples and I specifically look for variables, features and correlation which contain classification power.\n", "\n", "### Descritive statistics for PAID loans\n", "" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "7714ab305dd9e853ab266438144d91dc857d38c1", "execution": { "iopub.execute_input": "2023-02-14T00:30:26.341838Z", "iopub.status.busy": "2023-02-14T00:30:26.341154Z", "iopub.status.idle": "2023-02-14T00:30:26.433883Z", "shell.execute_reply": "2023-02-14T00:30:26.432942Z", "shell.execute_reply.started": "2023-02-14T00:30:26.341465Z" } }, "outputs": [], "source": [ "df[df['BAD']==0].drop('BAD', axis=1).describe().style.format(\"{:.2f}\")" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "bf50d833946ba558c3a5a85b9a02a7b08b60f068" }, "source": [ "### Descritive statistics for DEFAULT loans\n", "" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "7ae114f0450eb4ca523d9dfb4b548d37e07bd769", "execution": { "iopub.execute_input": 
"2023-02-14T00:30:27.840011Z", "iopub.status.busy": "2023-02-14T00:30:27.839251Z", "iopub.status.idle": "2023-02-14T00:30:27.892289Z", "shell.execute_reply": "2023-02-14T00:30:27.891359Z", "shell.execute_reply.started": "2023-02-14T00:30:27.839547Z" } }, "outputs": [], "source": [ "df[df['BAD']==1].drop('BAD', axis=1).describe().style.format(\"{:.2f}\")" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "15b6b46578e4bf2999ef1a750d086f8824809d28" }, "source": [ "1. From the descriptive statistics above I can draw the following consideration:\n", "\n", "* The amount of requested loan, the amount of due mortgage and the value of the underlying collateral are statistically consistent for both loans that been PAID and that resulted in a DEFAULT. This suggests that those variables may not provide significant discrimination power to separate the two classes.\n", "\n", "\n", "* The number of years at the present job (YOJ) seems to discriminate the two classes as DEFAULTs seem more frequent in contractors which have a shorter seniority. This tendency is supported by the correspoding average value quantiles which indicate a distribution skewed toward shorter seniority.\n", "\n", "* A similar considerations apply to variables related to the contractor credit history such as: the number of major derogatory reports (DEROG), the number of delinquent credit lines (DELINQ), the age of oldest credit line in months (CLAGE), and the number of recent credit inquiries (NINQ). In the case of DEFAULT the distribution of these variables is skewed toward values that suggest a credit hystory that is worse than the corresponding distribution for PAID loan contractors.\n", "\n", "\n", "* Finally, the number of open credit line (CLNO) seems statistically consistent in both case, suggesting that this variable has no significant discrimination power." 
] }, { "cell_type": "code", "execution_count": 3, "metadata": { "_uuid": "f8539626c720ae398e548fa3ae509317aa8b5bf1", "execution": { "iopub.execute_input": "2023-02-14T00:30:29.896064Z", "iopub.status.busy": "2023-02-14T00:30:29.895514Z", "iopub.status.idle": "2023-02-14T00:30:29.912013Z", "shell.execute_reply": "2023-02-14T00:30:29.911291Z", "shell.execute_reply.started": "2023-02-14T00:30:29.895990Z" } }, "outputs": [], "source": [ "df.loc[df.BAD == 1, 'STATUS'] = 'DEFAULT'\n", "df.loc[df.BAD == 0, 'STATUS'] = 'PAID'" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "cd6a0d12befc4ce3733c3f0c340e393bd512da9f" }, "source": [ "### DEFAULT as a function of the reason for aquiring the loans\n", "\n", "The fraction of PAID and DEFAULT loans do not seem to depend strongly on the reason for acquiring the loan. On average, 80% of the loans have been payed while about the 20% DEFAULT. The 2% discrepancy observed is not statistically significant given the amount of loans in the dataset." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "b6ea91d888b48ee9274cd26f85456665fb293ce1", "execution": { "iopub.execute_input": "2023-02-14T00:30:32.224034Z", "iopub.status.busy": "2023-02-14T00:30:32.223550Z", "iopub.status.idle": "2023-02-14T00:30:32.239315Z", "shell.execute_reply": "2023-02-14T00:30:32.238323Z", "shell.execute_reply.started": "2023-02-14T00:30:32.223986Z" } }, "outputs": [], "source": [ "g = df.groupby('REASON')\n", "g['STATUS'].value_counts(normalize=True).to_frame().style.format(\"{:.1%}\")" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "4ae118a39513b77d1748e7c0aaeefd5f693e801b" }, "source": [ "### DEFAULT as a function of the occupation\n", "\n", "The fraction of PAID and DEFAULT loans show some dependence on the occupation of the contractor. Office worker and professional executives have the highest probability to pay their loans while sales and self employed have the highest probability to default. 
The occupation shows a good discriminating power and it will most likely be an important feature of our classification model." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "14c58774c8872b1e0a932d70dd121eebbbbac460", "execution": { "iopub.execute_input": "2023-02-14T00:52:26.321779Z", "iopub.status.busy": "2023-02-14T00:52:26.321447Z", "iopub.status.idle": "2023-02-14T00:52:26.336967Z", "shell.execute_reply": "2023-02-14T00:52:26.336097Z", "shell.execute_reply.started": "2023-02-14T00:52:26.321738Z" } }, "outputs": [], "source": [ "g = df.groupby('JOB')\n", "g['STATUS'].value_counts(normalize=True).to_frame().style.format(\"{:.1%}\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "62bc0016776d966c4b7069677998f6cd634da494", "execution": { "iopub.execute_input": "2023-02-14T00:53:41.486446Z", "iopub.status.busy": "2023-02-14T00:53:41.486074Z", "iopub.status.idle": "2023-02-14T00:53:41.818567Z", "shell.execute_reply": "2023-02-14T00:53:41.817589Z", "shell.execute_reply.started": "2023-02-14T00:53:41.486393Z" } }, "outputs": [], "source": [ "%%opts Bars[width=700 height=400 tools=['hover'] xrotation=45]{+axiswise +framewise}\n", "\n", "# Categorical\n", "\n", "cols = ['REASON', 'JOB']\n", "\n", "dd={}\n", "\n", "for col in cols:\n", "\n", " counts=df.groupby(col)['STATUS'].value_counts(normalize=True).to_frame('val').reset_index()\n", " dd[col] = hv.Bars(counts, [col, 'STATUS'], 'val') \n", " \n", "var = [*dd]\n", "kdims=hv.Dimension(('var', 'Variable'), values=var) \n", "hv.HoloMap(dd, kdims=kdims)" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "145c45f430d806214a84bf90a0855bd0fb97d937" }, "source": [ "### Graphical overview\n", "\n", "A coherent graphical overview of the dataset is shown below. For each variable I show an histogram for the whole dataset, for the PAID, and DEFUALT loans, respectively. The correlations among variables are also sumamrized in 2-dimensinal scatter plots." 
] }, { "cell_type": "code", "execution_count": 4, "metadata": { "_uuid": "e6b5c5ed3ca124dfa07a73cd3137e845b6539f5d", "execution": { "iopub.execute_input": "2023-02-14T00:54:01.566453Z", "iopub.status.busy": "2023-02-14T00:54:01.565746Z", "iopub.status.idle": "2023-02-14T00:54:01.819990Z", "shell.execute_reply": "2023-02-14T00:54:01.819175Z", "shell.execute_reply.started": "2023-02-14T00:54:01.566008Z" } }, "outputs": [ { "data": {}, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.holoviews_exec.v0+json": "", "text/html": [ "
\n", "
\n", "
\n", "" ], "text/plain": [ ":HoloMap [var]\n", " :Overlay\n", " .Histogram.ALL_Loans :Histogram [x] (Frequency)\n", " .Histogram.PAID_Loans :Histogram [x] (Frequency)\n", " .Histogram.DEFAULT_Loans :Histogram [x] (Frequency)" ] }, "execution_count": 4, "metadata": { "application/vnd.holoviews_exec.v0+json": { "id": "p1004" } }, "output_type": "execute_result" } ], "source": [ "# %%opts Histogram[width=700 height=400 tools=['hover'] xrotation=0]{+axiswise +framewise}\n", "\n", "g = df.groupby('STATUS')\n", "\n", "cols = ['LOAN',\n", " 'MORTDUE', \n", " 'VALUE',\n", " 'YOJ',\n", " 'DEROG',\n", " 'DELINQ',\n", " 'CLAGE',\n", " 'NINQ',\n", " 'CLNO']\n", "dd={}\n", "\n", "# Histograms\n", "for col in cols:\n", " \n", " # fix --- np.histogram cannot compute a valid range for nan values\n", "# freq, edges = np.histogram(df[col].values)\n", " values = df[col].dropna().values\n", " if len(values) == 0:\n", " print(f\"Skipping column '{col}' – all values are NaN\")\n", " continue\n", " \n", " freq, edges = np.histogram(values)\n", " \n", " \n", " dd[col] = hv.Histogram((edges, freq), label='ALL Loans').redim.label(x=' ')\n", " \n", " freq, edges = np.histogram(g.get_group('PAID')[col].values, bins=edges)\n", " dd[col] *= hv.Histogram((edges, freq), label='PAID Loans').redim.label(x=' ')\n", " \n", " freq, edges = np.histogram(g.get_group('DEFAULT')[col].values, bins=edges)\n", " dd[col] *= hv.Histogram((edges, freq), label='DEFAULT Loans' ).redim.label(x=' ') \n", " \n", "var = [*dd]\n", "kdims=hv.Dimension(('var', 'Variable'), values=var) \n", "hv.HoloMap(dd, kdims=kdims)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "bba53af90d733c7a8593cbe8e76538dce6a013e0" }, "outputs": [], "source": [ "%%opts Scatter[width=500 height=500 tools=['hover'] xrotation=0]{+axiswise +framewise}\n", "\n", "g = df.groupby('STATUS')\n", "\n", "cols = ['LOAN',\n", " 'MORTDUE',\n", " 'VALUE',\n", " 'YOJ',\n", " 'DEROG',\n", " 'DELINQ',\n", " 'CLAGE',\n", " 'NINQ',\n", 
" 'CLNO']\n", "\n", "import itertools\n", "prod = list(itertools.combinations(cols,2))\n", "\n", "dd = {}\n", "\n", "for p in prod:\n", " dd['_'.join(p)] = hv.Scatter(g.get_group('PAID')[list(p)], label='PAID Loans').options(size=5)\n", " dd['_'.join(p)] *= hv.Scatter(g.get_group('DEFAULT')[list(p)], label='DEFAULT Loans').options(size=5, marker='x')\n", " \n", "var = [*dd]\n", "kdims=hv.Dimension(('var', 'Variable'), values=var) \n", "hv.HoloMap(dd, kdims=kdims).collate()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "48f49cd93a8b662849f10842b907eaff97c61213" }, "outputs": [], "source": [ "g=sns.PairGrid(df.drop('BAD',axis=1), hue='STATUS', diag_sharey=False, palette={'PAID': 'C0', 'DEFAULT':'C1'})\n", "g.map_lower(sns.kdeplot)\n", "g.map_upper(sns.scatterplot)\n", "g.map_diag(sns.kdeplot, lw=3)\n", "g.add_legend()\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "7351e8abefcfcf52ffb2da6d4dc11de6c9147f5f" }, "source": [ "### Violin plot\n", "\n", "Violin plot shows the different shapes of the probability density function for some of the variables discussed previously that seem the most promising for the classification task. The plot shows, in different colors, the PAID and the DEFAULT loans. The horizontal dashed lines indecate the position of the mean and the quantiles of the different distributions. Since there is a dependency of the DEFAULT probability on the occupation categories, the \"violins\" are shown for each of them." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "c3f2d2b3a9d2d10120b9655b3b7b6fc6158bd20b" }, "outputs": [], "source": [ "cols=['YOJ', 'CLAGE', 'NINQ']\n", "\n", "for col in cols:\n", " \n", " plt.figure(figsize=(15,5))\n", "\n", " sns.violinplot(x='JOB', y=col, hue='STATUS',\n", " split=True, inner=\"quart\", palette={'PAID': 'C0', 'DEFAULT':'C1'},\n", " data=df)\n", " \n", " sns.despine(left=True)" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "8204474b9c4bf0681597d41b6f7a84733854ec27" }, "source": [ "### Correlation matrix\n", "\n", "Finally I show the correlation matrix among the variables discussed so far. Correlations are useful because they can indicate a predictive relationship that can be exploited in the classification task. \n", "\n", "The plot is color coded: colder colors correspond to low correlation while warmer color correspond to high correlation. The variables are also grouped according to their correlation, i.e. variables with higher correlation are close to each other.\n", "\n", "Variables related to the credit history (DELINQ, DEROG, NINQ) are the most correlated with the loan status (BAD), suggesting that these will be the most discriminating variables. These variables are also slightly correlated among them, suggesting that some of the information might be redoundant.\n", "\n", "As already discussed, the amount of the loan or the underlying collateral do not seem related to the loan status. They anyhow form another correlation cluster with other variables such as the age of oldest credit line (CLAGE) and the number of credit lines (CLNO). This is expected since those variables are clearly related." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "62dcf0c50169195a204178098de9043005fc9b4d" }, "outputs": [], "source": [ "def compute_corr(df,size=10):\n", " '''Function plots a graphical correlation matrix for each pair of columns in the dataframe.\n", "\n", " Input:\n", " df: pandas DataFrame\n", " size: vertical and horizontal size of the plot'''\n", " import scipy\n", " import scipy.cluster.hierarchy as sch\n", " \n", " corr = df.corr()\n", " \n", " # Clustering\n", " d = sch.distance.pdist(corr) # vector of ('55' choose 2) pairwise distances\n", " L = sch.linkage(d, method='complete')\n", " ind = sch.fcluster(L, 0.5*d.max(), 'distance')\n", " columns = [df.select_dtypes(include=[np.number]).columns.tolist()[i] for i in list((np.argsort(ind)))]\n", " \n", " # Reordered df upon custering results\n", " df = df.reindex(columns, axis=1)\n", " \n", " # Recompute correlation matrix w/ clustering\n", " corr = df.corr()\n", " #corr.dropna(axis=0, how='all', inplace=True)\n", " #corr.dropna(axis=1, how='all', inplace=True)\n", " #corr.fillna(0, inplace=True)\n", " \n", " #fig, ax = plt.subplots(figsize=(size, size))\n", " #img = ax.matshow(corr)\n", " #plt.xticks(range(len(corr.columns)), corr.columns, rotation=45);\n", " #plt.yticks(range(len(corr.columns)), corr.columns);\n", " #fig.colorbar(img)\n", " \n", " return corr" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "d5e9979ed5d368f2c877b2ac70c13d709e6ae9e2" }, "outputs": [], "source": [ "%%opts HeatMap [tools=['hover'] colorbar=True width=500 height=500 toolbar='above', xrotation=45, yrotation=45]\n", "\n", "corr=compute_corr(df)\n", "corr=corr.stack(level=0).to_frame('value').reset_index()\n", "hv.HeatMap(corr).options(cmap='Viridis')" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "b5e8db9cd86ea60f3b63cfe9a0c758ad611a138f" }, "source": [ "\n", "# Test of default classifiers\n", "The exploratory analysis described above provides good insights on the 
dataset and highlights the most promising variables with good discrimination power to identify loans resulting in DEFAULT. In this section I develop and investigate supervised machine learning classifiers to predict the outcome of loans. Given the large amount of algorithms available in literature, I begin from the simple methods, such as logistic regression, and gradually increase the model complexity up to randomized trees techniques. Finally I compare the performance of each model and discuss the most appropriate for this loan classification task. In this section, the following models are developed:\n", "* [Logistic regression](#logit)\n", "* [SGD classifier](#sgd)\n", "* [Supporting vector classifier](#svc)\n", "* [Gradient boosting classifier](#gbrt)\n", "* [Forest of randomized tree](#frt)\n", " * [Random forest classifier](#rfc)\n", " * [Extremely randomized tree](#ert)\n", "* [Model comparison and conclusion](#conclusion)\n", "\n", "\n", "## Model Evaluation\n", "The evaluation of classifiers performance is relatively complex and depends on many factors, some of which are model dependent. In order to identify the best model for our classification task, I adopt different evaluation metrics that are briefly summarized in the following.\n", "\n", "To avoid overtraining, the performance of our classification model is evaluated using cross-validation. The training set is randomly split in $N$ distinct subsets called folds, then the model is trained and evaluated $N$ times by using a different fold for the evaluation of a model that is trained on the other $N-1$ folds. The results of the procedure consist of $N$ evaluation scores for each metric that are then averaged. These averages are finally used to compare the different techniques considered in this study.\n", "\n", "\n", "### Precision & recall\n", "Precision-Recall is a useful performance metric to evaluate models in those cases when the classes are very imbalanced. 
In information retrieval, precision is a measure of result relevancy, while recall is a measure of how many truly relevant results are returned. Intuitively, precision is the ability of the classifier not to label as positive a sample that is negative, and recall is the ability of the classifier to find all the positive samples. \n", "\n", "A system with high recall but low precision returns many labels that tend to be predicted incorrectly when compared to the training labels. A system with high precision but low recall is just the opposite, returning very few results, but most of its predicted labels are correct when compared to the training labels. An ideal system with high precision and high recall will return many results, with many results labeled correctly.\n", "\n", "Precision ($P$) is defined as the number of true positives ($T_{p}$) over the number of true positives plus the number of false positives ($T_{p}+F_{p}$):\n", "\n", "$P = \\frac{T_{p}}{T_{p}+F_{p}}$ \n", "\n", "Recall ($R$) is defined as the number of true positives ($T_{p}$) over the number of true positives plus the number of false negatives ($T_{p}+F_{n}$):\n", "\n", "$R = \\frac{T_{p}}{T_{p}+F_{n}}$\n", "\n", "\n", "### F1 measure\n", "It is often convenient to combine precision and recall into a single metric called the $F_{1}$ score, defined as a weighted harmonic mean of the precision and recall:\n", "\n", "$F_{1} = 2\\times \\frac{P \\times R}{P+R}$\n", "\n", "Whereas the regular mean treats all values equally, the harmonic mean gives much more weight to low values. As a result, the classifier will only get a high F1 score if both recall and precision are high.\n", "The $F_{1}$ score favors classifiers that have similar precision and recall. 
This is not always what you want: in some contexts you mostly care about precision, and in other contexts you really care about recall.\n", "\n", "\n", "### Receiver operating characteristic\n", "A receiver operating characteristic (ROC), or simply ROC curve, is a graphical plot which illustrates the performance of a binary classifier system as its discrimination threshold is varied. It is created by plotting the fraction of true positives out of the positives (TPR = true positive rate) vs. the fraction of false positives out of the negatives (FPR = false positive rate), at various threshold settings. TPR is also known as sensitivity, and FPR is one minus the specificity or true negative rate.\n", "There is a tradeoff: the higher the recall (TPR), the more false positives (FPR) the classifier produces. The dotted line represents the ROC curve of a purely random classifier; a good classifier stays as far away from that line as possible (toward the top-left corner). \n", "\n", "The area under the ROC curve, which is also denoted by AUC, summarises the curve information in one number. The AUC should be interpreted as the probability that a classifier will rank a randomly chosen positive instance higher than a randomly chosen negative one. A perfect classifier will have a ROC AUC equal to 1, whereas a purely random classifier will have a ROC AUC equal to 0.5.\n", "\n", "\n", "### Confusion matrix\n", "The confusion matrix evaluates classification accuracy by computing the confusion matrix with each row corresponding to the true class. By definition, entry $i,j$ in a confusion matrix is the number of observations actually in group $i$, but predicted to be in group $j$. The confusion matrix is not used for model evaluation but it provides a good grasp on the overall model performance.\n", "\n", "\n", "### Classification probability\n", "The classification probability provides an estimation of the probability that a given instance of the data belongs to the given class. 
In a binary classification problem like the one being considered, the histogram of the classification probability for the two class provide a good visual grasp on the model performance. The more the peak of the classification probability are far from each other, the higher the separation power of the model." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "5ce7c5a234213bbcaa3ece610a34ec4ebf437b35" }, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "from pprint import pprint\n", "import matplotlib.pyplot as plt\n", "from sklearn.pipeline import Pipeline\n", "from sklearn.preprocessing import StandardScaler\n", "\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.model_selection import cross_val_score\n", "from sklearn.model_selection import cross_validate\n", "from sklearn.metrics import classification_report" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "c5efc75e35bcb100cfb18af07653ebb4e2640b04" }, "outputs": [], "source": [ "df=pd.read_csv('../input/hmeq.csv', low_memory=False) # No duplicated columns, no highly correlated columns\n", "df=pd.get_dummies(df, columns=['REASON','JOB'])\n", "df.drop('DEBTINC', axis=1, inplace=True)\n", "df.dropna(axis=0, how='any', inplace=True)\n", "y = df['BAD']\n", "X = df.drop(['BAD'], axis=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "ac954ae9704fb17b42b548fb0ade5e91ca917c3c" }, "outputs": [], "source": [ "def cross_validate_model(model, X, y, \n", " scoring=['f1', 'precision', 'recall', 'roc_auc'], \n", " cv=12, n_jobs=-1, verbose=True):\n", " \n", " scores = cross_validate(pipe, \n", " X, y, \n", " scoring=scoring,\n", " cv=cv, n_jobs=n_jobs, \n", " verbose=verbose,\n", " return_train_score=False)\n", "\n", " #sorted(scores.keys())\n", " dd={}\n", " \n", " for key, val in scores.items():\n", " if key in ['fit_time', 'score_time']:\n", " continue\n", " #print('{:>30}: {:>6.5f} +/- 
{:.5f}'.format(key, np.mean(val), np.std(val)) )\n", " name = \" \".join(key.split('_')[1:]).capitalize()\n", " \n", " dd[name] = {'value' : np.mean(val), 'error' : np.std(val)}\n", " \n", " return pd.DataFrame(dd) \n", " #print()\n", " #pprint(scores)\n", " #print()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "8bc2942ce77db1e02bb570bd7613afaba51a75bb" }, "outputs": [], "source": [ "def plot_roc(model, X_test ,y_test, n_classes=0):\n", " \n", " from sklearn.metrics import roc_curve, auc\n", " \n", " \"\"\"\n", " Target scores, can either be probability estimates \n", " of the positive class, confidence values, or \n", " non-thresholded measure of decisions (as returned \n", " by “decision_function” on some classifiers).\n", " \"\"\"\n", " try:\n", " y_score = model.decision_function(X_test)\n", " except Exception as e:\n", " y_score = model.predict_proba(X_test)[:,1]\n", " \n", " \n", " fpr, tpr, _ = roc_curve(y_test.ravel(), y_score.ravel())\n", " roc_auc = auc(fpr, tpr)\n", "\n", " # Compute micro-average ROC curve and ROC area\n", " #fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n", " #roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n", " \n", " #plt.figure()\n", " lw = 2\n", " plt.plot(fpr, tpr, color='darkorange',\n", " lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\n", "\n", " plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", " plt.xlim([0.0, 1.0])\n", " plt.ylim([0.0, 1.05])\n", " plt.xlabel('False Positive Rate')\n", " plt.ylabel('True Positive Rate')\n", " plt.title('Receiver operating characteristic example')\n", " plt.legend(loc=\"lower right\")\n", " #plt.show()\n", " \n", "# shuffle and split training and test sets\n", "#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n", "# random_state=0)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "c321fe50f6eb4498dd30597623f996803dc741aa" }, "outputs": [], "source": 
[ "def plot_confusion_matrix(model, X_test ,y_test,\n", " classes=[0,1],\n", " normalize=False,\n", " title='Confusion matrix',\n", " cmap=plt.cm.Blues):\n", " \n", " import itertools\n", " from sklearn.metrics import confusion_matrix\n", " \n", " y_pred = model.predict(X_test)\n", " \n", " # Compute confusion matrix\n", " cm = confusion_matrix(y_test, y_pred)\n", " np.set_printoptions(precision=2)\n", " \n", " \"\"\"\n", " This function prints and plots the confusion matrix.\n", " Normalization can be applied by setting `normalize=True`.\n", " \"\"\"\n", " if normalize:\n", " cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n", " # print(\"Normalized confusion matrix\")\n", " #else:\n", " # print('Confusion matrix, without normalization')\n", "\n", " #print(cm)\n", "\n", " plt.imshow(cm, interpolation='nearest', cmap=cmap)\n", " plt.title(title)\n", " plt.colorbar()\n", " tick_marks = np.arange(len(classes))\n", " plt.xticks(tick_marks, classes, rotation=45)\n", " plt.yticks(tick_marks, classes)\n", "\n", " fmt = '.2f' if normalize else 'd'\n", " thresh = cm.max() / 2.\n", " for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n", " plt.text(j, i, format(cm[i, j], fmt),\n", " horizontalalignment=\"center\",\n", " color=\"white\" if cm[i, j] > thresh else \"black\")\n", "\n", " plt.tight_layout()\n", " plt.ylabel('True label')\n", " plt.xlabel('Predicted label')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "8395e8a2f03ef68d865d6a58dfa8924c143448b6" }, "outputs": [], "source": [ "def feature_importance(coef, names, verbose=False, plot=True):\n", " \n", " #importances = model.feature_importances_\n", "\n", " \n", " \n", " #std = np.std([tree.feature_importances_ for tree in model.estimators_],\n", " # axis=0)\n", " indices = np.argsort(coef)[::-1]\n", " \n", " if verbose:\n", " \n", " # Print the feature ranking\n", " print(\"Feature ranking:\")\n", " \n", " for f in range(len(names)):\n", " print(\"{:>2d}. 
{:>15}: {:.5f}\".format(f + 1, names[indices[f]], coef[indices[f]]))\n", " \n", " if plot:\n", " \n", " # Plot the feature importances of the forest\n", " #plt.figure(figsize=(5,10))\n", " plt.title(\"Feature importances\")\n", " plt.barh(range(len(names)), coef[indices][::-1], align=\"center\")\n", " #plt.barh(range(X.shape[1]), importances[indices][::-1],\n", " # xerr=std[indices][::-1], align=\"center\")\n", " plt.yticks(range(len(names)), names[indices][::-1])\n", " #plt.xlim([-0.001, 1.1])\n", " #plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "4b82915abe9c07f603d19a0a4bdd2cd4279cbdd8" }, "outputs": [], "source": [ "def plot_proba(model, X, y, bins=40, show_class = 1):\n", " \n", " from sklearn.calibration import CalibratedClassifierCV\n", " \n", " model = CalibratedClassifierCV(model)#, cv='prefit')\n", " \n", " model.fit(X, y)\n", " \n", " proba=model.predict_proba(X)\n", " \n", " if show_class == 0:\n", " sns.kdeplot(proba[y==0,0], shade=True, color=\"r\", label='True class')\n", " sns.kdeplot(proba[y==0,1], shade=True, color=\"b\", label='Wrong class')\n", " plt.title('Classification probability: Class 0')\n", " elif show_class == 1:\n", " sns.kdeplot(proba[y==1,1], shade=True, color=\"r\", label='True class')\n", " sns.kdeplot(proba[y==1,0], shade=True, color=\"b\", label='Wrong class')\n", " plt.title('Classification probability: Class 1')\n", " plt.legend()" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "e940ba20f513b744d2fa5132d7c14206b7901680" }, "source": [ "## Logistic regression\n", "\n", "\n", "Logistic regression is the simplest linear model for classification. Logistic regression is also known in the literature as logit regression, maximum-entropy classification (MaxEnt) or the log-linear classifier. In this model, the probabilities describing the possible outcomes of a single trial are modeled using a logistic function. 
The optimization problem is solved by minimizing a cost function using a highly optimized numerical solver." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "d526e2619034ae819155db168a52baa4db11545e" }, "outputs": [], "source": [ "from sklearn.linear_model import LogisticRegression\n", "\n", "steps = [('scaler', StandardScaler(copy=True, with_mean=True, with_std=True)),\n", " ('model', LogisticRegression(random_state=0))]\n", "\n", "pipe = Pipeline(steps)\n", "\n", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=0)\n", "pipe.fit(X_train, y_train)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "1238203218684c84f0435627e32531a58af94807" }, "outputs": [], "source": [ "plt.figure(figsize=(15,10))\n", "\n", "plt.subplot(221)\n", "plot_roc(pipe, X_test ,y_test)\n", "\n", "plt.subplot(222)\n", "plot_confusion_matrix(pipe, X_test ,y_test, normalize=True)\n", "\n", "plt.subplot(223)\n", "plot_proba(pipe, X_test, y_test)\n", "\n", "plt.subplot(224)\n", "feature_importance(pipe.named_steps['model'].coef_[0], X.columns)\n", "\n", "plt.tight_layout()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "9b7e4bdcf812f7851e5022ad91c9645cbad26ba3" }, "outputs": [], "source": [ "logit_xval_res = cross_validate_model(pipe, X, y, verbose=False)\n", "logit_xval_res.T[['value','error']].style.format(\"{:.2f}\")" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "2a57986faf29fb12dfe52cf38e48588a85f1329e" }, "source": [ "\n", "### Forests of randomized trees\n", "Decision Trees (DTs) are a non-parametric supervised learning method used for classification and regression. 
The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.\n", "\n", "The forest of randomized tree technique includes two averaging algorithms based on randomized decision trees: the RandomForest algorithm and the Extra-Trees method. Both algorithms are perturb-and-combine techniques specifically designed for trees. This means a diverse set of classifiers is created by introducing randomness in the classifier construction. The prediction of the ensemble is given as the averaged prediction of the individual classifiers.\n", "\n", "\n", "#### Random Forest Classifier\n", "In random forests, each tree in the ensemble is built from a sample drawn with replacement (i.e., a bootstrap sample) from the training set. In addition, when splitting a node during the construction of the tree, the split that is chosen is no longer the best split among all features. Instead, the split that is picked is the best split among a random subset of the features. As a result of this randomness, the bias of the forest usually slightly increases (with respect to the bias of a single non-random tree) but, due to averaging, its variance also decreases, usually more than compensating for the increase in bias, hence yielding an overall better model." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "6f98e2f06aea23bbcd54a7d110ada03a5b4f1d24", "scrolled": true }, "outputs": [], "source": [ "from sklearn.ensemble import RandomForestClassifier\n", "\n", "steps = [('scaler', StandardScaler(copy=True, with_mean=True, with_std=True)),\n", " ('model', RandomForestClassifier(n_estimators=250, n_jobs=-1, random_state=0))]\n", "\n", "pipe = Pipeline(steps)\n", "\n", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=0)\n", "pipe.fit(X_train, y_train)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "bdb13c15335b04789b591de91dd606a57ac755a9" }, "outputs": [], "source": [ "plt.figure(figsize=(15,10))\n", "\n", "plt.subplot(221)\n", "plot_roc(pipe, X_test ,y_test)\n", "\n", "plt.subplot(222)\n", "plot_confusion_matrix(pipe, X_test ,y_test, normalize=True)\n", "\n", "plt.subplot(223)\n", "plot_proba(pipe, X_test, y_test)\n", "\n", "plt.subplot(224)\n", "feature_importance(pipe.named_steps['model'].feature_importances_, X.columns)\n", "\n", "plt.tight_layout()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "4bb62c9ba333c45f9d6e58a28c4e6a27c0d0e8ff" }, "outputs": [], "source": [ "rfc_xval_res = cross_validate_model(pipe, X, y, verbose=False)\n", "rfc_xval_res.T[['value','error']].style.format(\"{:.2f}\")" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "32fac0f2dc0970d4c7b60cb02229b97df5897bc1" }, "source": [ "\n", "#### Extremely Randomized Trees\n", "In extremely randomized trees, randomness goes one step further in the way splits are computed. As in random forests, a random subset of candidate features is used, but instead of looking for the most discriminative thresholds, thresholds are drawn at random for each candidate feature and the best of these randomly-generated thresholds is picked as the splitting rule. 
This usually makes it possible to reduce the variance of the model a bit more, at the expense of a slightly greater increase in bias." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "687c336968c42abd165a735641f5cfbd9de03be9" }, "outputs": [], "source": [ "from sklearn.ensemble import ExtraTreesClassifier\n", "\n", "steps = [('scaler', StandardScaler(copy=True, with_mean=True, with_std=True)),\n", " ('model', ExtraTreesClassifier(n_estimators=250, n_jobs=-1, random_state=0, class_weight='balanced'))]\n", "\n", "pipe = Pipeline(steps)\n", "\n", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=0)\n", "pipe.fit(X_train, y_train)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "97a8c723c30fdcdf7d040989c5094d7c762ba85b" }, "outputs": [], "source": [ "plt.figure(figsize=(15,10))\n", "\n", "plt.subplot(221)\n", "plot_roc(pipe, X_test ,y_test)\n", "\n", "plt.subplot(222)\n", "plot_confusion_matrix(pipe, X_test ,y_test, normalize=True)\n", "\n", "plt.subplot(223)\n", "plot_proba(pipe, X_test, y_test)\n", "\n", "plt.subplot(224)\n", "feature_importance(pipe.named_steps['model'].feature_importances_, X.columns)\n", "\n", "plt.tight_layout()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "eb13d68e18139ed3ca7fe282547d99838f7f2d8b" }, "outputs": [], "source": [ "ert_xval_res = cross_validate_model(pipe, X, y, verbose=False)\n", "ert_xval_res.T[['value','error']].style.format(\"{:.2f}\")" ] }, { "cell_type": "markdown", "metadata": { "_uuid": "59ebb7aa871678f1f7cda4bd66c081e56012fd29" }, "source": [ "\n", "## Model comparison and conclusions\n", "The table below summarizes the performance of the classification models that I considered in this study. Performances are ordered by decreasing value of $F_{1}$. The best performances are obtained by the **extremely randomized tree**, followed by the **random forest** and the **logistic regression**. 
\n", "\n", "The extremely randomized tree allow to identify up to 66% of loans which would cause a DEFAULT while retaining 91% of loans which would be PAID in time. The ROC AUC value is as high as 96%, indicating that the probabilty that the classifier would perform better by random choice is as low as 4%." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "_uuid": "5dd30c501a228b91bb34fd8f04aeee7ebd9918f4" }, "outputs": [], "source": [ "from collections import OrderedDict\n", "\n", "res_comp = OrderedDict([\n", " ('Logistic regression' , logit_xval_res[1:]),\n", " ('SGD classifier' , sgd_xval_res[1:] ),\n", " ('Supporting vector classifier' , svc_xval_res[1:] ),\n", " ('Random forest classifier' , rfc_xval_res[1:] ),\n", " ('Extermely random tree classifier' , ert_xval_res[1:] ),\n", " ('Gradient boost classifier' , gbc_xval_res[1:] ),\n", "])\n", "\n", "new_columns = {'level_0' : 'Model'}\n", "\n", "pd.concat(res_comp).reset_index().drop('level_1', axis=1).rename(columns=new_columns).set_index('Model').sort_values('F1', ascending=False).style.format(\"{:.2f}\")" ] } ], "metadata": { "hide_input": true, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 4 }