diff --git a/README.md b/README.md
index 3370cdf..dc5a24c 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,7 @@ Transfer functions provided are:
* inv_multiquadric
The MLPRandomLayer and RBFRandomLayer classes are thin wrappers around the RandomLayer class, with the _alpha_ mixing parameter fixed at 1.0 and 0.0 respectively (100% MLP input activation, or 100% RBF input activation).
-
+
The RandomLayer, MLPRandomLayer, and RBFRandomLayer classes can take a callable,
user-provided transfer function. See the docstrings and the example ipython
notebook for details.
@@ -96,7 +96,7 @@ An IPython notebook, illustrating several ways to use the __\*ELM\*__ and __\*Ra
Requirements
------------
-Written using Python 2.7.3, numpy 1.6.1, scipy 0.10.1, scikit-learn 0.13.1 and ipython 0.12.1
+Written using Python 3.5.2, numpy 1.12.1, scipy 0.19.0, scikit-learn 0.18.2 and jupyter 2.4.1
References
----------
diff --git a/elm_notebook.ipynb b/elm_notebook.ipynb
new file mode 100644
index 0000000..632c6d9
--- /dev/null
+++ b/elm_notebook.ipynb
@@ -0,0 +1,391 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "# Demo python notebook for sklearn elm and random_hidden_layer modules\n",
+ "#\n",
+ "# Author: David C. Lambert [dcl -at- panix -dot- com]\n",
+ "# Copyright(c) 2013\n",
+ "# License: Simple BSD"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "from time import time\n",
+ "from sklearn.cluster import k_means\n",
+ "\n",
+ "from elm import ELMClassifier, ELMRegressor, GenELMClassifier, GenELMRegressor\n",
+ "from random_layer import RandomLayer, MLPRandomLayer, RBFRandomLayer, GRBFRandomLayer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "def make_toy():\n",
+ " x = np.arange(0.25,20,0.1)\n",
+ " y = x*np.cos(x)+0.5*np.sqrt(x)*np.random.randn(x.shape[0])\n",
+ " x = x.reshape(-1,1)\n",
+ " y = y.reshape(-1,1)\n",
+ " return x, y"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def res_dist(x, y, e, n_runs=100, random_state=None):\n",
+ " x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=random_state)\n",
+ "\n",
+ " test_res = []\n",
+ " train_res = []\n",
+ " start_time = time()\n",
+ "\n",
+ " for i in range(n_runs):\n",
+ " e.fit(x_train, y_train)\n",
+ " train_res.append(e.score(x_train, y_train))\n",
+ " test_res.append(e.score(x_test, y_test))\n",
+ " if (i%(n_runs/10) == 0): print(\"%d\"%i,)\n",
+ "\n",
+ " print(\"\\nTime: %.3f secs\" % (time() - start_time))\n",
+ "\n",
+ " print(\"Test Min: %.3f Mean: %.3f Max: %.3f SD: %.3f\" % (min(test_res), np.mean(test_res), np.max(test_res), np.std(test_res)))\n",
+ " print(\"Train Min: %.3f Mean: %.3f Max: %.3f SD: %.3f\" % (min(train_res), np.mean(train_res), np.max(train_res), np.std(train_res)))\n",
+ " print()\n",
+ " return (train_res, test_res)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sklearn.cross_validation import train_test_split\n",
+ "from sklearn.preprocessing import StandardScaler\n",
+ "from sklearn.datasets import load_iris, load_digits, load_diabetes, make_regression\n",
+ "\n",
+ "stdsc = StandardScaler()\n",
+ "\n",
+ "iris = load_iris()\n",
+ "irx, iry = stdsc.fit_transform(iris.data), iris.target\n",
+ "irx_train, irx_test, iry_train, iry_test = train_test_split(irx, iry, test_size=0.2)\n",
+ "\n",
+ "digits = load_digits()\n",
+ "dgx, dgy = stdsc.fit_transform(digits.data/16.0), digits.target\n",
+ "dgx_train, dgx_test, dgy_train, dgy_test = train_test_split(dgx, dgy, test_size=0.2)\n",
+ "\n",
+ "diabetes = load_diabetes()\n",
+ "dbx, dby = stdsc.fit_transform(diabetes.data), diabetes.target\n",
+ "dbx_train, dbx_test, dby_train, dby_test = train_test_split(dbx, dby, test_size=0.2)\n",
+ "\n",
+ "mrx, mry = make_regression(n_samples=2000, n_targets=4)\n",
+ "mrx_train, mrx_test, mry_train, mry_test = train_test_split(mrx, mry, test_size=0.2)\n",
+ "\n",
+ "xtoy, ytoy = make_toy()\n",
+ "xtoy, ytoy = stdsc.fit_transform(xtoy), stdsc.fit_transform(ytoy)\n",
+ "xtoy_train, xtoy_test, ytoy_train, ytoy_test = train_test_split(xtoy, ytoy, test_size=0.2)\n",
+ "plt.plot(xtoy, ytoy)\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "# RBFRandomLayer tests\n",
+ "for af in RandomLayer.activation_func_names():\n",
+ " print(af),\n",
+ " elmc = ELMClassifier(activation_func=af)\n",
+ " tr,ts = res_dist(irx, iry, elmc, n_runs=200, random_state=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "elmc.classes_"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "for af in RandomLayer.activation_func_names():\n",
+ " print(af)\n",
+ " elmc = ELMClassifier(activation_func=af, random_state=0)\n",
+ " tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "elmc = ELMClassifier(n_hidden=500, activation_func='multiquadric')\n",
+ "tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)\n",
+ "plt.scatter(tr, ts, alpha=0.1, marker='D', c='r')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "elmr = ELMRegressor(random_state=0, activation_func='gaussian', alpha=0.0)\n",
+ "elmr.fit(xtoy_train, ytoy_train)\n",
+ "print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))\n",
+ "plt.plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sklearn import pipeline\n",
+ "from sklearn.linear_model import LinearRegression\n",
+ "elmr = pipeline.Pipeline([('rhl', RandomLayer(random_state=0, activation_func='multiquadric')),\n",
+ " ('lr', LinearRegression(fit_intercept=False))])\n",
+ "elmr.fit(xtoy_train, ytoy_train)\n",
+ "print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))\n",
+ "plt.plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rhl = RandomLayer(n_hidden=200, alpha=1.0)\n",
+ "elmr = GenELMRegressor(hidden_layer=rhl)\n",
+ "tr, ts = res_dist(mrx, mry, elmr, n_runs=200, random_state=0)\n",
+ "plt.scatter(tr, ts, alpha=0.1, marker='D', c='r')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.8)\n",
+ "elmr = GenELMRegressor(hidden_layer=rhl)\n",
+ "elmr.fit(xtoy_train, ytoy_train)\n",
+ "print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))\n",
+ "plt.plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "nh = 15\n",
+ "(ctrs, _, _) = k_means(xtoy_train, nh)\n",
+ "unit_rs = np.ones(nh)\n",
+ "\n",
+ "#rhl = RBFRandomLayer(n_hidden=nh, activation_func='inv_multiquadric')\n",
+ "#rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)\n",
+ "rhl = GRBFRandomLayer(n_hidden=nh, grbf_lambda=.0001, centers=ctrs)\n",
+ "elmr = GenELMRegressor(hidden_layer=rhl)\n",
+ "elmr.fit(xtoy_train, ytoy_train)\n",
+ "print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))\n",
+ "plt.plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, rbf_width=0.01)\n",
+ "elmc_rbf = GenELMClassifier(hidden_layer=rbf_rhl)\n",
+ "elmc_rbf.fit(dgx_train, dgy_train)\n",
+ "print(elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test))\n",
+ "\n",
+ "def powtanh_xfer(activations, power=1.0):\n",
+ " return pow(np.tanh(activations), power)\n",
+ "\n",
+ "tanh_rhl = MLPRandomLayer(n_hidden=100, activation_func=powtanh_xfer, activation_args={'power':3.0})\n",
+ "elmc_tanh = GenELMClassifier(hidden_layer=tanh_rhl)\n",
+ "elmc_tanh.fit(dgx_train, dgy_train)\n",
+ "print(elmc_tanh.score(dgx_train, dgy_train), elmc_tanh.score(dgx_test, dgy_test))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rbf_rhl = RBFRandomLayer(n_hidden=100, rbf_width=0.01)\n",
+ "tr, ts = res_dist(dgx, dgy, GenELMClassifier(hidden_layer=rbf_rhl), n_runs=100, random_state=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.hist(ts); plt.hist(tr); plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sklearn.svm import SVR\n",
+ "from sklearn.ensemble import RandomForestRegressor\n",
+ "tr, ts = res_dist(dbx, dby, RandomForestRegressor(n_estimators=15), n_runs=100, random_state=0)\n",
+ "plt.hist(ts); plt.hist(tr); plt.show()\n",
+ "\n",
+ "rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)\n",
+ "tr,ts = res_dist(dbx, dby, GenELMRegressor(rhl), n_runs=100, random_state=0)\n",
+ "plt.hist(ts); plt.hist(tr); plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "elmc = ELMClassifier(n_hidden=1000, activation_func='gaussian', alpha=0.0, random_state=0)\n",
+ "elmc.fit(dgx_train, dgy_train)\n",
+ "print(elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)\n",
+ "elmc.fit(dgx_train, dgy_train)\n",
+ "print(elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "elmr = ELMRegressor(random_state=0)\n",
+ "elmr.fit(xtoy_train, ytoy_train)\n",
+ "print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))\n",
+ "plt.plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "elmr = ELMRegressor(activation_func='inv_tribas', random_state=0)\n",
+ "elmr.fit(xtoy_train, ytoy_train)\n",
+ "print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))\n",
+ "plt.plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/elm_notebook.py b/elm_notebook.py
deleted file mode 100644
index ff05acf..0000000
--- a/elm_notebook.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# -*- coding: utf-8 -*-
-# 2
-
-#
-
-# Demo python notebook for sklearn elm and random_hidden_layer modules
-#
-# Author: David C. Lambert [dcl -at- panix -dot- com]
-# Copyright(c) 2013
-# License: Simple BSD
-
-#
-
-from time import time
-from sklearn.cluster import k_means
-
-from elm import ELMClassifier, ELMRegressor, GenELMClassifier, GenELMRegressor
-from random_layer import RandomLayer, MLPRandomLayer, RBFRandomLayer, GRBFRandomLayer
-
-#
-
-def make_toy():
- x = np.arange(0.25,20,0.1)
- y = x*np.cos(x)+0.5*sqrt(x)*np.random.randn(x.shape[0])
- x = x.reshape(-1,1)
- y = y.reshape(-1,1)
- return x, y
-
-#
-
-def res_dist(x, y, e, n_runs=100, random_state=None):
- x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=random_state)
-
- test_res = []
- train_res = []
- start_time = time()
-
- for i in xrange(n_runs):
- e.fit(x_train, y_train)
- train_res.append(e.score(x_train, y_train))
- test_res.append(e.score(x_test, y_test))
- if (i%(n_runs/10) == 0): print "%d"%i,
-
- print "\nTime: %.3f secs" % (time() - start_time)
-
- print "Test Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (min(test_res), mean(test_res), max(test_res), std(test_res))
- print "Train Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (min(train_res), mean(train_res), max(train_res), std(train_res))
- print
- return (train_res, test_res)
-
-#
-
-from sklearn.cross_validation import train_test_split
-from sklearn.preprocessing import StandardScaler
-from sklearn.datasets import load_iris, load_digits, load_diabetes, make_regression
-
-stdsc = StandardScaler()
-
-iris = load_iris()
-irx, iry = stdsc.fit_transform(iris.data), iris.target
-irx_train, irx_test, iry_train, iry_test = train_test_split(irx, iry, test_size=0.2)
-
-digits = load_digits()
-dgx, dgy = stdsc.fit_transform(digits.data/16.0), digits.target
-dgx_train, dgx_test, dgy_train, dgy_test = train_test_split(dgx, dgy, test_size=0.2)
-
-diabetes = load_diabetes()
-dbx, dby = stdsc.fit_transform(diabetes.data), diabetes.target
-dbx_train, dbx_test, dby_train, dby_test = train_test_split(dbx, dby, test_size=0.2)
-
-mrx, mry = make_regression(n_samples=2000, n_targets=4)
-mrx_train, mrx_test, mry_train, mry_test = train_test_split(mrx, mry, test_size=0.2)
-
-xtoy, ytoy = make_toy()
-xtoy, ytoy = stdsc.fit_transform(xtoy), stdsc.fit_transform(ytoy)
-xtoy_train, xtoy_test, ytoy_train, ytoy_test = train_test_split(xtoy, ytoy, test_size=0.2)
-plot(xtoy, ytoy)
-
-#
-
-# RBFRandomLayer tests
-for af in RandomLayer.activation_func_names():
- print af,
- elmc = ELMClassifier(activation_func=af)
- tr,ts = res_dist(irx, iry, elmc, n_runs=200, random_state=0)
-
-#
-
-elmc.classes_
-
-#
-
-for af in RandomLayer.activation_func_names():
- print af
- elmc = ELMClassifier(activation_func=af, random_state=0)
- tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)
-
-#
-
-elmc = ELMClassifier(n_hidden=500, activation_func='multiquadric')
-tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)
-scatter(tr, ts, alpha=0.1, marker='D', c='r')
-
-#
-
-elmr = ELMRegressor(random_state=0, activation_func='gaussian', alpha=0.0)
-elmr.fit(xtoy_train, ytoy_train)
-print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
-plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
-
-#
-
-from sklearn import pipeline
-from sklearn.linear_model import LinearRegression
-elmr = pipeline.Pipeline([('rhl', RandomLayer(random_state=0, activation_func='multiquadric')),
- ('lr', LinearRegression(fit_intercept=False))])
-elmr.fit(xtoy_train, ytoy_train)
-print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
-plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
-
-#
-
-rhl = RandomLayer(n_hidden=200, alpha=1.0)
-elmr = GenELMRegressor(hidden_layer=rhl)
-tr, ts = res_dist(mrx, mry, elmr, n_runs=200, random_state=0)
-scatter(tr, ts, alpha=0.1, marker='D', c='r')
-
-#
-
-rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.8)
-elmr = GenELMRegressor(hidden_layer=rhl)
-elmr.fit(xtoy_train, ytoy_train)
-print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
-plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
-
-#
-
-nh = 15
-(ctrs, _, _) = k_means(xtoy_train, nh)
-unit_rs = np.ones(nh)
-
-#rhl = RBFRandomLayer(n_hidden=nh, activation_func='inv_multiquadric')
-#rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
-rhl = GRBFRandomLayer(n_hidden=nh, grbf_lambda=.0001, centers=ctrs)
-elmr = GenELMRegressor(hidden_layer=rhl)
-elmr.fit(xtoy_train, ytoy_train)
-print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
-plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
-
-#
-
-rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, rbf_width=0.01)
-elmc_rbf = GenELMClassifier(hidden_layer=rbf_rhl)
-elmc_rbf.fit(dgx_train, dgy_train)
-print elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test)
-
-def powtanh_xfer(activations, power=1.0):
- return pow(np.tanh(activations), power)
-
-tanh_rhl = MLPRandomLayer(n_hidden=100, activation_func=powtanh_xfer, activation_args={'power':3.0})
-elmc_tanh = GenELMClassifier(hidden_layer=tanh_rhl)
-elmc_tanh.fit(dgx_train, dgy_train)
-print elmc_tanh.score(dgx_train, dgy_train), elmc_tanh.score(dgx_test, dgy_test)
-
-#
-
-rbf_rhl = RBFRandomLayer(n_hidden=100, rbf_width=0.01)
-tr, ts = res_dist(dgx, dgy, GenELMClassifier(hidden_layer=rbf_rhl), n_runs=100, random_state=0)
-
-#
-
-hist(ts), hist(tr)
-print
-
-#
-
-from sklearn.svm import SVR
-from sklearn.ensemble import RandomForestRegressor
-tr, ts = res_dist(dbx, dby, RandomForestRegressor(n_estimators=15), n_runs=100, random_state=0)
-hist(tr), hist(ts)
-print
-
-rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
-tr,ts = res_dist(dbx, dby, GenELMRegressor(rhl), n_runs=100, random_state=0)
-hist(tr), hist(ts)
-print
-
-#
-
-elmc = ELMClassifier(n_hidden=1000, activation_func='gaussian', alpha=0.0, random_state=0)
-elmc.fit(dgx_train, dgy_train)
-print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)
-
-#
-
-elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)
-elmc.fit(dgx_train, dgy_train)
-print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)
-
-#
-
-elmr = ELMRegressor(random_state=0)
-elmr.fit(xtoy_train, ytoy_train)
-print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
-plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
-
-#
-
-elmr = ELMRegressor(activation_func='inv_tribas', random_state=0)
-elmr.fit(xtoy_train, ytoy_train)
-print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
-plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
-
diff --git a/random_layer.py b/random_layer.py
index 713a795..52f459e 100644
--- a/random_layer.py
+++ b/random_layer.py
@@ -24,7 +24,7 @@
from scipy.spatial.distance import cdist, pdist, squareform
from sklearn.metrics import pairwise_distances
-from sklearn.utils import check_random_state, atleast2d_or_csr
+from sklearn.utils import check_random_state, check_array
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.base import BaseEstimator, TransformerMixin
@@ -108,7 +108,7 @@ def fit(self, X, y=None):
-------
self
"""
- X = atleast2d_or_csr(X)
+ X = check_array(X)
self._generate_components(X)
@@ -130,7 +130,7 @@ def transform(self, X, y=None):
-------
X_new : numpy array of shape [n_samples, n_components]
"""
- X = atleast2d_or_csr(X)
+ X = check_array(X)
if (self.components_ is None):
raise ValueError('No components initialized')